Compare commits

..

18 Commits

Author SHA1 Message Date
mnowicki
7a6098a70d Add managed-agents plugin (command + verifier agents) 2026-04-23 16:00:11 -04:00
Noah Zweben
5a71459c03 telegram: gate /start, /help, /status behind dmPolicy (#894)
The bot command handlers bypassed access control — they responded to
any DM user regardless of dmPolicy, leaking bot presence and
contradicting ACCESS.md's "Drop silently. No reply." contract for
allowlist mode.

Add dmCommandGate() that applies the same disabled/allowlist checks
as gate() without the pairing side effects, and route all three
handlers through it. Also prune expired pending codes before /status
iterates them.

Fixes #854

Co-authored-by: Claude <noreply@anthropic.com>
2026-04-23 12:02:34 -07:00
Noah Zweben
ae54b113c4 Add Apache 2.0 LICENSE to math-olympiad plugin (#868)
Co-authored-by: Claude <noreply@anthropic.com>
2026-04-23 12:02:30 -07:00
jschwar2552
2a40fd2e7c skill-creator: sync from anthropics/skills (drop ANTHROPIC_API_KEY requirement) (#1523)
Ports anthropics/skills#547 (b0cbd3d) so this repo matches the upstream
skills repo.

improve_description.py and run_loop.py now shell out to `claude -p` instead
of using the Anthropic SDK directly, so the description optimizer uses the
session's existing Claude Code auth and no longer requires a separate
ANTHROPIC_API_KEY. SKILL.md drops the stale extended-thinking reference and
adds guidance for updating an existing skill.

Several enterprise customers sync exclusively from this repo (not
anthropics/skills, whose README disclaims production use), so they have been
stuck on the old SDK-based path.
2026-04-23 12:02:26 -07:00
Bryan Thompson
95f6172405 Add zscaler plugin (#1552) 2026-04-23 12:01:19 -07:00
Bryan Thompson
7bbdb8434e Add data-agent-kit-starter-pack plugin (#1551) 2026-04-23 12:01:12 -07:00
Bryan Thompson
4bbf944de1 Add atlassian-forge-skills plugin (#1539) 2026-04-23 12:01:06 -07:00
Bryan Thompson
06830b2ccd Add apollo plugin (#1538) 2026-04-23 12:01:00 -07:00
Bryan Thompson
bd6f1d7f48 Add windsor-ai plugin (#1536) 2026-04-23 12:00:53 -07:00
Bryan Thompson
808e70ffb9 Add auth0 plugin (#1535) 2026-04-23 12:00:47 -07:00
Bryan Thompson
187a267738 Add cloud-sql-postgresql plugin (#1533)
* Add cloud-sql-postgresql plugin

* Remove SHA pin from cloud-sql-postgresql entry
2026-04-23 12:00:37 -07:00
Bryan Thompson
42e980340d Add alloydb plugin (#1532)
* Add alloydb plugin

* Remove SHA pin from alloydb entry
2026-04-23 12:00:31 -07:00
Bryan Thompson
c15eada2e9 Add qt-development-skills plugin (#1519) 2026-04-23 12:00:25 -07:00
Bryan Thompson
f9f07aa2d3 Add versori-skills plugin (#1501) 2026-04-23 12:00:18 -07:00
Bryan Thompson
81952cabc5 Merge pull request #1499 from anthropics/add-exa
Add exa plugin
2026-04-23 13:59:15 -05:00
Bryan Thompson
0852f6647a Merge pull request #1437 from anthropics/rename-azure-skills-to-azure
Rename azure-skills to azure
2026-04-23 13:58:58 -05:00
Bryan Thompson
b0724d7a16 Rename azure-skills to azure per developer request 2026-04-22 06:42:35 -05:00
Bryan Thompson
f1938a2dc2 Add exa plugin 2026-04-20 08:03:24 -05:00
11 changed files with 785 additions and 81 deletions

View File

@@ -71,6 +71,19 @@
},
"homepage": "https://github.com/AikidoSec/aikido-claude-plugin"
},
{
"name": "alloydb",
"description": "Create, connect, and interact with an AlloyDB for PostgreSQL database and data.",
"author": {
"name": "Google LLC"
},
"category": "database",
"source": {
"source": "url",
"url": "https://github.com/gemini-cli-extensions/alloydb.git"
},
"homepage": "https://cloud.google.com/alloydb"
},
{
"name": "amazon-location-service",
"description": "Guide developers through adding maps, places search, geocoding, routing, and other geospatial features with Amazon Location Service, including authentication setup, SDK integration, and best practices.",
@@ -95,6 +108,19 @@
"category": "monitoring",
"homepage": "https://github.com/amplitude/mcp-marketplace"
},
{
"name": "apollo",
"description": "Prospect, enrich leads, load outreach sequences, and query sales analytics with Apollo.io — one-click MCP server integration for Claude Code and Cowork.",
"author": {
"name": "Apollo.io"
},
"category": "productivity",
"source": {
"source": "url",
"url": "https://github.com/apolloio/apollo-mcp-plugin.git"
},
"homepage": "https://www.apollo.io/"
},
{
"name": "asana",
"description": "Asana project management integration. Create and manage tasks, search projects, update assignments, track progress, and integrate your development workflow with Asana's work management platform.",
@@ -133,6 +159,19 @@
},
"homepage": "https://github.com/atlassian/atlassian-mcp-server"
},
{
"name": "atlassian-forge-skills",
"description": "Forge-focused skill bundle and MCP tooling for Atlassian Forge: scaffold apps, review before deploy, debug production issues, and stay current on Forge APIs and the Atlassian Design System.",
"author": {
"name": "Atlassian Labs"
},
"category": "development",
"source": {
"source": "url",
"url": "https://github.com/atlassian/forge-skills.git"
},
"homepage": "https://developer.atlassian.com"
},
{
"name": "atomic-agents",
"description": "Comprehensive development workflow for building AI agents with the Atomic Agents framework. Includes specialized agents for schema design, architecture planning, code review, and tool development. Features guided workflows, progressive-disclosure skills, and best practice validation.",
@@ -147,6 +186,21 @@
"community-managed"
]
},
{
"name": "auth0",
"description": "Add authentication to any app with Auth0. This plugin detects your framework, scaffolds the right Auth0 SDK integration, and guides you through login, logout, sessions, and protected routes — using current SDK patterns.",
"author": {
"name": "Auth0"
},
"category": "security",
"source": {
"source": "git-subdir",
"url": "https://github.com/auth0/agent-skills.git",
"path": "plugins/auth0",
"ref": "main"
},
"homepage": "https://auth0.com/docs/quickstart/agent-skills"
},
{
"name": "autofix-bot",
"description": "Code review agent that detects security vulnerabilities, code quality issues, and hardcoded secrets. Combines 5,000+ static analyzers to scan your code and dependencies for CVEs.",
@@ -181,6 +235,16 @@
},
"homepage": "https://github.com/awslabs/agent-plugins"
},
{
"name": "azure",
"description": "Transform Claude into an Azure expert. This plugin integrates the Azure MCP server and specialized Azure skills to move beyond generic advice. It enables Claude to perform real-world tasks: listing resources, validating deployments, diagnosing infrastructure issues, and optimizing costs across 50+ Azure services.",
"category": "deployment",
"source": {
"source": "url",
"url": "https://github.com/microsoft/azure-skills.git"
},
"homepage": "https://github.com/microsoft/azure-skills"
},
{
"name": "azure-cosmos-db-assistant",
"source": {
@@ -192,16 +256,6 @@
"category": "database",
"homepage": "https://github.com/AzureCosmosDB/cosmosdb-claude-code-plugin"
},
{
"name": "azure-skills",
"description": "Microsoft Azure MCP integration for cloud resource management, deployments, and Azure services. Manage your Azure infrastructure, monitor applications, and deploy resources directly from Claude Code.",
"category": "deployment",
"source": {
"source": "url",
"url": "https://github.com/microsoft/azure-skills.git"
},
"homepage": "https://github.com/microsoft/azure-skills"
},
{
"name": "base44",
"description": "Build and deploy Base44 full-stack apps with CLI project management and JavaScript/TypeScript SDK development skills",
@@ -465,6 +519,19 @@
},
"homepage": "https://github.com/astronomer/agents"
},
{
"name": "data-agent-kit-starter-pack",
"description": "Specialized suite of skills for data engineers on Google Cloud — architect data pipelines, transform data with dbt, write Spark and BigQuery SQL notebooks, and orchestrate end-to-end workflows across BigQuery, Spanner, BigLake, and Dataproc.",
"author": {
"name": "Google LLC"
},
"category": "development",
"source": {
"source": "url",
"url": "https://github.com/gemini-cli-extensions/data-agent-kit-starter-pack.git"
},
"homepage": "https://cloud.google.com/bigquery"
},
{
"name": "data-engineering",
"description": "Data engineering plugin - warehouse exploration, pipeline authoring, Airflow integration",
@@ -540,6 +607,20 @@
},
"homepage": "https://elixir-lsp.github.io/elixir-ls/"
},
{
"name": "exa",
"description": "Exa AI web search, deep research, and content extraction. Provides MCP tools and research skills for comprehensive web search, people discovery, company research, academic papers, and more.",
"author": {
"name": "Exa"
},
"category": "productivity",
"source": {
"source": "url",
"url": "https://github.com/exa-labs/exa-mcp-server.git",
"sha": "bd2ccdd52ca7a35fbc2207ad266bb2a961c0e793"
},
"homepage": "https://exa.ai/docs/reference/exa-mcp"
},
{
"name": "explanatory-output-style",
"description": "Adds educational insights about implementation choices and codebase patterns (mimics the deprecated Explanatory output style)",
@@ -905,6 +986,17 @@
}
}
},
{
"name": "managed-agents",
"description": "Development kit for building on Claude Managed Agents",
"author": {
"name": "Anthropic",
"email": "support@anthropic.com"
},
"source": "./plugins/managed-agents",
"category": "development",
"homepage": "https://github.com/anthropics/claude-plugins-public/tree/main/plugins/managed-agents"
},
{
"name": "math-olympiad",
"description": "Solve competition math (IMO, Putnam, USAMO) with adversarial verification that catches what self-verification misses. Fresh-context verifiers attack proofs with specific failure patterns. Calibrated abstention over bluffing.",
@@ -1260,6 +1352,20 @@
},
"homepage": "https://github.com/qodo-ai/qodo-skills.git"
},
{
"name": "qt-development-skills",
"description": "Agentic engineering skills for Qt software development — Qt C++/QML code review, QML coding, and Qt C++/QML code documentation.",
"author": {
"name": "Qt Group"
},
"category": "development",
"source": {
"source": "url",
"url": "https://github.com/TheQtCompanyRnD/agent-skills.git",
"sha": "62a98e2339e6eefcff108cfc3fe9db8a7301856c"
},
"homepage": "https://www.qt.io/"
},
{
"name": "railway",
"description": "Deploy and manage apps, databases, and infrastructure on Railway. Covers project setup, deploys, environment configuration, networking, troubleshooting, and monitoring.",
@@ -1711,6 +1817,20 @@
},
"homepage": "https://github.com/vercel/vercel-plugin"
},
{
"name": "versori-skills",
"description": "Skills for building data integrations using the Versori platform and versori-run SDK. Claude can bootstrap projects, configure systems and connections, generate type-safe TypeScript workflows, run local validation via Deno, and deploy to production — with a research-first approach that grounds code generation in gathered API documentation.",
"author": {
"name": "Versori"
},
"category": "development",
"source": {
"source": "url",
"url": "https://github.com/versori/cli.git",
"sha": "134cf334c3065509eee39a5361fd0bcf969dc867"
},
"homepage": "https://docs.versori.com/latest/ai-tooling/overview"
},
{
"name": "voila-api",
"description": "Definitive guide for the Voila API. Covers shipment creation (Manual/Smart Shipping), real-time tracking, detailed history, manifesting, collections, webhooks, and third-party integrations (Sorted, Peoplevox, Mintsoft, Veeqo, JD).",
@@ -1721,6 +1841,20 @@
},
"homepage": "https://github.com/TSedmanDC/Voila-API-Skill"
},
{
"name": "windsor-ai",
"description": "Connect Claude Code to 325+ business data sources via Windsor.ai. Query marketing, sales, CRM, ecommerce, finance, and analytics data from Google Ads, Meta, HubSpot, Salesforce, Shopify, Stripe, and hundreds more — directly from your terminal.",
"author": {
"name": "Windsor.ai"
},
"category": "productivity",
"source": {
"source": "url",
"url": "https://github.com/windsor-ai/claude-windsor-ai-plugin.git",
"sha": "248a6994b15b410cc025b105bb4ed5558e9b1af9"
},
"homepage": "https://windsor.ai"
},
{
"name": "wix",
"description": "Build, manage, and deploy Wix sites and apps. CLI development skills for dashboard extensions, backend APIs, site widgets, and service plugins with the Wix Design System, plus MCP server for site management.",
@@ -1764,6 +1898,19 @@
"url": "https://github.com/zoom/zoom-plugin.git"
},
"homepage": "https://developers.zoom.us/"
},
{
"name": "zscaler",
"description": "Manage Zscaler cloud security platform including ZPA (private access), ZIA (internet access), ZDX (digital experience), ZCC (client connector), EASM (attack surface), and Z-Insights (analytics). Create and manage policies, troubleshoot connectivity, audit security configurations, and investigate incidents across the full Zscaler ecosystem.",
"author": {
"name": "Zscaler"
},
"category": "security",
"source": {
"source": "url",
"url": "https://github.com/zscaler/zscaler-mcp-server.git"
},
"homepage": "https://github.com/zscaler/zscaler-mcp-server"
}
]
}

View File

@@ -284,6 +284,19 @@ function gate(ctx: Context): GateResult {
return { action: 'drop' }
}
// Like gate() but for bot commands: no pairing side effects, just allow/drop.
function dmCommandGate(ctx: Context): { access: Access; senderId: string } | null {
  const from = ctx.from
  // Only private chats with an identifiable sender get a command response.
  if (ctx.chat?.type !== 'private' || !from) return null
  const senderId = String(from.id)
  const access = loadAccess()
  // Drop expired pending codes up front; persist only if something was pruned.
  if (pruneExpired(access)) saveAccess(access)
  switch (access.dmPolicy) {
    case 'disabled':
      return null
    case 'allowlist':
      // Silent drop for non-allowlisted senders, per the ACCESS.md contract.
      return access.allowFrom.includes(senderId) ? { access, senderId } : null
    default:
      return { access, senderId }
  }
}
function isMentioned(ctx: Context, extraPatterns?: string[]): boolean {
const entities = ctx.message?.entities ?? ctx.message?.caption_entities ?? []
const text = ctx.message?.text ?? ctx.message?.caption ?? ''
@@ -669,12 +682,7 @@ setInterval(() => {
// the gate's behavior for unrecognized groups.
bot.command('start', async ctx => {
if (ctx.chat?.type !== 'private') return
const access = loadAccess()
if (access.dmPolicy === 'disabled') {
await ctx.reply(`This bot isn't accepting new connections.`)
return
}
if (!dmCommandGate(ctx)) return
await ctx.reply(
`This bot bridges Telegram to a Claude Code session.\n\n` +
`To pair:\n` +
@@ -685,7 +693,7 @@ bot.command('start', async ctx => {
})
bot.command('help', async ctx => {
if (ctx.chat?.type !== 'private') return
if (!dmCommandGate(ctx)) return
await ctx.reply(
`Messages you send here route to a paired Claude Code session. ` +
`Text and photos are forwarded; replies and reactions come back.\n\n` +
@@ -695,14 +703,12 @@ bot.command('help', async ctx => {
})
bot.command('status', async ctx => {
if (ctx.chat?.type !== 'private') return
const from = ctx.from
if (!from) return
const senderId = String(from.id)
const access = loadAccess()
const gated = dmCommandGate(ctx)
if (!gated) return
const { access, senderId } = gated
if (access.allowFrom.includes(senderId)) {
const name = from.username ? `@${from.username}` : senderId
const name = ctx.from!.username ? `@${ctx.from!.username}` : senderId
await ctx.reply(`Paired as ${name}.`)
return
}

View File

@@ -0,0 +1,8 @@
{
"name": "managed-agents",
"description": "Claude Managed Agents Development Plugin",
"author": {
"name": "Anthropic",
"email": "support@anthropic.com"
}
}

View File

@@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@@ -0,0 +1,39 @@
# Claude Managed Agents Development Plugin
A plugin for building applications on [Claude Managed Agents](https://platform.claude.com/docs/en/managed-agents/overview), Anthropic's hosted agent runtime.
## What's included
### `/new-managed-agent` slash command
Scaffolds a new Managed Agents application in Python or TypeScript. Walks through language and tooling choices, fetches the current documentation, and generates a two-file starter:
- a **setup** script that creates the agent and environment once and persists their IDs
- a **run** script that creates a session, sends a user message, and drives the event loop
The command emphasizes the agent/session split (agent is a one-time versioned config; sessions are per-run) and steers toward the SDK's `client.beta.*` resources rather than raw HTTP.
### Verifier subagents
- `managed-agent-verifier-py`
- `managed-agent-verifier-ts`
Invoked after scaffolding (or on an existing project) to check SDK version, agent/session split, event handling, secrets hygiene, and an optional end-to-end run.
## Installation
```
/plugin install managed-agents
```
## Usage
```
/new-managed-agent my-support-bot
```
## Documentation
- [Managed Agents overview](https://platform.claude.com/docs/en/managed-agents/overview)
- [Quickstart](https://platform.claude.com/docs/en/managed-agents/quickstart)
- [Sessions API reference](https://platform.claude.com/docs/en/managed-agents/sessions)

View File

@@ -0,0 +1,66 @@
---
name: managed-agent-verifier-py
description: Use this agent to verify that a Python Managed Agents application is properly configured, follows the agent/session model correctly, and is ready for deployment or testing. Invoke after a Python Managed Agents app has been created or modified.
model: sonnet
---
You are a Python Managed Agents application verifier. Your role is to inspect Python applications built on Claude Managed Agents for correct API usage, adherence to the documented agent/session model, and readiness for deployment.
## Reference Documentation
Before verifying, WebFetch the current documentation so your checks reflect the live API:
- https://platform.claude.com/docs/en/managed-agents/overview
- https://platform.claude.com/docs/en/managed-agents/quickstart
- https://platform.claude.com/docs/en/managed-agents/sessions
## Verification Checklist
### 1. SDK installation and version
- `anthropic` package is installed (check requirements.txt, pyproject.toml, or `pip show anthropic`)
- Version is recent enough to expose `client.beta.agents`, `client.beta.sessions`, and `client.beta.environments`
- Python version meets the SDK's minimum requirement
### 2. Agent/session split
- Agent creation (`client.beta.agents.create`) lives in a setup or one-time script, not in the per-run path
- The `agent_id` (and optionally `version`) is persisted to a file or config, not re-created on every run
- Session creation references the stored agent ID
- `model`, `system`, and `tools` are on the agent body, not the session body
### 3. API usage
- Uses `client.beta.*` SDK resources rather than raw `httpx`/`requests` against `/v1/agents` etc.
- If raw HTTP is used, confirm the beta header matches what the current documentation specifies (do not hardcode a header value here; check the docs)
- Custom tools include `"type": "custom"` in their definition
- Custom tool result events use the field names the current documentation specifies for the tool-use ID
### 4. Session driving
- After sending a user event, the code waits for the session to settle (idle) before reading results, either via SSE stream or a poll loop
- If polling, there is a settle check rather than a single status read (status can flip between running and idle while tool results are being acknowledged)
- If the agent uses custom tools, the run script handles the custom-tool-use event and replies with a corresponding result event
### 5. Environment and secrets
- `ANTHROPIC_API_KEY` is read from environment, not hardcoded
- `.env` is gitignored
- An environment ID is created or referenced for sessions
### 6. Runtime check
- Imports resolve (`python -c "import anthropic; anthropic.Anthropic().beta.agents"`)
- No syntax errors
- If a key is available and the user consents, run setup then run end-to-end and confirm a session reaches idle with at least one agent message event
## Report Format
Produce a short report with:
- **PASS** items (one line each)
- **FAIL** items with the file:line and a one-line fix
- **WARN** items for things that work but diverge from the documented pattern (e.g. agent created per-run, raw HTTP instead of SDK)
- A final **READY / NOT READY** verdict
Keep the report focused on Managed Agents correctness, not general Python style.

View File

@@ -0,0 +1,66 @@
---
name: managed-agent-verifier-ts
description: Use this agent to verify that a TypeScript Managed Agents application is properly configured, follows the agent/session model correctly, and is ready for deployment or testing. Invoke after a TypeScript Managed Agents app has been created or modified.
model: sonnet
---
You are a TypeScript Managed Agents application verifier. Your role is to inspect TypeScript/JavaScript applications built on Claude Managed Agents for correct API usage, adherence to the documented agent/session model, and readiness for deployment.
## Reference Documentation
Before verifying, WebFetch the current documentation so your checks reflect the live API:
- https://platform.claude.com/docs/en/managed-agents/overview
- https://platform.claude.com/docs/en/managed-agents/quickstart
- https://platform.claude.com/docs/en/managed-agents/sessions
## Verification Checklist
### 1. SDK installation and version
- `@anthropic-ai/sdk` is in package.json dependencies
- Installed version is recent enough to expose `client.beta.agents`, `client.beta.sessions`, and `client.beta.environments`
- Node.js version meets the SDK's minimum requirement
- `tsconfig.json` is configured for the SDK (module resolution, target)
### 2. Agent/session split
- Agent creation (`client.beta.agents.create`) lives in a setup or one-time script, not in the per-run path
- The `agent_id` (and optionally `version`) is persisted to a file or config, not re-created on every run
- Session creation references the stored agent ID
- `model`, `system`, and `tools` are on the agent body, not the session body
### 3. API usage
- Uses `client.beta.*` SDK resources rather than raw `fetch` against `/v1/agents` etc.
- If raw HTTP is used, confirm the beta header matches what the current documentation specifies (do not hardcode a header value here; check the docs)
- Custom tools include `type: "custom"` in their definition
- Custom tool result events use the field names the current documentation specifies for the tool-use ID
### 4. Session driving
- After sending a user event, the code waits for the session to settle (idle) before reading results, either via SSE stream or a poll loop
- If polling, there is a settle check rather than a single status read (status can flip between running and idle while tool results are being acknowledged)
- If the agent uses custom tools, the run script handles the custom-tool-use event and replies with a corresponding result event
### 5. Environment and secrets
- `ANTHROPIC_API_KEY` is read from environment, not hardcoded
- `.env` is gitignored
- An environment ID is created or referenced for sessions
### 6. Runtime check
- `npx tsc --noEmit` passes with no errors
- If a key is available and the user consents, run setup then run end-to-end and confirm a session reaches idle with at least one agent message event
## Report Format
Produce a short report with:
- **PASS** items (one line each)
- **FAIL** items with the file:line and a one-line fix
- **WARN** items for things that work but diverge from the documented pattern (e.g. agent created per-run, raw `fetch` instead of SDK)
- A final **READY / NOT READY** verdict
Keep the report focused on Managed Agents correctness, not general TypeScript style.

View File

@@ -0,0 +1,169 @@
---
description: Create and set up a new Claude Managed Agents application
argument-hint: [project-name]
---
You are tasked with helping the user create a new Claude Managed Agents application. Follow these steps carefully.
## Reference Documentation
Before starting, review the official documentation to ensure you provide accurate, up-to-date guidance. Use WebFetch to read these pages:
1. **Start with the overview**: https://platform.claude.com/docs/en/managed-agents/overview
2. **Then the quickstart**: https://platform.claude.com/docs/en/managed-agents/quickstart
3. **Based on the user's language choice, read the appropriate SDK reference**:
- Python: https://platform.claude.com/docs/en/managed-agents/python
- TypeScript: https://platform.claude.com/docs/en/managed-agents/typescript
4. **Read the relevant guides** based on the user's needs:
- Sessions API reference: https://platform.claude.com/docs/en/managed-agents/sessions
- Tools: https://platform.claude.com/docs/en/managed-agents/tools
- Environments: https://platform.claude.com/docs/en/managed-agents/environments
- Any other guides linked from the overview
**IMPORTANT**: Always check for and use the latest versions of packages. Use WebSearch or WebFetch to verify current versions before installation. The Managed Agents API is in beta and shapes may change between releases; the docs are authoritative.
## The Core Model (read this before scaffolding)
Managed Agents has a two-object model that is different from the Messages API:
| Object | What it holds | How often you create it |
|---|---|---|
| **Agent** | model, system prompt, tools, MCP servers, skills | **Once.** Persisted and versioned. Store the `agent_id`. |
| **Session** | a running instance of an agent in an environment | **Every run.** References the agent by ID. |
Do not call `agents.create()` on every run. The agent is a setup artifact; the session is the runtime. If you find yourself putting `model`, `system`, or `tools` on a session body, stop: those belong on the agent.
## Gather Requirements
IMPORTANT: Ask these questions one at a time. Wait for the user's response before asking the next question.
1. **Language** (ask first): "Would you like to use Python or TypeScript?"
- Wait for response before continuing
2. **Project name** (ask second): "What would you like to name your project?"
- If $ARGUMENTS is provided, use that as the project name and skip this question
- Wait for response before continuing
3. **Agent purpose** (ask third): "What will this agent do? Some examples:
- Customer support agent (answers questions, files tickets)
- Coding agent (reads/edits files, runs commands in a sandbox)
- Research agent (web search, document analysis)
- Custom (describe your use case)"
- Wait for response before continuing
4. **Tools** (ask fourth): "Which tools does the agent need?
- Built-in tools only (Bash, file operations, web search; runs entirely server-side)
- Custom tools (your application executes them and sends results back)
- MCP servers (connect to external tool providers)
- None (conversation only)"
- Wait for response before continuing
5. **Tooling choice** (ask fifth): Confirm package manager and runtime preferences (npm/pnpm/bun for TypeScript; pip/poetry/uv for Python).
After all questions are answered, proceed to create the setup plan.
## Setup Plan
Based on the user's answers, create a plan that includes:
1. **Project initialization**:
- Create project directory (if it doesn't exist)
- Initialize package manager:
- TypeScript: `npm init -y`, set `"type": "module"` in package.json, add a "typecheck" script
- Python: create `requirements.txt` or `pyproject.toml`
- Add config files:
- TypeScript: `tsconfig.json` configured for the SDK
- Python: optionally a `pyproject.toml`
2. **Check for latest SDK versions**:
- TypeScript: https://www.npmjs.com/package/@anthropic-ai/sdk
- Python: https://pypi.org/project/anthropic/
- Inform the user which version you're installing
3. **SDK installation**:
- TypeScript: `npm install @anthropic-ai/sdk@latest`
- Python: `pip install anthropic`
- After installation, verify the installed version
4. **Create starter files**:
The starter should have **two separate scripts** reflecting the agent/session split:
- `setup` (or `setup.ts` / `setup.py`): creates the agent once via `client.beta.agents.create(...)`, creates or reuses an environment via `client.beta.environments`, and writes both IDs to a local file (e.g. `.agent.json`). Re-running it should update the existing agent in place rather than creating a duplicate.
- `run` (or `run.ts` / `run.py`): reads the IDs file, creates a session via `client.beta.sessions.create(...)`, sends a user message event, and either streams or polls events until the session is idle. If the agent uses custom tools, this script handles `agent.custom_tool_use` events and replies with `user.custom_tool_result`.
Use the SDK's `client.beta.*` resources rather than raw HTTP. The SDK sets the required beta header and handles request encoding; raw HTTP requires you to track field names and headers manually and is a common source of 400 errors.
5. **Environment setup**:
- Create `.env.example` with `ANTHROPIC_API_KEY=your_api_key_here`
- Add `.env` to `.gitignore`
- Explain how to get an API key from https://console.anthropic.com/
6. **Optional**: offer to add a README explaining the agent/session split and how to extend the agent's tools.
## Implementation
After getting user confirmation on the plan:
1. Check for latest package versions
2. Execute the setup steps
3. Create all files
4. Install dependencies
5. Verify installed versions and inform the user
6. Create a working example based on their agent purpose and tool choice
7. Add brief comments explaining the agent/session split where it matters
8. **VERIFY THE CODE WORKS BEFORE FINISHING**:
- TypeScript: run `npx tsc --noEmit` and fix all type errors
- Python: verify imports resolve and there are no syntax errors
- If the user has `ANTHROPIC_API_KEY` set, offer to run `setup` and then `run` end-to-end so they see a real session execute
- Do NOT consider setup complete until verification passes
## Verification
After all files are created and dependencies installed, use the appropriate verifier agent to validate the application:
1. **For TypeScript projects**: launch the **managed-agent-verifier-ts** agent
2. **For Python projects**: launch the **managed-agent-verifier-py** agent
3. Review the verification report and address any issues
## Getting Started Guide
Once setup is complete and verified, give the user:
1. **Next steps**:
- How to set their API key
- How to run setup once: `python setup.py` / `npm run setup`
- How to run the agent: `python run.py` / `npm run start`
2. **Useful resources**:
- Overview: https://platform.claude.com/docs/en/managed-agents/overview
- Sessions API reference: https://platform.claude.com/docs/en/managed-agents/sessions
- Tools: https://platform.claude.com/docs/en/managed-agents/tools
3. **Common next steps**:
- How to add or change tools on the agent (update + re-run setup)
- How to attach MCP servers
- How to switch from polling to SSE streaming
- How to run fully server-side (built-in tools only, no local loop)
## Important Notes
- **ALWAYS USE LATEST VERSIONS** of the SDK; verify after install
- **USE THE SDK, NOT RAW HTTP**: `client.beta.agents` / `client.beta.sessions` / `client.beta.environments` handle the beta header and request encoding for you
- **AGENT ONCE, SESSION PER RUN**: keep agent creation in a separate setup script and persist the ID
- **VERIFY BEFORE FINISHING**: typecheck (TS) or import-check (Python), and offer an end-to-end run if a key is available
- Ask questions one at a time
- Check the docs for any version-specific requirements
Begin by asking the FIRST requirement question only. Wait for the user's answer before proceeding to the next question.

View File

@@ -1,6 +1,6 @@
---
name: skill-creator
description: Create new skills, modify and improve existing skills, and measure skill performance. Use when users want to create a skill from scratch, update or optimize an existing skill, run evals to test a skill, benchmark skill performance with variance analysis, or optimize a skill's description for better triggering accuracy.
description: Create new skills, modify and improve existing skills, and measure skill performance. Use when users want to create a skill from scratch, edit, or optimize an existing skill, run evals to test a skill, benchmark skill performance with variance analysis, or optimize a skill's description for better triggering accuracy.
---
# Skill Creator
@@ -391,7 +391,7 @@ Use the model ID from your system prompt (the one powering the current session)
While it runs, periodically tail the output to give the user updates on which iteration it's on and what the scores look like.
This handles the full optimization loop automatically. It splits the eval set into 60% train and 40% held-out test, evaluates the current description (running each query 3 times to get a reliable trigger rate), then calls Claude with extended thinking to propose improvements based on what failed. It re-evaluates each new description on both train and test, iterating up to 5 times. When it's done, it opens an HTML report in the browser showing the results per iteration and returns JSON with `best_description` — selected by test score rather than train score to avoid overfitting.
This handles the full optimization loop automatically. It splits the eval set into 60% train and 40% held-out test, evaluates the current description (running each query 3 times to get a reliable trigger rate), then calls Claude to propose improvements based on what failed. It re-evaluates each new description on both train and test, iterating up to 5 times. When it's done, it opens an HTML report in the browser showing the results per iteration and returns JSON with `best_description` — selected by test score rather than train score to avoid overfitting.
### How skill triggering works
@@ -435,6 +435,11 @@ In Claude.ai, the core workflow is the same (draft → test → review → impro
**Packaging**: The `package_skill.py` script works anywhere with Python and a filesystem. On Claude.ai, you can run it and the user can download the resulting `.skill` file.
**Updating an existing skill**: The user might be asking you to update an existing skill, not create a new one. In this case:
- **Preserve the original name.** Note the skill's directory name and `name` frontmatter field -- use them unchanged. E.g., if the installed skill is `research-helper`, output `research-helper.skill` (not `research-helper-v2`).
- **Copy to a writeable location before editing.** The installed skill path may be read-only. Copy to `/tmp/skill-name/`, edit there, and package from the copy.
- **If packaging manually, stage in `/tmp/` first**, then copy to the output directory -- direct writes may fail due to permissions.
---
## Cowork-Specific Instructions
@@ -447,6 +452,7 @@ If you're in Cowork, the main things to know are:
- Feedback works differently: since there's no running server, the viewer's "Submit All Reviews" button will download `feedback.json` as a file. You can then read it from there (you may have to request access first).
- Packaging works — `package_skill.py` just needs Python and a filesystem.
- Description optimization (`run_loop.py` / `run_eval.py`) should work in Cowork just fine since it uses `claude -p` via subprocess, not a browser, but please save it until you've fully finished making the skill and the user agrees it's in good shape.
- **Updating an existing skill**: The user might be asking you to update an existing skill, not create a new one. Follow the update guidance in the claude.ai section above.
---

View File

@@ -2,22 +2,52 @@
"""Improve a skill description based on eval results.
Takes eval results (from run_eval.py) and generates an improved description
using Claude with extended thinking.
by calling `claude -p` as a subprocess (same auth pattern as run_eval.py —
uses the session's Claude Code auth, no separate ANTHROPIC_API_KEY needed).
"""
import argparse
import json
import os
import re
import subprocess
import sys
from pathlib import Path
import anthropic
from scripts.utils import parse_skill_md
def _call_claude(prompt: str, model: str | None, timeout: int = 300) -> str:
    """Invoke `claude -p` with *prompt* on stdin and return its stdout text.

    The prompt is passed via stdin rather than as an argv element because it
    embeds the full SKILL.md body and can easily exceed comfortable argv
    length.

    Args:
        prompt: Full prompt text to send to the model.
        model: Optional model identifier forwarded as `--model`; when falsy,
            the CLI's default model is used.
        timeout: Seconds to wait for the subprocess before giving up.

    Returns:
        The raw stdout produced by the `claude` process.

    Raises:
        RuntimeError: If the process exits with a non-zero status.
        subprocess.TimeoutExpired: If the call exceeds `timeout` seconds.
    """
    args = ["claude", "-p", "--output-format", "text"]
    if model:
        args += ["--model", model]

    # Drop CLAUDECODE so `claude -p` can be nested inside a Claude Code
    # session. That guard exists for interactive terminal conflicts;
    # programmatic subprocess usage is safe. Same pattern as run_eval.py.
    child_env = dict(os.environ)
    child_env.pop("CLAUDECODE", None)

    proc = subprocess.run(
        args,
        input=prompt,
        capture_output=True,
        text=True,
        env=child_env,
        timeout=timeout,
    )
    if proc.returncode != 0:
        raise RuntimeError(
            f"claude -p exited {proc.returncode}\nstderr: {proc.stderr}"
        )
    return proc.stdout
def improve_description(
client: anthropic.Anthropic,
skill_name: str,
skill_content: str,
current_description: str,
@@ -99,7 +129,7 @@ Based on the failures, write a new and improved description that is more likely
1. Avoid overfitting
2. The list might get loooong and it's injected into ALL queries and there might be a lot of skills, so we don't want to blow too much space on any given description.
Concretely, your description should not be more than about 100-200 words, even if that comes at the cost of accuracy.
Concretely, your description should not be more than about 100-200 words, even if that comes at the cost of accuracy. There is a hard limit of 1024 characters — descriptions over that will be truncated, so stay comfortably under it.
Here are some tips that we've found to work well in writing these descriptions:
- The skill should be phrased in the imperative -- "Use this skill for" rather than "this skill does"
@@ -111,70 +141,41 @@ I'd encourage you to be creative and mix up the style in different iterations si
Please respond with only the new description text in <new_description> tags, nothing else."""
response = client.messages.create(
model=model,
max_tokens=16000,
thinking={
"type": "enabled",
"budget_tokens": 10000,
},
messages=[{"role": "user", "content": prompt}],
)
text = _call_claude(prompt, model)
# Extract thinking and text from response
thinking_text = ""
text = ""
for block in response.content:
if block.type == "thinking":
thinking_text = block.thinking
elif block.type == "text":
text = block.text
# Parse out the <new_description> tags
match = re.search(r"<new_description>(.*?)</new_description>", text, re.DOTALL)
description = match.group(1).strip().strip('"') if match else text.strip().strip('"')
# Log the transcript
transcript: dict = {
"iteration": iteration,
"prompt": prompt,
"thinking": thinking_text,
"response": text,
"parsed_description": description,
"char_count": len(description),
"over_limit": len(description) > 1024,
}
# If over 1024 chars, ask the model to shorten it
# Safety net: the prompt already states the 1024-char hard limit, but if
# the model blew past it anyway, make one fresh single-turn call that
# quotes the too-long version and asks for a shorter rewrite. (The old
# SDK path did this as a true multi-turn; `claude -p` is one-shot, so we
# inline the prior output into the new prompt instead.)
if len(description) > 1024:
shorten_prompt = f"Your description is {len(description)} characters, which exceeds the hard 1024 character limit. Please rewrite it to be under 1024 characters while preserving the most important trigger words and intent coverage. Respond with only the new description in <new_description> tags."
shorten_response = client.messages.create(
model=model,
max_tokens=16000,
thinking={
"type": "enabled",
"budget_tokens": 10000,
},
messages=[
{"role": "user", "content": prompt},
{"role": "assistant", "content": text},
{"role": "user", "content": shorten_prompt},
],
shorten_prompt = (
f"{prompt}\n\n"
f"---\n\n"
f"A previous attempt produced this description, which at "
f"{len(description)} characters is over the 1024-character hard limit:\n\n"
f'"{description}"\n\n'
f"Rewrite it to be under 1024 characters while keeping the most "
f"important trigger words and intent coverage. Respond with only "
f"the new description in <new_description> tags."
)
shorten_thinking = ""
shorten_text = ""
for block in shorten_response.content:
if block.type == "thinking":
shorten_thinking = block.thinking
elif block.type == "text":
shorten_text = block.text
shorten_text = _call_claude(shorten_prompt, model)
match = re.search(r"<new_description>(.*?)</new_description>", shorten_text, re.DOTALL)
shortened = match.group(1).strip().strip('"') if match else shorten_text.strip().strip('"')
transcript["rewrite_prompt"] = shorten_prompt
transcript["rewrite_thinking"] = shorten_thinking
transcript["rewrite_response"] = shorten_text
transcript["rewrite_description"] = shortened
transcript["rewrite_char_count"] = len(shortened)
@@ -216,9 +217,7 @@ def main():
print(f"Current: {current_description}", file=sys.stderr)
print(f"Score: {eval_results['summary']['passed']}/{eval_results['summary']['total']}", file=sys.stderr)
client = anthropic.Anthropic()
new_description = improve_description(
client=client,
skill_name=name,
skill_content=content,
current_description=current_description,

View File

@@ -15,8 +15,6 @@ import time
import webbrowser
from pathlib import Path
import anthropic
from scripts.generate_report import generate_html
from scripts.improve_description import improve_description
from scripts.run_eval import find_project_root, run_eval
@@ -75,7 +73,6 @@ def run_loop(
train_set = eval_set
test_set = []
client = anthropic.Anthropic()
history = []
exit_reason = "unknown"
@@ -200,7 +197,6 @@ def run_loop(
for h in history
]
new_description = improve_description(
client=client,
skill_name=name,
skill_content=content,
current_description=current_description,