From ac62a974e341058b9d499afa444a560f22a24384 Mon Sep 17 00:00:00 2001 From: Sterling Chin Date: Mon, 2 Feb 2026 11:05:54 -0800 Subject: [PATCH] feat: Initial Postman plugin for Claude Code - /postman slash command for API lifecycle management - Collections, environments, monitors, mocks CRUD - Clara integration for agent-readiness analysis - Security auditing and code generation - Knowledge base for agent-ready API patterns Install with: /plugin install postman-skill Co-Authored-By: Claude Opus 4.5 --- .claude-plugin/plugin.json | 24 + .env.example | 25 + .gitignore | 25 + README.md | 99 ++ commands/postman.md | 281 +++++ requirements.txt | 8 + scripts/analyze_agent_readiness.py | 316 +++++ scripts/audit_security.py | 482 ++++++++ scripts/config.py | 166 +++ scripts/detect_breaking_changes.py | 481 ++++++++ scripts/generate_code.py | 421 +++++++ scripts/list_collections.py | 132 +++ scripts/list_workspaces.py | 128 ++ scripts/manage_api.py | 234 ++++ scripts/manage_collection_workflow.py | 348 ++++++ scripts/manage_collections.py | 287 +++++ scripts/manage_environments.py | 289 +++++ scripts/manage_mocks.py | 280 +++++ scripts/manage_monitors.py | 336 ++++++ scripts/manage_spec.py | 265 +++++ scripts/postman_client.py | 1542 +++++++++++++++++++++++++ scripts/publish_docs.py | 423 +++++++ scripts/run_collection.py | 306 +++++ scripts/validate_schema.py | 428 +++++++ scripts/validate_setup.py | 133 +++ skills/knowledge/agent-ready-apis.md | 190 +++ utils/exceptions.py | 432 +++++++ utils/formatters.py | 310 +++++ utils/retry_handler.py | 79 ++ 29 files changed, 8470 insertions(+) create mode 100644 .claude-plugin/plugin.json create mode 100644 .env.example create mode 100644 .gitignore create mode 100644 README.md create mode 100644 commands/postman.md create mode 100644 requirements.txt create mode 100644 scripts/analyze_agent_readiness.py create mode 100755 scripts/audit_security.py create mode 100644 scripts/config.py create mode 100755 
scripts/detect_breaking_changes.py create mode 100755 scripts/generate_code.py create mode 100755 scripts/list_collections.py create mode 100644 scripts/list_workspaces.py create mode 100755 scripts/manage_api.py create mode 100755 scripts/manage_collection_workflow.py create mode 100644 scripts/manage_collections.py create mode 100644 scripts/manage_environments.py create mode 100755 scripts/manage_mocks.py create mode 100755 scripts/manage_monitors.py create mode 100755 scripts/manage_spec.py create mode 100644 scripts/postman_client.py create mode 100755 scripts/publish_docs.py create mode 100755 scripts/run_collection.py create mode 100755 scripts/validate_schema.py create mode 100644 scripts/validate_setup.py create mode 100644 skills/knowledge/agent-ready-apis.md create mode 100644 utils/exceptions.py create mode 100644 utils/formatters.py create mode 100644 utils/retry_handler.py diff --git a/.claude-plugin/plugin.json b/.claude-plugin/plugin.json new file mode 100644 index 0000000..2222d78 --- /dev/null +++ b/.claude-plugin/plugin.json @@ -0,0 +1,24 @@ +{ + "name": "postman-skill", + "version": "1.0.0", + "description": "Postman API lifecycle management for Claude Code. 
Manage collections, environments, monitors, mocks, and analyze APIs for agent-readiness.", + "author": "Sterling Chin", + "repository": "https://github.com/sterlingchin/postman-slash-command", + "license": "MIT", + "keywords": [ + "postman", + "api", + "collections", + "testing", + "mocks", + "monitors", + "agent-ready", + "clara" + ], + "components": { + "commands": ["postman"] + }, + "dependencies": { + "python": ">=3.7" + } +} diff --git a/.env.example b/.env.example new file mode 100644 index 0000000..ddf41a6 --- /dev/null +++ b/.env.example @@ -0,0 +1,25 @@ +# Postman Agent Skill - Environment Variables + +# Required: Your Postman API key +# Get yours from: https://web.postman.co/settings/me/api-keys +POSTMAN_API_KEY=PMAK-your-key-here + +# Recommended: Workspace ID to scope operations +# Find this in your workspace settings or URL +POSTMAN_WORKSPACE_ID=your-workspace-id-here + +# Optional: Rate limit handling (seconds to wait) +POSTMAN_RATE_LIMIT_DELAY=60 + +# Optional: Maximum retry attempts +POSTMAN_MAX_RETRIES=3 + +# Optional: Request timeout (seconds) +POSTMAN_TIMEOUT=30 + +# Optional: Logging level (DEBUG, INFO, WARNING, ERROR) +LOG_LEVEL=INFO + +# Optional: Enable proxy (default is false to avoid "403 Forbidden" proxy errors) +# Only set to true if you specifically need to use a proxy +# POSTMAN_USE_PROXY=false diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..2a891c6 --- /dev/null +++ b/.gitignore @@ -0,0 +1,25 @@ +# Environment +.env +.env.local + +# Python +__pycache__/ +*.py[cod] +*$py.class +*.so +.Python +venv/ +ENV/ + +# IDE +.vscode/ +.idea/ +*.swp +*.swo + +# OS +.DS_Store +Thumbs.db + +# Local settings +.claude/settings.local.json diff --git a/README.md b/README.md new file mode 100644 index 0000000..40e505e --- /dev/null +++ b/README.md @@ -0,0 +1,99 @@ +# Postman Plugin for Claude Code + +Official Postman plugin for Claude Code. Manage your entire API lifecycle without leaving your editor. 
+ +## Installation + +``` +/plugin install postman-skill +``` + +That's it. Type `/postman` and start managing your APIs. + +## Features + +- **Collection Management** - List, create, update, delete, and duplicate collections +- **Environment Management** - Manage variables across workspaces +- **Monitor Analysis** - Check API health and debug failures +- **Mock Servers** - Create and manage mock servers +- **Test Execution** - Run collection tests with environments +- **Agent-Readiness Analysis** - Grade your APIs for AI agent compatibility using Clara +- **Security Auditing** - Check APIs for security issues +- **Code Generation** - Generate code snippets in Python, JavaScript, Go, and more + +## Usage + +### Natural Language + +``` +/postman +Show me all my collections +``` + +``` +/postman +Create a mock server for my Payment API +``` + +``` +/postman +Is my API ready for AI agents? +``` + +### Common Operations + +| Task | Example | +|------|---------| +| List collections | "Show my collections" | +| Create collection | "Create a collection called User API" | +| Run tests | "Run tests for Payment API with staging environment" | +| Check monitors | "How are my monitors doing?" | +| Create mock | "Create a mock server for Order API" | +| Analyze agent-readiness | "Is my API ready for AI agents?" | +| Security audit | "Check my API for security issues" | + +## Setup + +### Prerequisites + +- Python 3.7+ +- Postman API Key + +### Configuration + +1. Get your API key from [Postman Account Settings](https://go.postman.co/settings/me/api-keys) + +2. Create a `.env` file: +```bash +cp .env.example .env +# Edit .env and add: POSTMAN_API_KEY=PMAK-your-key-here +``` + +## Agent-Readiness Analysis + +This plugin includes Clara integration for grading APIs on AI agent compatibility. + +Clara checks 8 pillars: +1. **Metadata** - operationIds, summaries, descriptions +2. **Errors** - Structured error responses +3. **Introspection** - Parameter types, examples +4. 
**Naming** - Consistent casing, RESTful paths +5. **Predictability** - Response schemas +6. **Documentation** - Auth docs, rate limits +7. **Performance** - Response times +8. **Discoverability** - Server URLs, spec accessibility + +**Score 70%+ with no critical failures = Agent Ready** + +## Works With + +This plugin complements [Postman's MCP Server](https://github.com/postmanlabs/postman-mcp-server) for live API operations. + +## License + +MIT + +## Author + +Sterling Chin ([@sterlingchin](https://twitter.com/sterlingchin)) +Developer Relations, Postman diff --git a/commands/postman.md b/commands/postman.md new file mode 100644 index 0000000..1e5d6b6 --- /dev/null +++ b/commands/postman.md @@ -0,0 +1,281 @@ +--- +description: Manage Postman collections, environments, monitors, and APIs +--- + +# Postman Command + +Execute Postman API operations to manage your API lifecycle. This command provides access to Postman collections, environments, monitors, and more through Python scripts. + +## Prerequisites + +Before using this command, ensure: +1. A `.env` file exists in the `postman-slash-command/` directory with `POSTMAN_API_KEY` set +2. Python 3 is available +3. `curl` is installed (usually pre-installed on macOS and Linux) +4. 
Optional: `python-dotenv` for easier .env loading (has fallback if not available) + +If the user hasn't set up their environment yet, guide them through: +```bash +cd postman-slash-command +cp .env.example .env +# Edit .env and add POSTMAN_API_KEY=PMAK-your-key-here +``` + +## Available Operations + +### Discovery & Listing + +**List all resources:** +```bash +python postman-slash-command/scripts/list_collections.py --all +``` + +**List collections only:** +```bash +python postman-slash-command/scripts/list_collections.py +``` + +**List environments:** +```bash +python postman-slash-command/scripts/list_collections.py --environments +``` + +**List monitors:** +```bash +python postman-slash-command/scripts/list_collections.py --monitors +``` + +**List APIs:** +```bash +python postman-slash-command/scripts/list_collections.py --apis +``` + +### Collection Management + +**List collections:** +```bash +python postman-slash-command/scripts/manage_collections.py --list +``` + +**Get collection details:** +```bash +python postman-slash-command/scripts/manage_collections.py --get +``` + +**Create new collection:** +```bash +python postman-slash-command/scripts/manage_collections.py --create --name="My Collection" +``` + +**Update collection:** +```bash +python postman-slash-command/scripts/manage_collections.py --update --name="New Name" +``` + +**Delete collection:** +```bash +python postman-slash-command/scripts/manage_collections.py --delete +``` + +**Duplicate collection:** +```bash +python postman-slash-command/scripts/manage_collections.py --duplicate --name="Copy" +``` + +### Environment Management + +**List environments:** +```bash +python postman-slash-command/scripts/manage_environments.py --list +``` + +**Create environment:** +```bash +python postman-slash-command/scripts/manage_environments.py --create --name="Development" +``` + +**Get environment details:** +```bash +python postman-slash-command/scripts/manage_environments.py --get +``` + +**Update 
environment:** +```bash +python postman-slash-command/scripts/manage_environments.py --update --name="New Name" +``` + +**Delete environment:** +```bash +python postman-slash-command/scripts/manage_environments.py --delete +``` + +**Duplicate environment:** +```bash +python postman-slash-command/scripts/manage_environments.py --duplicate --name="Copy" +``` + +### Monitor Management + +**List monitors:** +```bash +python postman-slash-command/scripts/manage_monitors.py --list +``` + +**Get monitor details:** +```bash +python postman-slash-command/scripts/manage_monitors.py --get +``` + +**Analyze monitor runs:** +```bash +python postman-slash-command/scripts/manage_monitors.py --analyze --limit 10 +``` + +### Test Execution + +**Run collection tests:** +```bash +python postman-slash-command/scripts/run_collection.py --collection="Collection Name" +``` + +**Run with environment:** +```bash +python postman-slash-command/scripts/run_collection.py --collection="Collection Name" --environment="Environment Name" +``` + +### Agent-Readiness Analysis + +Analyze APIs to determine if they're ready for AI agents to discover, understand, and self-heal from errors. Uses Clara, the AI Agent API Readiness Analyzer. 
+ +**Analyze a local OpenAPI spec:** +```bash +python postman-slash-command/scripts/analyze_agent_readiness.py ./openapi.yaml +``` + +**Analyze a spec from URL:** +```bash +python postman-slash-command/scripts/analyze_agent_readiness.py --url https://petstore3.swagger.io/api/v3/openapi.json +``` + +**Analyze a Postman collection:** +```bash +python postman-slash-command/scripts/analyze_agent_readiness.py --collection +``` + +**Get detailed JSON output:** +```bash +python postman-slash-command/scripts/analyze_agent_readiness.py ./openapi.yaml --json +``` + +**Save reports to files:** +```bash +python postman-slash-command/scripts/analyze_agent_readiness.py ./openapi.yaml -o report.json -o report.md +``` + +#### What Clara Checks + +Clara analyzes APIs across 8 pillars: +1. **Metadata** - operationIds, summaries, descriptions +2. **Errors** - Structured error responses with codes and details +3. **Introspection** - Parameter types, required fields, examples +4. **Naming** - Consistent casing, RESTful paths +5. **Predictability** - Response schemas, pagination patterns +6. **Documentation** - Auth docs, rate limits +7. **Performance** - Response times, rate limit headers +8. **Discoverability** - Server URLs, spec accessibility + +#### Understanding Results + +- **Score 70%+ with no critical failures** = Agent Ready +- **Below 70% or critical failures** = Not Agent Ready +- **Priority Fixes** = Most impactful improvements, ranked by severity x endpoints affected + +For conceptual questions about agent-ready APIs, refer to `skills/knowledge/agent-ready-apis.md`. + +## How to Respond + +When the user asks about Postman operations: + +1. **Understand the intent** - What do they want to do? + - List resources? → Use list_collections.py + - Manage collections? → Use manage_collections.py + - Manage environments? → Use manage_environments.py + - Check monitors? → Use manage_monitors.py + - Run tests? → Use run_collection.py + - Check agent-readiness? 
→ Use analyze_agent_readiness.py + - Ask about API design for AI? → Reference skills/knowledge/agent-ready-apis.md + +2. **Execute the appropriate script** - Run the Python script with correct arguments + +3. **Parse and present results** - Format the output in a user-friendly way + +4. **Suggest next steps** - Based on what they accomplished, suggest related actions + +## Examples + +### Example 1: User wants to see their collections + +**User:** "Show me my Postman collections" + +**You should:** +1. Run: `python postman-slash-command/scripts/list_collections.py` +2. Parse the output +3. Present in a clear format +4. Ask if they want to do anything with the collections + +### Example 2: User wants to create a new collection + +**User:** "Create a new collection called 'Payment API Tests'" + +**You should:** +1. Run: `python postman-slash-command/scripts/manage_collections.py --create --name="Payment API Tests"` +2. Confirm success +3. Show the collection ID +4. Ask if they want to add requests to it + +### Example 3: User wants to check monitor status + +**User:** "How is my API monitoring doing?" + +**You should:** +1. First list monitors: `python postman-slash-command/scripts/manage_monitors.py --list` +2. For each monitor, analyze runs: `python postman-slash-command/scripts/manage_monitors.py --analyze --limit 5` +3. Summarize the health (success rates, response times, recent failures) +4. Alert if any monitors are failing + +### Example 4: User wants to check if their API is agent-ready + +**User:** "Is my Payment API ready for AI agents?" + +**You should:** +1. Find or ask for the OpenAPI spec location +2. Run: `python postman-slash-command/scripts/analyze_agent_readiness.py ./payment-api.yaml` +3. Present the overall score and agent-ready status +4. Highlight critical issues that block agent usage +5. Show top priority fixes with specific recommendations +6. Offer to explain any pillar in more detail + +**User:** "Why can't AI agents use my API well?" 
+ +**You should:** +1. Run agent-readiness analysis +2. Focus on failed checks, especially critical ones +3. Explain the impact: "Missing operationIds means agents can't reference your endpoints uniquely" +4. Reference `skills/knowledge/agent-ready-apis.md` for detailed explanations +5. Suggest specific fixes with examples + +## Error Handling + +If a script fails: +1. Check if `.env` file exists and has `POSTMAN_API_KEY` +2. Verify the API key is valid (starts with `PMAK-`) +3. Check if the resource ID/name is correct +4. Provide helpful error messages to the user + +## Notes + +- All scripts automatically load configuration from `.env` file +- Scripts use retry logic for rate limits +- Collection/environment IDs are UUIDs (e.g., `12345678-1234-1234-1234-123456789012`) +- Monitor IDs are shorter alphanumeric strings +- Use `--help` flag on any script to see all available options diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000..b7c45df --- /dev/null +++ b/requirements.txt @@ -0,0 +1,8 @@ +# Postman Slash Command Dependencies + +# Optional: For easier .env file loading (has fallback if not available) +python-dotenv>=0.19.0 + +# Note: This project uses curl via subprocess for HTTP requests +# to avoid external Python dependencies. Make sure curl is installed +# on your system (usually pre-installed on macOS and Linux). diff --git a/scripts/analyze_agent_readiness.py b/scripts/analyze_agent_readiness.py new file mode 100644 index 0000000..cec520f --- /dev/null +++ b/scripts/analyze_agent_readiness.py @@ -0,0 +1,316 @@ +#!/usr/bin/env python3 +""" +Agent-readiness analyzer for APIs using Clara. + +Analyzes OpenAPI specs to determine if APIs are ready for AI agents to: +1. Discover what the API does +2. Understand how to use it correctly +3. Self-heal from errors without human intervention + +Uses Clara (AI Agent API Readiness Analyzer) under the hood. 
+ +Usage: + python analyze_agent_readiness.py + python analyze_agent_readiness.py --url https://api.example.com/openapi.json + python analyze_agent_readiness.py --collection + python analyze_agent_readiness.py --help +""" + +import sys +import os +import argparse +import json +import subprocess +import tempfile +from pathlib import Path + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..')) + +CLARA_PATH = os.path.expanduser("~/work/clara/dist/cli/index.js") +MIN_NODE_VERSION = "20.0.0" + + +def check_node_version(): + """Check if Node.js is installed and meets version requirements.""" + try: + result = subprocess.run( + ["node", "--version"], + capture_output=True, + text=True, + timeout=5 + ) + if result.returncode != 0: + return None, "Node.js not found" + + version = result.stdout.strip().lstrip('v') + major = int(version.split('.')[0]) + + if major < 20: + return None, f"Node.js {version} found, but Clara requires >= {MIN_NODE_VERSION}" + + return version, None + except FileNotFoundError: + return None, "Node.js not installed" + except Exception as e: + return None, f"Error checking Node.js: {str(e)}" + + +def find_clara(): + """Find Clara CLI installation.""" + if os.path.exists(CLARA_PATH): + return CLARA_PATH, "direct" + + # Try npx fallback + try: + result = subprocess.run( + ["npx", "@postman/clara", "--version"], + capture_output=True, + text=True, + timeout=10 + ) + if result.returncode == 0: + return "@postman/clara", "npx" + except: + pass + + return None, None + + +def run_clara(args, clara_path, clara_type): + """Run Clara CLI with given arguments.""" + if clara_type == "direct": + cmd = ["node", clara_path] + args + else: + cmd = ["npx", clara_path] + args + + return subprocess.run( + cmd, + capture_output=True, + text=True, + timeout=120 + ) + + +def analyze_spec(spec_path, options, clara_path, clara_type): + """Analyze an OpenAPI spec for agent-readiness.""" + args = ["analyze", spec_path] + + if options.get('verbose'): + 
args.append("--verbose") + if options.get('json'): + args.append("--json") + if options.get('quiet'): + args.append("--quiet") + + for output_path in options.get('output', []): + args.extend(["-o", output_path]) + + if options.get('probe'): + args.append("--probe") + if options.get('base_url'): + args.extend(["--base-url", options['base_url']]) + if options.get('sandbox'): + args.append("--sandbox") + if options.get('auth'): + args.extend(["--auth", options['auth']]) + + result = run_clara(args, clara_path, clara_type) + + if result.returncode == 2: + raise RuntimeError(f"Clara analysis failed: {result.stderr}") + + if options.get('json'): + try: + return json.loads(result.stdout) + except json.JSONDecodeError: + return {"error": "Failed to parse Clara output", "raw": result.stdout} + + return { + "stdout": result.stdout, + "stderr": result.stderr, + "returncode": result.returncode, + "agent_ready": result.returncode == 0 + } + + +def export_collection_as_openapi(collection_id, client): + """Export a Postman collection for analysis.""" + collection = client.get_collection(collection_id) + with tempfile.NamedTemporaryFile(mode='w', suffix='.json', delete=False) as f: + json.dump({"collection": collection}, f) + return f.name + + +def format_summary(report): + """Format a human-readable summary from the Clara report.""" + if "error" in report: + return f"Error: {report.get('error')}" + + if "stdout" in report: + return report["stdout"] + + summary = report.get("summary", {}) + pillars = report.get("pillars", []) + priority_fixes = report.get("priorityFixes", []) + + lines = [] + lines.append("=" * 60) + lines.append("AI AGENT READINESS ANALYSIS") + lines.append("=" * 60) + lines.append("") + + score = summary.get("overallScore", 0) + ready = summary.get("agentReady", False) + status = "READY" if ready else "NOT READY" + status_emoji = "✓" if ready else "✗" + + lines.append(f"Overall Score: {score}% ({status_emoji} {status})") + lines.append(f"Total Endpoints: 
{summary.get('totalEndpoints', 0)}") + lines.append(f" Passed: {summary.get('passed', 0)}") + lines.append(f" Warnings: {summary.get('warnings', 0)}") + lines.append(f" Failed: {summary.get('failed', 0)}") + lines.append(f" Critical: {summary.get('criticalFailures', 0)}") + lines.append("") + + lines.append("Pillar Scores:") + lines.append("-" * 40) + for pillar in pillars: + name = pillar.get("name", "Unknown") + pillar_score = pillar.get("score", 0) + passed = pillar.get("checksPassed", 0) + failed = pillar.get("checksFailed", 0) + total = passed + failed + bar = "█" * (pillar_score // 10) + "░" * (10 - pillar_score // 10) + lines.append(f" {bar} {pillar_score:3d}% {name} ({passed}/{total})") + lines.append("") + + if priority_fixes: + lines.append("Top Priority Fixes:") + lines.append("-" * 40) + for fix in priority_fixes[:5]: + rank = fix.get("rank", 0) + check_name = fix.get("checkName", "Unknown") + severity = fix.get("severity", "unknown").upper() + affected = fix.get("endpointsAffected", 0) + fix_summary = fix.get("summary", "") + lines.append(f" #{rank} [{severity}] {check_name}") + lines.append(f" {affected} endpoint(s) affected") + lines.append(f" {fix_summary}") + lines.append("") + + lines.append("=" * 60) + return "\n".join(lines) + + +def main(): + parser = argparse.ArgumentParser( + description='Analyze API agent-readiness using Clara', + formatter_class=argparse.RawDescriptionHelpFormatter, + epilog=""" +Examples: + python analyze_agent_readiness.py ./openapi.yaml + python analyze_agent_readiness.py --url https://petstore3.swagger.io/api/v3/openapi.json + python analyze_agent_readiness.py --collection 12345-abcd + python analyze_agent_readiness.py ./openapi.yaml --json + python analyze_agent_readiness.py ./openapi.yaml -o report.json -o report.md + """ + ) + + input_group = parser.add_mutually_exclusive_group(required=True) + input_group.add_argument('spec', nargs='?', help='Path to OpenAPI spec file') + input_group.add_argument('--url', 
metavar='URL', help='URL to OpenAPI spec') + input_group.add_argument('--collection', metavar='ID', help='Postman collection ID') + + parser.add_argument('--json', action='store_true', help='Output raw JSON') + parser.add_argument('-v', '--verbose', action='store_true', help='Verbose output') + parser.add_argument('-q', '--quiet', action='store_true', help='Quiet mode') + parser.add_argument('-o', '--output', action='append', default=[], metavar='FILE', + help='Output file (.json, .csv, .md)') + + probe_group = parser.add_argument_group('Live Probing Options') + probe_group.add_argument('--probe', action='store_true', help='Enable live probing') + probe_group.add_argument('--base-url', metavar='URL', help='Base URL for probing') + probe_group.add_argument('--sandbox', action='store_true', help='Sandbox mode') + probe_group.add_argument('--auth', metavar='HEADER', help='Authorization header') + + args = parser.parse_args() + + node_version, node_error = check_node_version() + if node_error: + print(f"Error: {node_error}", file=sys.stderr) + print("\nClara requires Node.js >= 20.0.0", file=sys.stderr) + sys.exit(1) + + clara_path, clara_type = find_clara() + if not clara_path: + print("Error: Clara not found", file=sys.stderr) + print("\nExpected: ~/work/clara/dist/cli/index.js", file=sys.stderr) + print("\nTo install:", file=sys.stderr) + print(" cd ~/work/clara && npm install && npm run build", file=sys.stderr) + sys.exit(1) + + spec_path = None + temp_file = None + + if args.spec: + spec_path = args.spec + elif args.url: + spec_path = args.url + elif args.collection: + try: + from scripts.postman_client import PostmanClient + client = PostmanClient() + temp_file = export_collection_as_openapi(args.collection, client) + spec_path = temp_file + if not args.quiet: + print(f"Exported collection {args.collection}...") + except Exception as e: + print(f"Error exporting collection: {e}", file=sys.stderr) + sys.exit(1) + + options = { + 'json': args.json, + 'verbose': 
args.verbose, + 'quiet': args.quiet, + 'output': args.output, + 'probe': args.probe, + 'base_url': args.base_url, + 'sandbox': args.sandbox, + 'auth': args.auth, + } + + if args.probe and not args.base_url: + print("Error: --probe requires --base-url", file=sys.stderr) + sys.exit(1) + + try: + report = analyze_spec(spec_path, options, clara_path, clara_type) + + if args.json: + print(json.dumps(report, indent=2)) + elif args.quiet: + if "stdout" in report: + print(report["stdout"]) + else: + if "stdout" in report: + print(report["stdout"]) + else: + print(format_summary(report)) + + if args.quiet: + sys.exit(report.get("returncode", 1)) + elif "summary" in report: + sys.exit(0 if report["summary"].get("agentReady", False) else 1) + else: + sys.exit(report.get("returncode", 0)) + + except Exception as e: + print(f"Error: {e}", file=sys.stderr) + sys.exit(2) + finally: + if temp_file and os.path.exists(temp_file): + os.unlink(temp_file) + + +if __name__ == '__main__': + main() diff --git a/scripts/audit_security.py b/scripts/audit_security.py new file mode 100755 index 0000000..826b473 --- /dev/null +++ b/scripts/audit_security.py @@ -0,0 +1,482 @@ +#!/usr/bin/env python3 +""" +Security audit for Postman APIs and collections. 
+ +Performs comprehensive security analysis including: +- OpenAPI security definition validation +- Collection authentication checks +- HTTPS enforcement verification +- Security header analysis +- Sensitive data exposure checks +- Security best practices compliance + +Usage: + python audit_security.py --api + python audit_security.py --collection + python audit_security.py --spec + python audit_security.py --all + python audit_security.py --help +""" + +import sys +import os +import argparse +import json +import re +from collections import defaultdict + +# Add parent directory to path for imports +sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..')) + +from scripts.postman_client import PostmanClient +from scripts.config import PostmanConfig + + +class SecurityAuditor: + """Security auditor for Postman resources.""" + + def __init__(self, client): + self.client = client + self.findings = [] + self.severity_counts = {"critical": 0, "high": 0, "medium": 0, "low": 0, "info": 0} + + def add_finding(self, severity, category, message, recommendation=None): + """Add a security finding.""" + finding = { + "severity": severity, + "category": category, + "message": message, + "recommendation": recommendation + } + self.findings.append(finding) + self.severity_counts[severity] += 1 + + def audit_openapi_security(self, spec_content): + """Audit OpenAPI specification security definitions.""" + print("🔍 Auditing OpenAPI Security Definitions...\n") + + try: + if isinstance(spec_content, str): + spec = json.loads(spec_content) + else: + spec = spec_content + + # Check for security schemes + security_schemes = spec.get('components', {}).get('securitySchemes', {}) + + if not security_schemes: + self.add_finding( + "high", + "Missing Security", + "No security schemes defined in OpenAPI spec", + "Add security schemes (OAuth2, API Key, JWT, etc.) 
in components.securitySchemes" + ) + else: + print(f"✓ Found {len(security_schemes)} security scheme(s)") + for name, scheme in security_schemes.items(): + scheme_type = scheme.get('type', 'unknown') + print(f" - {name}: {scheme_type}") + + # Check for insecure schemes + if scheme_type == 'http' and scheme.get('scheme') == 'basic': + self.add_finding( + "medium", + "Weak Authentication", + f"Basic authentication used in scheme '{name}'", + "Consider using OAuth2 or API keys with JWT" + ) + + # Check global security requirements + if 'security' not in spec: + self.add_finding( + "medium", + "Missing Security", + "No global security requirements defined", + "Add top-level 'security' array to enforce authentication on all endpoints" + ) + + # Check endpoints for HTTPS + servers = spec.get('servers', []) + for server in servers: + url = server.get('url', '') + if url.startswith('http://') and 'localhost' not in url and '127.0.0.1' not in url: + self.add_finding( + "critical", + "Insecure Protocol", + f"Server uses HTTP instead of HTTPS: {url}", + "Use HTTPS for all production endpoints" + ) + + # Check paths for security + paths = spec.get('paths', {}) + unsecured_endpoints = [] + + for path, methods in paths.items(): + for method, operation in methods.items(): + if method not in ['get', 'post', 'put', 'delete', 'patch']: + continue + + # Check if endpoint has security + endpoint_security = operation.get('security', spec.get('security', [])) + + if not endpoint_security or endpoint_security == [{}]: + unsecured_endpoints.append(f"{method.upper()} {path}") + + if unsecured_endpoints: + self.add_finding( + "high", + "Unsecured Endpoints", + f"{len(unsecured_endpoints)} endpoint(s) have no authentication", + f"Add security requirements to: {', '.join(unsecured_endpoints[:3])}" + + (f" and {len(unsecured_endpoints) - 3} more" if len(unsecured_endpoints) > 3 else "") + ) + + except json.JSONDecodeError: + self.add_finding( + "info", + "Parse Error", + "Could not parse 
OpenAPI spec as JSON", + None + ) + except Exception as e: + self.add_finding( + "info", + "Audit Error", + f"Error auditing OpenAPI spec: {str(e)}", + None + ) + + def audit_collection_security(self, collection): + """Audit Postman collection security.""" + print("🔍 Auditing Collection Security...\n") + + # Check collection-level auth + auth = collection.get('auth') + + if not auth or auth.get('type') == 'noauth': + self.add_finding( + "medium", + "Missing Authentication", + "Collection has no authentication configured", + "Add collection-level authentication (API Key, Bearer Token, OAuth2, etc.)" + ) + else: + auth_type = auth.get('type', 'unknown') + print(f"✓ Collection uses {auth_type} authentication") + + # Check for weak auth types + if auth_type == 'basic': + self.add_finding( + "medium", + "Weak Authentication", + "Collection uses Basic authentication", + "Consider using OAuth2 or API keys with JWT for better security" + ) + + # Check requests + def check_requests(items, parent_auth=auth): + """Recursively check requests in folders.""" + http_count = 0 + https_count = 0 + no_auth_count = 0 + + for item in items: + if 'request' in item: + request = item['request'] + + # Check URL protocol + url = request.get('url', {}) + if isinstance(url, dict): + url_raw = url.get('raw', '') + else: + url_raw = url + + if url_raw.startswith('http://') and 'localhost' not in url_raw and '127.0.0.1' not in url_raw: + http_count += 1 + + if url_raw.startswith('https://'): + https_count += 1 + + # Check request-level auth + request_auth = request.get('auth', parent_auth) + + if not request_auth or request_auth.get('type') == 'noauth': + no_auth_count += 1 + + # Check for sensitive data in URL + if re.search(r'(api[_-]?key|token|password|secret)=', url_raw, re.IGNORECASE): + self.add_finding( + "critical", + "Sensitive Data Exposure", + f"Potential sensitive data in URL: {item.get('name', 'Unnamed request')}", + "Move sensitive data to headers or request body" + ) + + # Check 
headers for security + headers = request.get('header', []) + has_auth_header = any( + h.get('key', '').lower() in ['authorization', 'x-api-key', 'api-key'] + for h in headers + ) + + # If no auth and no security headers, flag it + if not request_auth and not has_auth_header and not parent_auth: + # Skip if it's a GET to a public endpoint + if request.get('method', 'GET') != 'GET': + self.add_finding( + "medium", + "Unauthenticated Request", + f"Request '{item.get('name', 'Unnamed')}' has no authentication", + "Add authentication to prevent unauthorized access" + ) + + elif 'item' in item: # Folder + folder_auth = item.get('auth', parent_auth) + sub_http, sub_https, sub_no_auth = check_requests(item['item'], folder_auth) + http_count += sub_http + https_count += sub_https + no_auth_count += sub_no_auth + + return http_count, https_count, no_auth_count + + http_count, https_count, no_auth_count = check_requests(collection.get('item', [])) + + # Report HTTP usage + if http_count > 0: + self.add_finding( + "critical", + "Insecure Protocol", + f"{http_count} request(s) use HTTP instead of HTTPS", + "Update all requests to use HTTPS" + ) + else: + print(f"✓ All requests use HTTPS") + + if no_auth_count > 0: + self.add_finding( + "info", + "Unauthenticated Requests", + f"{no_auth_count} request(s) have no authentication configured", + "Review if these endpoints should be public" + ) + + def audit_api_security(self, api_id): + """Audit a Postman API.""" + print(f"=== Security Audit: API {api_id} ===\n") + + try: + # Get API details + api = self.client.get_api(api_id) + print(f"API: {api.get('name', 'Unnamed')}\n") + + # Get versions + versions = self.client.get_api_versions(api_id) + + if not versions: + self.add_finding( + "info", + "No Versions", + "API has no versions defined", + "Create at least one version with an OpenAPI schema" + ) + return + + # Audit most recent version + latest_version = versions[0] + version_id = latest_version.get('id') + + print(f"Auditing 
version: {latest_version.get('name', 'Unknown')}\n") + + # Get schema + schemas = self.client.get_api_schema(api_id, version_id) + + if schemas: + schema = schemas[0] + schema_content = schema.get('schema', '{}') + self.audit_openapi_security(schema_content) + else: + self.add_finding( + "medium", + "Missing Schema", + f"API version '{latest_version.get('name')}' has no schema", + "Add an OpenAPI schema with security definitions" + ) + + except Exception as e: + print(f"Error auditing API: {e}") + sys.exit(1) + + def audit_spec_security(self, spec_id): + """Audit a Spec Hub specification.""" + print(f"=== Security Audit: Spec {spec_id} ===\n") + + try: + spec = self.client.get_spec(spec_id) + print(f"Spec: {spec.get('name', 'Unnamed')}\n") + + files = spec.get('files', []) + + if not files: + self.add_finding( + "info", + "No Files", + "Spec has no files", + None + ) + return + + # Audit root file + root_file = next((f for f in files if f.get('root')), files[0]) + spec_content = root_file.get('content', '{}') + + self.audit_openapi_security(spec_content) + + except Exception as e: + print(f"Error auditing spec: {e}") + sys.exit(1) + + def audit_collection_security_by_id(self, collection_id): + """Audit a collection by ID.""" + print(f"=== Security Audit: Collection {collection_id} ===\n") + + try: + collection = self.client.get_collection(collection_id) + print(f"Collection: {collection.get('info', {}).get('name', 'Unnamed')}\n") + + self.audit_collection_security(collection) + + except Exception as e: + print(f"Error auditing collection: {e}") + sys.exit(1) + + def print_report(self): + """Print security audit report.""" + print("\n" + "=" * 70) + print("🛡️ SECURITY AUDIT REPORT") + print("=" * 70 + "\n") + + # Summary + total_findings = len(self.findings) + print(f"Total Findings: {total_findings}\n") + + if total_findings == 0: + print("✅ No security issues found!\n") + return + + # Severity breakdown + print("Severity Breakdown:") + for severity in ["critical", 
"high", "medium", "low", "info"]: + count = self.severity_counts[severity] + if count > 0: + emoji = {"critical": "🔴", "high": "🟠", "medium": "🟡", "low": "🔵", "info": "⚪"} + print(f" {emoji[severity]} {severity.upper()}: {count}") + print() + + # Group findings by severity + for severity in ["critical", "high", "medium", "low", "info"]: + severity_findings = [f for f in self.findings if f['severity'] == severity] + + if severity_findings: + print(f"\n{severity.upper()} Severity:") + print("-" * 70) + + for i, finding in enumerate(severity_findings, 1): + print(f"\n{i}. [{finding['category']}] {finding['message']}") + if finding['recommendation']: + print(f" 💡 Recommendation: {finding['recommendation']}") + + # Security score + print("\n" + "=" * 70) + score = max(0, 100 - ( + self.severity_counts['critical'] * 20 + + self.severity_counts['high'] * 10 + + self.severity_counts['medium'] * 5 + + self.severity_counts['low'] * 2 + )) + print(f"Security Score: {score}/100") + + if score >= 90: + print("Grade: A (Excellent) ✅") + elif score >= 75: + print("Grade: B (Good) ✔️") + elif score >= 60: + print("Grade: C (Fair) ⚠️") + elif score >= 40: + print("Grade: D (Poor) ❌") + else: + print("Grade: F (Critical) 🔴") + + print("=" * 70 + "\n") + + +def main(): + """Main entry point for security audit.""" + + parser = argparse.ArgumentParser( + description='Security audit for Postman APIs and collections', + formatter_class=argparse.RawDescriptionHelpFormatter, + epilog=""" +Examples: + # Audit an API + python audit_security.py --api abc-123 + + # Audit a collection + python audit_security.py --collection col-456 + + # Audit a Spec Hub specification + python audit_security.py --spec spec-789 + + # Audit all collections in workspace + python audit_security.py --all-collections + + # Audit all APIs in workspace + python audit_security.py --all-apis + """ + ) + + parser.add_argument('--api', metavar='API_ID', help='Audit a specific API') + parser.add_argument('--collection', 
metavar='COLLECTION_ID', help='Audit a specific collection') + parser.add_argument('--spec', metavar='SPEC_ID', help='Audit a Spec Hub specification') + parser.add_argument('--all-collections', action='store_true', help='Audit all collections') + parser.add_argument('--all-apis', action='store_true', help='Audit all APIs') + + args = parser.parse_args() + + if not any([args.api, args.collection, args.spec, args.all_collections, args.all_apis]): + parser.print_help() + return + + # Initialize + client = PostmanClient() + auditor = SecurityAuditor(client) + + # Execute audit + if args.api: + auditor.audit_api_security(args.api) + + elif args.collection: + auditor.audit_collection_security_by_id(args.collection) + + elif args.spec: + auditor.audit_spec_security(args.spec) + + elif args.all_collections: + collections = client.list_collections() + print(f"Auditing {len(collections)} collection(s)...\n") + for col in collections: + auditor.audit_collection_security_by_id(col.get('uid')) + print() + + elif args.all_apis: + apis = client.list_apis() + print(f"Auditing {len(apis)} API(s)...\n") + for api in apis: + auditor.audit_api_security(api.get('id')) + print() + + # Print report + auditor.print_report() + + +if __name__ == '__main__': + main() diff --git a/scripts/config.py b/scripts/config.py new file mode 100644 index 0000000..746d0a7 --- /dev/null +++ b/scripts/config.py @@ -0,0 +1,166 @@ +""" +Configuration management for Postman Agent Skill. +Handles environment variables and provides validated configuration. +""" + +import os +import sys +from pathlib import Path + + +def load_env_file(): + """ + Load environment variables from .env file in the skill directory. + This ensures API keys are automatically loaded without user intervention. 
+ """ + # Find the .env file in the skill directory (parent of scripts/) + script_dir = Path(__file__).parent + skill_dir = script_dir.parent + env_file = skill_dir / ".env" + + if not env_file.exists(): + return False + + # Try using python-dotenv if available (preferred method) + try: + from dotenv import load_dotenv + load_dotenv(env_file) + return True + except ImportError: + # Fallback: manually parse .env file + try: + with open(env_file, 'r') as f: + for line in f: + line = line.strip() + # Skip comments and empty lines + if not line or line.startswith('#'): + continue + # Parse KEY=VALUE format + if '=' in line: + key, value = line.split('=', 1) + key = key.strip() + value = value.strip() + # Remove quotes if present + if value.startswith('"') and value.endswith('"'): + value = value[1:-1] + elif value.startswith("'") and value.endswith("'"): + value = value[1:-1] + # Only set if not already in environment + if key and not os.getenv(key): + os.environ[key] = value + return True + except Exception as e: + print(f"Warning: Could not load .env file: {e}", file=sys.stderr) + return False + + +# Load .env file when this module is imported +_env_loaded = load_env_file() + + +class PostmanConfig: + """ + Manages configuration from environment variables. + Validates required settings and provides defaults. 
+ """ + + def __init__(self): + # Required + self.api_key = os.getenv("POSTMAN_API_KEY") + + # Optional with defaults + self.workspace_id = os.getenv("POSTMAN_WORKSPACE_ID") + self.rate_limit_delay = int(os.getenv("POSTMAN_RATE_LIMIT_DELAY", "60")) + self.max_retries = int(os.getenv("POSTMAN_MAX_RETRIES", "3")) + self.timeout = int(os.getenv("POSTMAN_TIMEOUT", "10")) + + # Proxy settings + # By default, bypass all proxies to avoid "403 Forbidden" proxy errors + # Set POSTMAN_USE_PROXY=true to enable proxy (if needed) + use_proxy = os.getenv("POSTMAN_USE_PROXY", "false").lower() in ("true", "1", "yes") + + if not use_proxy: + # Disable all proxies for direct connection + self.proxies = { + "http": None, + "https": None, + } + else: + # Use system/environment proxy settings + self.proxies = None + + # Logging + self.log_level = os.getenv("LOG_LEVEL", "INFO") + + def validate(self): + """ + Validate that required configuration is present. + Raises ValueError with helpful message if configuration is invalid. + """ + if not self.api_key: + script_dir = Path(__file__).parent + skill_dir = script_dir.parent + env_file = skill_dir / ".env" + + error_msg = "POSTMAN_API_KEY not set.\n\n" + + if not env_file.exists(): + error_msg += ( + "The .env file is missing from the skill package.\n\n" + "To fix this:\n" + "1. Create a .env file in the skill directory:\n" + f" {skill_dir}\n" + "2. 
Add your Postman API key:\n" + " POSTMAN_API_KEY=PMAK-your-key-here\n\n" + "To get your API key:\n" + "- Visit: https://web.postman.co/settings/me/api-keys\n" + "- Click 'Generate API Key'\n" + "- Copy the key (starts with 'PMAK-')\n" + ) + else: + error_msg += ( + f"Found .env file at: {env_file}\n" + "But POSTMAN_API_KEY is not set or is empty.\n\n" + "Please check your .env file contains:\n" + "POSTMAN_API_KEY=PMAK-your-key-here\n\n" + "To get your API key:\n" + "- Visit: https://web.postman.co/settings/me/api-keys\n" + "- Click 'Generate API Key'\n" + "- Copy the key (starts with 'PMAK-')\n" + ) + + raise ValueError(error_msg) + + if not self.api_key.startswith("PMAK-"): + raise ValueError( + "Invalid POSTMAN_API_KEY format.\n" + "API keys should start with 'PMAK-'\n" + "Please check your key from: https://web.postman.co/settings/me/api-keys" + ) + + @property + def base_url(self): + """Base URL for Postman API""" + return "https://api.getpostman.com" + + @property + def headers(self): + """HTTP headers for API requests""" + return { + "X-API-Key": self.api_key, + "Content-Type": "application/json" + } + + +def get_config(): + """ + Get validated configuration instance. + Exits with error message if configuration is invalid. + """ + config = PostmanConfig() + try: + config.validate() + return config + except ValueError as e: + print(f"Configuration Error: {e}", file=sys.stderr) + sys.exit(1) diff --git a/scripts/detect_breaking_changes.py b/scripts/detect_breaking_changes.py new file mode 100755 index 0000000..f48c05a --- /dev/null +++ b/scripts/detect_breaking_changes.py @@ -0,0 +1,481 @@ +#!/usr/bin/env python3 +""" +Detect breaking changes between API versions. + +Analyzes two OpenAPI specifications or Postman collections and identifies +breaking changes that would affect API consumers. 
class BreakingChangeDetector:
    """Detect breaking changes between two API definitions.

    Findings accumulate in two buckets:
    - ``breaking_changes``: dicts with category/message/severity
      ("critical" or "high"),
    - ``non_breaking_changes``: dicts with category/message.

    Use compare_openapi_specs() for OpenAPI documents or
    compare_collections() for Postman collections, then print_report().
    """

    # HTTP verbs recognized as operations inside an OpenAPI path item;
    # other keys (parameters, summary, ...) are skipped.
    HTTP_METHODS = ('get', 'post', 'put', 'delete', 'patch', 'head', 'options')

    def __init__(self):
        self.breaking_changes = []
        self.non_breaking_changes = []

    def add_breaking(self, category, message, severity="high"):
        """Record a breaking change with the given severity."""
        self.breaking_changes.append({
            "category": category,
            "message": message,
            "severity": severity
        })

    def add_non_breaking(self, category, message):
        """Record a backward-compatible change."""
        self.non_breaking_changes.append({
            "category": category,
            "message": message
        })

    def compare_openapi_specs(self, old_spec, new_spec):
        """Compare two OpenAPI specifications (parsed dicts).

        Removed endpoints/methods are breaking; added ones are
        non-breaking; surviving operations are diffed in detail.
        """
        print("🔍 Analyzing API Changes...\n")

        old_paths = old_spec.get('paths', {})
        new_paths = new_spec.get('paths', {})

        # Check for removed endpoints / methods and diff shared operations.
        for path, old_methods in old_paths.items():
            if path not in new_paths:
                self.add_breaking(
                    "Removed Endpoint",
                    f"Endpoint removed: {path}",
                    "critical"
                )
                continue

            new_methods = new_paths[path]

            for method in old_methods:
                if method not in self.HTTP_METHODS:
                    continue

                if method not in new_methods:
                    self.add_breaking(
                        "Removed Method",
                        f"Method removed: {method.upper()} {path}",
                        "critical"
                    )
                    continue

                self._compare_operation(path, method,
                                        old_methods[method], new_methods[method])

        # Check for added endpoints / methods (non-breaking).
        for path, new_methods in new_paths.items():
            if path not in old_paths:
                self.add_non_breaking(
                    "Added Endpoint",
                    f"New endpoint: {path}"
                )
                continue

            old_methods = old_paths.get(path, {})

            for method in new_methods:
                if method not in self.HTTP_METHODS:
                    continue

                if method not in old_methods:
                    self.add_non_breaking(
                        "Added Method",
                        f"New method: {method.upper()} {path}"
                    )

    def _compare_operation(self, path, method, old_op, new_op):
        """Diff one operation: parameters, request body, responses, security."""
        location = f"{method.upper()} {path}"

        def named_params(params):
            # $ref-style parameter entries carry no inline 'name'/'in';
            # skip them instead of crashing with KeyError (fix: the
            # original indexed p['name'] unconditionally).
            return {
                (p['name'], p.get('in', 'query')): p
                for p in params
                if isinstance(p, dict) and 'name' in p
            }

        old_param_map = named_params(old_op.get('parameters', []))
        new_param_map = named_params(new_op.get('parameters', []))

        # Removed / changed parameters.
        for key, param in old_param_map.items():
            name, location_type = key

            if key not in new_param_map:
                if param.get('required', False):
                    self.add_breaking(
                        "Removed Required Parameter",
                        f"{location}: Removed required {location_type} parameter '{name}'",
                        "critical"
                    )
                else:
                    self.add_non_breaking(
                        "Removed Optional Parameter",
                        f"{location}: Removed optional {location_type} parameter '{name}'"
                    )
                continue

            new_param = new_param_map[key]

            # Optional -> required tightens the contract.
            if not param.get('required', False) and new_param.get('required', False):
                self.add_breaking(
                    "Parameter Now Required",
                    f"{location}: Parameter '{name}' is now required",
                    "high"
                )

            # Type change breaks serialization on the client side.
            old_type = param.get('schema', {}).get('type')
            new_type = new_param.get('schema', {}).get('type')
            if old_type and new_type and old_type != new_type:
                self.add_breaking(
                    "Parameter Type Changed",
                    f"{location}: Parameter '{name}' type changed from {old_type} to {new_type}",
                    "high"
                )

        # Newly added *required* parameters break existing callers.
        for key, param in new_param_map.items():
            name, location_type = key
            if key not in old_param_map and param.get('required', False):
                self.add_breaking(
                    "New Required Parameter",
                    f"{location}: Added required {location_type} parameter '{name}'",
                    "high"
                )

        # Request body presence / requiredness.
        old_body = old_op.get('requestBody', {})
        new_body = new_op.get('requestBody', {})

        if old_body and not new_body:
            self.add_breaking(
                "Request Body Removed",
                f"{location}: Request body removed",
                "critical"
            )

        if not old_body.get('required', False) and new_body.get('required', False):
            self.add_breaking(
                "Request Body Now Required",
                f"{location}: Request body is now required",
                "high"
            )

        # Removed 2xx responses (str() tolerates int keys from YAML loaders).
        old_responses = old_op.get('responses', {})
        new_responses = new_op.get('responses', {})
        for code in old_responses:
            if str(code).startswith('2') and code not in new_responses:
                self.add_breaking(
                    "Response Removed",
                    f"{location}: Success response {code} removed",
                    "high"
                )

        # Security requirement changes.
        old_security = old_op.get('security', [])
        new_security = new_op.get('security', [])
        if old_security != new_security:
            if not old_security and new_security:
                self.add_breaking(
                    "Authentication Required",
                    f"{location}: Authentication now required",
                    "high"
                )
            elif old_security and not new_security:
                self.add_non_breaking(
                    "Authentication Removed",
                    f"{location}: Authentication no longer required"
                )

    def compare_collections(self, old_col, new_col):
        """Compare two Postman collections (v2.x dicts)."""
        print("🔍 Analyzing Collection Changes...\n")

        def extract_requests(collection):
            """Flatten a collection tree into {folder/.../name: request-info}."""
            requests = {}

            def extract(items, path=""):
                for item in items:
                    if 'request' in item:
                        name = item.get('name', 'Unnamed')
                        full_path = f"{path}/{name}" if path else name

                        request = item['request']
                        url = request.get('url', {})
                        url_str = url.get('raw', '') if isinstance(url, dict) else str(url)

                        requests[full_path] = {
                            'method': request.get('method', 'GET'),
                            'url': url_str,
                            'auth': request.get('auth'),
                            'headers': request.get('header', []),
                            'body': request.get('body')
                        }

                    elif 'item' in item:
                        folder = item.get('name', 'Folder')
                        extract(item['item'],
                                f"{path}/{folder}" if path else folder)

            extract(collection.get('item', []))
            return requests

        old_requests = extract_requests(old_col)
        new_requests = extract_requests(new_col)

        # Removed or altered requests.
        for name, request in old_requests.items():
            if name not in new_requests:
                self.add_breaking(
                    "Request Removed",
                    f"Request removed: {request['method']} {name}",
                    "critical"
                )
                continue

            new_request = new_requests[name]

            if request['method'] != new_request['method']:
                self.add_breaking(
                    "Method Changed",
                    f"{name}: Method changed from {request['method']} to {new_request['method']}",
                    "critical"
                )

            if request['url'] != new_request['url']:
                self.add_breaking(
                    "URL Changed",
                    f"{name}: URL changed",
                    "high"
                )

            old_auth = request['auth'].get('type') if request['auth'] else None
            new_auth = new_request['auth'].get('type') if new_request['auth'] else None

            if old_auth != new_auth:
                if not old_auth and new_auth:
                    self.add_breaking(
                        "Authentication Added",
                        f"{name}: Authentication now required",
                        "high"
                    )
                elif old_auth and not new_auth:
                    self.add_non_breaking(
                        "Authentication Removed",
                        f"{name}: Authentication removed"
                    )

        # Added requests (non-breaking).
        for name in new_requests:
            if name not in old_requests:
                self.add_non_breaking(
                    "Request Added",
                    f"New request: {new_requests[name]['method']} {name}"
                )

    def print_report(self):
        """Print the breaking-changes report and deployment recommendation."""
        print("\n" + "=" * 70)
        print("🔴 BREAKING CHANGES REPORT")
        print("=" * 70 + "\n")

        if not self.breaking_changes and not self.non_breaking_changes:
            print("✅ No changes detected\n")
            return

        if self.breaking_changes:
            print(f"🔴 BREAKING CHANGES ({len(self.breaking_changes)}):")
            print("-" * 70)

            critical = [c for c in self.breaking_changes if c['severity'] == 'critical']
            high = [c for c in self.breaking_changes if c['severity'] == 'high']

            if critical:
                print("\n⛔ CRITICAL (Will break existing clients):")
                for i, change in enumerate(critical, 1):
                    print(f"{i}. [{change['category']}] {change['message']}")

            if high:
                print("\n🔴 HIGH (May break existing clients):")
                for i, change in enumerate(high, 1):
                    print(f"{i}. [{change['category']}] {change['message']}")

            print()

        if self.non_breaking_changes:
            print(f"\n✅ NON-BREAKING CHANGES ({len(self.non_breaking_changes)}):")
            print("-" * 70)
            # Show at most 10 to keep the report readable.
            for i, change in enumerate(self.non_breaking_changes[:10], 1):
                print(f"{i}. [{change['category']}] {change['message']}")

            if len(self.non_breaking_changes) > 10:
                print(f"... and {len(self.non_breaking_changes) - 10} more")
            print()

        print("=" * 70)
        if self.breaking_changes:
            print("⚠️ WARNING: Breaking changes detected!")
            print(f" {len([c for c in self.breaking_changes if c['severity'] == 'critical'])} critical changes")
            print(f" {len([c for c in self.breaking_changes if c['severity'] == 'high'])} high-impact changes")
            print("\n💡 Recommendations:")
            print(" • Increment major version (e.g., v1 → v2)")
            print(" • Provide migration guide for consumers")
            print(" • Consider deprecation period")
            print(" • Update API documentation")
        else:
            print("✅ No breaking changes - backward compatible!")
            print(" Safe to deploy as minor/patch version")

        print("=" * 70 + "\n")
PostmanClient() + print(f"=== Comparing API Versions ===\n") + + # Get old version schema + old_schemas = client.get_api_schema(args.api, args.old_version) + old_spec = json.loads(old_schemas[0].get('schema', '{}')) + + # Get new version schema + new_schemas = client.get_api_schema(args.api, args.new_version) + new_spec = json.loads(new_schemas[0].get('schema', '{}')) + + detector.compare_openapi_specs(old_spec, new_spec) + + elif args.spec: + client = PostmanClient() + print(f"=== Comparing Specs ===\n") + + # Get old spec + old_spec_data = client.get_spec(args.spec[0]) + old_file = next((f for f in old_spec_data['files'] if f.get('root')), old_spec_data['files'][0]) + old_spec = json.loads(old_file.get('content', '{}')) + + # Get new spec + new_spec_data = client.get_spec(args.spec[1]) + new_file = next((f for f in new_spec_data['files'] if f.get('root')), new_spec_data['files'][0]) + new_spec = json.loads(new_file.get('content', '{}')) + + detector.compare_openapi_specs(old_spec, new_spec) + + elif args.collection: + client = PostmanClient() + print(f"=== Comparing Collections ===\n") + + old_col = client.get_collection(args.collection[0]) + new_col = client.get_collection(args.collection[1]) + + detector.compare_collections(old_col, new_col) + + elif args.file: + print(f"=== Comparing Files ===\n") + + with open(args.file[0], 'r') as f: + old_spec = json.load(f) + + with open(args.file[1], 'r') as f: + new_spec = json.load(f) + + detector.compare_openapi_specs(old_spec, new_spec) + + else: + parser.print_help() + return + + detector.print_report() + + except Exception as e: + print(f"❌ Error: {e}") + sys.exit(1) + + +if __name__ == '__main__': + main() diff --git a/scripts/generate_code.py b/scripts/generate_code.py new file mode 100755 index 0000000..c45a686 --- /dev/null +++ b/scripts/generate_code.py @@ -0,0 +1,421 @@ +#!/usr/bin/env python3 +""" +Generate code snippets from Postman collections and requests. 
class CodeGenerator:
    """Generate client code snippets from Postman request objects.

    All generators are static and take a Postman v2.x request dict
    ({"method", "url", "header", "body", ...}); they return the snippet
    as a single string. Disabled headers/body fields are skipped.
    """

    @staticmethod
    def _url_string(request):
        """Extract the raw URL string from a request's 'url' field."""
        url = request.get('url', {})
        if isinstance(url, dict):
            return url.get('raw', '')
        return str(url)

    @staticmethod
    def generate_curl(request, name=""):
        """Generate a multi-line curl command."""
        method = request.get('method', 'GET')
        url_str = CodeGenerator._url_string(request)

        parts = ['curl']

        # GET is curl's default; only emit -X for other verbs.
        if method != 'GET':
            parts.extend(['-X', method])

        for header in request.get('header', []):
            if not header.get('disabled', False):
                parts.extend(['-H', f'"{header.get("key")}: {header.get("value")}"'])

        body = request.get('body', {})
        if body:
            mode = body.get('mode', '')

            if mode == 'raw':
                parts.extend(['-d', f"'{body.get('raw', '')}'"])

            elif mode == 'urlencoded':
                data = {item['key']: item['value']
                        for item in body.get('urlencoded', [])
                        if not item.get('disabled', False)}
                parts.extend(['-d', f"'{urlencode(data)}'"])

            elif mode == 'formdata':
                for item in body.get('formdata', []):
                    if not item.get('disabled', False):
                        parts.extend(['-F', f'"{item["key"]}={item["value"]}"'])

        parts.append(f'"{url_str}"')

        return ' \\\n  '.join(parts)

    @staticmethod
    def generate_python(request, name=""):
        """Generate Python code using the requests library."""
        method = request.get('method', 'GET').lower()
        url_str = CodeGenerator._url_string(request)

        code = ["import requests", ""]

        headers = request.get('header', [])
        if headers:
            code.append("headers = {")
            for header in headers:
                if not header.get('disabled', False):
                    code.append(f'    "{header.get("key")}": "{header.get("value")}",')
            code.append("}")
            code.append("")

        body = request.get('body', {})
        has_body = False
        mode = ''

        if body:
            mode = body.get('mode', '')

            if mode == 'raw':
                raw_body = body.get('raw', '')
                try:
                    # Prefer a native dict literal when the body is JSON.
                    json_body = json.loads(raw_body)
                    code.append("data = " + json.dumps(json_body, indent=4))
                except (ValueError, TypeError):
                    # Not JSON (or not a string): emit as a literal block.
                    # (fix: previously a bare except that also swallowed
                    # KeyboardInterrupt/SystemExit)
                    code.append(f"data = '''{raw_body}'''")
                has_body = True
                code.append("")

            elif mode == 'urlencoded':
                code.append("data = {")
                for item in body.get('urlencoded', []):
                    if not item.get('disabled', False):
                        code.append(f'    "{item["key"]}": "{item["value"]}",')
                code.append("}")
                code.append("")
                has_body = True

        # Assemble the requests.<verb>(...) call.
        params = [f'"{url_str}"']
        if headers:
            params.append("headers=headers")
        if has_body:
            # json= only when Postman marked the raw body as JSON.
            if mode == 'raw' and body.get('options', {}).get('raw', {}).get('language') == 'json':
                params.append("json=data")
            else:
                params.append("data=data")

        code.append(f"response = requests.{method}(")
        code.append("    " + ",\n    ".join(params))
        code.append(")")
        code.append("")
        code.append("print(response.status_code)")
        code.append("print(response.json())")

        return "\n".join(code)

    @staticmethod
    def generate_javascript(request, name=""):
        """Generate JavaScript code using the fetch API."""
        method = request.get('method', 'GET')
        url_str = CodeGenerator._url_string(request)

        code = ["const options = {", f'  method: "{method}",']

        headers = request.get('header', [])
        if headers:
            code.append("  headers: {")
            for header in headers:
                if not header.get('disabled', False):
                    code.append(f'    "{header.get("key")}": "{header.get("value")}",')
            code.append("  },")

        # fetch() forbids a body on GET requests.
        body = request.get('body', {})
        if body and method != 'GET':
            if body.get('mode', '') == 'raw':
                code.append(f"  body: `{body.get('raw', '')}`,")

        code.append("};")
        code.append("")
        code.append(f'fetch("{url_str}", options)')
        code.append("  .then(response => response.json())")
        code.append("  .then(data => console.log(data))")
        code.append("  .catch(error => console.error('Error:', error));")

        return "\n".join(code)

    @staticmethod
    def generate_nodejs(request, name=""):
        """Generate Node.js code using axios."""
        method = request.get('method', 'GET').lower()
        url_str = CodeGenerator._url_string(request)

        code = ["const axios = require('axios');", ""]
        code.append("const config = {")
        code.append(f'  method: "{method}",')
        code.append(f'  url: "{url_str}",')

        headers = request.get('header', [])
        if headers:
            code.append("  headers: {")
            for header in headers:
                if not header.get('disabled', False):
                    code.append(f'    "{header.get("key")}": "{header.get("value")}",')
            code.append("  },")

        body = request.get('body', {})
        if body and method != 'get':
            if body.get('mode', '') == 'raw':
                raw_body = body.get('raw', '')
                try:
                    json_body = json.loads(raw_body)
                    code.append(f"  data: {json.dumps(json_body, indent=4)},")
                except (ValueError, TypeError):
                    # Not JSON: fall back to a template literal (fix: was a
                    # bare except).
                    code.append(f'  data: `{raw_body}`,')

        code.append("};")
        code.append("")
        code.append("axios(config)")
        code.append("  .then(response => {")
        code.append("    console.log(response.data);")
        code.append("  })")
        code.append("  .catch(error => {")
        code.append("    console.error(error);")
        code.append("  });")

        return "\n".join(code)

    @staticmethod
    def generate_go(request, name=""):
        """Generate Go code using net/http.

        Fixes over the previous version (which emitted non-compiling Go):
        - with no body, the generated code referenced an undeclared
          ``payload`` variable; now ``nil`` is passed directly,
        - non-raw body modes produced ``payload := nil`` (invalid Go),
        - the raw body had its double quotes escaped inside a backtick
          raw-string literal, corrupting JSON payloads (Go raw strings
          take no escapes).
        """
        method = request.get('method', 'GET')
        url_str = CodeGenerator._url_string(request)

        body = request.get('body', {})
        raw_body = None
        if body and body.get('mode', '') == 'raw':
            raw_body = body.get('raw', '')

        code = ["package main", "", "import ("]
        code.append('\t"fmt"')
        code.append('\t"io"')
        code.append('\t"net/http"')
        if raw_body is not None:
            code.append('\t"strings"')   # only needed for the body reader
        code.append(")")
        code.append("")
        code.append("func main() {")

        if raw_body is not None:
            # Backtick raw string: quotes pass through untouched.
            code.append(f'\tpayload := strings.NewReader(`{raw_body}`)')
            code.append("")
            code.append(f'\treq, _ := http.NewRequest("{method}", "{url_str}", payload)')
        else:
            # http.NewRequest accepts a nil body for body-less requests.
            code.append(f'\treq, _ := http.NewRequest("{method}", "{url_str}", nil)')
        code.append("")

        headers = request.get('header', [])
        if headers:
            for header in headers:
                if not header.get('disabled', False):
                    code.append(f'\treq.Header.Add("{header.get("key")}", "{header.get("value")}")')
            code.append("")

        code.append("\tres, _ := http.DefaultClient.Do(req)")
        code.append("\tdefer res.Body.Close()")
        code.append("")
        code.append("\tbody, _ := io.ReadAll(res.Body)")
        code.append("\tfmt.Println(string(body))")
        code.append("}")

        return "\n".join(code)
try: + collection = client.get_collection(collection_id) + collection_name = collection.get('info', {}).get('name', 'Unnamed') + + print(f"Collection: {collection_name}\n") + + def process_items(items, folder_path=""): + """Process collection items recursively.""" + for item in items: + if 'request' in item: + request = item['request'] + name = item.get('name', 'Unnamed') + full_name = f"{folder_path}/{name}" if folder_path else name + + print(f"\n{'=' * 70}") + print(f"Request: {full_name}") + print(f"Method: {request.get('method', 'GET')}") + print('=' * 70) + + generator = CodeGenerator() + + if language == 'all' or language == 'curl': + print("\n### curl ###\n") + print(generator.generate_curl(request, name)) + + if language == 'all' or language == 'python': + print("\n### Python ###\n") + print(generator.generate_python(request, name)) + + if language == 'all' or language == 'javascript': + print("\n### JavaScript ###\n") + print(generator.generate_javascript(request, name)) + + if language == 'all' or language == 'nodejs': + print("\n### Node.js ###\n") + print(generator.generate_nodejs(request, name)) + + if language == 'all' or language == 'go': + print("\n### Go ###\n") + print(generator.generate_go(request, name)) + + elif 'item' in item: + folder = item.get('name', 'Folder') + new_path = f"{folder_path}/{folder}" if folder_path else folder + process_items(item['item'], new_path) + + process_items(collection.get('item', [])) + + except Exception as e: + print(f"Error generating code: {e}") + sys.exit(1) + + +def main(): + """Main entry point.""" + + parser = argparse.ArgumentParser( + description='Generate code snippets from Postman collections', + formatter_class=argparse.RawDescriptionHelpFormatter, + epilog=""" +Examples: + # Generate Python code for all requests + python generate_code.py --collection --language python + + # Generate all languages + python generate_code.py --collection --all + + # Generate curl commands only + python generate_code.py 
--collection --language curl + +Languages: curl, python, javascript, nodejs, go + """ + ) + + parser.add_argument('--collection', metavar='COLLECTION_ID', required=True, help='Collection ID') + parser.add_argument('--language', choices=['curl', 'python', 'javascript', 'nodejs', 'go'], + help='Programming language') + parser.add_argument('--all', action='store_true', help='Generate code for all languages') + + args = parser.parse_args() + + if not args.language and not args.all: + print("Error: Specify --language or --all") + parser.print_help() + return + + language = 'all' if args.all else args.language + + client = PostmanClient() + generate_from_collection(client, args.collection, language) + + +if __name__ == '__main__': + main() diff --git a/scripts/list_collections.py b/scripts/list_collections.py new file mode 100755 index 0000000..3d5b058 --- /dev/null +++ b/scripts/list_collections.py @@ -0,0 +1,132 @@ +#!/usr/bin/env python3 +""" +List Postman workspace resources (collections, environments, monitors, APIs). 
+ +Usage: + python list_collections.py # List collections + python list_collections.py --all # List all resources + python list_collections.py --environments # List environments + python list_collections.py --monitors # List monitors + python list_collections.py --apis # List APIs +""" + +import sys +import os +import argparse + +# Add parent directory to path for imports +sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..')) + +from scripts.postman_client import PostmanClient +from scripts.config import get_config +from utils.formatters import ( + format_collections_list, + format_environments_list, + format_monitors_list, + format_apis_list, + format_workspace_summary, + format_error +) + + +def main(): + parser = argparse.ArgumentParser( + description='List Postman workspace resources' + ) + parser.add_argument( + '--all', + action='store_true', + help='List all resource types' + ) + parser.add_argument( + '--environments', + action='store_true', + help='List environments' + ) + parser.add_argument( + '--monitors', + action='store_true', + help='List monitors' + ) + parser.add_argument( + '--apis', + action='store_true', + help='List APIs' + ) + parser.add_argument( + '--workspace', + type=str, + help='Workspace ID (optional, uses POSTMAN_WORKSPACE_ID env var if not provided)' + ) + + args = parser.parse_args() + + try: + # Get configuration + config = get_config() + + # Override workspace if provided + if args.workspace: + config.workspace_id = args.workspace + + # Create client + client = PostmanClient(config) + + # Determine what to list + if args.all: + # List everything for workspace summary + print("Fetching workspace resources...") + collections = client.list_collections() + environments = client.list_environments() + monitors = client.list_monitors() + apis = client.list_apis() + + print(format_workspace_summary(collections, environments, monitors, apis)) + + elif args.environments: + print("Fetching environments...") + environments = 
client.list_environments() + print(format_environments_list(environments)) + + elif args.monitors: + print("Fetching monitors...") + monitors = client.list_monitors() + print(format_monitors_list(monitors)) + + elif args.apis: + print("Fetching APIs...") + apis = client.list_apis() + print(format_apis_list(apis)) + + else: + # Default: list collections + + # Show workspace context if configured + if config.workspace_id: + try: + workspace = client.get_workspace(config.workspace_id) + workspace_name = workspace.get('name', 'Unknown') + print(f"📁 Workspace: {workspace_name}\n") + except: + pass # Continue even if workspace fetch fails + + print("Fetching collections...") + collections = client.list_collections() + + if len(collections) == 0: + print("\n📭 No collections found in this workspace.") + print("\n💡 Quick actions:") + if config.workspace_id: + print(" • List workspaces: python scripts/list_workspaces.py") + print(" • Change workspace: Edit POSTMAN_WORKSPACE_ID in .env") + print(" • Run setup validation: python scripts/validate_setup.py") + else: + print(format_collections_list(collections)) + + except Exception as e: + print(format_error(e, "listing workspace resources"), file=sys.stderr) + sys.exit(1) + + +if __name__ == "__main__": + main() diff --git a/scripts/list_workspaces.py b/scripts/list_workspaces.py new file mode 100644 index 0000000..ff8082d --- /dev/null +++ b/scripts/list_workspaces.py @@ -0,0 +1,128 @@ +#!/usr/bin/env python3 +""" +List all accessible Postman workspaces with collection counts. 
+ +Usage: + python list_workspaces.py + python list_workspaces.py --detailed # Show more info for each workspace +""" + +import sys +import os +import argparse + +# Add parent directory to path for imports +sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..')) + +from scripts.postman_client import PostmanClient +from scripts.config import get_config +from utils.formatters import format_error + + +def list_workspaces(detailed=False): + """ + List all accessible workspaces. + + Args: + detailed: If True, show additional details for each workspace + """ + config = get_config() + client = PostmanClient(config) + + print("📂 Your Postman Workspaces:\n") + + try: + # Get all workspaces + # Note: Postman API doesn't have a direct /workspaces endpoint for listing all + # We'll need to use a workaround by getting workspace info + + # First, try to get current workspace if configured + workspaces = [] + + if config.workspace_id: + try: + workspace = client.get_workspace(config.workspace_id) + workspaces.append(workspace) + except Exception as e: + print(f"⚠️ Could not fetch configured workspace: {e}\n") + + # If we don't have workspaces, we can't list them directly via API + # The Postman API requires knowing workspace IDs to get workspace info + if not workspaces: + print("ℹ️ Workspace listing requires the Postman API to support workspace enumeration.") + print(" Currently, you need to know the workspace ID to access it.\n") + print("💡 To use a specific workspace:") + print(" 1. Find your workspace ID from the Postman web app URL") + print(" (e.g., https://web.postman.co/workspace/YOUR-WORKSPACE-ID)") + print(" 2. Add to .env file: POSTMAN_WORKSPACE_ID=YOUR-WORKSPACE-ID\n") + print("🔗 Access workspaces: https://web.postman.co/home\n") + return + + # Display workspace information + for i, ws in enumerate(workspaces, 1): + is_current = "⭐" if ws.get('id') == config.workspace_id else " " + print(f"{is_current} {i}. 
{ws.get('name', 'Unknown')}") + print(f" Type: {ws.get('type', 'Unknown')} | ID: {ws.get('id', 'Unknown')}") + + if detailed: + print(f" Description: {ws.get('description', 'No description')}") + + # Show resource counts + try: + collections = client.list_collections(ws.get('id')) + collection_count = len(collections) + print(f" 📊 Collections: {collection_count}") + + if detailed: + environments = client.list_environments(ws.get('id')) + print(f" 🌍 Environments: {len(environments)}") + + try: + monitors = client.list_monitors(ws.get('id')) + print(f" 📈 Monitors: {len(monitors)}") + except: + pass + + try: + apis = client.list_apis(ws.get('id')) + print(f" 🔌 APIs: {len(apis)}") + except: + pass + + except Exception as e: + print(f" ⚠️ Could not fetch resource counts: {e}") + + print() + + print("\n💡 To switch workspace:") + print(" Edit POSTMAN_WORKSPACE_ID in your .env file\n") + + except Exception as e: + raise Exception(f"Failed to list workspaces: {e}") + + +def main(): + parser = argparse.ArgumentParser( + description='List all accessible Postman workspaces' + ) + parser.add_argument( + '--detailed', + action='store_true', + help='Show detailed information for each workspace' + ) + + args = parser.parse_args() + + try: + list_workspaces(detailed=args.detailed) + except Exception as e: + print(format_error(e, "listing workspaces"), file=sys.stderr) + print("\n🔧 Troubleshooting:") + print(" • Run: python scripts/validate_setup.py") + print(" • Check: https://status.postman.com/") + print(" • Verify your API key has workspace access\n") + sys.exit(1) + + +if __name__ == "__main__": + main() diff --git a/scripts/manage_api.py b/scripts/manage_api.py new file mode 100755 index 0000000..317b918 --- /dev/null +++ b/scripts/manage_api.py @@ -0,0 +1,234 @@ +#!/usr/bin/env python3 +""" +Generic API management with versions and schemas. +Demonstrates the full Design phase workflow: create, validate, version, and compare. 
+ +Usage: + python manage_api.py --name="My API" --description="API description" + python manage_api.py --name="My API" --spec-file=path/to/openapi.json + python manage_api.py --help +""" + +import sys +import os +import json +import argparse + +# Add parent directory to path for imports +sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..')) + +from scripts.postman_client import PostmanClient +from scripts.config import PostmanConfig + + +def load_openapi_spec(file_path): + """Load OpenAPI specification from a file (JSON or YAML).""" + with open(file_path, 'r') as f: + if file_path.endswith('.yaml') or file_path.endswith('.yml'): + try: + import yaml + return yaml.safe_load(f) + except ImportError: + print("Error: PyYAML not installed. Install with: pip install pyyaml") + sys.exit(1) + else: + return json.load(f) + + +def create_api_with_spec(client, api_name, description, spec_data): + """Create an API with a version and schema from an OpenAPI spec.""" + + print(f"=== Creating API: {api_name} ===\n") + + # Step 1: Create the API + print("Step 1: Creating API...") + api_data = { + "name": api_name, + "summary": description, + "description": description + } + + try: + api = client.create_api(api_data) + api_id = api.get('id') + print(f"✓ API created successfully!") + print(f" ID: {api_id}") + print(f" Name: {api.get('name')}") + print() + except Exception as e: + print(f"✗ Error creating API: {e}") + return None + + # Step 2: Create version and add schema + version_name = spec_data.get('info', {}).get('version', '1.0.0') + print(f"Step 2: Creating version {version_name} with OpenAPI schema...") + + try: + # Create version + version_data = {"name": version_name} + version_response = client._make_request( + 'POST', + f"/apis/{api_id}/versions", + json={'version': version_data} + ) + version_id = version_response.get('version', {}).get('id') + print(f"✓ Version {version_name} created!") + print(f" ID: {version_id}") + + # Add schema to version + schema_data 
= { + "type": "openapi3", + "language": "json", + "schema": json.dumps(spec_data) + } + + schema_response = client._make_request( + 'POST', + f"/apis/{api_id}/versions/{version_id}/schemas", + json={'schema': schema_data} + ) + schema_id = schema_response.get('schema', {}).get('id') + + # Count paths/endpoints + paths_count = len(spec_data.get('paths', {})) + + print(f"✓ OpenAPI 3.0 schema added to v{version_name}!") + print(f" Schema ID: {schema_id}") + print(f" API Title: {spec_data.get('info', {}).get('title')}") + print(f" Endpoints: {paths_count}") + print() + except Exception as e: + print(f"✗ Error creating version or schema: {e}") + return None + + # Step 3: Validate the schema + print("Step 3: Validating schema...") + try: + schemas = client.get_api_schema(api_id, version_id) + if schemas: + schema = schemas[0] + schema_content = json.loads(schema.get('schema', '{}')) + + print("✓ Schema validation successful!") + print(f" Type: {schema.get('type')}") + print(f" Language: {schema.get('language')}") + print(f" API Title: {schema_content.get('info', {}).get('title')}") + print(f" API Version: {schema_content.get('info', {}).get('version')}") + print(f" Paths defined: {len(schema_content.get('paths', {}))}") + print(f" Schemas defined: {len(schema_content.get('components', {}).get('schemas', {}))}") + print() + else: + print("✗ No schema found") + return None + except Exception as e: + print(f"✗ Error validating schema: {e}") + return None + + print("=== API Created Successfully ===") + print(f"API ID: {api_id}") + print(f"Version ID: {version_id}") + print(f"View in Postman: https://postman.postman.co/workspace/apis/{api_id}") + print() + + return { + 'api_id': api_id, + 'version_id': version_id, + 'schema_id': schema_id + } + + +def main(): + """Main workflow for generic API management.""" + + parser = argparse.ArgumentParser( + description='Create and manage Postman APIs with OpenAPI specifications', + formatter_class=argparse.RawDescriptionHelpFormatter, 
+ epilog=""" +Examples: + # Create API from OpenAPI spec file + python manage_api.py --name="Payment API" --spec-file=openapi.json + + # Create API with description + python manage_api.py --name="User API" --description="User management API" --spec-file=spec.yaml + """ + ) + + parser.add_argument('--name', required=True, help='API name') + parser.add_argument('--description', help='API description (defaults to spec description)') + parser.add_argument('--spec-file', required=True, help='Path to OpenAPI spec file (JSON or YAML)') + parser.add_argument('--list', action='store_true', help='List all APIs in workspace') + parser.add_argument('--get', metavar='API_ID', help='Get details of a specific API') + + args = parser.parse_args() + + # Initialize client + client = PostmanClient() + + # List APIs + if args.list: + print("=== Listing APIs ===\n") + try: + apis = client.list_apis() + if not apis: + print("No APIs found in workspace") + else: + for i, api in enumerate(apis, 1): + print(f"{i}. 
{api.get('name')} (ID: {api.get('id')})") + if api.get('summary'): + print(f" {api.get('summary')}") + except Exception as e: + print(f"Error listing APIs: {e}") + return + + # Get API details + if args.get: + print(f"=== Getting API {args.get} ===\n") + try: + api = client.get_api(args.get) + print(f"Name: {api.get('name')}") + print(f"ID: {api.get('id')}") + print(f"Description: {api.get('description')}") + print(f"Created: {api.get('createdAt')}") + print(f"Updated: {api.get('updatedAt')}") + + # Get versions + versions = client.get_api_versions(args.get) + print(f"\nVersions: {len(versions)}") + for v in versions: + print(f" - {v.get('name')} (ID: {v.get('id')})") + except Exception as e: + print(f"Error getting API: {e}") + return + + # Create API with spec + if not args.spec_file: + print("Error: --spec-file is required when creating an API") + parser.print_help() + return + + # Load OpenAPI spec + if not os.path.exists(args.spec_file): + print(f"Error: Spec file not found: {args.spec_file}") + return + + try: + spec_data = load_openapi_spec(args.spec_file) + except Exception as e: + print(f"Error loading spec file: {e}") + return + + # Use description from spec if not provided + description = args.description or spec_data.get('info', {}).get('description', 'API managed by Postman') + + # Create the API + result = create_api_with_spec(client, args.name, description, spec_data) + + if result: + print("\nNext Steps:") + print(f" - Get API details: python {sys.argv[0]} --get {result['api_id']}") + print(f" - List all APIs: python {sys.argv[0]} --list") + print(f" - View in Postman: https://postman.postman.co/workspace/apis/{result['api_id']}") + + +if __name__ == '__main__': + main() diff --git a/scripts/manage_collection_workflow.py b/scripts/manage_collection_workflow.py new file mode 100755 index 0000000..bea01a5 --- /dev/null +++ b/scripts/manage_collection_workflow.py @@ -0,0 +1,348 @@ +#!/usr/bin/env python3 +""" +Generic collection-based API workflow 
management. + +Demonstrates Design phase workflow using collections as the primary artifact. +Collections can be created from scratch, imported from OpenAPI, or duplicated. + +Usage: + python manage_collection_workflow.py create --name="My API" --description="API description" + python manage_collection_workflow.py import --name="My API" --spec-file=openapi.json + python manage_collection_workflow.py duplicate --collection-id= --new-name="Copy" + python manage_collection_workflow.py compare --id1= --id2= + python manage_collection_workflow.py --help +""" + +import sys +import os +import json +import argparse + +# Add parent directory to path for imports +sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..')) + +from scripts.postman_client import PostmanClient +from scripts.config import PostmanConfig + + +def create_basic_collection(name, description, base_url="https://api.example.com"): + """Create a basic Postman collection structure.""" + return { + "info": { + "name": name, + "description": description, + "schema": "https://schema.getpostman.com/json/collection/v2.1.0/collection.json" + }, + "variable": [ + { + "key": "baseUrl", + "value": base_url, + "type": "string" + } + ], + "item": [] + } + + +def openapi_to_collection(spec_data, collection_name): + """Convert OpenAPI specification to Postman collection structure.""" + + # Get base URL from servers + base_url = "https://api.example.com" + if 'servers' in spec_data and spec_data['servers']: + base_url = spec_data['servers'][0].get('url', base_url) + + collection = { + "info": { + "name": collection_name, + "description": spec_data.get('info', {}).get('description', ''), + "schema": "https://schema.getpostman.com/json/collection/v2.1.0/collection.json" + }, + "variable": [ + { + "key": "baseUrl", + "value": base_url, + "type": "string" + } + ], + "item": [] + } + + # Convert paths to collection items + for path, methods in spec_data.get('paths', {}).items(): + for method, operation in 
methods.items(): + if method.upper() not in ['GET', 'POST', 'PUT', 'DELETE', 'PATCH', 'HEAD', 'OPTIONS']: + continue + + # Build request + request = { + "name": operation.get('summary', f"{method.upper()} {path}"), + "request": { + "method": method.upper(), + "header": [], + "url": { + "raw": f"{{{{baseUrl}}}}{path}", + "host": ["{{baseUrl}}"], + "path": [p for p in path.split('/') if p] + }, + "description": operation.get('description', '') + }, + "response": [] + } + + # Add query parameters + if 'parameters' in operation: + query = [] + for param in operation['parameters']: + if param.get('in') == 'query': + query.append({ + "key": param.get('name'), + "value": "", + "description": param.get('description', '') + }) + if query: + request['request']['url']['query'] = query + + # Add request body if present + if 'requestBody' in operation: + request['request']['header'].append({ + "key": "Content-Type", + "value": "application/json" + }) + request['request']['body'] = { + "mode": "raw", + "raw": "{}" + } + + collection['item'].append(request) + + return collection + + +def compare_collections(client, id1, id2): + """Compare two collections and identify differences.""" + + print(f"=== Comparing Collections ===\n") + + try: + col1 = client.get_collection(id1) + col2 = client.get_collection(id2) + + name1 = col1.get('info', {}).get('name', 'Collection 1') + name2 = col2.get('info', {}).get('name', 'Collection 2') + + # Count requests + def count_requests(collection): + count = 0 + for item in collection.get('item', []): + if 'request' in item: + count += 1 + elif 'item' in item: # Folder + count += len([i for i in item['item'] if 'request' in i]) + return count + + count1 = count_requests(col1) + count2 = count_requests(col2) + + # Extract request names + def get_request_names(collection): + names = [] + for item in collection.get('item', []): + if 'request' in item: + names.append(item.get('name')) + elif 'item' in item: # Folder + names.extend([i.get('name') for i 
in item['item'] if 'request' in i]) + return names + + names1 = set(get_request_names(col1)) + names2 = set(get_request_names(col2)) + + added = names2 - names1 + removed = names1 - names2 + common = names1 & names2 + + print(f"Collection 1: {name1}") + print(f" ID: {id1}") + print(f" Requests: {count1}") + print() + + print(f"Collection 2: {name2}") + print(f" ID: {id2}") + print(f" Requests: {count2}") + print() + + print("=== Differences ===") + print() + + if added: + print(f"✓ Added in Collection 2 ({len(added)} requests):") + for name in sorted(added): + print(f" + {name}") + print() + + if removed: + print(f"⚠ Removed from Collection 1 ({len(removed)} requests):") + for name in sorted(removed): + print(f" - {name}") + print() + + if common: + print(f"Unchanged: {len(common)} requests appear in both collections") + print() + + # Breaking changes analysis + if not removed: + print("✓ No breaking changes - all Collection 1 requests preserved") + else: + print("⚠ Breaking changes detected - removed requests may affect workflows") + + except Exception as e: + print(f"✗ Error comparing collections: {e}") + + +def main(): + """Main workflow for collection management.""" + + parser = argparse.ArgumentParser( + description='Create and manage Postman collections', + formatter_class=argparse.RawDescriptionHelpFormatter, + epilog=""" +Examples: + # Create a basic collection + python manage_collection_workflow.py create --name="My API" --description="API description" + + # Import collection from OpenAPI spec + python manage_collection_workflow.py import --name="My API" --spec-file=openapi.json + + # Duplicate a collection + python manage_collection_workflow.py duplicate --collection-id=abc-123 --new-name="Copy" + + # Compare two collections + python manage_collection_workflow.py compare --id1=abc-123 --id2=def-456 + """ + ) + + subparsers = parser.add_subparsers(dest='command', help='Command to execute') + + # Create command + create_parser = 
subparsers.add_parser('create', help='Create a basic collection') + create_parser.add_argument('--name', required=True, help='Collection name') + create_parser.add_argument('--description', required=True, help='Collection description') + create_parser.add_argument('--base-url', default='https://api.example.com', help='Base URL for API') + + # Import command + import_parser = subparsers.add_parser('import', help='Import collection from OpenAPI spec') + import_parser.add_argument('--name', required=True, help='Collection name') + import_parser.add_argument('--spec-file', required=True, help='Path to OpenAPI spec file') + + # Duplicate command + dup_parser = subparsers.add_parser('duplicate', help='Duplicate an existing collection') + dup_parser.add_argument('--collection-id', required=True, help='Collection ID to duplicate') + dup_parser.add_argument('--new-name', required=True, help='Name for duplicated collection') + + # Compare command + cmp_parser = subparsers.add_parser('compare', help='Compare two collections') + cmp_parser.add_argument('--id1', required=True, help='First collection ID') + cmp_parser.add_argument('--id2', required=True, help='Second collection ID') + + # List command + list_parser = subparsers.add_parser('list', help='List all collections') + + args = parser.parse_args() + + if not args.command: + parser.print_help() + return + + # Initialize client + client = PostmanClient() + + # Execute command + if args.command == 'create': + print(f"=== Creating Collection: {args.name} ===\n") + + collection = create_basic_collection(args.name, args.description, args.base_url) + + try: + result = client.create_collection(collection) + collection_id = result.get('uid') + print(f"✓ Collection created successfully!") + print(f" ID: {collection_id}") + print(f" Name: {result.get('name')}") + print() + print("Note: This is a basic empty collection.") + print("Add requests manually in Postman or use 'import' command to create from OpenAPI spec.") + print() + 
print(f"View in Postman: https://postman.postman.co/collections/{collection_id}") + except Exception as e: + print(f"✗ Error creating collection: {e}") + + elif args.command == 'import': + print(f"=== Importing Collection from OpenAPI Spec ===\n") + + if not os.path.exists(args.spec_file): + print(f"Error: Spec file not found: {args.spec_file}") + return + + try: + # Load spec + with open(args.spec_file, 'r') as f: + if args.spec_file.endswith('.yaml') or args.spec_file.endswith('.yml'): + try: + import yaml + spec_data = yaml.safe_load(f) + except ImportError: + print("Error: PyYAML not installed. Install with: pip install pyyaml") + return + else: + spec_data = json.load(f) + + # Convert to collection + collection = openapi_to_collection(spec_data, args.name) + + # Create collection + result = client.create_collection(collection) + collection_id = result.get('uid') + print(f"✓ Collection imported successfully!") + print(f" ID: {collection_id}") + print(f" Name: {result.get('name')}") + print(f" Requests: {len(collection['item'])}") + print() + print(f"View in Postman: https://postman.postman.co/collections/{collection_id}") + except Exception as e: + print(f"✗ Error importing collection: {e}") + + elif args.command == 'duplicate': + print(f"=== Duplicating Collection ===\n") + + try: + result = client.duplicate_collection(args.collection_id, args.new_name) + new_id = result.get('uid') + print(f"✓ Collection duplicated successfully!") + print(f" Original ID: {args.collection_id}") + print(f" New ID: {new_id}") + print(f" New Name: {args.new_name}") + print() + print(f"View in Postman: https://postman.postman.co/collections/{new_id}") + except Exception as e: + print(f"✗ Error duplicating collection: {e}") + + elif args.command == 'compare': + compare_collections(client, args.id1, args.id2) + + elif args.command == 'list': + print("=== Listing Collections ===\n") + try: + collections = client.list_collections() + if not collections: + print("No collections found in 
workspace") + else: + print(f"Found {len(collections)} collection(s):") + for i, col in enumerate(collections, 1): + print(f" {i}. {col.get('name')} (ID: {col.get('uid')})") + except Exception as e: + print(f"✗ Error listing collections: {e}") + + +if __name__ == '__main__': + main() diff --git a/scripts/manage_collections.py b/scripts/manage_collections.py new file mode 100644 index 0000000..8b622fd --- /dev/null +++ b/scripts/manage_collections.py @@ -0,0 +1,287 @@ +#!/usr/bin/env python3 +""" +Manage Postman collections: create, update, delete, and get details. +""" + +import sys +import os +import json +import argparse + +# Add parent directory to path for imports +sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..')) + +from scripts.config import PostmanConfig +from scripts.postman_client import PostmanClient + + +def create_minimal_collection(name, description=""): + """ + Create a minimal collection structure. + + Args: + name: Collection name + description: Collection description + + Returns: + Dictionary with minimal collection structure + """ + return { + "info": { + "name": name, + "description": description, + "schema": "https://schema.getpostman.com/json/collection/v2.1.0/collection.json" + }, + "item": [] + } + + +def add_request_to_collection(collection_data, request_name, method, url, description=""): + """ + Add a request to a collection. + + Args: + collection_data: Collection dictionary + request_name: Name of the request + method: HTTP method (GET, POST, etc.) 
+ url: Request URL + description: Request description + + Returns: + Updated collection data + """ + request = { + "name": request_name, + "request": { + "method": method, + "header": [], + "url": { + "raw": url, + "host": url.split("://")[1].split("/")[0].split(".") if "://" in url else [], + "path": url.split("://")[1].split("/")[1:] if "://" in url and "/" in url.split("://")[1] else [] + }, + "description": description + } + } + + collection_data["item"].append(request) + return collection_data + + +def main(): + parser = argparse.ArgumentParser( + description='Manage Postman collections', + formatter_class=argparse.RawDescriptionHelpFormatter, + epilog=""" +Examples: + # List all collections + python manage_collections.py --list + + # Get collection details + python manage_collections.py --get + + # Create a new collection + python manage_collections.py --create --name "My API Tests" --description "API test collection" + + # Create a collection with a request + python manage_collections.py --create --name "My API" --add-request '{"name": "Get Users", "method": "GET", "url": "https://api.example.com/users"}' + + # Update a collection name + python manage_collections.py --update --name "New Name" + + # Delete a collection + python manage_collections.py --delete + + # Duplicate a collection + python manage_collections.py --duplicate --name "Copy of Collection" + """ + ) + + # Action arguments + parser.add_argument('--list', action='store_true', + help='List all collections') + parser.add_argument('--get', metavar='COLLECTION_ID', + help='Get detailed information about a collection') + parser.add_argument('--create', action='store_true', + help='Create a new collection') + parser.add_argument('--update', metavar='COLLECTION_ID', + help='Update an existing collection') + parser.add_argument('--delete', metavar='COLLECTION_ID', + help='Delete a collection') + parser.add_argument('--duplicate', metavar='COLLECTION_ID', + help='Duplicate an existing collection') + + # 
Collection data arguments + parser.add_argument('--name', help='Collection name') + parser.add_argument('--description', help='Collection description', default='') + parser.add_argument('--add-request', metavar='REQUEST_JSON', + help='Add a request to the collection (JSON format: {"name": "...", "method": "...", "url": "..."})') + parser.add_argument('--workspace', metavar='WORKSPACE_ID', + help='Workspace ID (overrides POSTMAN_WORKSPACE_ID env var)') + + args = parser.parse_args() + + # Validate arguments + action_count = sum([args.list, bool(args.get), args.create, bool(args.update), + bool(args.delete), bool(args.duplicate)]) + if action_count == 0: + parser.error("Please specify an action: --list, --get, --create, --update, --delete, or --duplicate") + if action_count > 1: + parser.error("Please specify only one action at a time") + + try: + # Initialize client + config = PostmanConfig() + if args.workspace: + config.workspace_id = args.workspace + client = PostmanClient(config) + + # Execute action + if args.list: + print("Fetching collections...") + collections = client.list_collections() + + if not collections: + print("\nNo collections found.") + return + + print(f"\nFound {len(collections)} collection(s):\n") + for i, collection in enumerate(collections, 1): + print(f"{i}. 
{collection.get('name', 'Unnamed')}") + print(f" UID: {collection.get('uid', 'N/A')}") + if collection.get('owner'): + print(f" Owner: {collection['owner']}") + print() + + elif args.get: + print(f"Fetching collection details for {args.get}...") + collection = client.get_collection(args.get) + + print(f"\nCollection: {collection.get('info', {}).get('name', 'Unnamed')}") + print(f"UID: {collection.get('info', {}).get('_postman_id', 'N/A')}") + print(f"Schema: {collection.get('info', {}).get('schema', 'N/A')}") + + if collection.get('info', {}).get('description'): + print(f"\nDescription:\n{collection['info']['description']}") + + items = collection.get('item', []) + if items: + print(f"\nRequests ({len(items)}):") + for item in items: + print(f" - {item.get('name', 'Unnamed')}") + if 'request' in item: + method = item['request'].get('method', 'N/A') + url = item['request'].get('url', {}) + if isinstance(url, dict): + url = url.get('raw', 'N/A') + print(f" {method} {url}") + else: + print("\nNo requests in this collection.") + + variables = collection.get('variable', []) + if variables: + print(f"\nVariables ({len(variables)}):") + for var in variables: + print(f" - {var.get('key', 'N/A')}: {var.get('value', 'N/A')}") + + elif args.create: + if not args.name: + parser.error("--name is required when creating a collection") + + print(f"Creating collection '{args.name}'...") + collection_data = create_minimal_collection(args.name, args.description) + + # Add request if specified + if args.add_request: + try: + request_data = json.loads(args.add_request) + collection_data = add_request_to_collection( + collection_data, + request_data['name'], + request_data['method'], + request_data['url'], + request_data.get('description', '') + ) + print(f" Added request: {request_data['name']}") + except json.JSONDecodeError as e: + print(f"Error: Invalid JSON in --add-request: {e}") + return 1 + except KeyError as e: + print(f"Error: Missing required field in request data: {e}") + 
return 1 + + result = client.create_collection(collection_data, workspace_id=args.workspace) + + print(f"\nCollection created successfully!") + print(f"Name: {result.get('name', 'N/A')}") + print(f"UID: {result.get('uid', 'N/A')}") + + elif args.update: + if not args.name: + parser.error("--name is required when updating a collection") + + print(f"Fetching current collection data...") + current_collection = client.get_collection(args.update) + + # Update the name + current_collection['info']['name'] = args.name + + # Update description if provided + if args.description: + current_collection['info']['description'] = args.description + + print(f"Updating collection '{args.name}'...") + result = client.update_collection(args.update, current_collection) + + print(f"\nCollection updated successfully!") + print(f"Name: {result.get('name', 'N/A')}") + print(f"UID: {result.get('uid', 'N/A')}") + + elif args.delete: + print(f"Deleting collection {args.delete}...") + + # Get collection name first + try: + collection = client.get_collection(args.delete) + name = collection.get('info', {}).get('name', args.delete) + except: + name = args.delete + + client.delete_collection(args.delete) + print(f"\nCollection '{name}' deleted successfully!") + + elif args.duplicate: + if not args.name: + parser.error("--name is required when duplicating a collection") + + print(f"Fetching collection to duplicate...") + source_collection = client.get_collection(args.duplicate) + + # Create a new collection data based on the source + new_collection = { + "info": { + "name": args.name, + "description": source_collection.get('info', {}).get('description', ''), + "schema": source_collection.get('info', {}).get('schema', + 'https://schema.getpostman.com/json/collection/v2.1.0/collection.json') + }, + "item": source_collection.get('item', []), + "variable": source_collection.get('variable', []) + } + + print(f"Creating duplicate collection '{args.name}'...") + result = 
client.create_collection(new_collection, workspace_id=args.workspace) + + print(f"\nCollection duplicated successfully!") + print(f"Original: {source_collection.get('info', {}).get('name', 'N/A')} ({args.duplicate})") + print(f"Duplicate: {result.get('name', 'N/A')} ({result.get('uid', 'N/A')})") + + return 0 + + except Exception as e: + print(f"\nError: {e}") + return 1 + + +if __name__ == '__main__': + sys.exit(main()) diff --git a/scripts/manage_environments.py b/scripts/manage_environments.py new file mode 100644 index 0000000..1386878 --- /dev/null +++ b/scripts/manage_environments.py @@ -0,0 +1,289 @@ +#!/usr/bin/env python3 +""" +Manage Postman environments: create, update, delete, and get details. +""" + +import sys +import os +import json +import argparse + +# Add parent directory to path for imports +sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..')) + +from scripts.config import PostmanConfig +from scripts.postman_client import PostmanClient + + +def create_minimal_environment(name, values=None): + """ + Create a minimal environment structure. + + Args: + name: Environment name + values: List of environment variables (dicts with key, value, type, enabled) + + Returns: + Dictionary with minimal environment structure + """ + return { + "name": name, + "values": values or [] + } + + +def add_variable_to_environment(environment_data, key, value, var_type="default", enabled=True): + """ + Add a variable to an environment. 

    Args:
        environment_data: Environment dictionary
        key: Variable key/name
        value: Variable value
        var_type: Variable type (default, secret)
        enabled: Whether the variable is enabled

    Returns:
        Updated environment data
    """
    # Postman environment variable entries carry key/value plus metadata;
    # "type" is either "default" (plain text) or "secret" (masked when shown).
    variable = {
        "key": key,
        "value": value,
        "type": var_type,
        "enabled": enabled
    }

    # NOTE(review): mutates the caller's dict in place AND returns it; callers
    # in this file reassign from the return value, so both paths must stay.
    environment_data["values"].append(variable)
    return environment_data


def main():
    """CLI entry point: parse arguments and dispatch one environment action.

    Exactly one of --list/--get/--create/--update/--delete/--duplicate is
    accepted; exclusivity is enforced manually after parsing rather than via
    an argparse mutually-exclusive group.
    """
    parser = argparse.ArgumentParser(
        description='Manage Postman environments',
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
  # List all environments
  python manage_environments.py --list

  # Get environment details
  python manage_environments.py --get

  # Create a new environment
  python manage_environments.py --create --name "Development"

  # Create an environment with variables
  python manage_environments.py --create --name "Dev" --add-var '{"key": "API_URL", "value": "https://dev.api.com"}'

  # Add multiple variables (as JSON array)
  python manage_environments.py --create --name "Staging" --variables '[{"key":"API_URL","value":"https://staging.api.com"},{"key":"API_KEY","value":"secret123","type":"secret"}]'

  # Update an environment name
  python manage_environments.py --update --name "New Name"

  # Delete an environment
  python manage_environments.py --delete

  # Duplicate an environment
  python manage_environments.py --duplicate --name "Copy of Environment"
    """
    )

    # Action arguments. Flags taking an ID use metavar so --help shows the
    # expected positional value; --list/--create are boolean switches.
    parser.add_argument('--list', action='store_true',
                        help='List all environments')
    parser.add_argument('--get', metavar='ENVIRONMENT_ID',
                        help='Get detailed information about an environment')
    parser.add_argument('--create', action='store_true',
                        help='Create a new environment')
    parser.add_argument('--update', metavar='ENVIRONMENT_ID',
                        help='Update an existing environment')
    parser.add_argument('--delete', metavar='ENVIRONMENT_ID',
                        help='Delete an environment')
parser.add_argument('--duplicate', metavar='ENVIRONMENT_ID', + help='Duplicate an existing environment') + + # Environment data arguments + parser.add_argument('--name', help='Environment name') + parser.add_argument('--add-var', metavar='VARIABLE_JSON', + help='Add a variable (JSON format: {"key": "...", "value": "...", "type": "default|secret"})') + parser.add_argument('--variables', metavar='VARIABLES_JSON', + help='Add multiple variables (JSON array format)') + parser.add_argument('--workspace', metavar='WORKSPACE_ID', + help='Workspace ID (overrides POSTMAN_WORKSPACE_ID env var)') + + args = parser.parse_args() + + # Validate arguments + action_count = sum([args.list, bool(args.get), args.create, bool(args.update), + bool(args.delete), bool(args.duplicate)]) + if action_count == 0: + parser.error("Please specify an action: --list, --get, --create, --update, --delete, or --duplicate") + if action_count > 1: + parser.error("Please specify only one action at a time") + + try: + # Initialize client + config = PostmanConfig() + if args.workspace: + config.workspace_id = args.workspace + client = PostmanClient(config) + + # Execute action + if args.list: + print("Fetching environments...") + environments = client.list_environments() + + if not environments: + print("\nNo environments found.") + return + + print(f"\nFound {len(environments)} environment(s):\n") + for i, env in enumerate(environments, 1): + print(f"{i}. 
{env.get('name', 'Unnamed')}") + print(f" UID: {env.get('uid', 'N/A')}") + if env.get('owner'): + print(f" Owner: {env['owner']}") + print() + + elif args.get: + print(f"Fetching environment details for {args.get}...") + environment = client.get_environment(args.get) + + print(f"\nEnvironment: {environment.get('name', 'Unnamed')}") + print(f"UID: {environment.get('uid', 'N/A')}") + + values = environment.get('values', []) + if values: + print(f"\nVariables ({len(values)}):") + for var in values: + key = var.get('key', 'N/A') + value = var.get('value', 'N/A') + var_type = var.get('type', 'default') + enabled = var.get('enabled', True) + + # Mask secret values + if var_type == 'secret': + value = '********' + + status = '✓' if enabled else '✗' + type_indicator = ' [secret]' if var_type == 'secret' else '' + print(f" {status} {key}: {value}{type_indicator}") + else: + print("\nNo variables in this environment.") + + elif args.create: + if not args.name: + parser.error("--name is required when creating an environment") + + print(f"Creating environment '{args.name}'...") + environment_data = create_minimal_environment(args.name) + + # Add single variable if specified + if args.add_var: + try: + var_data = json.loads(args.add_var) + environment_data = add_variable_to_environment( + environment_data, + var_data['key'], + var_data['value'], + var_data.get('type', 'default'), + var_data.get('enabled', True) + ) + print(f" Added variable: {var_data['key']}") + except json.JSONDecodeError as e: + print(f"Error: Invalid JSON in --add-var: {e}") + return 1 + except KeyError as e: + print(f"Error: Missing required field in variable data: {e}") + return 1 + + # Add multiple variables if specified + if args.variables: + try: + variables = json.loads(args.variables) + if not isinstance(variables, list): + print("Error: --variables must be a JSON array") + return 1 + + for var_data in variables: + environment_data = add_variable_to_environment( + environment_data, + var_data['key'], 
+ var_data['value'], + var_data.get('type', 'default'), + var_data.get('enabled', True) + ) + print(f" Added variable: {var_data['key']}") + except json.JSONDecodeError as e: + print(f"Error: Invalid JSON in --variables: {e}") + return 1 + except KeyError as e: + print(f"Error: Missing required field in variable data: {e}") + return 1 + + result = client.create_environment(environment_data, workspace_id=args.workspace) + + print(f"\nEnvironment created successfully!") + print(f"Name: {result.get('name', 'N/A')}") + print(f"UID: {result.get('uid', 'N/A')}") + + elif args.update: + if not args.name: + parser.error("--name is required when updating an environment") + + print(f"Fetching current environment data...") + current_environment = client.get_environment(args.update) + + # Update the name + current_environment['name'] = args.name + + print(f"Updating environment '{args.name}'...") + result = client.update_environment(args.update, current_environment) + + print(f"\nEnvironment updated successfully!") + print(f"Name: {result.get('name', 'N/A')}") + print(f"UID: {result.get('uid', 'N/A')}") + + elif args.delete: + print(f"Deleting environment {args.delete}...") + + # Get environment name first + try: + environment = client.get_environment(args.delete) + name = environment.get('name', args.delete) + except: + name = args.delete + + client.delete_environment(args.delete) + print(f"\nEnvironment '{name}' deleted successfully!") + + elif args.duplicate: + if not args.name: + parser.error("--name is required when duplicating an environment") + + print(f"Fetching environment to duplicate...") + source_environment = client.get_environment(args.duplicate) + + # Create a new environment data based on the source + new_environment = { + "name": args.name, + "values": source_environment.get('values', []) + } + + print(f"Creating duplicate environment '{args.name}'...") + result = client.create_environment(new_environment, workspace_id=args.workspace) + + print(f"\nEnvironment 
duplicated successfully!") + print(f"Original: {source_environment.get('name', 'N/A')} ({args.duplicate})") + print(f"Duplicate: {result.get('name', 'N/A')} ({result.get('uid', 'N/A')})") + + return 0 + + except Exception as e: + print(f"\nError: {e}") + return 1 + + +if __name__ == '__main__': + sys.exit(main()) diff --git a/scripts/manage_mocks.py b/scripts/manage_mocks.py new file mode 100755 index 0000000..a550fba --- /dev/null +++ b/scripts/manage_mocks.py @@ -0,0 +1,280 @@ +#!/usr/bin/env python3 +""" +Manage Postman mock servers. + +Mock servers allow you to simulate API behavior for development and testing. +They can be created from collections and respond with example data. + +Usage: + python manage_mocks.py --list + python manage_mocks.py --get + python manage_mocks.py --create --name="My Mock" --collection= + python manage_mocks.py --delete + python manage_mocks.py --help +""" + +import sys +import os +import argparse +import json + +# Add parent directory to path for imports +sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..')) + +from scripts.postman_client import PostmanClient +from scripts.config import PostmanConfig +from utils.formatters import format_table, format_json + + +def list_mocks(client): + """List all mock servers in workspace.""" + print("=== Mock Servers ===\n") + + try: + mocks = client.list_mocks() + + if not mocks: + print("No mock servers found in workspace") + return + + print(f"Found {len(mocks)} mock server(s):\n") + + for i, mock in enumerate(mocks, 1): + print(f"{i}. 
{mock.get('name', 'Unnamed Mock')}") + print(f" ID: {mock.get('id')}") + print(f" Mock URL: {mock.get('mockUrl', 'N/A')}") + print(f" Collection: {mock.get('collection', 'N/A')}") + if mock.get('environment'): + print(f" Environment: {mock.get('environment')}") + print(f" Private: {mock.get('private', False)}") + print() + + except Exception as e: + print(f"Error listing mock servers: {e}") + sys.exit(1) + + +def get_mock(client, mock_id): + """Get detailed information about a mock server.""" + print(f"=== Mock Server Details ===\n") + + try: + mock = client.get_mock(mock_id) + + print(f"Name: {mock.get('name', 'Unnamed Mock')}") + print(f"ID: {mock.get('id')}") + print(f"Mock URL: {mock.get('mockUrl', 'N/A')}") + print(f"Collection ID: {mock.get('collection', 'N/A')}") + + if mock.get('environment'): + print(f"Environment ID: {mock.get('environment')}") + + print(f"Private: {mock.get('private', False)}") + + if mock.get('config'): + config = mock.get('config', {}) + print(f"\nConfiguration:") + if config.get('headers'): + print(f" Headers: {len(config.get('headers', []))} configured") + if config.get('delay'): + print(f" Delay: {config.get('delay')}ms") + + print(f"\nCreated: {mock.get('createdAt', 'N/A')}") + print(f"Updated: {mock.get('updatedAt', 'N/A')}") + + print(f"\n📋 Mock URL: {mock.get('mockUrl', 'N/A')}") + print(" Use this URL to make requests to your mock server") + + except Exception as e: + print(f"Error getting mock server: {e}") + sys.exit(1) + + +def create_mock(client, name, collection_id, environment_id=None, private=False, delay=None): + """Create a new mock server.""" + print(f"=== Creating Mock Server: {name} ===\n") + + try: + # Build mock data + mock_data = { + "name": name, + "collection": collection_id, + "private": private + } + + if environment_id: + mock_data["environment"] = environment_id + + if delay is not None: + mock_data["config"] = { + "delay": { + "type": "fixed", + "value": delay + } + } + + mock = 
client.create_mock(mock_data) + + print(f"✓ Mock server created successfully!") + print(f" Name: {mock.get('name')}") + print(f" ID: {mock.get('id')}") + print(f" Mock URL: {mock.get('mockUrl')}") + print(f" Collection: {collection_id}") + + if environment_id: + print(f" Environment: {environment_id}") + + print(f" Private: {private}") + + if delay: + print(f" Response Delay: {delay}ms") + + print(f"\n📋 Mock URL: {mock.get('mockUrl')}") + print(" Use this URL to make requests to your mock server") + print("\nExample:") + print(f" curl {mock.get('mockUrl')}/your-endpoint") + + except Exception as e: + print(f"Error creating mock server: {e}") + sys.exit(1) + + +def delete_mock(client, mock_id): + """Delete a mock server.""" + print(f"=== Deleting Mock Server {mock_id} ===\n") + + try: + # Get mock details first + mock = client.get_mock(mock_id) + print(f"Mock to delete: {mock.get('name', 'Unnamed Mock')}") + print(f"Mock URL: {mock.get('mockUrl', 'N/A')}\n") + + # Confirm deletion + response = input("Are you sure you want to delete this mock server? (yes/no): ") + if response.lower() not in ['yes', 'y']: + print("Deletion cancelled") + return + + client.delete_mock(mock_id) + + print(f"✓ Mock server deleted successfully!") + + except Exception as e: + print(f"Error deleting mock server: {e}") + sys.exit(1) + + +def update_mock(client, mock_id, name=None, private=None): + """Update a mock server.""" + print(f"=== Updating Mock Server {mock_id} ===\n") + + try: + # Build update data + update_data = {} + + if name: + update_data["name"] = name + + if private is not None: + update_data["private"] = private + + if not update_data: + print("No updates specified. 
Use --name or --private flags.") + return + + mock = client.update_mock(mock_id, update_data) + + print(f"✓ Mock server updated successfully!") + print(f" Name: {mock.get('name')}") + print(f" ID: {mock.get('id')}") + print(f" Mock URL: {mock.get('mockUrl')}") + print(f" Private: {mock.get('private', False)}") + + except Exception as e: + print(f"Error updating mock server: {e}") + sys.exit(1) + + +def main(): + """Main entry point for mock server management.""" + + parser = argparse.ArgumentParser( + description='Manage Postman mock servers', + formatter_class=argparse.RawDescriptionHelpFormatter, + epilog=""" +Examples: + # List all mock servers + python manage_mocks.py --list + + # Get mock server details + python manage_mocks.py --get abc-123 + + # Create a mock server + python manage_mocks.py --create --name="Payment Mock" --collection=col-456 + + # Create a private mock with delay + python manage_mocks.py --create --name="Test Mock" --collection=col-456 --private --delay=1000 + + # Update mock server + python manage_mocks.py --update abc-123 --name="New Name" --private + + # Delete mock server + python manage_mocks.py --delete abc-123 + """ + ) + + # Operation flags + parser.add_argument('--list', action='store_true', help='List all mock servers') + parser.add_argument('--get', metavar='MOCK_ID', help='Get mock server details') + parser.add_argument('--create', action='store_true', help='Create a new mock server') + parser.add_argument('--update', metavar='MOCK_ID', help='Update a mock server') + parser.add_argument('--delete', metavar='MOCK_ID', help='Delete a mock server') + + # Create/Update parameters + parser.add_argument('--name', help='Mock server name') + parser.add_argument('--collection', help='Collection ID to mock') + parser.add_argument('--environment', help='Environment ID (optional)') + parser.add_argument('--private', action='store_true', help='Make mock server private') + parser.add_argument('--delay', type=int, help='Response delay in 
milliseconds') + + args = parser.parse_args() + + # Check if any operation is specified + if not any([args.list, args.get, args.create, args.update, args.delete]): + parser.print_help() + return + + # Initialize client + client = PostmanClient() + + # Execute operations + if args.list: + list_mocks(client) + + elif args.get: + get_mock(client, args.get) + + elif args.create: + if not args.name or not args.collection: + print("Error: --name and --collection are required for creating a mock server") + parser.print_help() + sys.exit(1) + + create_mock( + client, + args.name, + args.collection, + args.environment, + args.private, + args.delay + ) + + elif args.update: + update_mock(client, args.update, args.name, args.private) + + elif args.delete: + delete_mock(client, args.delete) + + +if __name__ == '__main__': + main() diff --git a/scripts/manage_monitors.py b/scripts/manage_monitors.py new file mode 100755 index 0000000..19e2a9b --- /dev/null +++ b/scripts/manage_monitors.py @@ -0,0 +1,336 @@ +#!/usr/bin/env python3 +""" +Manage Postman Monitors - Create, Update, Delete, and Analyze +Part of Phase 6: Observe - Monitor Operations +""" + +import sys +import os +import argparse +import json +from datetime import datetime + +# Add parent directory to path for imports +sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..')) + +from scripts.config import get_config +from scripts.postman_client import PostmanClient +from utils.formatters import format_error + + +def list_monitors(client, verbose=False): + """List all monitors in the workspace""" + try: + monitors = client.list_monitors() + + if not monitors: + print("No monitors found in this workspace.") + return + + print(f"Found {len(monitors)} monitor(s)") + print("=" * 80) + print() + + for monitor in monitors: + status = "✓ Active" if monitor.get('active', False) else "✗ Inactive" + print(f"{status} {monitor.get('name', 'Unnamed Monitor')}") + print(f" ID: {monitor.get('id')}") + print(f" UID: 
{monitor.get('uid')}") + + if verbose: + if monitor.get('collectionUid'): + print(f" Collection: {monitor.get('collectionUid')}") + if monitor.get('environmentUid'): + print(f" Environment: {monitor.get('environmentUid')}") + if monitor.get('schedule'): + schedule = monitor.get('schedule', {}) + print(f" Schedule: {schedule.get('cron', 'Not set')}") + if monitor.get('lastRun'): + print(f" Last Run: {monitor.get('lastRun', {}).get('finishedAt', 'Never')}") + + print() + + except Exception as e: + print(format_error("listing monitors", e)) + sys.exit(1) + + +def get_monitor_details(client, monitor_id): + """Get detailed information about a specific monitor""" + try: + monitor = client.get_monitor(monitor_id) + + if not monitor: + print(f"Monitor {monitor_id} not found.") + sys.exit(1) + + print("Monitor Details") + print("=" * 80) + print() + print(f"Name: {monitor.get('name', 'Unnamed')}") + print(f"ID: {monitor.get('id')}") + print(f"UID: {monitor.get('uid')}") + print(f"Status: {'Active' if monitor.get('active', False) else 'Inactive'}") + print() + + print("Configuration:") + print(f" Collection: {monitor.get('collection')}") + if monitor.get('environment'): + print(f" Environment: {monitor.get('environment')}") + + if monitor.get('schedule'): + schedule = monitor.get('schedule', {}) + print(f" Schedule: {schedule.get('cron', 'Not configured')}") + print(f" Timezone: {schedule.get('timezone', 'UTC')}") + + print() + + if monitor.get('lastRun'): + last_run = monitor.get('lastRun', {}) + print("Last Run:") + print(f" Status: {last_run.get('status', 'Unknown')}") + print(f" Started: {last_run.get('startedAt', 'N/A')}") + print(f" Finished: {last_run.get('finishedAt', 'N/A')}") + + stats = last_run.get('stats', {}) + if stats: + print(f" Assertions: {stats.get('assertions', {}).get('total', 0)} total, " + f"{stats.get('assertions', {}).get('failed', 0)} failed") + + print() + + except Exception as e: + print(format_error("getting monitor details", e)) + sys.exit(1) + 

def create_monitor(client, name, collection_uid, environment_uid=None, schedule_cron=None):
    """Create a new monitor.

    Args:
        client: PostmanClient instance used for the API call.
        name: Display name for the monitor.
        collection_uid: UID of the collection the monitor will run.
        environment_uid: Optional environment UID to run against.
        schedule_cron: Optional cron expression; when given, the schedule
            timezone is hardcoded to UTC.

    Exits the process with status 1 on any API error.
    """
    try:
        monitor_data = {
            'name': name,
            'collection': collection_uid
        }

        # Optional fields are only sent when supplied so the API applies
        # its own defaults otherwise.
        if environment_uid:
            monitor_data['environment'] = environment_uid

        if schedule_cron:
            monitor_data['schedule'] = {
                'cron': schedule_cron,
                'timezone': 'UTC'
            }

        print(f"Creating monitor '{name}'...")
        monitor = client.create_monitor(monitor_data)

        print("✓ Monitor created successfully!")
        print(f" Name: {monitor.get('name')}")
        print(f" ID: {monitor.get('id')}")
        print(f" UID: {monitor.get('uid')}")
        print()

    except Exception as e:
        # Convention in this script: report via format_error and exit non-zero.
        print(format_error("creating monitor", e))
        sys.exit(1)


def update_monitor(client, monitor_id, name=None, active=None, schedule_cron=None):
    """Update an existing monitor.

    Only the fields passed as non-None are sent; calling with no updates is a
    no-op that prints a notice and returns. `active` is tri-state (None means
    "leave unchanged"). Exits with status 1 on API error.
    """
    try:
        monitor_data = {}

        # NOTE(review): `if name:` means an empty-string rename is silently
        # ignored — presumably intentional, but worth confirming.
        if name:
            monitor_data['name'] = name
        if active is not None:
            monitor_data['active'] = active
        if schedule_cron:
            monitor_data['schedule'] = {
                'cron': schedule_cron,
                'timezone': 'UTC'
            }

        if not monitor_data:
            print("No updates specified.")
            return

        print(f"Updating monitor {monitor_id}...")
        monitor = client.update_monitor(monitor_id, monitor_data)

        print("✓ Monitor updated successfully!")
        print(f" Name: {monitor.get('name')}")
        print(f" Status: {'Active' if monitor.get('active', False) else 'Inactive'}")
        print()

    except Exception as e:
        print(format_error("updating monitor", e))
        sys.exit(1)


def delete_monitor(client, monitor_id, confirm=False):
    """Delete a monitor.

    Refuses to act (and exits with status 1) unless `confirm` is truthy —
    the CLI maps this to the --confirm flag. Exits with status 1 on API error.
    """
    try:
        if not confirm:
            print(f"Are you sure you want to delete monitor {monitor_id}?")
            print("Use --confirm to proceed.")
            sys.exit(1)

        print(f"Deleting monitor {monitor_id}...")
        client.delete_monitor(monitor_id)

        print("✓ Monitor deleted successfully!")
        print()

    except Exception as e:
        print(format_error("deleting monitor", e))
        sys.exit(1)


def 
analyze_monitor_runs(client, monitor_id, limit=10): + """Analyze monitor run history""" + try: + runs = client.get_monitor_runs(monitor_id, limit=limit) + + if not runs: + print(f"No run history found for monitor {monitor_id}.") + return + + print(f"Monitor Run History (Last {len(runs)} runs)") + print("=" * 80) + print() + + total_runs = len(runs) + successful_runs = sum(1 for run in runs if run.get('status') == 'success') + failed_runs = total_runs - successful_runs + + print(f"Summary:") + print(f" Total Runs: {total_runs}") + print(f" Successful: {successful_runs} ({successful_runs/total_runs*100:.1f}%)") + print(f" Failed: {failed_runs} ({failed_runs/total_runs*100:.1f}%)") + print() + + print("Recent Runs:") + print("-" * 80) + + for i, run in enumerate(runs, 1): + status = run.get('status', 'unknown') + status_icon = "✓" if status == 'success' else "✗" + + started = run.get('startedAt', 'N/A') + finished = run.get('finishedAt', 'N/A') + + # Calculate duration if both timestamps available + duration = "N/A" + if started != 'N/A' and finished != 'N/A': + try: + start_dt = datetime.fromisoformat(started.replace('Z', '+00:00')) + finish_dt = datetime.fromisoformat(finished.replace('Z', '+00:00')) + duration_seconds = (finish_dt - start_dt).total_seconds() + duration = f"{duration_seconds:.1f}s" + except: + pass + + print(f"{i}. 
{status_icon} {status.upper()}") + print(f" Started: {started}") + print(f" Duration: {duration}") + + stats = run.get('stats', {}) + if stats: + assertions = stats.get('assertions', {}) + requests = stats.get('requests', {}) + print(f" Requests: {requests.get('total', 0)} total, {requests.get('failed', 0)} failed") + print(f" Assertions: {assertions.get('total', 0)} total, {assertions.get('failed', 0)} failed") + + print() + + except Exception as e: + print(format_error("analyzing monitor runs", e)) + sys.exit(1) + + +def main(): + parser = argparse.ArgumentParser( + description='Manage Postman Monitors', + formatter_class=argparse.RawDescriptionHelpFormatter, + epilog=""" +Examples: + # List all monitors + python manage_monitors.py --list + + # Get monitor details + python manage_monitors.py --details + + # Create a monitor + python manage_monitors.py --create --name "API Health Check" --collection + + # Update a monitor + python manage_monitors.py --update --name "New Name" --activate + + # Delete a monitor + python manage_monitors.py --delete --confirm + + # Analyze monitor runs + python manage_monitors.py --analyze --limit 20 + """ + ) + + # Actions + parser.add_argument('--list', action='store_true', help='List all monitors') + parser.add_argument('--details', metavar='ID', help='Get monitor details') + parser.add_argument('--create', action='store_true', help='Create a new monitor') + parser.add_argument('--update', metavar='ID', help='Update a monitor') + parser.add_argument('--delete', metavar='ID', help='Delete a monitor') + parser.add_argument('--analyze', metavar='ID', help='Analyze monitor run history') + + # Options + parser.add_argument('--name', help='Monitor name') + parser.add_argument('--collection', help='Collection UID') + parser.add_argument('--environment', help='Environment UID') + parser.add_argument('--schedule', help='Cron schedule (e.g., "0 */6 * * *" for every 6 hours)') + parser.add_argument('--activate', action='store_true', 
help='Activate the monitor') + parser.add_argument('--deactivate', action='store_true', help='Deactivate the monitor') + parser.add_argument('--confirm', action='store_true', help='Confirm deletion') + parser.add_argument('--limit', type=int, default=10, help='Number of runs to analyze (default: 10)') + parser.add_argument('--verbose', '-v', action='store_true', help='Verbose output') + + args = parser.parse_args() + + # Validate arguments + if not any([args.list, args.details, args.create, args.update, args.delete, args.analyze]): + parser.print_help() + sys.exit(1) + + # Get configuration and create client + config = get_config() + client = PostmanClient(config) + + # Execute action + if args.list: + list_monitors(client, verbose=args.verbose) + + elif args.details: + get_monitor_details(client, args.details) + + elif args.create: + if not args.name or not args.collection: + print("Error: --name and --collection are required for creating a monitor") + sys.exit(1) + create_monitor(client, args.name, args.collection, args.environment, args.schedule) + + elif args.update: + active = None + if args.activate: + active = True + elif args.deactivate: + active = False + update_monitor(client, args.update, args.name, active, args.schedule) + + elif args.delete: + delete_monitor(client, args.delete, args.confirm) + + elif args.analyze: + analyze_monitor_runs(client, args.analyze, args.limit) + + +if __name__ == '__main__': + main() diff --git a/scripts/manage_spec.py b/scripts/manage_spec.py new file mode 100755 index 0000000..8dd9a8d --- /dev/null +++ b/scripts/manage_spec.py @@ -0,0 +1,265 @@ +#!/usr/bin/env python3 +""" +Generic Spec Hub management for OpenAPI and AsyncAPI specifications. 
+ +Spec Hub provides direct specification management with support for: +- OpenAPI 3.0 and AsyncAPI 2.0 +- Single-file and multi-file specifications +- Bidirectional collection generation +- Better version control integration + +Usage: + python manage_spec.py create --name="My API" --file=openapi.json + python manage_spec.py list + python manage_spec.py get --spec-id= + python manage_spec.py generate-collection --spec-id= --collection-name="My Collection" + python manage_spec.py --help +""" + +import sys +import os +import json +import argparse + +# Add parent directory to path for imports +sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..')) + +from scripts.postman_client import PostmanClient +from scripts.config import PostmanConfig + + +def load_spec_file(file_path): + """Load specification from a file (JSON or YAML).""" + with open(file_path, 'r') as f: + content = f.read() + + # Try to detect format + if file_path.endswith('.yaml') or file_path.endswith('.yml'): + try: + import yaml + # Verify it's valid YAML and return as string + yaml.safe_load(content) + return content, 'yaml' + except ImportError: + print("Warning: PyYAML not installed. 
def load_spec_file(file_path):
    """Load an API specification (JSON or YAML) from disk.

    Args:
        file_path: Path to the specification file.

    Returns:
        Tuple of (raw file content as str, format label 'yaml' or 'json').
        The raw text is returned unmodified so it can be uploaded verbatim.

    Raises:
        OSError: If the file cannot be read.
        json.JSONDecodeError / yaml.YAMLError: If validation fails
            (callers such as create_spec() catch and report these).
    """
    with open(file_path, 'r') as f:
        content = f.read()

    if file_path.endswith(('.yaml', '.yml')):
        try:
            import yaml
        except ImportError:
            # Without PyYAML we cannot validate, but the content is still
            # YAML -- label it as such so the uploaded file keeps a matching
            # .yaml extension (previously it was mislabelled 'json').
            print("Warning: PyYAML not installed. Skipping YAML validation.")
            return content, 'yaml'
        yaml.safe_load(content)  # validate only; the raw text is uploaded
        return content, 'yaml'

    json.loads(content)  # validate only; the raw text is uploaded
    return content, 'json'


def create_spec(client, name, description, file_path):
    """Create a specification in Spec Hub.

    Args:
        client: PostmanClient (or compatible) instance.
        name: Specification name.
        description: Optional description (a default is derived from name).
        file_path: Path to the OpenAPI/AsyncAPI file to upload.

    Returns:
        The new spec ID on success, None on failure (errors are printed).
    """
    print(f"=== Creating Spec: {name} ===\n")

    # Load and validate the spec file before touching the API.
    try:
        spec_content, file_format = load_spec_file(file_path)
        file_name = f"spec.{file_format}"  # file_format is 'yaml' or 'json'
    except Exception as e:
        print(f"✗ Error loading spec file: {e}")
        return None

    spec_data = {
        "name": name,
        "description": description or f"API specification: {name}",
        "files": [
            {
                "path": file_name,
                "content": spec_content,
                "root": True  # mark as the root document of the spec
            }
        ]
    }

    try:
        spec = client.create_spec(spec_data)
        spec_id = spec.get('id')
        print(f"✓ Specification created successfully in Spec Hub!")
        print(f"  Spec ID: {spec_id}")
        print(f"  Name: {spec.get('name')}")
        print(f"  Files: {len(spec.get('files', []))}")
        print()
        return spec_id
    except Exception as e:
        print(f"✗ Error creating specification: {e}")
        return None


def get_spec(client, spec_id):
    """Retrieve a specification and print a human-readable summary.

    Best-effort: if the root file parses as a JSON object it also prints
    the spec type, title, version, endpoint and schema counts.
    """
    print(f"=== Getting Spec {spec_id} ===\n")

    try:
        spec = client.get_spec(spec_id)
        files = spec.get('files', [])

        print(f"✓ Specification retrieved successfully!")
        print(f"  Name: {spec.get('name')}")
        print(f"  Description: {spec.get('description')}")
        print(f"  Files: {len(files)}")
        print()

        # Show file details
        for file_obj in files:
            root_marker = "[ROOT] " if file_obj.get('root') else ""
            size = len(file_obj.get('content', ''))
            print(f"  - {root_marker}{file_obj.get('path')} ({size:,} bytes)")

        # Parse and show spec details if the first file is JSON
        if files:
            first_file = files[0]
            try:
                spec_parsed = json.loads(first_file.get('content', '{}'))
                print(f"\n  Spec Type: {spec_parsed.get('openapi', spec_parsed.get('asyncapi', 'Unknown'))}")
                if 'info' in spec_parsed:
                    print(f"  Title: {spec_parsed.get('info', {}).get('title')}")
                    print(f"  Version: {spec_parsed.get('info', {}).get('version')}")
                if 'paths' in spec_parsed:
                    print(f"  Endpoints: {len(spec_parsed.get('paths', {}))}")
                if 'components' in spec_parsed:
                    print(f"  Schemas: {len(spec_parsed.get('components', {}).get('schemas', {}))}")
            except (ValueError, AttributeError, TypeError):
                # Not JSON, or not shaped like an OpenAPI/AsyncAPI document.
                # (Previously a bare `except:`, which also swallowed
                # KeyboardInterrupt/SystemExit.)
                pass
        print()
    except Exception as e:
        print(f"✗ Error retrieving specification: {e}")


def list_specs(client, limit=10):
    """List specifications in the workspace (up to ``limit``)."""
    print("=== Listing Specifications ===\n")

    try:
        specs = client.list_specs(limit=limit)
        if not specs:
            print("No specifications found in workspace")
        else:
            print(f"Found {len(specs)} specification(s):")
            for i, s in enumerate(specs, 1):
                print(f"  {i}. {s.get('name')}")
                print(f"     ID: {s.get('id')}")
                if s.get('description'):
                    print(f"     {s.get('description')}")
                print()
    except Exception as e:
        print(f"✗ Error listing specifications: {e}")


def generate_collection_from_spec(client, spec_id, collection_name):
    """Ask Postman to generate a collection from a specification.

    Generation may be asynchronous: a 'pending' status means the generated
    collection will appear on the spec later.
    """
    print(f"=== Generating Collection from Spec {spec_id} ===\n")

    try:
        result = client.generate_collection_from_spec(
            spec_id,
            collection_name=collection_name
        )

        status = result.get('status', 'unknown')
        print(f"✓ Collection generation initiated!")
        print(f"  Status: {status}")

        if status == 'completed' or 'data' in result:
            data = result.get('data', {})
            collection_id = data.get('collectionId')
            if collection_id:
                print(f"  Collection ID: {collection_id}")
                print(f"  Collection Name: {collection_name}")
                print(f"  View in Postman: https://postman.postman.co/collections/{collection_id}")
        elif status == 'pending':
            print(f"  Generation is in progress...")
            print(f"  Check the spec later to see generated collections")
        print()
    except Exception as e:
        print(f"✗ Error generating collection: {e}")
        print("  Note: Collection generation may be asynchronous")
def main():
    """CLI entry point for Spec Hub management."""

    parser = argparse.ArgumentParser(
        description='Create and manage API specifications in Postman Spec Hub',
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
  # Create a spec from file
  python manage_spec.py create --name="Payment API" --file=openapi.json

  # List all specs
  python manage_spec.py list

  # Get spec details
  python manage_spec.py get --spec-id=abc-123

  # Generate collection from spec
  python manage_spec.py generate-collection --spec-id=abc-123 --collection-name="My API Collection"
        """
    )

    subparsers = parser.add_subparsers(dest='command', help='Command to execute')

    # Create command
    create_parser = subparsers.add_parser('create', help='Create a new specification')
    create_parser.add_argument('--name', required=True, help='Specification name')
    create_parser.add_argument('--description', help='Specification description')
    create_parser.add_argument('--file', required=True, help='Path to OpenAPI/AsyncAPI spec file')

    # List command
    list_parser = subparsers.add_parser('list', help='List all specifications')
    list_parser.add_argument('--limit', type=int, default=10, help='Maximum number of specs to return')

    # Get command
    get_parser = subparsers.add_parser('get', help='Get specification details')
    get_parser.add_argument('--spec-id', required=True, help='Specification ID')

    # Generate collection command
    gen_parser = subparsers.add_parser('generate-collection', help='Generate collection from spec')
    gen_parser.add_argument('--spec-id', required=True, help='Specification ID')
    gen_parser.add_argument('--collection-name', required=True, help='Name for generated collection')

    args = parser.parse_args()

    if not args.command:
        parser.print_help()
        return

    # Initialize client (reads configuration from the environment)
    client = PostmanClient()

    # Dispatch on subcommand
    if args.command == 'create':
        if not os.path.exists(args.file):
            print(f"Error: Spec file not found: {args.file}")
            return

        spec_id = create_spec(client, args.name, args.description, args.file)

        if spec_id:
            print("=== Spec Created Successfully ===")
            print(f"Spec ID: {spec_id}")
            print()
            print("Next Steps:")
            print(f"  - View details: python {sys.argv[0]} get --spec-id {spec_id}")
            print(f"  - Generate collection: python {sys.argv[0]} generate-collection --spec-id {spec_id} --collection-name='My Collection'")
            print(f"  - View in Postman: https://postman.postman.co/workspace/specs")

    elif args.command == 'list':
        list_specs(client, args.limit)

    elif args.command == 'get':
        get_spec(client, args.spec_id)

    elif args.command == 'generate-collection':
        generate_collection_from_spec(client, args.spec_id, args.collection_name)


if __name__ == '__main__':
    main()
"""
Postman API client with retry logic and error handling.
"""

import sys
import os
import warnings
import subprocess
import json

# Add parent directory to path for imports
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))

from scripts.config import PostmanConfig
from utils.retry_handler import RetryHandler
from utils.exceptions import (
    create_exception_from_response,
    NetworkError,
    TimeoutError
)


class PostmanClient:
    """
    Client for interacting with the Postman API.

    Handles authentication, retries, and response parsing. Supports
    Postman v10+ APIs with backward-compatibility detection.
    """

    def __init__(self, config=None):
        """Create a client; ``config`` defaults to ``PostmanConfig()``.

        Raises whatever ``config.validate()`` raises on bad configuration.
        """
        self.config = config or PostmanConfig()
        self.config.validate()
        self.retry_handler = RetryHandler(max_retries=self.config.max_retries)
        self.api_version = None          # detected lazily on the first request
        self.api_version_warned = False  # warn at most once about old versions

    def _detect_api_version(self, response):
        """Record the server API version from ``response`` (best effort).

        Primarily for logging and user awareness; emits a one-time warning
        when the body-shape heuristic reports a pre-v10 server.

        Args:
            response: response-like object with ``headers`` and ``json()``.
        """
        # Prefer the explicit X-API-Version header when present.
        version_header = response.headers.get('X-API-Version')
        if version_header:
            self.api_version = version_header
            return

        # No header: infer from the response body shape.
        try:
            data = response.json()
            # v10+ typically includes 'meta' fields
            if self._has_v10_structure(data):
                self.api_version = 'v10+'
            else:
                self.api_version = 'v9-or-earlier'
        except Exception:
            # Previously a bare `except:` -- that also swallowed
            # KeyboardInterrupt/SystemExit. A parse failure simply means
            # the version cannot be determined.
            self.api_version = 'unknown'

        # Warn if the detected version does not look like v10+.
        if self.api_version and not str(self.api_version).startswith('v10') and not self.api_version_warned:
            self._warn_about_old_version()

    def _has_v10_structure(self, data):
        """Return True if ``data`` carries v10+ structure indicators.

        Note: deliberately optimistic -- falls through to True when no
        indicator is found, so body inspection effectively never reports
        'v9-or-earlier'.

        Args:
            data: Parsed JSON response.
        """
        # Check for v10+ metadata indicators
        if 'meta' in data:
            return True

        # Check collection/environment structure
        if 'collection' in data:
            collection = data['collection']
            if 'fork' in collection or 'meta' in collection:
                return True

        # Default to assuming v10+ (optimistic)
        return True

    def _warn_about_old_version(self):
        """Emit a one-time UserWarning about pre-v10 servers."""
        if not self.api_version_warned:
            warnings.warn(
                f"Detected API version: {self.api_version}. "
                "This skill is optimized for Postman v10+ APIs. "
                "Some features may not work correctly with older versions. "
                "Please upgrade to Postman v10+ for best experience.",
                UserWarning
            )
            self.api_version_warned = True
" + "Please upgrade to Postman v10+ for best experience.", + UserWarning + ) + self.api_version_warned = True + + def _make_request(self, method, endpoint, **kwargs): + """ + Make an API request with retry logic and enhanced error handling. + + Args: + method: HTTP method (GET, POST, PUT, DELETE, PATCH) + endpoint: API endpoint path (without base URL) + **kwargs: Additional arguments (json, headers, etc.) + + Returns: + Parsed JSON response + + Raises: + AuthenticationError: If authentication fails (401) + PermissionError: If insufficient permissions (403) + ResourceNotFoundError: If resource not found (404) + ValidationError: If request validation fails (400) + RateLimitError: If rate limit exceeded (429) + ServerError: If server error occurs (5xx) + NetworkError: If network connection fails + TimeoutError: If request times out + PostmanAPIError: For other API errors + """ + url = f"{self.config.base_url}{endpoint}" + + # Build curl command + curl_cmd = ['curl', '-s', '-k', '-i'] # silent, skip cert verification, include headers + + # Add HTTP method for non-GET requests + if method.upper() != 'GET': + curl_cmd.extend(['-X', method.upper()]) + + # Add JSON body if provided (before headers to avoid duplicates) + has_json_body = 'json' in kwargs and kwargs['json'] + if has_json_body: + json_data = json.dumps(kwargs['json']) + curl_cmd.extend(['-d', json_data]) + curl_cmd.extend(['-H', 'Content-Type: application/json']) + + # Add headers (skip Content-Type if we already added it for JSON) + for key, value in self.config.headers.items(): + if key.lower() == 'content-type' and has_json_body: + continue # Skip duplicate Content-Type + curl_cmd.extend(['-H', f"{key}: {value}"]) + + # Add timeout + timeout = kwargs.get('timeout', self.config.timeout) + curl_cmd.extend(['--max-time', str(timeout)]) + + # Add URL + curl_cmd.append(url) + + def execute_curl(): + """Execute curl command and return parsed response.""" + try: + # Use environment as-is - the proxy is configured 
correctly + # and removing it breaks DNS resolution + env = os.environ.copy() + + # Debug: print curl command if POSTMAN_DEBUG is set + if os.getenv('POSTMAN_DEBUG'): + print(f"DEBUG: Executing curl command: {' '.join(curl_cmd[:10])}... {url}", file=sys.stderr) + print(f"DEBUG: Using proxy: {env.get('https_proxy', 'none')}", file=sys.stderr) + + result = subprocess.run( + curl_cmd, + capture_output=True, + text=True, + timeout=timeout + 5, # Add buffer to subprocess timeout + env=env # Use environment with proxy intact + ) + + if result.returncode != 0: + stderr = result.stderr.strip() if result.stderr else "" + stdout = result.stdout.strip() if result.stdout else "" + error_msg = stderr or stdout or "Unknown curl error" + raise NetworkError( + message=f"Curl failed (exit code {result.returncode}): {error_msg}\n" + f"Command: {' '.join(curl_cmd[:4])}... {url}" + ) + + # Parse response (headers + body) + output = result.stdout + if not output: + raise NetworkError(message="Empty response from curl") + + # Split headers and body + parts = output.split('\r\n\r\n', 1) + if len(parts) < 2: + parts = output.split('\n\n', 1) + + if len(parts) < 2: + raise NetworkError(message="Invalid response format from curl") + + headers_text, body = parts[0], parts[1] + + # Check if body contains another HTTP response (common with proxies/HTTP2) + if body.strip().startswith('HTTP/'): + # The body is actually another HTTP response - parse it instead + nested_parts = body.split('\n\n', 1) + if len(nested_parts) >= 2: + headers_text = nested_parts[0] + body = nested_parts[1] if len(nested_parts) > 1 else "" + + # Debug: print response details if POSTMAN_DEBUG is set + if os.getenv('POSTMAN_DEBUG'): + print(f"DEBUG: Headers (first 200 chars): {headers_text[:200]}", file=sys.stderr) + print(f"DEBUG: Response body length: {len(body)}", file=sys.stderr) + print(f"DEBUG: Response body (first 200 chars): {body[:200]}", file=sys.stderr) + + # Extract status code from headers + status_line = 
headers_text.split('\n')[0] + status_parts = status_line.split() + if len(status_parts) < 2: + raise NetworkError(message=f"Invalid HTTP status line: {status_line}") + status_code = int(status_parts[1]) + + # Parse response headers + response_headers = {} + for line in headers_text.split('\n')[1:]: + if ':' in line: + key, value = line.split(':', 1) + response_headers[key.strip()] = value.strip() + + # Create a mock response object for compatibility + class MockResponse: + def __init__(self, status_code, headers, body): + self.status_code = status_code + self.headers = headers + self._body = body + + def json(self): + return json.loads(self._body) if self._body else {} + + return MockResponse(status_code, response_headers, body) + + except subprocess.TimeoutExpired as e: + raise TimeoutError(timeout_seconds=timeout) from e + except json.JSONDecodeError as e: + # Better error for empty/invalid responses + error_msg = ( + "Received invalid JSON response from Postman API.\n" + "This usually means:\n" + " • The endpoint returned no data or empty response\n" + " • Network connectivity issue\n" + " • The API endpoint might not exist\n" + f"\nTried to access: {url}\n" + "\n🔧 Troubleshooting:\n" + " • Run: python scripts/validate_setup.py\n" + " • Check Postman API status: https://status.postman.com/" + ) + raise NetworkError(message=error_msg) from e + except Exception as e: + if isinstance(e, (NetworkError, TimeoutError)): + raise + raise NetworkError(message=f"Request failed: {str(e)}", original_error=e) from e + + try: + # Use retry handler + response = self.retry_handler.execute(execute_curl) + except TimeoutError: + raise + except NetworkError: + raise + except Exception as e: + raise NetworkError( + message=f"Request failed: {str(e)}", + original_error=e + ) from e + + # Detect API version on first request + if self.api_version is None: + self._detect_api_version(response) + + # Handle error responses + if response.status_code >= 400: + raise 
create_exception_from_response(response) + + # Return parsed response + return response.json() + + def list_collections(self, workspace_id=None): + """ + List all collections in a workspace. + + Args: + workspace_id: Workspace ID (uses config default if not provided) + + Returns: + List of collection objects + """ + workspace_id = workspace_id or self.config.workspace_id + + if workspace_id: + endpoint = f"/collections?workspace={workspace_id}" + else: + endpoint = "/collections" + + response = self._make_request('GET', endpoint) + collections = response.get('collections', []) + + # Provide helpful context for empty results (only in debug mode) + if len(collections) == 0 and os.getenv('POSTMAN_DEBUG'): + if workspace_id: + import sys + print(f"ℹ️ No collections found in workspace {workspace_id}", file=sys.stderr) + print(" 💡 Run 'python scripts/list_workspaces.py' to see other workspaces", file=sys.stderr) + + return collections + + def get_collection(self, collection_uid): + """ + Get detailed information about a specific collection. + + Args: + collection_uid: Unique identifier for the collection + + Returns: + Collection object with full details + """ + endpoint = f"/collections/{collection_uid}" + response = self._make_request('GET', endpoint) + return response.get('collection', {}) + + def create_collection(self, collection_data, workspace_id=None): + """ + Create a new collection. 
+ + Args: + collection_data: Dictionary containing collection configuration: + - info: Collection metadata (name, description, schema) + - item: List of requests/folders (optional) + - variable: List of collection variables (optional) + workspace_id: Workspace ID to create collection in (uses config default if not provided) + + Returns: + Created collection object + """ + workspace_id = workspace_id or self.config.workspace_id + + if workspace_id: + endpoint = f"/collections?workspace={workspace_id}" + else: + endpoint = "/collections" + + response = self._make_request('POST', endpoint, json={'collection': collection_data}) + return response.get('collection', {}) + + def update_collection(self, collection_uid, collection_data): + """ + Update an existing collection. + + Args: + collection_uid: Unique identifier for the collection + collection_data: Dictionary containing fields to update + + Returns: + Updated collection object + """ + endpoint = f"/collections/{collection_uid}" + response = self._make_request('PUT', endpoint, json={'collection': collection_data}) + return response.get('collection', {}) + + def delete_collection(self, collection_uid): + """ + Delete a collection. + + Args: + collection_uid: Unique identifier for the collection + + Returns: + Deletion confirmation + """ + endpoint = f"/collections/{collection_uid}" + response = self._make_request('DELETE', endpoint) + return response + + def fork_collection(self, collection_uid, label=None, workspace_id=None): + """ + Create a fork of a collection. + + **Requires**: Postman v10+ API + + A fork is an independent copy of a collection that can be modified + separately. Forks enable version control workflows with pull requests. + + Args: + collection_uid: Collection UID to fork + label: Optional label/name for the fork + workspace_id: Workspace for the fork (uses config default if not provided) + + Returns: + Forked collection object with fork metadata + + Example: + >>> client.fork_collection( + ... 
collection_uid="12345-abcd", + ... label="my-feature-branch", + ... workspace_id="67890-efgh" + ... ) + """ + workspace_id = workspace_id or self.config.workspace_id + + endpoint = f"/collections/{collection_uid}/forks" + payload = {} + + if label: + payload['label'] = label + if workspace_id: + payload['workspace'] = workspace_id + + response = self._make_request('POST', endpoint, json=payload) + return response.get('fork', {}) + + def create_pull_request(self, collection_uid, source_collection_uid, + title=None, description=None, reviewers=None): + """ + Create a pull request to merge changes from a forked collection. + + **Requires**: Postman v10+ API + + Pull requests allow you to propose merging changes from a fork + back to the parent collection. + + Args: + collection_uid: Destination collection UID (parent) + source_collection_uid: Source collection UID (fork) + title: Pull request title + description: Pull request description + reviewers: List of reviewer user IDs (optional) + + Returns: + Pull request object + + Example: + >>> client.create_pull_request( + ... collection_uid="parent-12345", + ... source_collection_uid="fork-67890", + ... title="Add new authentication tests", + ... description="This PR adds comprehensive auth tests" + ... ) + """ + endpoint = f"/collections/{collection_uid}/pull-requests" + payload = { + 'source': source_collection_uid + } + + if title: + payload['title'] = title + if description: + payload['description'] = description + if reviewers: + payload['reviewers'] = reviewers + + response = self._make_request('POST', endpoint, json=payload) + return response.get('pull_request', {}) + + def get_pull_requests(self, collection_uid, status=None): + """ + Get pull requests for a collection. 
+ + **Requires**: Postman v10+ API + + Args: + collection_uid: Collection UID + status: Filter by status ('open', 'closed', 'merged') (optional) + + Returns: + List of pull request objects + + Example: + >>> # Get all open PRs + >>> client.get_pull_requests("12345-abcd", status="open") + """ + endpoint = f"/collections/{collection_uid}/pull-requests" + + if status: + endpoint += f"?status={status}" + + response = self._make_request('GET', endpoint) + return response.get('pull_requests', []) + + def merge_pull_request(self, collection_uid, pull_request_id): + """ + Merge a pull request. + + **Requires**: Postman v10+ API + + Merges the changes from a fork into the parent collection. + + Args: + collection_uid: Collection UID + pull_request_id: Pull request ID to merge + + Returns: + Merged pull request object + + Example: + >>> client.merge_pull_request("12345-abcd", "pr-789") + """ + endpoint = f"/collections/{collection_uid}/pull-requests/{pull_request_id}/merge" + response = self._make_request('POST', endpoint) + return response.get('pull_request', {}) + + def duplicate_collection(self, collection_uid, name=None, workspace_id=None): + """ + Duplicate a collection (create a copy, not a fork). + + Creates a complete copy of a collection without version control linkage. + Unlike forking, duplicating creates a standalone collection. + + Args: + collection_uid: Collection UID to duplicate + name: Name for the duplicate (defaults to original name + " Copy") + workspace_id: Workspace for duplicate (uses config default if not provided) + + Returns: + Duplicated collection object + + Example: + >>> client.duplicate_collection( + ... collection_uid="12345-abcd", + ... name="My Collection Backup" + ... 
) + """ + # Get original collection + original = self.get_collection(collection_uid) + + # Prepare new collection data + new_collection = original.copy() + + # Set new name + if name: + new_collection['info']['name'] = name + else: + original_name = original.get('info', {}).get('name', 'Collection') + new_collection['info']['name'] = f"{original_name} Copy" + + # Remove UID and other metadata that shouldn't be copied + if 'uid' in new_collection.get('info', {}): + del new_collection['info']['uid'] + if '_postman_id' in new_collection.get('info', {}): + del new_collection['info']['_postman_id'] + + # Create new collection + return self.create_collection(new_collection, workspace_id) + + def list_environments(self, workspace_id=None): + """ + List all environments in a workspace. + + Args: + workspace_id: Workspace ID (uses config default if not provided) + + Returns: + List of environment objects + """ + workspace_id = workspace_id or self.config.workspace_id + + if workspace_id: + endpoint = f"/environments?workspace={workspace_id}" + else: + endpoint = "/environments" + + response = self._make_request('GET', endpoint) + return response.get('environments', []) + + def get_environment(self, environment_uid): + """ + Get detailed information about a specific environment. + + Args: + environment_uid: Unique identifier for the environment + + Returns: + Environment object with full details + """ + endpoint = f"/environments/{environment_uid}" + response = self._make_request('GET', endpoint) + return response.get('environment', {}) + + def create_environment(self, name, values=None, workspace_id=None): + """ + Create a new environment with automatic secret detection. + + **Enhanced in v2.0**: Automatically detects sensitive variables and + marks them as secrets. 
+ + Args: + name: Environment name + values: Dict of variable name -> value pairs, or list of variable objects + workspace_id: Workspace ID (uses config default if not provided) + + Returns: + Created environment object + + Example: + >>> # Simple dict format with auto-secret detection + >>> client.create_environment( + ... name="Production", + ... values={ + ... "base_url": "https://api.example.com", + ... "api_key": "secret-key-123", # Auto-detected as secret + ... "timeout": "30" + ... } + ... ) + >>> + >>> # Advanced format with explicit types + >>> client.create_environment( + ... name="Production", + ... values=[ + ... {"key": "base_url", "value": "https://api.example.com", "type": "default"}, + ... {"key": "api_key", "value": "secret-key-123", "type": "secret"} + ... ] + ... ) + """ + workspace_id = workspace_id or self.config.workspace_id + + endpoint = "/environments" + if workspace_id: + endpoint += f"?workspace={workspace_id}" + + # Format variables + variables = [] + if values: + if isinstance(values, dict): + # Convert dict to variable list with auto-secret detection + for key, value in values.items(): + var = { + 'key': key, + 'value': str(value), + 'type': self._detect_secret_type(key), + 'enabled': True + } + variables.append(var) + elif isinstance(values, list): + # Use provided list, ensure all have required fields + for var in values: + if 'type' not in var: + var['type'] = self._detect_secret_type(var.get('key', '')) + if 'enabled' not in var: + var['enabled'] = True + variables.append(var) + + payload = { + 'environment': { + 'name': name, + 'values': variables + } + } + + response = self._make_request('POST', endpoint, json=payload) + return response.get('environment', {}) + + def _detect_secret_type(self, key): + """ + Detect if a variable should be marked as secret based on its name. 
+ + Args: + key: Variable name + + Returns: + 'secret' if the variable appears sensitive, 'default' otherwise + """ + sensitive_keywords = [ + 'key', 'token', 'secret', 'password', 'passwd', + 'pwd', 'auth', 'credential', 'private', 'apikey', + 'api_key', 'bearer', 'authorization' + ] + + key_lower = key.lower() + for keyword in sensitive_keywords: + if keyword in key_lower: + return 'secret' + + return 'default' + + def update_environment(self, environment_uid, name=None, values=None): + """ + Update an existing environment. + + **Enhanced in v2.0**: Supports partial updates and automatic secret detection. + + Args: + environment_uid: Environment UID + name: New name (optional) + values: Dict of variable updates or list of variables (optional) + + Returns: + Updated environment object + + Example: + >>> # Update just the name + >>> client.update_environment("env-123", name="Staging v2") + >>> + >>> # Update/add variables + >>> client.update_environment( + ... "env-123", + ... values={ + ... "api_key": "new-secret-key", # Updates existing or adds new + ... "new_var": "value" + ... } + ... 
) + """ + endpoint = f"/environments/{environment_uid}" + + # Get current environment + current = self.get_environment(environment_uid) + + # Update name if provided + if name: + current['name'] = name + + # Update variables if provided + if values: + current_vars = {v['key']: v for v in current.get('values', [])} + + if isinstance(values, dict): + # Update existing or add new variables + for key, value in values.items(): + if key in current_vars: + # Update existing variable + current_vars[key]['value'] = str(value) + # Preserve type unless it should be secret + if current_vars[key].get('type') != 'secret': + current_vars[key]['type'] = self._detect_secret_type(key) + else: + # Add new variable + var = { + 'key': key, + 'value': str(value), + 'type': self._detect_secret_type(key), + 'enabled': True + } + current_vars[key] = var + + current['values'] = list(current_vars.values()) + + elif isinstance(values, list): + # Replace all variables + for var in values: + if 'type' not in var: + var['type'] = self._detect_secret_type(var.get('key', '')) + if 'enabled' not in var: + var['enabled'] = True + current['values'] = values + + payload = {'environment': current} + response = self._make_request('PUT', endpoint, json=payload) + return response.get('environment', {}) + + def delete_environment(self, environment_uid): + """ + Delete an environment. + + Args: + environment_uid: Unique identifier for the environment + + Returns: + Deletion confirmation + """ + endpoint = f"/environments/{environment_uid}" + response = self._make_request('DELETE', endpoint) + return response + + def duplicate_environment(self, environment_uid, name=None, workspace_id=None): + """ + Duplicate an environment. + + Creates a complete copy of an environment including all variables. + Secret variables are preserved with their secret type. 
+ + Args: + environment_uid: Environment UID to duplicate + name: Name for the duplicate (defaults to original name + " Copy") + workspace_id: Workspace for duplicate (uses config default if not provided) + + Returns: + Duplicated environment object + + Example: + >>> client.duplicate_environment( + ... environment_uid="env-123", + ... name="Production Backup" + ... ) + """ + # Get original environment + original = self.get_environment(environment_uid) + + # Prepare new environment name + new_name = name or f"{original['name']} Copy" + + # Extract variables (preserve types including secrets) + values = original.get('values', []) + + # Create new environment + return self.create_environment(new_name, values, workspace_id) + + def run_collection(self, collection_uid, environment_uid=None): + """ + Run a collection's tests. + + Note: This requires additional Postman features like Newman or Collection Runner. + For the POC, this is a placeholder showing the intended structure. + + Args: + collection_uid: Unique identifier for the collection + environment_uid: Optional environment to use + + Returns: + Test run results + """ + # Note: The actual test running would typically use Newman or the Collection Runner API + # This is a simplified version for the POC + raise NotImplementedError( + "Collection test execution requires Postman Collection Runner or Newman integration. " + "This POC demonstrates the API client structure. " + "For now, use 'list_collections' to discover available collections." + ) + + def list_monitors(self, workspace_id=None): + """ + List all monitors in a workspace. 
+ + Args: + workspace_id: Workspace ID (uses config default if not provided) + + Returns: + List of monitor objects + """ + workspace_id = workspace_id or self.config.workspace_id + + if workspace_id: + endpoint = f"/monitors?workspace={workspace_id}" + else: + endpoint = "/monitors" + + response = self._make_request('GET', endpoint) + return response.get('monitors', []) + + def get_monitor(self, monitor_id): + """ + Get detailed information about a specific monitor. + + Args: + monitor_id: Unique identifier for the monitor + + Returns: + Monitor object with full details + """ + endpoint = f"/monitors/{monitor_id}" + response = self._make_request('GET', endpoint) + return response.get('monitor', {}) + + def create_monitor(self, monitor_data): + """ + Create a new monitor. + + Args: + monitor_data: Dictionary containing monitor configuration: + - name: Monitor name + - collection: Collection UID + - environment: Environment UID (optional) + - schedule: Schedule configuration (optional) + + Returns: + Created monitor object + """ + endpoint = "/monitors" + response = self._make_request('POST', endpoint, json={'monitor': monitor_data}) + return response.get('monitor', {}) + + def update_monitor(self, monitor_id, monitor_data): + """ + Update an existing monitor. + + Args: + monitor_id: Unique identifier for the monitor + monitor_data: Dictionary containing fields to update + + Returns: + Updated monitor object + """ + endpoint = f"/monitors/{monitor_id}" + response = self._make_request('PUT', endpoint, json={'monitor': monitor_data}) + return response.get('monitor', {}) + + def delete_monitor(self, monitor_id): + """ + Delete a monitor. + + Args: + monitor_id: Unique identifier for the monitor + + Returns: + Deletion confirmation + """ + endpoint = f"/monitors/{monitor_id}" + response = self._make_request('DELETE', endpoint) + return response + + def get_monitor_runs(self, monitor_id, limit=10): + """ + Get run history for a monitor. 
def get_monitor_runs(self, monitor_id, limit=10):
    """Return up to ``limit`` recent runs for a monitor (default 10)."""
    response = self._make_request('GET', f"/monitors/{monitor_id}/runs?limit={limit}")
    return response.get('runs', [])

def list_apis(self, workspace_id=None):
    """Return the APIs in a workspace (config default workspace if omitted)."""
    ws = workspace_id or self.config.workspace_id
    endpoint = f"/apis?workspace={ws}" if ws else "/apis"
    return self._make_request('GET', endpoint).get('apis', [])

def get_workspace(self, workspace_id=None):
    """Fetch a workspace's details.

    Args:
        workspace_id: Workspace ID (config default when omitted).

    Raises:
        ValueError: If no workspace id is given and none is configured.
    """
    ws = workspace_id or self.config.workspace_id
    if not ws:
        raise ValueError("Workspace ID must be provided or set in configuration")
    return self._make_request('GET', f"/workspaces/{ws}").get('workspace', {})

# Design Phase: Schema and API Operations

def get_api(self, api_id):
    """Fetch full details for a single API."""
    return self._make_request('GET', f"/apis/{api_id}").get('api', {})

def get_api_versions(self, api_id):
    """Return every version of the given API."""
    return self._make_request('GET', f"/apis/{api_id}/versions").get('versions', [])
def get_api_version(self, api_id, version_id):
    """Fetch one specific version of an API."""
    endpoint = f"/apis/{api_id}/versions/{version_id}"
    return self._make_request('GET', endpoint).get('version', {})

def get_api_schema(self, api_id, version_id):
    """Return the schema objects attached to one API version."""
    endpoint = f"/apis/{api_id}/versions/{version_id}/schemas"
    return self._make_request('GET', endpoint).get('schemas', [])
" + "See: https://learning.postman.com/docs/design-apis/specifications/overview/", + DeprecationWarning, + stacklevel=2 + ) + + workspace_id = workspace_id or self.config.workspace_id + + if workspace_id: + endpoint = f"/apis?workspace={workspace_id}" + else: + endpoint = "/apis" + + response = self._make_request('POST', endpoint, json={'api': api_data}) + return response.get('api', {}) + + def update_api(self, api_id, api_data): + """ + Update an existing API. + + Args: + api_id: Unique identifier for the API + api_data: Dictionary containing fields to update + + Returns: + Updated API object + """ + endpoint = f"/apis/{api_id}" + response = self._make_request('PUT', endpoint, json={'api': api_data}) + return response.get('api', {}) + + def delete_api(self, api_id): + """ + Delete an API. + + Args: + api_id: Unique identifier for the API + + Returns: + Deletion confirmation + """ + endpoint = f"/apis/{api_id}" + response = self._make_request('DELETE', endpoint) + return response + + # Spec Hub Operations (Replaces legacy API creation) + + def create_spec(self, spec_data, workspace_id=None): + """ + Create a new API specification in Postman's Spec Hub. + + **This is the recommended way to create API specifications.** + The legacy create_api() method is deprecated in favor of Spec Hub. + + Supports both single-file and multi-file specifications. + For OpenAPI 3.0 and AsyncAPI 2.0. + + Args: + spec_data: Dictionary containing specification configuration: + - name: Spec name (required) + - description: Spec description (optional) + - files: List of file objects (required): + - path: File path (e.g., "openapi.yaml") + - content: File content as string (JSON or YAML) + - root: Boolean, mark as root file (optional, default: first file) + workspace_id: Workspace ID (uses config default if not provided) + + Returns: + Created spec object with spec ID and metadata + + Example: + >>> # Single-file OpenAPI 3.0 spec + >>> client.create_spec({ + ... 
"name": "Pet Store API", + ... "description": "A simple pet store API", + ... "files": [{ + ... "path": "openapi.yaml", + ... "content": yaml_or_json_string, + ... "root": True + ... }] + ... }) + >>> + >>> # Multi-file spec with separate schemas + >>> client.create_spec({ + ... "name": "Complex API", + ... "files": [ + ... {"path": "openapi.yaml", "content": main_spec, "root": True}, + ... {"path": "schemas/pet.json", "content": pet_schema}, + ... {"path": "schemas/user.json", "content": user_schema} + ... ] + ... }) + """ + workspace_id = workspace_id or self.config.workspace_id + + if not workspace_id: + raise ValueError("workspace_id is required to create a spec. Set POSTMAN_WORKSPACE_ID in .env or pass workspace_id parameter.") + + endpoint = f"/specs?workspaceId={workspace_id}" + + # Build the request payload + payload = { + "name": spec_data["name"], + "type": spec_data.get("type", "openapi:3") # Default to OpenAPI 3.0 + } + + if "description" in spec_data: + payload["description"] = spec_data["description"] + + # Add files + files = spec_data.get("files", []) + if not files: + raise ValueError("At least one file must be provided in spec_data['files']") + + payload["files"] = [] + for i, file_obj in enumerate(files): + file_entry = { + "path": file_obj["path"], + "content": file_obj["content"] + } + # Note: root is handled automatically by API based on type field + payload["files"].append(file_entry) + + response = self._make_request('POST', endpoint, json=payload) + return response.get('data', {}) + + def list_specs(self, workspace_id=None, limit=10, offset=0): + """ + List all API specifications in a workspace. + + Args: + workspace_id: Workspace ID (uses config default if not provided) + limit: Maximum number of specs to return (default: 10) + offset: Number of specs to skip (default: 0) + + Returns: + List of spec objects + + Example: + >>> specs = client.list_specs() + >>> for spec in specs: + ... 
def list_specs(self, workspace_id=None, limit=10, offset=0):
    """List specs in a workspace, with simple limit/offset paging.

    Note: falsy values (limit=0, offset=0) are omitted from the query
    string, matching the original behavior.
    """
    ws = workspace_id or self.config.workspace_id
    query = []
    if ws:
        query.append(f"workspaceId={ws}")
    if limit:
        query.append(f"limit={limit}")
    if offset:
        query.append(f"offset={offset}")
    endpoint = "/specs" + ("?" + "&".join(query) if query else "")
    return self._make_request('GET', endpoint).get('data', [])

def get_spec(self, spec_id):
    """Fetch one spec, including its file metadata."""
    return self._make_request('GET', f"/specs/{spec_id}").get('data', {})

def update_spec(self, spec_id, spec_data):
    """Patch a spec's metadata (name/description).

    Note: to change file contents, use update_spec_file() instead.
    """
    return self._make_request('PATCH', f"/specs/{spec_id}", json=spec_data).get('data', {})
def delete_spec(self, spec_id):
    """Delete a spec from Spec Hub and return the raw response."""
    return self._make_request('DELETE', f"/specs/{spec_id}")

def create_spec_file(self, spec_id, file_path, content, root=False):
    """Add a file to an existing multi-file spec.

    Args:
        spec_id: Spec identifier.
        file_path: Path within the spec (e.g. "schemas/pet.json").
        content: File body as a string (JSON or YAML).
        root: Mark the new file as the root document (default False).

    Returns:
        The created file object.
    """
    body = {"path": file_path, "content": content}
    if root:
        body["root"] = True
    return self._make_request('POST', f"/specs/{spec_id}/files", json=body).get('data', {})
def update_spec_file(self, spec_id, file_path, content=None, root=None):
    """Patch a spec file's content and/or its root flag.

    Args:
        spec_id: Spec identifier.
        file_path: Path of the file to update.
        content: New file content (optional).
        root: New root status (optional).

    Raises:
        ValueError: If neither ``content`` nor ``root`` is given.
    """
    body = {}
    if content is not None:
        body["content"] = content
    if root is not None:
        body["root"] = root
    if not body:
        raise ValueError("At least one of 'content' or 'root' must be provided")
    endpoint = f"/specs/{spec_id}/files/{file_path}"
    return self._make_request('PATCH', endpoint, json=body).get('data', {})

def delete_spec_file(self, spec_id, file_path):
    """Remove a file from a multi-file spec (the sole root file cannot be removed)."""
    return self._make_request('DELETE', f"/specs/{spec_id}/files/{file_path}")

def get_spec_files(self, spec_id):
    """Return the file entries (path + metadata) of a spec."""
    return self._make_request('GET', f"/specs/{spec_id}/files").get('data', [])
def generate_collection_from_spec(self, spec_id, collection_name=None):
    """Start collection generation from a spec.

    Args:
        spec_id: Spec identifier.
        collection_name: Optional name for the generated collection.

    Returns:
        The generation task object (status plus collection details).
    """
    body = {"name": collection_name} if collection_name else None
    endpoint = f"/specs/{spec_id}/generations/collection"
    return self._make_request('POST', endpoint, json=body)

def list_collections_from_spec(self, spec_id):
    """Return collections that were generated from the given spec."""
    endpoint = f"/specs/{spec_id}/generations/collection"
    return self._make_request('GET', endpoint).get('data', [])
def generate_spec_from_collection(self, collection_id, spec_name=None, workspace_id=None):
    """Start spec generation (reverse-engineering) from a collection.

    Args:
        collection_id: Collection UID to generate the spec from.
        spec_name: Optional name for the generated spec.
        workspace_id: Workspace for the spec (config default when omitted).

    Returns:
        The generation task object (status plus spec details).
    """
    ws = workspace_id or self.config.workspace_id
    body = {}
    if spec_name:
        body["name"] = spec_name
    if ws:
        body["workspace"] = ws
    endpoint = f"/collections/{collection_id}/generations/spec"
    return self._make_request('POST', endpoint, json=body or None)

# Deploy Phase: Mock Server Operations

def list_mocks(self, workspace_id=None):
    """Return the mock servers in a workspace (config default if omitted)."""
    ws = workspace_id or self.config.workspace_id
    endpoint = f"/mocks?workspace={ws}" if ws else "/mocks"
    return self._make_request('GET', endpoint).get('mocks', [])

def get_mock(self, mock_id):
    """Fetch full details for one mock server."""
    return self._make_request('GET', f"/mocks/{mock_id}").get('mock', {})

def create_mock(self, mock_data):
    """Create a mock server from a config mapping (name, collection, environment, private)."""
    return self._make_request('POST', "/mocks", json={'mock': mock_data}).get('mock', {})
def update_mock(self, mock_id, mock_data):
    """Update a mock server and return the updated object."""
    return self._make_request('PUT', f"/mocks/{mock_id}", json={'mock': mock_data}).get('mock', {})

def delete_mock(self, mock_id):
    """Delete a mock server and return the raw response."""
    return self._make_request('DELETE', f"/mocks/{mock_id}")

# --- new file: scripts/publish_docs.py (mode 100755) ---
#!/usr/bin/env python3
"""
Publish and manage Postman API documentation.

This script helps publish collections as documentation, manage visibility,
and generate changelogs between versions.

Usage:
    python publish_docs.py --collection <id> --publish
    python publish_docs.py --collection <id> --status
    python publish_docs.py --compare --old <id> --new <id>
    python publish_docs.py --changelog --api <id>
    python publish_docs.py --help
"""

import sys
import os
import argparse
import json
from datetime import datetime
from collections import defaultdict

# Add parent directory to path for imports
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))

from scripts.postman_client import PostmanClient
from scripts.config import PostmanConfig
+ """ + print(f"=== Publishing Collection Documentation ===\n") + + try: + # Get collection details + collection = client.get_collection(collection_id) + collection_name = collection.get('info', {}).get('name', 'Unnamed Collection') + + print(f"Collection: {collection_name}") + print(f"ID: {collection_id}\n") + + # Generate public documentation URL + # Note: Actual publishing may require Postman UI or specific API endpoints + public_url = f"https://documenter.getpostman.com/view/{collection_id}" + + print("✓ Collection can be accessed for documentation\n") + print("📚 Documentation URL:") + print(f" {public_url}\n") + + print("📋 Collection Details:") + print(f" Name: {collection_name}") + print(f" Description: {collection.get('info', {}).get('description', 'No description')}") + + # Count requests + total_requests = 0 + for item in collection.get('item', []): + if 'request' in item: + total_requests += 1 + elif 'item' in item: + total_requests += len([i for i in item['item'] if 'request' in i]) + + print(f" Total Endpoints: {total_requests}\n") + + print("💡 Next Steps:") + print(" 1. Share the documentation URL with your team") + print(" 2. Customize documentation in Postman web UI") + print(" 3. Add examples and descriptions to requests") + print(" 4. 
Set up environment templates for testing\n") + + print(f"🔗 View in Postman: https://postman.postman.co/collections/{collection_id}") + + except Exception as e: + print(f"Error publishing documentation: {e}") + sys.exit(1) + + +def get_collection_status(client, collection_id): + """Get collection documentation status.""" + print(f"=== Collection Documentation Status ===\n") + + try: + collection = client.get_collection(collection_id) + collection_name = collection.get('info', {}).get('name', 'Unnamed Collection') + + print(f"Collection: {collection_name}") + print(f"ID: {collection_id}\n") + + # Count documentation metrics + total_requests = 0 + documented_requests = 0 + requests_with_examples = 0 + + def count_requests(items): + """Recursively count requests and their documentation.""" + nonlocal total_requests, documented_requests, requests_with_examples + + for item in items: + if 'request' in item: + total_requests += 1 + + # Check if request has description + if item.get('request', {}).get('description'): + documented_requests += 1 + + # Check if request has example responses + if item.get('response', []): + requests_with_examples += 1 + + elif 'item' in item: + count_requests(item['item']) + + count_requests(collection.get('item', [])) + + # Calculate documentation coverage + doc_coverage = (documented_requests / total_requests * 100) if total_requests > 0 else 0 + example_coverage = (requests_with_examples / total_requests * 100) if total_requests > 0 else 0 + + print("📊 Documentation Metrics:") + print(f" Total Endpoints: {total_requests}") + print(f" Documented: {documented_requests} ({doc_coverage:.1f}%)") + print(f" With Examples: {requests_with_examples} ({example_coverage:.1f}%)\n") + + # Documentation quality score + score = (doc_coverage + example_coverage) / 2 + + print(f"📈 Documentation Quality Score: {score:.1f}/100") + + if score >= 90: + print(" Grade: A (Excellent) ✅") + elif score >= 75: + print(" Grade: B (Good) ✔️") + elif score >= 60: + 
print(" Grade: C (Fair) ⚠️") + else: + print(" Grade: D (Needs Improvement) ❌") + + print() + + # Recommendations + if doc_coverage < 100: + print("💡 Recommendations:") + print(f" • Add descriptions to {total_requests - documented_requests} endpoint(s)") + + if example_coverage < 100: + if doc_coverage >= 100: + print("💡 Recommendations:") + print(f" • Add example responses to {total_requests - requests_with_examples} endpoint(s)") + + except Exception as e: + print(f"Error getting collection status: {e}") + sys.exit(1) + + +def compare_collections_for_changelog(client, old_id, new_id): + """Compare two collections and generate a changelog.""" + print(f"=== Generating Changelog ===\n") + + try: + old_col = client.get_collection(old_id) + new_col = client.get_collection(new_id) + + old_name = old_col.get('info', {}).get('name', 'Old Version') + new_name = new_col.get('info', {}).get('name', 'New Version') + + print(f"Comparing:") + print(f" Old: {old_name} ({old_id})") + print(f" New: {new_name} ({new_id})\n") + + # Extract requests + def get_requests(collection): + """Extract all requests from collection.""" + requests = {} + + def extract(items, folder_path=""): + for item in items: + if 'request' in item: + name = item.get('name', 'Unnamed') + path = f"{folder_path}/{name}" if folder_path else name + method = item.get('request', {}).get('method', 'GET') + url = item.get('request', {}).get('url', {}) + + if isinstance(url, dict): + url_str = url.get('raw', '') + else: + url_str = str(url) + + requests[path] = { + 'name': name, + 'method': method, + 'url': url_str, + 'description': item.get('request', {}).get('description', '') + } + + elif 'item' in item: + folder_name = item.get('name', 'Folder') + new_path = f"{folder_path}/{folder_name}" if folder_path else folder_name + extract(item['item'], new_path) + + extract(collection.get('item', [])) + return requests + + old_requests = get_requests(old_col) + new_requests = get_requests(new_col) + + # Find differences 
+ added = {k: v for k, v in new_requests.items() if k not in old_requests} + removed = {k: v for k, v in old_requests.items() if k not in new_requests} + modified = {} + + for key in set(old_requests.keys()) & set(new_requests.keys()): + old_req = old_requests[key] + new_req = new_requests[key] + + changes = [] + + if old_req['method'] != new_req['method']: + changes.append(f"Method: {old_req['method']} → {new_req['method']}") + + if old_req['url'] != new_req['url']: + changes.append(f"URL changed") + + if old_req['description'] != new_req['description']: + changes.append("Description updated") + + if changes: + modified[key] = changes + + # Print changelog + print("=" * 70) + print("# CHANGELOG") + print("=" * 70) + print() + + print(f"## {new_name}") + print(f"*Released: {datetime.now().strftime('%Y-%m-%d')}*\n") + + if added: + print(f"### ✨ Added ({len(added)} endpoint{'s' if len(added) > 1 else ''})") + print() + for name, req in added.items(): + print(f"- **{req['method']}** `{name}`") + if req['description']: + print(f" - {req['description']}") + print() + + if modified: + print(f"### 🔄 Changed ({len(modified)} endpoint{'s' if len(modified) > 1 else ''})") + print() + for name, changes in modified.items(): + print(f"- `{name}`") + for change in changes: + print(f" - {change}") + print() + + if removed: + print(f"### ⚠️ Removed ({len(removed)} endpoint{'s' if len(removed) > 1 else ''})") + print() + for name, req in removed.items(): + print(f"- **{req['method']}** `{name}`") + print() + + # Breaking changes analysis + print("### 🔴 Breaking Changes") + print() + + breaking_changes = [] + + if removed: + breaking_changes.append(f"{len(removed)} endpoint(s) removed") + + for name, changes in modified.items(): + if any('Method:' in c for c in changes): + breaking_changes.append(f"Method changed for {name}") + + if breaking_changes: + for change in breaking_changes: + print(f"- {change}") + else: + print("- None (Backward compatible)") + + print() + print("=" * 
70) + + except Exception as e: + print(f"Error generating changelog: {e}") + sys.exit(1) + + +def generate_api_changelog(client, api_id): + """Generate changelog for API versions.""" + print(f"=== API Version Changelog ===\n") + + try: + api = client.get_api(api_id) + print(f"API: {api.get('name', 'Unnamed')}\n") + + versions = client.get_api_versions(api_id) + + if len(versions) < 2: + print("Need at least 2 versions to generate a changelog") + return + + print(f"Found {len(versions)} version(s):\n") + + for i, version in enumerate(versions): + print(f" {i + 1}. {version.get('name', 'Unnamed')} (ID: {version.get('id')})") + + print("\n" + "=" * 70) + print("# API VERSION HISTORY") + print("=" * 70) + print() + + for version in versions: + version_name = version.get('name', 'Unknown') + created_at = version.get('createdAt', 'Unknown') + + print(f"## Version {version_name}") + print(f"*Released: {created_at}*\n") + + # Try to get schema + try: + schemas = client.get_api_schema(api_id, version.get('id')) + if schemas: + schema = json.loads(schemas[0].get('schema', '{}')) + paths = schema.get('paths', {}) + + print(f"### Summary") + print(f"- Endpoints: {len(paths)}") + + # Count methods + methods = defaultdict(int) + for path, path_methods in paths.items(): + for method in path_methods.keys(): + if method in ['get', 'post', 'put', 'delete', 'patch']: + methods[method.upper()] += 1 + + print(f"- Methods: {dict(methods)}") + print() + + except: + pass # Skip if schema unavailable + + print() + + except Exception as e: + print(f"Error generating API changelog: {e}") + sys.exit(1) + + +def main(): + """Main entry point for documentation publishing.""" + + parser = argparse.ArgumentParser( + description='Publish and manage Postman API documentation', + formatter_class=argparse.RawDescriptionHelpFormatter, + epilog=""" +Examples: + # Publish collection documentation + python publish_docs.py --collection col-123 --publish + + # Check documentation status + python 
publish_docs.py --collection col-123 --status + + # Generate changelog between versions + python publish_docs.py --compare --old col-123 --new col-456 + + # Generate API version changelog + python publish_docs.py --changelog --api api-789 + """ + ) + + parser.add_argument('--collection', metavar='COLLECTION_ID', help='Collection ID') + parser.add_argument('--publish', action='store_true', help='Publish collection documentation') + parser.add_argument('--status', action='store_true', help='Check documentation status') + parser.add_argument('--compare', action='store_true', help='Compare two collections for changelog') + parser.add_argument('--old', metavar='COLLECTION_ID', help='Old collection ID for comparison') + parser.add_argument('--new', metavar='COLLECTION_ID', help='New collection ID for comparison') + parser.add_argument('--changelog', action='store_true', help='Generate API version changelog') + parser.add_argument('--api', metavar='API_ID', help='API ID for changelog') + + args = parser.parse_args() + + if not any([args.publish, args.status, args.compare, args.changelog]): + parser.print_help() + return + + # Initialize client + client = PostmanClient() + + # Execute operations + if args.publish: + if not args.collection: + print("Error: --collection required for publishing") + sys.exit(1) + publish_collection_docs(client, args.collection) + + elif args.status: + if not args.collection: + print("Error: --collection required for status check") + sys.exit(1) + get_collection_status(client, args.collection) + + elif args.compare: + if not args.old or not args.new: + print("Error: --old and --new collection IDs required for comparison") + sys.exit(1) + compare_collections_for_changelog(client, args.old, args.new) + + elif args.changelog: + if not args.api: + print("Error: --api required for changelog generation") + sys.exit(1) + generate_api_changelog(client, args.api) + + +if __name__ == '__main__': + main() diff --git a/scripts/run_collection.py 
# --- new file: scripts/run_collection.py (mode 100755) ---
#!/usr/bin/env python3
"""
Run a Postman collection using Newman (Postman's CLI collection runner).

This script provides a Python wrapper around Newman to execute Postman
collections and parse the results. Newman must be installed separately:
``npm install -g newman`` (requires Node.js and npm).

Usage:
    python run_collection.py --collection="Collection Name"
    python run_collection.py --collection-uid="12345-67890"
    python run_collection.py --collection-uid="12345-67890" --environment-uid="abc-def"
"""

import sys
import os
import argparse
import subprocess
import json
import tempfile

# Add parent directory to path for imports
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))

from scripts.postman_client import PostmanClient
from scripts.config import get_config
from utils.formatters import format_error


def check_newman_installed():
    """Probe for the Newman CLI.

    Returns:
        Tuple ``(installed, info)``: ``(True, version)`` when found,
        ``(False, None)`` when the binary is missing or exits non-zero,
        ``(False, message)`` for any other probe failure (e.g. timeout).
    """
    try:
        probe = subprocess.run(
            ['newman', '--version'],
            capture_output=True,
            text=True,
            timeout=5,
        )
    except FileNotFoundError:
        return False, None
    except Exception as exc:  # timeout, permission errors, ...
        return False, str(exc)
    if probe.returncode == 0:
        return True, probe.stdout.strip()
    return False, None


def find_collection_by_name(client, name):
    """Resolve a collection UID by case-insensitive name match.

    Args:
        client: PostmanClient instance.
        name: Collection name to look up.

    Returns:
        The matching collection's UID, or None when nothing matches.
    """
    wanted = name.lower()
    for entry in client.list_collections():
        if entry['name'].lower() == wanted:
            return entry['uid']
    return None
def run_newman(collection_file, environment_file=None, reporters='cli,json', timeout=300):
    """Execute Newman against a collection file.

    Args:
        collection_file: Path to the exported collection JSON.
        environment_file: Optional path to an environment JSON file.
        reporters: Newman reporters (default 'cli,json'; the JSON reporter
            is required for parsed results).
        timeout: Maximum execution time in seconds.

    Returns:
        Tuple ``(success, results, stdout, stderr)`` — the original docstring
        claimed a 2-tuple; the function has always returned four values.
        ``results`` is the parsed JSON report (empty dict when unavailable).
    """
    # Bug fix: the original exported results to a fixed /tmp/newman-results.json,
    # so concurrent runs clobbered each other and a stale file from a previous
    # run could be re-read. Use a unique temp file and clean it up.
    fd, results_path = tempfile.mkstemp(prefix='newman-results-', suffix='.json')
    os.close(fd)

    cmd = [
        'newman', 'run', collection_file,
        '--reporters', reporters,
        '--reporter-json-export', results_path,
        '--color', 'off',        # plain output is easier to parse
        '--disable-unicode',     # maximize terminal compatibility
    ]
    if environment_file:
        cmd.extend(['--environment', environment_file])

    try:
        proc = subprocess.run(cmd, capture_output=True, text=True, timeout=timeout)

        results = {}
        # mkstemp pre-creates the file, so also require non-empty content
        # before parsing (Newman writes nothing on early failure).
        if os.path.exists(results_path) and os.path.getsize(results_path) > 0:
            with open(results_path, 'r') as fh:
                results = json.load(fh)

        return proc.returncode == 0, results, proc.stdout, proc.stderr

    except subprocess.TimeoutExpired:
        return False, {}, '', f'Newman execution timed out after {timeout} seconds'
    except Exception as exc:
        return False, {}, '', str(exc)
    finally:
        # Best-effort cleanup of the temporary report file.
        try:
            os.unlink(results_path)
        except OSError:
            pass
def main():
    """CLI entry point: resolve collection/environment, run Newman, report results, exit 0/1."""
    parser = argparse.ArgumentParser(
        description='Run a Postman collection using Newman'
    )

    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument('--collection', type=str, help='Collection name to run')
    group.add_argument('--collection-uid', type=str, help='Collection UID to run')

    parser.add_argument('--environment', type=str, help='Environment name to use (optional)')
    parser.add_argument('--environment-uid', type=str, help='Environment UID to use (optional)')
    parser.add_argument('--timeout', type=int, default=300,
                        help='Maximum execution time in seconds (default: 300)')

    args = parser.parse_args()

    # Imported here (rather than at module top) so the helper functions above
    # remain importable without the project package available.
    from scripts.postman_client import PostmanClient
    from scripts.config import get_config
    from utils.formatters import format_error

    collection_file = None
    environment_file = None

    try:
        # Check Newman installation
        print("Checking Newman installation...")
        is_installed, version = check_newman_installed()

        if not is_installed:
            print("\n" + format_error(
                "Newman is not installed",
                "running collection tests"
            ))
            print("\nTo install Newman:")
            print("  1. Install Node.js from https://nodejs.org/")
            print("  2. Run: npm install -g newman")
            print("  3. Verify: newman --version")
            sys.exit(1)

        print(f"✓ Newman {version} is installed\n")

        # Get configuration and create client
        config = get_config()
        client = PostmanClient(config)

        # Resolve collection UID
        if args.collection:
            print(f"Finding collection: {args.collection}...")
            collection_uid = find_collection_by_name(client, args.collection)
            if not collection_uid:
                raise ValueError(f"Collection '{args.collection}' not found")
        else:
            collection_uid = args.collection_uid

        print(f"Collection UID: {collection_uid}")

        # Download collection to a temporary file for Newman
        print("Downloading collection...")
        collection_data = client.get_collection(collection_uid)
        with tempfile.NamedTemporaryFile(mode='w', suffix='.json', delete=False) as f:
            collection_file = f.name
            json.dump(collection_data, f)

        # Handle environment if specified
        if args.environment or args.environment_uid:
            print("Downloading environment...")

            if args.environment:
                # Find environment by name (case-insensitive exact match)
                env_uid = None
                for env in client.list_environments():
                    if env['name'].lower() == args.environment.lower():
                        env_uid = env['uid']
                        break
                if not env_uid:
                    raise ValueError(f"Environment '{args.environment}' not found")
            else:
                env_uid = args.environment_uid

            environment_data = client.get_environment(env_uid)
            with tempfile.NamedTemporaryFile(mode='w', suffix='.json', delete=False) as f:
                environment_file = f.name
                json.dump(environment_data, f)

        # Run Newman
        print(f"\nRunning collection with Newman (timeout: {args.timeout}s)...")
        print("-" * 60)

        success, results, stdout, stderr = run_newman(
            collection_file,
            environment_file,
            timeout=args.timeout
        )

        # Print Newman console output
        if stdout:
            print(stdout)

        # Format and print results
        if results:
            print(format_test_results(results))

        # Print any errors
        if stderr:
            print("\nNewman Errors:", file=sys.stderr)
            print(stderr, file=sys.stderr)

        sys.exit(0 if success else 1)

    except Exception as e:
        print(format_error(e, "running collection"), file=sys.stderr)
        sys.exit(1)
    finally:
        # Clean up temp files even when an error occurred mid-run (previously
        # cleanup only happened on the success path, leaking files on errors).
        # The legacy fixed-path report is also removed if an older run_newman
        # left one behind.
        for path in (collection_file, environment_file, '/tmp/newman-results.json'):
            if path and os.path.exists(path):
                os.unlink(path)


if __name__ == "__main__":
    main()
#!/usr/bin/env python3
"""
Validate OpenAPI and API specifications for correctness and best practices.

Performs comprehensive validation including:
- OpenAPI 3.0 specification compliance
- Schema structure validation
- Required fields checking
- Best practices recommendations

Usage:
    python validate_schema.py --api <api-id>
    python validate_schema.py --spec <spec-id>
    python validate_schema.py --file <path/to/openapi.json>
    python validate_schema.py --help
"""

import sys
import os
import argparse
import json
import re
from collections import defaultdict

# Add parent directory to path so the project imports (done lazily in main())
# resolve when this file is executed directly.
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))

# Pattern for `{param}` segments in a path template, compiled once.
PATH_PARAM_RE = re.compile(r'{([^}]+)}')

# HTTP verbs that count as operations under a path item; other keys
# ('parameters', 'summary', ...) are path-level metadata and are skipped.
HTTP_METHODS = ('get', 'post', 'put', 'delete', 'patch', 'head', 'options')


class SchemaValidator:
    """
    OpenAPI schema validator.

    Walks a parsed spec (dict) and accumulates findings in three buckets:
    errors (must fix), warnings (should fix) and info (recommendations).
    The validate_* methods print progress as a side effect; call
    print_report() at the end for the summary and score.
    """

    def __init__(self):
        self.errors = []
        self.warnings = []
        self.info = []

    def add_error(self, message, location=""):
        """Record a must-fix validation error."""
        self.errors.append({"location": location, "message": message})

    def add_warning(self, message, location=""):
        """Record a should-fix validation warning."""
        self.warnings.append({"location": location, "message": message})

    def add_info(self, message, location=""):
        """Record a best-practice recommendation."""
        self.info.append({"location": location, "message": message})

    def validate_openapi_structure(self, spec):
        """Validate the spec's root-level structure (version, info, servers)."""
        print("🔍 Validating OpenAPI Structure...\n")

        # Check OpenAPI version
        if 'openapi' not in spec:
            self.add_error("Missing 'openapi' version field", "root")
        else:
            version = spec['openapi']
            if not version.startswith('3.0'):
                self.add_warning(f"OpenAPI version {version} - recommend 3.0.x", "openapi")
            else:
                print(f"✓ OpenAPI version: {version}")

        # Check required root fields
        for field in ('info', 'paths'):
            if field not in spec:
                self.add_error(f"Missing required field '{field}'", "root")

        # Validate info object
        if 'info' in spec:
            info = spec['info']
            if 'title' not in info:
                self.add_error("Missing 'title' in info", "info")
            if 'version' not in info:
                self.add_error("Missing 'version' in info", "info")

            # Check for description
            if 'description' not in info:
                self.add_warning("No description in info object", "info")

            # Check for contact/license
            if 'contact' not in info:
                self.add_info("Consider adding contact information", "info")
            if 'license' not in info:
                self.add_info("Consider adding license information", "info")

        # Check servers
        if 'servers' not in spec:
            self.add_warning("No servers defined", "root")
        else:
            print(f"✓ Found {len(spec['servers'])} server(s)")

    def validate_paths(self, spec):
        """Validate every path item and each of its HTTP operations."""
        print("\n🔍 Validating Paths...\n")

        paths = spec.get('paths', {})

        if not paths:
            self.add_error("No paths defined in specification", "paths")
            return

        print(f"✓ Found {len(paths)} path(s)")

        # Track operations per HTTP verb for the summary
        operations_count = defaultdict(int)

        for path, methods in paths.items():
            # Check path format
            if not path.startswith('/'):
                self.add_error(f"Path must start with '/': {path}", f"paths.{path}")

            # Template parameters declared in the path itself, e.g. /users/{id}
            path_params = PATH_PARAM_RE.findall(path)

            for method, operation in methods.items():
                if method not in HTTP_METHODS:
                    continue

                operations_count[method.upper()] += 1
                location = f"paths.{path}.{method}"

                # Check for operationId / summary / tags
                if 'operationId' not in operation:
                    self.add_warning("Missing operationId", location)
                if 'summary' not in operation:
                    self.add_warning("Missing summary", location)
                if 'tags' not in operation:
                    self.add_info("No tags defined", location)

                # Every template parameter must be declared as an `in: path` parameter
                if path_params:
                    params = operation.get('parameters', [])
                    param_names = {p['name'] for p in params if p.get('in') == 'path'}
                    for path_param in path_params:
                        if path_param not in param_names:
                            self.add_error(
                                f"Path parameter '{path_param}' not defined in parameters",
                                location
                            )

                # Check request body for POST/PUT/PATCH
                if method in ('post', 'put', 'patch') and 'requestBody' not in operation:
                    self.add_warning(
                        f"{method.upper()} operation typically has a request body",
                        location
                    )

                # Check responses
                if 'responses' not in operation:
                    self.add_error("Missing responses", location)
                else:
                    responses = operation['responses']
                    if not any(code.startswith('2') for code in responses):
                        self.add_warning("No success response (2xx) defined", location)
                    if not any(code.startswith(('4', '5')) for code in responses):
                        self.add_info("Consider adding error responses (4xx, 5xx)", location)

        # Print operation summary
        print("\nOperation Summary:")
        for method, count in sorted(operations_count.items()):
            print(f"  {method}: {count}")

    def validate_components(self, spec):
        """Validate components: schemas and security schemes."""
        print("\n🔍 Validating Components...\n")

        components = spec.get('components', {})

        if not components:
            self.add_warning("No components defined", "components")
            return

        # Check schemas
        schemas = components.get('schemas', {})
        if schemas:
            print(f"✓ Found {len(schemas)} schema(s)")

            for schema_name, schema in schemas.items():
                location = f"components.schemas.{schema_name}"

                # A schema should either declare a type or delegate via $ref
                if 'type' not in schema and '$ref' not in schema:
                    self.add_warning("Schema missing 'type' field", location)

                if 'description' not in schema:
                    self.add_info("Consider adding description", location)

                # For object types, check properties
                if schema.get('type') == 'object' and 'properties' not in schema:
                    self.add_warning("Object schema has no properties", location)

        # Check security schemes
        security_schemes = components.get('securitySchemes', {})
        if security_schemes:
            print(f"✓ Found {len(security_schemes)} security scheme(s)")
        else:
            self.add_warning("No security schemes defined", "components.securitySchemes")

    def validate_spec(self, spec):
        """Run all validations against a parsed spec dict."""
        self.validate_openapi_structure(spec)
        self.validate_paths(spec)
        self.validate_components(spec)

    def print_report(self):
        """Print the findings grouped by severity, then a score and grade."""
        print("\n" + "=" * 70)
        print("📋 VALIDATION REPORT")
        print("=" * 70 + "\n")

        total = len(self.errors) + len(self.warnings) + len(self.info)

        if total == 0:
            print("✅ Schema is valid! No issues found.\n")
            return

        # Summary
        print("Summary:")
        print(f"  🔴 Errors: {len(self.errors)}")
        print(f"  🟡 Warnings: {len(self.warnings)}")
        print(f"  🔵 Info: {len(self.info)}")
        print()

        sections = (
            ("🔴 ERRORS (Must Fix):", self.errors),
            ("🟡 WARNINGS (Should Fix):", self.warnings),
            ("🔵 RECOMMENDATIONS:", self.info),
        )
        for title, findings in sections:
            if not findings:
                continue
            print(title)
            print("-" * 70)
            for i, finding in enumerate(findings, 1):
                loc = f" [{finding['location']}]" if finding['location'] else ""
                print(f"{i}. {finding['message']}{loc}")
            print()

        # Weighted score: each error costs 10 points, each warning 3.
        score = max(0, 100 - (len(self.errors) * 10) - (len(self.warnings) * 3))
        print("=" * 70)
        print(f"Validation Score: {score}/100")

        if score >= 90:
            print("Grade: A (Excellent) ✅")
        elif score >= 75:
            print("Grade: B (Good) ✔️")
        elif score >= 60:
            print("Grade: C (Fair) ⚠️")
        else:
            print("Grade: D (Needs Work) ❌")

        print("=" * 70 + "\n")


def validate_from_api(client, api_id):
    """Fetch the latest version's schema of a Postman API and validate it."""
    print(f"=== Validating API: {api_id} ===\n")

    try:
        # Get API
        api = client.get_api(api_id)
        print(f"API: {api.get('name', 'Unnamed')}\n")

        # Get latest version
        versions = client.get_api_versions(api_id)
        if not versions:
            print("❌ No versions found for this API")
            return

        version = versions[0]
        print(f"Version: {version.get('name', 'Unknown')}\n")

        # Get schema
        schemas = client.get_api_schema(api_id, version.get('id'))
        if not schemas:
            print("❌ No schema found for this version")
            return

        # NOTE(review): assumes the stored schema content is JSON; a YAML
        # schema would fail json.loads here — confirm against the Postman API.
        spec = json.loads(schemas[0].get('schema', '{}'))

        # Validate
        validator = SchemaValidator()
        validator.validate_spec(spec)
        validator.print_report()

    except Exception as e:
        print(f"❌ Error validating API: {e}")
        sys.exit(1)


def validate_from_spec(client, spec_id):
    """Fetch a Spec Hub specification's root file and validate it."""
    print(f"=== Validating Spec: {spec_id} ===\n")

    try:
        spec_data = client.get_spec(spec_id)
        print(f"Spec: {spec_data.get('name', 'Unnamed')}\n")

        files = spec_data.get('files', [])
        if not files:
            print("❌ No files found in spec")
            return

        # Prefer the file flagged as root; fall back to the first file.
        root_file = next((f for f in files if f.get('root')), files[0])

        # NOTE(review): assumes root file content is JSON — confirm for YAML specs.
        spec = json.loads(root_file.get('content', '{}'))

        # Validate
        validator = SchemaValidator()
        validator.validate_spec(spec)
        validator.print_report()

    except Exception as e:
        print(f"❌ Error validating spec: {e}")
        sys.exit(1)
validate_from_file(file_path): + """Validate schema from file.""" + print(f"=== Validating File: {file_path} ===\n") + + try: + with open(file_path, 'r') as f: + if file_path.endswith('.yaml') or file_path.endswith('.yml'): + try: + import yaml + spec = yaml.safe_load(f) + except ImportError: + print("❌ PyYAML not installed. Install with: pip install pyyaml") + sys.exit(1) + else: + spec = json.load(f) + + # Validate + validator = SchemaValidator() + validator.validate_spec(spec) + validator.print_report() + + except Exception as e: + print(f"❌ Error validating file: {e}") + sys.exit(1) + + +def main(): + """Main entry point.""" + + parser = argparse.ArgumentParser( + description='Validate OpenAPI specifications', + formatter_class=argparse.RawDescriptionHelpFormatter, + epilog=""" +Examples: + # Validate API schema + python validate_schema.py --api + + # Validate Spec Hub specification + python validate_schema.py --spec + + # Validate local file + python validate_schema.py --file openapi.json + """ + ) + + parser.add_argument('--api', metavar='API_ID', help='Validate API schema') + parser.add_argument('--spec', metavar='SPEC_ID', help='Validate Spec Hub specification') + parser.add_argument('--file', metavar='FILE_PATH', help='Validate local OpenAPI file') + + args = parser.parse_args() + + if not any([args.api, args.spec, args.file]): + parser.print_help() + return + + # Execute validation + if args.api: + client = PostmanClient() + validate_from_api(client, args.api) + + elif args.spec: + client = PostmanClient() + validate_from_spec(client, args.spec) + + elif args.file: + if not os.path.exists(args.file): + print(f"❌ File not found: {args.file}") + sys.exit(1) + validate_from_file(args.file) + + +if __name__ == '__main__': + main() diff --git a/scripts/validate_setup.py b/scripts/validate_setup.py new file mode 100644 index 0000000..f5caeec --- /dev/null +++ b/scripts/validate_setup.py @@ -0,0 +1,133 @@ +#!/usr/bin/env python3 +""" +Validates Postman skill 
#!/usr/bin/env python3
"""
Validates Postman skill setup and provides helpful diagnostics.
Run this first or auto-run on initial skill load.

Usage:
    python validate_setup.py
"""

import sys
import os

# Add parent directory to path for imports
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))

from scripts.postman_client import PostmanClient
from scripts.config import PostmanConfig
from utils.formatters import format_error


def validate_setup():
    """
    Comprehensive setup validation with helpful output.

    Checks, in order: API key presence, API connectivity, and (when a
    workspace is configured) workspace access plus a resource summary.

    Returns:
        bool: True when setup is usable, False when a blocking problem
        (missing key or failed connection) was found.
    """

    print("🔍 Validating Postman Skill Setup...\n")

    # 1. Check API key
    config = PostmanConfig()

    if not config.api_key:
        print("❌ No API key found")
        print("📋 Setup: Add POSTMAN_API_KEY to .env file")
        print("🔗 Get key: https://web.postman.co/settings/me/api-keys\n")
        return False

    print(f"✅ API Key found (starts with {config.api_key[:9]}...)\n")

    # 2. Test API connectivity
    print("🌐 Testing Postman API connection...")
    client = PostmanClient(config)

    try:
        # Listing collections doubles as a cheap connectivity/auth probe;
        # the result itself is not needed here.
        client.list_collections()
        print("✅ Successfully connected to Postman API\n")
    except Exception as e:
        print(f"❌ API Connection failed: {e}\n")
        print("🔧 Troubleshooting:")
        print("  • Check your API key is valid")
        print("  • Verify network connectivity")
        print("  • Check Postman status: https://status.postman.com/\n")
        return False

    # 3. Check workspace configuration
    if config.workspace_id:
        print(f"📁 Checking configured workspace: {config.workspace_id}")
        try:
            workspace = client.get_workspace(config.workspace_id)
            workspace_name = workspace.get('name', 'Unknown')
            workspace_type = workspace.get('type', 'Unknown')
            print(f"✅ Workspace: {workspace_name} (Type: {workspace_type})\n")

            # 4. Get collection count
            print("📊 Analyzing workspace contents...")
            collections = client.list_collections(config.workspace_id)
            collection_count = len(collections)
            print(f"  Collections: {collection_count}")

            environments = client.list_environments(config.workspace_id)
            print(f"  Environments: {len(environments)}")

            # Monitors/APIs may be unavailable on some plans; the previous
            # bare `except:` also swallowed KeyboardInterrupt/SystemExit.
            try:
                monitors = client.list_monitors(config.workspace_id)
                print(f"  Monitors: {len(monitors)}")
            except Exception:
                print("  Monitors: (unavailable)")

            try:
                apis = client.list_apis(config.workspace_id)
                print(f"  APIs: {len(apis)}\n")
            except Exception:
                print("  APIs: (unavailable)\n")

            if collection_count == 0:
                print("ℹ️  This workspace has no collections. You can:")
                print("  • Create a new collection")
                print("  • Switch to a different workspace (run: python scripts/list_workspaces.py)")
                print("  • Import collections from another source\n")

        except Exception as e:
            print(f"⚠️  Workspace issue: {e}")
            print("💡 Try running: python scripts/list_workspaces.py\n")
    else:
        print("ℹ️  No workspace configured (POSTMAN_WORKSPACE_ID not set)")
        print("  Will show all collections across all workspaces")
        print("💡 To set a workspace, add to .env file:")
        print("  POSTMAN_WORKSPACE_ID=your-workspace-id\n")

        try:
            # Try to list collections without workspace filter
            collections = client.list_collections()
            collection_count = len(collections)
            print(f"📊 Total collections (all workspaces): {collection_count}\n")

            if collection_count == 0:
                print("ℹ️  No collections found in any workspace.")
                print("💡 Run: python scripts/list_workspaces.py to see your workspaces\n")
        except Exception as e:
            print(f"⚠️  Could not list collections: {e}\n")

    print("✅ Setup validation complete!\n")
    print("🚀 You're ready to use the Postman skill!")
    print("\n💡 Helpful commands:")
    print("  • List collections: python scripts/list_collections.py")
    print("  • List workspaces: python scripts/list_workspaces.py")
    print("  • List all resources: python scripts/list_collections.py --all\n")

    return True


def main():
    """Run validation and exit 0 on success, 1 on failure or error."""
    try:
        success = validate_setup()
        sys.exit(0 if success else 1)
    except Exception as e:
        print(format_error(e, "validating setup"), file=sys.stderr)
        sys.exit(1)


if __name__ == "__main__":
    main()
+ +# Good +paths: + /users: + get: + operationId: listUsers + summary: List all users + description: Returns paginated list of users with optional filtering + tags: [Users] +``` + +### 2. Rich Error Semantics +**Why it matters:** Agents must understand what went wrong and how to fix it. + +| Check | Severity | What Agents Need | +|-------|----------|-----------------| +| 4xx responses documented | Critical | Know what errors to expect | +| error schema defined | Critical | Consistent, parseable error format | +| error code field | Critical | Machine-readable code like `VALIDATION_ERROR` | +| error message field | High | Human-readable explanation | +| error details field | High | Field-level validation info | +| expected vs received | High | Exactly what was wrong | + +**Fix example:** +```json +// Bad error response +{"error": "Something went wrong"} + +// Good error response +{ + "code": "VALIDATION_ERROR", + "message": "Request validation failed", + "details": [ + { + "field": "email", + "code": "INVALID_FORMAT", + "message": "Must be a valid email address", + "expected": "email format (user@domain.com)", + "received": "not-an-email" + } + ] +} +``` + +### 3. Complete Introspection +**Why it matters:** Agents construct requests from schema definitions alone. + +| Check | Severity | What Agents Need | +|-------|----------|-----------------| +| parameter types defined | Critical | Can't guess data types | +| required parameters marked | Critical | Know what must be provided | +| enum values listed | Critical | All valid options for choice fields | +| request/response examples | High | Concrete templates to follow | +| format/pattern specified | Medium | Validation before sending | + +### 4. Consistent Naming +**Why it matters:** Predictable patterns let agents infer behavior. + +| Check | Severity | What Agents Need | +|-------|----------|-----------------| +| HTTP method semantics | Critical | GET reads, POST creates, etc. 
| +| consistent casing | High | camelCase, snake_case - pick one | +| RESTful paths | High | `/users/{id}` not `/getUser` | +| no action verbs in paths | Medium | REST patterns, not RPC | + +### 5. Predictable Behavior +**Why it matters:** Agents expect consistent response formats. + +| Check | Severity | What Agents Need | +|-------|----------|-----------------| +| response schema defined | Critical | Can parse what comes back | +| response matches schema | Critical | No surprises | +| consistent pagination | High | Same pattern across list endpoints | +| idempotency documented | High | Safe retry behavior | + +### 6. Comprehensive Documentation +**Why it matters:** Agents may fetch docs for context. + +| Check | Severity | What Agents Need | +|-------|----------|-----------------| +| authentication documented | Critical | How to authenticate | +| rate limits documented | High | How to throttle requests | +| docs match spec | High | Consistent information | + +### 7. Speed & Reliability +**Why it matters:** Agents have timeouts and retry logic. + +| Check | Severity | What Agents Need | +|-------|----------|-----------------| +| response time acceptable | High | Fast enough to not timeout | +| rate limit headers | Medium | Self-throttle appropriately | +| request ID header | Medium | Correlate for debugging | + +### 8. Discoverability +**Why it matters:** Agents need to find the API. 
+ +| Check | Severity | What Agents Need | +|-------|----------|-----------------| +| spec publicly accessible | High | Can fetch latest spec | +| server URL in spec | High | Know where to send requests | + +## Scoring + +Clara scores APIs on a 0-100 scale: + +- **70%+** with no critical failures = **Agent Ready** +- **Below 70%** or critical failures = **Not Agent Ready** + +Severity weights: +- Critical: 4x weight +- High: 2x weight +- Medium: 1x weight +- Low: 0.5x weight + +## Common Issues and Fixes + +### Missing operationIds +**Problem:** Agents can't reference operations uniquely. +**Fix:** Add descriptive operationIds to every endpoint. + +### Generic error responses +**Problem:** Agent gets `{"error": "Bad request"}` and has no idea what to fix. +**Fix:** Use structured errors with field-level details. + +### Undocumented authentication +**Problem:** Agent doesn't know how to authenticate. +**Fix:** Add securitySchemes and security requirements. + +### Missing examples +**Problem:** Agent must construct requests from schemas alone. +**Fix:** Add realistic examples for request/response bodies. 
"""
Custom exception classes for Postman API errors.

Provides specific exception types for different API error scenarios,
with helpful error messages and resolution guidance.
"""


class PostmanAPIError(Exception):
    """
    Base exception for all Postman API errors.

    Attributes:
        message: Error message
        status_code: HTTP status code (if applicable)
        response_data: Raw API response data (if available)
    """

    def __init__(self, message, status_code=None, response_data=None):
        self.message = message
        self.status_code = status_code
        self.response_data = response_data
        super().__init__(self.message)

    def __str__(self):
        # Prefix with the HTTP status when one is known, e.g. "[404] ...".
        if self.status_code:
            return f"[{self.status_code}] {self.message}"
        return self.message


class AuthenticationError(PostmanAPIError):
    """
    API key is invalid or missing.

    Raised when authentication fails (401 Unauthorized).
    """

    def __init__(self, message=None, status_code=401, response_data=None):
        if not message:
            message = (
                "Authentication failed. Your API key may be invalid or missing.\n\n"
                "To get a valid API key:\n"
                "1. Go to https://web.postman.co/settings/me/api-keys\n"
                "2. Click 'Generate API Key'\n"
                "3. Copy the key (starts with 'PMAK-')\n"
                "4. Set it as environment variable:\n"
                "   export POSTMAN_API_KEY='your-key-here'\n\n"
                "Or check that your current key hasn't expired."
            )
        super().__init__(message, status_code, response_data)


class RateLimitError(PostmanAPIError):
    """
    API rate limit exceeded.

    Raised when the client exceeds the Postman API rate limit
    (429 Too Many Requests).

    Attributes:
        retry_after: Seconds to wait before retrying (from Retry-After header)
    """

    def __init__(self, message=None, retry_after=None, status_code=429, response_data=None):
        self.retry_after = retry_after

        if not message:
            message = "API rate limit exceeded."
            if retry_after:
                message += f" Please wait {retry_after} seconds before retrying."
            else:
                message += " Please wait a few moments before retrying."

        # NOTE(review): guidance is appended unconditionally (also to
        # caller-supplied messages), matching the pattern of the sibling
        # classes below — confirm this matches the original intent.
        message += (
            "\n\nTips to avoid rate limiting:\n"
            "- Reduce request frequency\n"
            "- Implement exponential backoff\n"
            "- Cache responses when possible\n"
            "- Upgrade to a higher plan for increased limits"
        )

        super().__init__(message, status_code, response_data)


class ResourceNotFoundError(PostmanAPIError):
    """
    Requested resource doesn't exist.

    Raised when the requested resource (collection, environment, API, etc.)
    is not found (404 Not Found).
    """

    def __init__(self, resource_type=None, resource_id=None, message=None,
                 status_code=404, response_data=None):
        self.resource_type = resource_type
        self.resource_id = resource_id

        if not message:
            if resource_type and resource_id:
                message = f"{resource_type} with ID '{resource_id}' was not found."
            else:
                message = "The requested resource was not found."

        message += (
            "\n\nPossible reasons:\n"
            "- The resource was deleted\n"
            "- The ID is incorrect\n"
            "- You don't have permission to access it\n"
            "- The resource is in a different workspace"
        )

        super().__init__(message, status_code, response_data)


class ValidationError(PostmanAPIError):
    """
    Request data failed validation.

    Raised when the API request contains invalid data (400 Bad Request).
    """

    def __init__(self, message=None, validation_errors=None,
                 status_code=400, response_data=None):
        self.validation_errors = validation_errors or []

        if not message:
            message = "Request validation failed."

        if self.validation_errors:
            message += "\n\nValidation errors:\n"
            for error in self.validation_errors:
                message += f"  - {error}\n"
        else:
            message += " Please check your request data."

        super().__init__(message, status_code, response_data)


class PermissionError(PostmanAPIError):
    """
    Insufficient permissions for this operation.

    Raised when the user doesn't have permission to perform the requested
    operation (403 Forbidden).

    WARNING: this class name shadows Python's builtin ``PermissionError``
    (an OSError subclass) wherever it is imported, so ``except
    PermissionError:`` will no longer match OS file-permission errors in
    that module. Prefer the ``PostmanPermissionError`` alias below in new
    code; the original name is kept for backward compatibility.
    """

    def __init__(self, message=None, required_permission=None,
                 status_code=403, response_data=None):
        self.required_permission = required_permission

        if not message:
            message = "You don't have permission to perform this operation."

        if required_permission:
            message += f"\n\nRequired permission: {required_permission}"

        message += (
            "\n\nPossible solutions:\n"
            "- Check your workspace role/permissions\n"
            "- Request access from the workspace admin\n"
            "- Verify your API key has the necessary scopes\n"
            "- Ensure you're using the correct workspace ID"
        )

        super().__init__(message, status_code, response_data)


# Non-shadowing alias for the class above (see its docstring).
PostmanPermissionError = PermissionError


class DeprecatedEndpointError(PostmanAPIError):
    """
    Using a deprecated API endpoint.

    Raised when the skill detects use of a deprecated endpoint.
    """

    def __init__(self, endpoint, replacement=None, message=None,
                 status_code=None, response_data=None):
        self.endpoint = endpoint
        self.replacement = replacement

        if not message:
            message = f"Endpoint '{endpoint}' is deprecated."

        if replacement:
            message += f"\n\nPlease use '{replacement}' instead."

        message += (
            "\n\nDeprecated endpoints may be removed in future API versions. "
            "Update your code to use the recommended replacement."
        )

        super().__init__(message, status_code, response_data)


class ConflictError(PostmanAPIError):
    """
    Request conflicts with current state.

    Raised when the request conflicts with existing data (409 Conflict).
    For example, trying to create a resource that already exists.
    """

    def __init__(self, message=None, status_code=409, response_data=None):
        if not message:
            message = (
                "Request conflicts with existing data.\n\n"
                "Common causes:\n"
                "- Resource with this name already exists\n"
                "- Resource is in use and cannot be modified\n"
                "- Concurrent modification conflict"
            )

        super().__init__(message, status_code, response_data)


class ServerError(PostmanAPIError):
    """
    Postman API server error.

    Raised when the Postman API returns a server error (5xx status codes).
    """

    def __init__(self, message=None, status_code=500, response_data=None):
        if not message:
            message = (
                "Postman API server error. This is typically a temporary issue.\n\n"
                "Recommended actions:\n"
                "- Wait a few moments and retry\n"
                "- Check Postman status page: https://status.postman.com\n"
                "- If the issue persists, contact Postman support"
            )

        super().__init__(message, status_code, response_data)


class APIVersionError(PostmanAPIError):
    """
    API version incompatibility.

    Raised when the detected API version doesn't support the requested
    feature.
    """

    def __init__(self, feature, required_version=None, detected_version=None,
                 message=None):
        self.feature = feature
        self.required_version = required_version
        self.detected_version = detected_version

        if not message:
            message = f"Feature '{feature}' is not available."

        if required_version:
            message += f"\n\nRequired API version: {required_version}"

        if detected_version:
            message += f"\nDetected API version: {detected_version}"

        message += (
            "\n\nThis feature requires a newer version of the Postman API. "
            "Please upgrade to Postman v10+ for full compatibility."
        )

        super().__init__(message)
+ """ + + def __init__(self, feature, required_version=None, detected_version=None, + message=None): + self.feature = feature + self.required_version = required_version + self.detected_version = detected_version + + if not message: + message = f"Feature '{feature}' is not available." + + if required_version: + message += f"\n\nRequired API version: {required_version}" + + if detected_version: + message += f"\nDetected API version: {detected_version}" + + message += ( + "\n\nThis feature requires a newer version of the Postman API. " + "Please upgrade to Postman v10+ for full compatibility." + ) + + super().__init__(message) + + +class NetworkError(PostmanAPIError): + """ + Network connectivity error. + + Raised when unable to connect to the Postman API due to network issues. + """ + + def __init__(self, message=None, original_error=None): + self.original_error = original_error + + if not message: + message = "Failed to connect to Postman API." + + if original_error: + error_str = str(original_error) + message += f"\n\nOriginal error: {error_str}" + + # Detect DNS resolution errors (Claude Desktop network allowlist) + if "NameResolutionError" in error_str or "Failed to resolve" in error_str or "Temporary failure in name resolution" in error_str: + message += ( + "\n\n⚠️ DNS RESOLUTION ERROR - NETWORK RESTRICTION DETECTED" + "\n\n**This skill cannot run in Claude Desktop** due to network security restrictions." + "\n\nClaude Desktop only allows connections to these domains:" + "\n- api.anthropic.com" + "\n- github.com / pypi.org / npmjs.com" + "\n- archive.ubuntu.com / security.ubuntu.com" + "\n\n'api.getpostman.com' is NOT in the allowlist." + "\n\nSOLUTIONS:" + "\n1. Use the Claude API with code execution (fully supported)" + "\n2. Run the scripts directly on your local machine:" + "\n python scripts/list_collections.py" + "\n3. Contact Anthropic to request adding api.getpostman.com to the allowlist" + "\n\nSee SKILL.md for details on environment compatibility." 
+ ) + return super().__init__(message) + + # Detect proxy-related errors + if "ProxyError" in error_str or "Tunnel connection failed" in error_str or "403 Forbidden" in error_str: + message += ( + "\n\n⚠️ PROXY ERROR DETECTED" + "\n\nThe connection is being blocked by a proxy server." + "\n\nSOLUTION: The skill now bypasses proxies by default." + "\n\nIf you're still seeing this error:" + "\n1. Ensure you have the latest version of the skill" + "\n2. The .env file should NOT contain POSTMAN_USE_PROXY=true" + "\n3. If in a corporate environment, you may need to:" + "\n - Disable Claude Desktop's proxy settings" + "\n - Or configure your proxy to allow api.getpostman.com" + "\n - Or use a direct internet connection" + ) + return super().__init__(message) + + message += ( + "\n\nPossible causes:\n" + "- No internet connection\n" + "- Proxy server blocking requests (see proxy error above)\n" + "- Firewall blocking requests\n" + "- Postman API is down (check https://status.postman.com)\n" + "- DNS resolution issues" + ) + + super().__init__(message) + + +class TimeoutError(PostmanAPIError): + """ + Request timeout error. + + Raised when the API request times out. + """ + + def __init__(self, message=None, timeout_seconds=None): + self.timeout_seconds = timeout_seconds + + if not message: + message = "API request timed out." + + if timeout_seconds: + message += f"\n\nTimeout limit: {timeout_seconds} seconds" + + message += ( + "\n\nPossible solutions:\n" + "- Increase timeout value in configuration\n" + "- Check network connectivity\n" + "- Try again later (API may be slow)\n" + "- For large collections, consider pagination" + ) + + super().__init__(message) + + +# Helper function to create appropriate exception from response +def create_exception_from_response(response, default_message="API request failed"): + """ + Create an appropriate exception based on HTTP status code and response data. 
+ + Args: + response: requests.Response object + default_message: Default message if no specific error found + + Returns: + An instance of the appropriate exception class + """ + status_code = response.status_code + + # Try to parse error response + try: + error_data = response.json() + error_info = error_data.get('error', {}) + message = error_info.get('message', default_message) + error_name = error_info.get('name', '') + except: + error_data = None + message = default_message + error_name = '' + + # Map status codes to exceptions + if status_code == 401: + return AuthenticationError(message, status_code, error_data) + + elif status_code == 403: + return PermissionError(message, status_code=status_code, response_data=error_data) + + elif status_code == 404: + return ResourceNotFoundError( + message=message, + status_code=status_code, + response_data=error_data + ) + + elif status_code == 400: + # Try to extract validation errors + validation_errors = [] + if error_data and 'error' in error_data: + details = error_data['error'].get('details', []) + validation_errors = [d.get('message', str(d)) for d in details] + + return ValidationError( + message=message, + validation_errors=validation_errors, + status_code=status_code, + response_data=error_data + ) + + elif status_code == 409: + return ConflictError(message, status_code, error_data) + + elif status_code == 429: + # Try to get retry-after header + retry_after = response.headers.get('Retry-After') + if retry_after: + try: + retry_after = int(retry_after) + except: + retry_after = None + + return RateLimitError( + message=message, + retry_after=retry_after, + status_code=status_code, + response_data=error_data + ) + + elif status_code >= 500: + return ServerError(message, status_code, error_data) + + else: + # Generic error for other status codes + return PostmanAPIError(message, status_code, error_data) diff --git a/utils/formatters.py b/utils/formatters.py new file mode 100644 index 0000000..35696e1 --- 
"""
Output formatters for Postman API responses.
Makes API data human-readable for Claude and users.
"""

from datetime import datetime


def format_collections_list(collections):
    """
    Format a list of collections into readable output.

    Args:
        collections: List of collection objects from Postman API

    Returns:
        Formatted string representation
    """
    if not collections:
        return "No collections found in this workspace."

    output = []
    output.append(f"Found {len(collections)} collection(s):")
    output.append("")

    for idx, collection in enumerate(collections, 1):
        name = collection.get('name', 'Unnamed Collection')
        uid = collection.get('uid', 'N/A')
        output.append(f"{idx}. {name}")
        output.append(f"   UID: {uid}")
        if 'owner' in collection:
            output.append(f"   Owner: {collection['owner']}")
        output.append("")

    return "\n".join(output)


def format_environments_list(environments):
    """
    Format a list of environments into readable output.

    Args:
        environments: List of environment objects from Postman API

    Returns:
        Formatted string representation
    """
    if not environments:
        return "No environments found in this workspace."

    output = []
    output.append(f"Found {len(environments)} environment(s):")
    output.append("")

    for idx, env in enumerate(environments, 1):
        name = env.get('name', 'Unnamed Environment')
        uid = env.get('uid', 'N/A')
        output.append(f"{idx}. {name}")
        output.append(f"   UID: {uid}")
        output.append("")

    return "\n".join(output)


def format_monitors_list(monitors):
    """
    Format a list of monitors into readable output.

    Args:
        monitors: List of monitor objects from Postman API

    Returns:
        Formatted string representation
    """
    if not monitors:
        return "No monitors found in this workspace."

    output = []
    output.append(f"Found {len(monitors)} monitor(s):")
    output.append("")

    for idx, monitor in enumerate(monitors, 1):
        name = monitor.get('name', 'Unnamed Monitor')
        uid = monitor.get('uid', 'N/A')
        output.append(f"{idx}. {name}")
        output.append(f"   UID: {uid}")
        if 'collection' in monitor:
            output.append(f"   Collection: {monitor['collection']}")
        output.append("")

    return "\n".join(output)


def format_apis_list(apis):
    """
    Format a list of APIs into readable output.

    Args:
        apis: List of API objects from Postman API

    Returns:
        Formatted string representation
    """
    if not apis:
        return "No APIs found in this workspace."

    output = []
    output.append(f"Found {len(apis)} API(s):")
    output.append("")

    for idx, api in enumerate(apis, 1):
        name = api.get('name', 'Unnamed API')
        api_id = api.get('id', 'N/A')
        output.append(f"{idx}. {name}")
        output.append(f"   ID: {api_id}")
        if 'summary' in api:
            output.append(f"   Summary: {api['summary']}")
        output.append("")

    return "\n".join(output)


def format_workspace_summary(collections, environments, monitors, apis):
    """
    Format a complete workspace summary.

    Args:
        collections: List of collections
        environments: List of environments
        monitors: List of monitors
        apis: List of APIs

    Returns:
        Formatted string representation
    """
    output = []
    output.append("=== Workspace Summary ===")
    output.append("")
    output.append(f"Collections: {len(collections)}")
    output.append(f"Environments: {len(environments)}")
    output.append(f"Monitors: {len(monitors)}")
    output.append(f"APIs: {len(apis)}")
    output.append("")
    output.append("Use specific list commands to see details:")
    output.append("- List collections: python /skills/postman-skill/scripts/list_collections.py")
    output.append("- List environments: python /skills/postman-skill/scripts/list_collections.py --environments")
    output.append("- List monitors: python /skills/postman-skill/scripts/list_collections.py --monitors")
    output.append("- List APIs: python /skills/postman-skill/scripts/list_collections.py --apis")
    output.append("")

    return "\n".join(output)


def format_error(error, context=""):
    """
    Format an error message with helpful guidance.

    Args:
        error: Exception or error message
        context: Additional context about what was being attempted

    Returns:
        Formatted error message
    """
    output = []
    output.append("=== Error ===")
    if context:
        output.append(f"Context: {context}")
    output.append(f"Error: {str(error)}")
    output.append("")
    output.append("Troubleshooting steps:")
    output.append("1. Check that POSTMAN_API_KEY is set correctly")
    output.append("2. Verify your API key has the necessary permissions")
    output.append("3. Check if the resource ID/UID is correct")
    output.append("4. Review the Postman API status: https://status.postman.com/")
    output.append("")

    return "\n".join(output)


def format_monitor(monitor, verbose=False):
    """
    Format a single monitor for display.

    Args:
        monitor: Monitor object from API
        verbose: Include detailed information

    Returns:
        Formatted string representation
    """
    output = []

    # Status and name.
    status = "✓ Active" if monitor.get('active', False) else "✗ Inactive"
    name = monitor.get('name', 'Unnamed Monitor')
    output.append(f"{status} {name}")
    output.append(f"  ID: {monitor.get('id')}")
    output.append(f"  UID: {monitor.get('uid')}")

    if verbose:
        if monitor.get('collectionUid'):
            output.append(f"  Collection: {monitor.get('collectionUid')}")
        if monitor.get('environmentUid'):
            output.append(f"  Environment: {monitor.get('environmentUid')}")
        if monitor.get('schedule'):
            schedule = monitor.get('schedule', {})
            output.append(f"  Schedule: {schedule.get('cron', 'Not set')}")
        if monitor.get('lastRun'):
            last_run = monitor.get('lastRun', {})
            output.append(f"  Last Run: {last_run.get('finishedAt', 'Never')}")
            if last_run.get('status'):
                output.append(f"  Last Status: {last_run.get('status')}")

    return "\n".join(output)


def format_monitor_run(run, index=None):
    """
    Format a monitor run for display.

    Args:
        run: Monitor run object from API
        index: Optional index number for the run

    Returns:
        Formatted string representation
    """
    output = []

    status = run.get('status', 'unknown')
    status_icon = "✓" if status == 'success' else "✗"

    # Run number.
    if index is not None:
        output.append(f"{index}. {status_icon} {status.upper()}")
    else:
        output.append(f"{status_icon} {status.upper()}")

    # Timestamps.
    started = run.get('startedAt', 'N/A')
    finished = run.get('finishedAt', 'N/A')
    output.append(f"  Started: {started}")

    # Calculate duration; timestamps are expected to be ISO 8601 with a
    # trailing 'Z' — leave duration as "N/A" when parsing fails.
    duration = "N/A"
    if started != 'N/A' and finished != 'N/A':
        try:
            start_dt = datetime.fromisoformat(started.replace('Z', '+00:00'))
            finish_dt = datetime.fromisoformat(finished.replace('Z', '+00:00'))
            duration_seconds = (finish_dt - start_dt).total_seconds()
            duration = f"{duration_seconds:.1f}s"
        except (ValueError, TypeError, AttributeError):
            pass

    output.append(f"  Duration: {duration}")

    # Stats.
    stats = run.get('stats', {})
    if stats:
        requests = stats.get('requests', {})
        assertions = stats.get('assertions', {})
        output.append(f"  Requests: {requests.get('total', 0)} total, {requests.get('failed', 0)} failed")
        output.append(f"  Assertions: {assertions.get('total', 0)} total, {assertions.get('failed', 0)} failed")

    return "\n".join(output)
def format_monitor_runs_summary(runs):
    """
    Format a summary of monitor runs.

    Args:
        runs: List of monitor run objects

    Returns:
        Formatted string representation
    """
    if not runs:
        return "No run history available."

    output = []

    # Aggregate statistics (total_runs >= 1 here, so the percentage
    # divisions below are safe).
    total_runs = len(runs)
    successful_runs = sum(1 for run in runs if run.get('status') == 'success')
    failed_runs = total_runs - successful_runs

    output.append(f"Monitor Run History (Last {total_runs} runs)")
    output.append("=" * 80)
    output.append("")

    # Summary.
    output.append("Summary:")
    output.append(f"  Total Runs: {total_runs}")
    output.append(f"  Successful: {successful_runs} ({successful_runs/total_runs*100:.1f}%)")
    output.append(f"  Failed: {failed_runs} ({failed_runs/total_runs*100:.1f}%)")
    output.append("")

    # Recent runs, one formatted entry per run.
    output.append("Recent Runs:")
    output.append("-" * 80)

    for i, run in enumerate(runs, 1):
        output.append(format_monitor_run(run, i))
        output.append("")

    return "\n".join(output)


"""
Retry handler with exponential backoff for Postman API calls.
"""

import time
import sys


class RetryHandler:
    """Handles retry logic with exponential backoff for API calls."""

    def __init__(self, max_retries=3, base_delay=1):
        # max_retries is the total number of attempts, not extra retries.
        self.max_retries = max_retries
        self.base_delay = base_delay

    def should_retry(self, status_code):
        """Return True for status codes worth retrying: rate limits (429)
        and transient server errors (5xx gateway family)."""
        return status_code in [429, 500, 502, 503, 504]

    def get_delay(self, attempt):
        """Calculate the exponential backoff delay for a 0-based attempt."""
        return self.base_delay * (2 ** attempt)

    def execute(self, func, *args, **kwargs):
        """
        Execute a function with retry logic.

        Args:
            func: Function to execute (should return a requests.Response)
            *args, **kwargs: Arguments to pass to the function

        Returns:
            Response object from successful request (or any response with a
            non-retryable status code — the caller handles HTTP errors)

        Raises:
            Exception: If all retries are exhausted, either the last
                exception raised by func or a generic "Max retries" error
                when the final response still had a retryable status.
        """
        last_exception = None

        for attempt in range(self.max_retries):
            try:
                response = func(*args, **kwargs)
            except Exception as e:
                # func itself failed (network error, etc.): back off and
                # retry, or re-raise on the final attempt.
                last_exception = e
                if attempt < self.max_retries - 1:
                    delay = self.get_delay(attempt)
                    print(
                        f"Request failed: {e}. "
                        f"Retrying in {delay}s... (attempt {attempt + 1}/{self.max_retries})",
                        file=sys.stderr
                    )
                    time.sleep(delay)
                    continue
                raise

            # Success or a non-retryable HTTP error: hand back to caller.
            if not self.should_retry(response.status_code):
                return response

            if attempt < self.max_retries - 1:
                delay = self.get_delay(attempt)
                print(
                    f"Rate limited or server error (status {response.status_code}). "
                    f"Retrying in {delay}s... (attempt {attempt + 1}/{self.max_retries})",
                    file=sys.stderr
                )
                time.sleep(delay)
            else:
                # Final attempt still retryable: give up. Raised OUTSIDE the
                # try block so it is not swallowed by the except above.
                raise Exception(
                    f"Max retries ({self.max_retries}) exceeded. "
                    f"Last status code: {response.status_code}"
                )

        # Unreachable with max_retries >= 1, kept as a defensive fallback.
        raise last_exception