From eb546578ba87b101eed0d2aeef0f45210ad84db6 Mon Sep 17 00:00:00 2001 From: Michael Dailey Date: Tue, 5 May 2026 11:53:42 -0500 Subject: [PATCH 1/2] PDX-0: fix(mcp): rename tool names from dot-notation to underscore for Copilot RCA: All 41 MCP tool names used dot-notation (provar.xxx.yyy) which violates the [a-z0-9_-] constraint enforced by GitHub Copilot and other MCP clients. Fix: Rename to underscore form across source, tests, scripts, and docs; restore 4 correctness fixes from pre-commit review; bump version to 1.5.0-beta.15. Co-Authored-By: Claude Sonnet 4.6 --- docs/PROVAR_TEST_STEP_REFERENCE.md | 4 +- docs/mcp-pilot-guide.md | 44 +- docs/mcp.md | 276 ++++---- docs/provar-mcp-public-docs.md | 82 +-- docs/university-of-provar-mcp-course.md | 68 +- messages/sf.provar.auth.clear.md | 2 +- messages/sf.provar.mcp.start.md | 113 ++-- package.json | 2 +- scripts/mcp-smoke.cjs | 166 ++--- server.json | 4 +- src/mcp/prompts/loopPrompts.ts | 36 +- src/mcp/prompts/migrationPrompts.ts | 20 +- src/mcp/server.ts | 2 +- src/mcp/tools/antTools.ts | 14 +- src/mcp/tools/automationTools.ts | 62 +- src/mcp/tools/connectionTools.ts | 12 +- src/mcp/tools/defectTools.ts | 54 +- src/mcp/tools/hierarchyValidate.ts | 591 ++++++++++++++---- src/mcp/tools/nitroXTools.ts | 184 +++--- src/mcp/tools/pageObjectGenerate.ts | 10 +- src/mcp/tools/pageObjectValidate.ts | 192 ++++-- src/mcp/tools/projectInspect.ts | 126 ++-- src/mcp/tools/projectValidateFromPath.ts | 67 +- src/mcp/tools/propertiesTools.ts | 42 +- src/mcp/tools/qualityHubApiTools.ts | 20 +- src/mcp/tools/qualityHubTools.ts | 52 +- src/mcp/tools/rcaTools.ts | 20 +- src/mcp/tools/testCaseGenerate.ts | 32 +- src/mcp/tools/testCaseStepTools.ts | 8 +- src/mcp/tools/testCaseValidate.ts | 45 +- src/mcp/tools/testPlanTools.ts | 244 ++++++-- src/mcp/tools/testPlanValidate.ts | 79 ++- src/mcp/tools/testSuiteValidate.ts | 49 +- src/services/projectValidation.ts | 161 +++-- test/unit/mcp/antTools.test.ts | 36 +- 
test/unit/mcp/automationTools.test.ts | 142 ++--- test/unit/mcp/connectionTools.test.ts | 30 +- test/unit/mcp/defectTools.test.ts | 212 ++++--- test/unit/mcp/loopPrompts.test.ts | 20 +- test/unit/mcp/migrationPrompts.test.ts | 6 +- test/unit/mcp/nitroXTools.test.ts | 125 ++-- test/unit/mcp/pageObjectGenerate.test.ts | 56 +- test/unit/mcp/projectValidateFromPath.test.ts | 45 +- test/unit/mcp/propertiesTools.test.ts | 82 +-- test/unit/mcp/qualityHubApiTools.test.ts | 24 +- test/unit/mcp/qualityHubTools.test.ts | 82 +-- test/unit/mcp/rcaTools.test.ts | 74 +-- test/unit/mcp/testCaseGenerate.test.ts | 106 ++-- test/unit/mcp/testCaseStepTools.test.ts | 28 +- test/unit/mcp/testPlanTools.test.ts | 90 +-- test/unit/mcp/testPlanValidate.test.ts | 75 ++- test/unit/mcp/testSuiteValidate.test.ts | 114 ++-- 52 files changed, 2526 insertions(+), 1704 deletions(-) diff --git a/docs/PROVAR_TEST_STEP_REFERENCE.md b/docs/PROVAR_TEST_STEP_REFERENCE.md index 0762ad03..b5a89da3 100644 --- a/docs/PROVAR_TEST_STEP_REFERENCE.md +++ b/docs/PROVAR_TEST_STEP_REFERENCE.md @@ -1,8 +1,8 @@ # Provar Test Step Reference > **Source of truth** for AI-assisted test generation in the provardx-cli / Quality Hub MCP toolchain. -> All examples are sourced from the SalesCloud corpus. The `provar.qualityhub.examples.retrieve` tool returns real -> examples for additional grounding; `provar.testcase.validate` enforces all rules documented here. +> All examples are sourced from the SalesCloud corpus. The `provar_qualityhub_examples_retrieve` tool returns real +> examples for additional grounding; `provar_testcase_validate` enforces all rules documented here. --- diff --git a/docs/mcp-pilot-guide.md b/docs/mcp-pilot-guide.md index bc7283ef..7da00fc1 100644 --- a/docs/mcp-pilot-guide.md +++ b/docs/mcp-pilot-guide.md @@ -189,9 +189,9 @@ Restart Cursor after saving. 
The Provar tools will appear under **Settings → M ## Testing the Connection -Before testing with a real project, verify the server is reachable using the `provardx.ping` tool. In your AI client, ask: +Before testing with a real project, verify the server is reachable using the `provardx_ping` tool. In your AI client, ask: -> "Call provardx.ping with message 'hello'" +> "Call provardx_ping with message 'hello'" Expected response: @@ -217,7 +217,7 @@ Work through these in order — they build on each other. Prompt your AI assistant: -> "Use provar.project.inspect on `/path/to/my/project` and tell me what you find — how many test cases, any coverage gaps?" +> "Use provar_project_inspect on `/path/to/my/project` and tell me what you find — how many test cases, any coverage gaps?" **What to look for:** @@ -296,7 +296,7 @@ Pre-requisite: `sf org login web -a MyQHOrg` then `sf provar quality-hub connect **What to look for:** -- The AI chaining: `provar.qualityhub.connect` → `provar.qualityhub.testrun` → `provar.qualityhub.testrun.report` (looped) +- The AI chaining: `provar_qualityhub_connect` → `provar_qualityhub_testrun` → `provar_qualityhub_testrun_report` (looped) - The run ID extracted from the `testrun` response and passed to `testrun.report` - Final result status reported back @@ -304,7 +304,7 @@ Pre-requisite: `sf org login web -a MyQHOrg` then `sf provar quality-hub connect ### Scenario 8: Quality Hub API Validation -**Goal:** Confirm that `provar.testcase.validate` upgrades from local rules to the full Quality Hub API ruleset when an API key is present. +**Goal:** Confirm that `provar_testcase_validate` upgrades from local rules to the full Quality Hub API ruleset when an API key is present. **Setup:** Run `sf provar auth login` and complete the browser login, then confirm with `sf provar auth status`. 
@@ -330,13 +330,13 @@ NitroX is Provar's Hybrid Model for locators — it maps Salesforce component-ba > "Discover all NitroX page objects in my Provar project at `/path/to/my/project` and tell me how many there are." -**What to look for:** The AI calls `provar.nitrox.discover`, finds the `nitroX/` directory, and reports the file count. +**What to look for:** The AI calls `provar_nitrox_discover`, finds the `nitroX/` directory, and reports the file count. **Step 2 — Read examples for context:** > "Read up to 5 NitroX page objects from my project so you understand the structure." -**What to look for:** The AI calls `provar.nitrox.read` and summarises the patterns it sees (tagName, qualifier, element types, interactions). +**What to look for:** The AI calls `provar_nitrox_read` and summarises the patterns it sees (tagName, qualifier, element types, interactions). **Step 3 — Generate a new component:** @@ -344,7 +344,7 @@ NitroX is Provar's Hybrid Model for locators — it maps Salesforce component-ba **What to look for:** -- The AI calls `provar.nitrox.generate` with `dry_run: true` first, then writes after your confirmation +- The AI calls `provar_nitrox_generate` with `dry_run: true` first, then writes after your confirmation - Generated JSON has valid UUIDs for all `componentId` fields - `tagName`, `parameters`, and `elements` match your description @@ -354,7 +354,7 @@ NitroX is Provar's Hybrid Model for locators — it maps Salesforce component-ba **What to look for:** -- `provar.nitrox.validate` returns `valid: true` and `score: 100` +- `provar_nitrox_validate` returns `valid: true` and `score: 100` - Any issues are listed with rule IDs (NX001–NX010) and suggestions **Step 5 — Apply a targeted edit:** @@ -363,7 +363,7 @@ NitroX is Provar's Hybrid Model for locators — it maps Salesforce component-ba **What to look for:** -- The AI calls `provar.nitrox.patch` with `dry_run: true` to show the change +- The AI calls `provar_nitrox_patch` with `dry_run: true` to show the 
change - After confirmation, calls again with `dry_run: false` - `validate_after: true` (the default) confirms the patch didn't break the schema @@ -383,9 +383,9 @@ NitroX is Provar's Hybrid Model for locators — it maps Salesforce component-ba **What to look for:** - _(If SF MCP connected)_ `getObjectSchema` called for `Opportunity` — AI uses real field names (e.g. `Amount`, `CloseDate`, `StageName`) in the corpus query -- `provar.qualityhub.examples.retrieve` called with the enriched user story as query, returning `examples` array with `similarity_score` values and XML content -- The AI using the retrieved XML as few-shot context when calling `provar.testcase.generate` -- `provar.testcase.validate` confirming `quality_score >= 70` +- `provar_qualityhub_examples_retrieve` called with the enriched user story as query, returning `examples` array with `similarity_score` values and XML content +- The AI using the retrieved XML as few-shot context when calling `provar_testcase_generate` +- `provar_testcase_validate` confirming `quality_score >= 70` - If no API key: tool returns `{ examples: [], warning: "..." }` with `isError: false` and the AI continues without grounding **To test graceful degrade:** Run `sf provar auth clear` and repeat. Verify `examples: []` with a warning and generation still proceeds. @@ -394,7 +394,7 @@ NitroX is Provar's Hybrid Model for locators — it maps Salesforce component-ba ### Scenario 10: Corpus Retrieval — No Key / Rate Limit -**Goal:** Confirm `provar.qualityhub.examples.retrieve` never hard-errors on API failure. +**Goal:** Confirm `provar_qualityhub_examples_retrieve` never hard-errors on API failure. > "Fetch 3 corpus examples for: Create a Contact in Salesforce." 
@@ -431,10 +431,10 @@ NitroX is Provar's Hybrid Model for locators — it maps Salesforce component-ba **What to look for:** -- The AI calls `provar.qualityhub.examples.retrieve` first to retrieve grounding examples (if an API key is configured) +- The AI calls `provar_qualityhub_examples_retrieve` first to retrieve grounding examples (if an API key is configured) - A valid Provar XML test case is generated, using `` steps that correspond to the source test's actions - Salesforce login/navigation is omitted from the generated XML (Provar handles this via Connection Manager) -- `provar.testcase.validate` is called on the result and returns `is_valid: true` +- `provar_testcase_validate` is called on the result and returns `is_valid: true` - Any steps that could not be mapped are left as `` comments in the XML --- @@ -447,7 +447,7 @@ NitroX is Provar's Hybrid Model for locators — it maps Salesforce component-ba - Validates all incoming paths against those roots before any file operation - Blocks path traversal attempts (`../`) with a `PATH_TRAVERSAL` error - Resolves symlinks via `fs.realpathSync` before the containment check — a symlink inside an allowed directory pointing outside it cannot bypass the restriction -- Validates all path-type input fields (e.g. `provar_home`, `project_path`, `results_path` in `provar.ant.generate`) before any file operation, not just the output path +- Validates all path-type input fields (e.g. `provar_home`, `project_path`, `results_path` in `provar_ant_generate`) before any file operation, not just the output path - Invokes `sf` CLI subprocesses for Quality Hub and Automation tools — these use the SF CLI's existing credential store (`~/.sf/credentials.json`), which the MCP server does not read directly ### License validation @@ -489,7 +489,7 @@ The MCP server uses **stdio transport** exclusively. Communication travels over **Salesforce org credentials** — the Quality Hub and Automation tools invoke `sf` subprocesses. 
Salesforce org credentials are managed entirely by the Salesforce CLI and stored in its own credential store (`~/.sf/`). The Provar MCP server never reads, parses, or transmits those credentials. -**Provar API key** — the `provar.testcase.validate` tool optionally reads a `pv_k_` API key to enable Quality Hub API validation. The key is stored at `~/.provar/credentials.json` (written by `sf provar auth login`) or read from the `PROVAR_API_KEY` environment variable. The key is sent to the Provar Quality Hub API only when a validation request is made — it is never logged or written anywhere other than `~/.provar/credentials.json`. +**Provar API key** — the `provar_testcase_validate` tool optionally reads a `pv_k_` API key to enable Quality Hub API validation. The key is stored at `~/.provar/credentials.json` (written by `sf provar auth login`) or read from the `PROVAR_API_KEY` environment variable. The key is sent to the Provar Quality Hub API only when a validation request is made — it is never logged or written anywhere other than `~/.provar/credentials.json`. ### Path policy enforcement @@ -507,7 +507,7 @@ This check runs before every file read and write, including all path-type input All tool invocations are logged to **stderr** with a unique `requestId` per call. The log format is structured JSON: ``` -[INFO] provar.testcase.validate {"requestId":"req-a1b2c3","file_path":"/workspace/..."} +[INFO] provar_testcase_validate {"requestId":"req-a1b2c3","file_path":"/workspace/..."} ``` You can capture stderr from the MCP server process to maintain an audit trail of all AI agent tool calls. 
@@ -518,10 +518,10 @@ You can capture stderr from the MCP server process to maintain an audit trail of | Limitation | Details | | ----------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| No async operations | Quality Hub test run tools use synchronous SF CLI invocations. Long-running runs should use `provar.qualityhub.testrun.report` in a polling loop. | -| SF CLI must be in PATH | The `provar.qualityhub.*` and `provar.automation.*` tools spawn `sf` as a subprocess. If `sf` is not on `PATH`, you get `SF_NOT_FOUND`. | +| No async operations | Quality Hub test run tools use synchronous SF CLI invocations. Long-running runs should use `provar_qualityhub_testrun_report` in a polling loop. | +| SF CLI must be in PATH | The `provar_qualityhub_*` and `provar_automation_*` tools spawn `sf` as a subprocess. If `sf` is not on `PATH`, you get `SF_NOT_FOUND`. | | No Windows native paths in `--allowed-paths` via JSON | Use forward slashes in MCP client config JSON, even on Windows. The server normalises paths internally. | -| Page Object validation is static | The `provar.pageobject.validate` tool parses Java source statically. It does not compile or resolve imports. | +| Page Object validation is static | The `provar_pageobject_validate` tool parses Java source statically. It does not compile or resolve imports. | | Quality scores are local | The MCP quality scores are computed locally using the same formula as the Quality Hub Lambda. They are not submitted to or stored by any Provar service unless you call the Quality Hub API separately. 
| --- diff --git a/docs/mcp.md b/docs/mcp.md index 3366c616..5b365438 100644 --- a/docs/mcp.md +++ b/docs/mcp.md @@ -16,48 +16,48 @@ The Provar DX CLI ships with a built-in **Model Context Protocol (MCP) server** - [Other MCP-compatible clients](#other-mcp-compatible-clients) - [Path security](#path-security) - [Available tools](#available-tools) - - [provardx.ping](#provardxping) - - [provar.project.inspect](#provarprojectinspect) - - [provar.connection.list](#provarconnectionlist) - - [provar.pageobject.generate](#provarpageobjectgenerate) - - [provar.pageobject.validate](#provarpageobjectvalidate) - - [provar.testcase.generate](#provartestcasegenerate) - - [provar.testcase.validate](#provartestcasevalidate) - - [provar.testsuite.validate](#provartestsuitevalidate) - - [provar.testplan.validate](#provartestplanvalidate) - - [provar.project.validate](#provarprojectvalidate) - - [provar.properties.generate](#provarpropertiesgenerate) - - [provar.properties.read](#provarpropertiesread) - - [provar.properties.set](#provarpropertiesset) - - [provar.properties.validate](#provarpropertiesvalidate) - - [provar.ant.generate](#provarantgenerate) - - [provar.ant.validate](#provarantvalidate) - - [provar.qualityhub.connect](#provarqualityhubconnect) - - [provar.qualityhub.display](#provarqualityhubdisplay) - - [provar.qualityhub.testrun](#provarqualityhubtestrun) - - [provar.qualityhub.testrun.report](#provarqualityhubtestrunreport) - - [provar.qualityhub.testrun.abort](#provarqualityhubtestrunabort) - - [provar.qualityhub.testcase.retrieve](#provarqualityhubtestcaseretrieve) - - [provar.automation.setup](#provarautomationsetup) - - [provar.automation.testrun](#provarautomationtestrun) - - [provar.automation.compile](#provarautomationcompile) - - [provar.automation.config.load](#provarautomationconfigload) - - [provar.automation.metadata.download](#provarautomationmetadatadownload) - - [provar.qualityhub.defect.create](#provarqualityhubdefectcreate) - - 
[provar.testrun.report.locate](#provartestrunreportlocate) - - [provar.testrun.rca](#provartestrunrca) - - [provar.testcase.step.edit](#provartestcasestepedit) - - [provar.testplan.add-instance](#provartestplanadinstance) - - [provar.testplan.create-suite](#provartestplancreatetsuite) - - [provar.testplan.remove-instance](#provartestplanremoveinstance) + - [provardx_ping](#provardx_ping) + - [provar_project_inspect](#provar_project_inspect) + - [provar_connection_list](#provar_connection_list) + - [provar_pageobject_generate](#provar_pageobject_generate) + - [provar_pageobject_validate](#provar_pageobject_validate) + - [provar_testcase_generate](#provar_testcase_generate) + - [provar_testcase_validate](#provar_testcase_validate) + - [provar_testsuite_validate](#provar_testsuite_validate) + - [provar_testplan_validate](#provar_testplan_validate) + - [provar_project_validate](#provar_project_validate) + - [provar_properties_generate](#provar_properties_generate) + - [provar_properties_read](#provar_properties_read) + - [provar_properties_set](#provar_properties_set) + - [provar_properties_validate](#provar_properties_validate) + - [provar_ant_generate](#provar_ant_generate) + - [provar_ant_validate](#provar_ant_validate) + - [provar_qualityhub_connect](#provar_qualityhub_connect) + - [provar_qualityhub_display](#provar_qualityhub_display) + - [provar_qualityhub_testrun](#provar_qualityhub_testrun) + - [provar_qualityhub_testrun_report](#provar_qualityhub_testrun_report) + - [provar_qualityhub_testrun_abort](#provar_qualityhub_testrun_abort) + - [provar_qualityhub_testcase_retrieve](#provar_qualityhub_testcase_retrieve) + - [provar_automation_setup](#provar_automation_setup) + - [provar_automation_testrun](#provar_automation_testrun) + - [provar_automation_compile](#provar_automation_compile) + - [provar_automation_config_load](#provar_automation_config_load) + - [provar_automation_metadata_download](#provar_automation_metadata_download) + - 
[provar_qualityhub_defect_create](#provar_qualityhub_defect_create) + - [provar_testrun_report_locate](#provar_testrun_report_locate) + - [provar_testrun_rca](#provar_testrun_rca) + - [provar_testcase_step_edit](#provar_testcase_step_edit) + - [provar_testplan_add-instance](#provar_testplan_add-instance) + - [provar_testplan_create-suite](#provar_testplan_create-suite) + - [provar_testplan_remove-instance](#provar_testplan_remove-instance) - [NitroX — Hybrid Model page objects](#nitrox--hybrid-model-page-objects) - - [provar.nitrox.discover](#provarnitroxdiscover) - - [provar.nitrox.read](#provarnitroxread) - - [provar.nitrox.validate](#provarnitroxvalidate) - - [provar.nitrox.generate](#provarnitroxgenerate) - - [provar.nitrox.patch](#provarnitroxpatch) + - [provar_nitrox_discover](#provar_nitrox_discover) + - [provar_nitrox_read](#provar_nitrox_read) + - [provar_nitrox_validate](#provar_nitrox_validate) + - [provar_nitrox_generate](#provar_nitrox_generate) + - [provar_nitrox_patch](#provar_nitrox_patch) - [Quality Hub API tools](#quality-hub-api-tools) - - [provar.qualityhub.examples.retrieve](#provarqualityhubexamplesretrieve) + - [provar_qualityhub_examples_retrieve](#provar_qualityhub_examples_retrieve) - [Org metadata via Salesforce Hosted MCP](#org-metadata-via-salesforce-hosted-mcp) - [MCP Prompts](#mcp-prompts) - [Migration prompts](#migration-prompts) @@ -121,7 +121,7 @@ claude mcp add provar -s user -- sf provar mcp start --allowed-paths /path/to/yo > **Windows (Claude Desktop):** If `sf` is not found, use `sf.cmd` as the command instead. -**Verify it's working** — ask your AI assistant: _"Call provardx.ping with message hello"_. You should get `{ "message": "hello" }` back. +**Verify it's working** — ask your AI assistant: _"Call provardx_ping with message hello"_. You should get `{ "pong": "hello", "ts": "...", "server": "provar-mcp@..." }` back. 
--- @@ -400,14 +400,14 @@ Replace `/path/to/your/provar/project` with the actual root of your Provar Autom ## Authentication — Quality Hub API -The `provar.testcase.validate` tool can run in two modes depending on whether an API key is configured. +The `provar_testcase_validate` tool can run in two modes depending on whether an API key is configured. | Mode | When | What you get | | ------------------- | ------------------ | --------------------------------------------------- | | **Quality Hub API** | API key configured | 170+ rules, quality score, tier-specific thresholds | | **Local only** | No key | Structural/schema rules only | -The `validation_source` field in every `provar.testcase.validate` response tells you which mode fired: +The `validation_source` field in every `provar_testcase_validate` response tells you which mode fired: | Value | Meaning | | ---------------- | ------------------------------------------------------------------------------------------------- | @@ -469,13 +469,13 @@ sf provar auth clear All file-system operations (read, write, generate) are restricted to the paths supplied via `--allowed-paths`. Any attempt to access a path outside those roots is rejected with a `PATH_NOT_ALLOWED` error. Path traversal sequences (`../`) are blocked with a `PATH_TRAVERSAL` error. -Symlinks are resolved via `fs.realpathSync` before the containment check, so a symlink inside an allowed directory that points outside it cannot bypass the restriction. For tools that accept multiple path inputs (such as `provar.ant.generate`'s `provar_home`, `project_path`, and `results_path`), all path fields are validated before any file operation occurs — not just the output path. +Symlinks are resolved via `fs.realpathSync` before the containment check, so a symlink inside an allowed directory that points outside it cannot bypass the restriction. 
For tools that accept multiple path inputs (such as `provar_ant_generate`'s `provar_home`, `project_path`, and `results_path`), all path fields are validated before any file operation occurs — not just the output path. --- ## Available tools -### `provardx.ping` +### `provardx_ping` A lightweight sanity-check tool. Echoes back the message you send. Useful for verifying the server is running and the client is connected. @@ -485,11 +485,17 @@ A lightweight sanity-check tool. Echoes back the message you send. Useful for ve | --------- | ------ | -------- | --------------------- | | `message` | string | no | Any text to echo back | -**Output** — `{ message: string }` +**Output** + +| Field | Type | Description | +| -------- | ------ | ------------------------------------------ | +| `pong` | string | The echoed message | +| `ts` | string | ISO-8601 timestamp | +| `server` | string | Server name and version (`provar-mcp@...`) | --- -### `provar.project.inspect` +### `provar_project_inspect` Inspects a Provar project folder and returns a structured inventory of all key project artefacts. Compiled `bin/` directories are automatically excluded. @@ -531,11 +537,11 @@ Provar test plans live in `plans/`. Each plan is a directory containing a `.plan --- -### `provar.connection.list` +### `provar_connection_list` Lists all connections and named environments defined in the project's `.testproject` file. Use this **before** generating test cases or page objects to discover the exact connection names to use. -**Prerequisite:** the project must have a `.testproject` file. Run `provar.project.validate` first if unsure of the project root. +**Prerequisite:** the project must have a `.testproject` file. Run `provar_project_validate` first if unsure of the project root. **Security:** only connection names, types, and URLs are returned — credential values from `.secrets` are never included in the output. 
@@ -567,12 +573,12 @@ Connection `type` values: `Salesforce`, `Web`, `Quality Hub`, `Web Service`, `Da | Code | Meaning | | --------------------------- | ------------------------------------------------------------------------- | -| `CONNECTION_FILE_NOT_FOUND` | No `.testproject` at the given path. Run `provar.project.validate` first. | +| `CONNECTION_FILE_NOT_FOUND` | No `.testproject` at the given path. Run `provar_project_validate` first. | | `PATH_NOT_ALLOWED` | `project_path` is outside the server's `--allowed-paths` | --- -### `provar.pageobject.generate` +### `provar_pageobject_generate` Generates a Java Page Object skeleton with the correct `@Page` or `@SalesforcePage` annotation and `@FindBy` field stubs. Optionally generates an `ILoginPage` implementation stub for non-SF SSO connections. @@ -599,7 +605,7 @@ When `sso_class` is provided the response includes `sso_stub_source` (the `ILogi --- -### `provar.pageobject.validate` +### `provar_pageobject_validate` Validates a Java Page Object source file against 30+ quality rules (structural correctness, annotation completeness, locator best practices). @@ -628,7 +634,7 @@ Validates a Java Page Object source file against 30+ quality rules (structural c --- -### `provar.testcase.generate` +### `provar_testcase_generate` Generates an XML test case skeleton with UUID v4 guids and sequential `testItemId` values. 
@@ -639,13 +645,13 @@ Generates an XML test case skeleton with UUID v4 guids and sequential `testItemI **Argument XML conventions** (automatically applied by the generator): -| Argument key / value pattern | Emitted XML class | API context | -| ------------------------------------ | ----------------------------- | ----------------------------------- | -| `target` key | `class="uiTarget"` | UiWithScreen, UiWithRow | -| `locator` key | `class="uiLocator"` | UiDoAction, UiAssert | -| Value matches `{VarName}` or `{A.B}` | `class="variable"` + `` | Any step | -| SetValues attributes | `class="valueList"/` | SetValues only | -| All other values | `class="value" valueClass="string"` | Any step | +| Argument key / value pattern | Emitted XML class | API context | +| ------------------------------------ | ----------------------------------- | ----------------------- | +| `target` key | `class="uiTarget"` | UiWithScreen, UiWithRow | +| `locator` key | `class="uiLocator"` | UiDoAction, UiAssert | +| Value matches `{VarName}` or `{A.B}` | `class="variable"` + `` | Any step | +| SetValues attributes | `class="valueList"/` | SetValues only | +| All other values | `class="value" valueClass="string"` | Any step | AssertValues uses **flat** argument structure (`expectedValue`, `actualValue`, `comparisonType`) — not the `valueList`/namedValues format. @@ -676,7 +682,7 @@ AssertValues uses **flat** argument structure (`expectedValue`, `actualValue`, ` --- -### `provar.testcase.validate` +### `provar_testcase_validate` Validates an XML test case for schema correctness (validity score) and best practices (quality score). The quality score uses the exact same weighted-deduction formula as the Provar Quality Hub Lambda service, guaranteeing score parity between the MCP and API surfaces. 
@@ -719,7 +725,7 @@ Validates an XML test case for schema correctness (validity score) and best prac --- -### `provar.testsuite.validate` +### `provar_testsuite_validate` Validates a Provar test suite — checks for empty suites, duplicate names (within the suite), oversized suites (>75 test cases), and naming convention consistency. Recursively validates child suites and individual test case XML. @@ -739,7 +745,7 @@ Validates a Provar test suite — checks for empty suites, duplicate names (with --- -### `provar.testplan.validate` +### `provar_testplan_validate` Validates a Provar test plan — checks for empty plans, duplicate suite names, oversized plans (>20 suites), plan-completeness metadata, and naming consistency. Recursively validates suites and test cases. @@ -773,7 +779,7 @@ Validates a Provar test plan — checks for empty plans, duplicate suite names, --- -### `provar.project.validate` +### `provar_project_validate` Validates a Provar project directly from its directory on disk. Reads the plan/suite/testinstance hierarchy from `plans/`, resolves test case XML from `tests/`, extracts project context (connections, environments, secrets password) from the `.testproject` file, and runs the full cross-cutting rule set. @@ -858,11 +864,11 @@ Test case tools accept either field name for XML content: | `xml_content` | Provar MCP (original) | Full XML content of the test case | | `xml` | Quality Hub batch API | API-compatible alias; takes precedence when both are supplied | -Both names are accepted in all four validation tools (`provar.testcase.validate`, `provar.testsuite.validate`, `provar.testplan.validate`, `provar.project.validate`). This makes it straightforward to share request payloads between the REST API and the MCP surface without conversion. +Both names are accepted in all four validation tools (`provar_testcase_validate`, `provar_testsuite_validate`, `provar_testplan_validate`, `provar_project_validate`). 
This makes it straightforward to share request payloads between the REST API and the MCP surface without conversion. --- -### `provar.properties.generate` +### `provar_properties_generate` Generates a `provardx-properties.json` file from the standard template. Placeholder values (`${...}`) are pre-filled where optional overrides are not provided. @@ -883,11 +889,11 @@ Generates a `provardx-properties.json` file from the standard template. Placehol --- -### `provar.properties.read` +### `provar_properties_read` -Reads and parses a `provardx-properties.json` file directly from disk. Use this to inspect the current configuration before making changes with `provar.properties.set`. +Reads and parses a `provardx-properties.json` file directly from disk. Use this to inspect the current configuration before making changes with `provar_properties_set`. -If the file you read differs on critical fields (`provarHome`, `projectPath`, `resultsPath`) from the file currently registered via `provar.automation.config.load`, the response will include a `details.warning` listing the divergent keys. This catches the common case where the agent reads one file but test runs use another. +If the file you read differs on critical fields (`provarHome`, `projectPath`, `resultsPath`) from the file currently registered via `provar_automation_config_load`, the response will include a `details.warning` listing the divergent keys. This catches the common case where the agent reads one file but test runs use another. **Input** @@ -901,7 +907,7 @@ If the file you read differs on critical fields (`provarHome`, `projectPath`, `r --- -### `provar.properties.set` +### `provar_properties_set` Updates one or more fields in a `provardx-properties.json` file. Only the supplied fields are changed. Object fields (`environment`, `metadata`) are deep-merged; array fields (`testCase`, `testPlan`, `connectionOverride`) replace the existing value entirely. 
@@ -937,7 +943,7 @@ Updates one or more fields in a `provardx-properties.json` file. Only the suppli --- -### `provar.properties.validate` +### `provar_properties_validate` Validates a `provardx-properties.json` file against the ProvarDX schema. Checks required fields, valid enum values, and warns about unfilled `${PLACEHOLDER}` values. Accepts either a file path or inline JSON content. @@ -961,7 +967,7 @@ Validates a `provardx-properties.json` file against the ProvarDX schema. Checks --- -### `provar.ant.generate` +### `provar_ant_generate` Generates a Provar ANT `build.xml` file from structured inputs. Produces the standard `<project>` skeleton with `<property>` declarations, `<taskdef>`, and `<target>`. Supports targeting tests by folder, test plan, or individual `.testcase` files. @@ -1022,7 +1028,7 @@ Examples: --- -### `provar.ant.validate` +### `provar_ant_validate` Validates a Provar ANT `build.xml` for structural correctness. Accepts either a file path or inline XML content. @@ -1076,7 +1082,7 @@ Validates a Provar ANT `build.xml` for structural correctness. Accepts either a --- -### `provar.qualityhub.connect` +### `provar_qualityhub_connect` Connects to a Provar Quality Hub org. Invokes `sf provar quality-hub connect` via the Salesforce CLI. @@ -1095,7 +1101,7 @@ Connects to a Provar Quality Hub org. Invokes `sf provar quality-hub connect` vi --- -### `provar.qualityhub.display` +### `provar_qualityhub_display` Displays information about the currently connected Quality Hub org. Invokes `sf provar quality-hub display`. @@ -1110,9 +1116,9 @@ Displays information about the currently connected Quality Hub org. Invokes `sf --- -### `provar.qualityhub.testrun` +### `provar_qualityhub_testrun` -Triggers a Quality Hub test run. Invokes `sf provar quality-hub test run`. Returns the test run ID which can be passed to `provar.qualityhub.testrun.report` to poll for results. +Triggers a Quality Hub test run. Invokes `sf provar quality-hub test run`.
Returns the test run ID which can be passed to `provar_qualityhub_testrun_report` to poll for results. > **Wildcard warning:** if any value in `flags` contains `*` or `?`, the tool adds `details.warning` explaining that QH plan-level reporting will be skipped. Execution still proceeds — the warning is non-blocking. @@ -1129,7 +1135,7 @@ Triggers a Quality Hub test run. Invokes `sf provar quality-hub test run`. Retur --- -### `provar.qualityhub.testrun.report` +### `provar_qualityhub_testrun_report` Polls the status of an in-progress or completed Quality Hub test run. Invokes `sf provar quality-hub test run report`. @@ -1138,7 +1144,7 @@ Polls the status of an in-progress or completed Quality Hub test run. Invokes `s | Parameter | Type | Required | Description | | ------------ | -------- | -------- | ------------------------------------------------------------- | | `target_org` | string | yes | SF CLI org alias or username | -| `run_id` | string | yes | Test run ID returned by `provar.qualityhub.testrun` | +| `run_id` | string | yes | Test run ID returned by `provar_qualityhub_testrun` | | `flags` | string[] | no | Additional raw CLI flags (e.g. `["--result-format", "json"]`) | **Output** — `{ requestId, exitCode, stdout, stderr }` @@ -1147,7 +1153,7 @@ Polls the status of an in-progress or completed Quality Hub test run. Invokes `s --- -### `provar.qualityhub.testrun.abort` +### `provar_qualityhub_testrun_abort` Aborts an in-progress Quality Hub test run. Invokes `sf provar quality-hub test run abort`. @@ -1165,7 +1171,7 @@ Aborts an in-progress Quality Hub test run. Invokes `sf provar quality-hub test --- -### `provar.qualityhub.testcase.retrieve` +### `provar_qualityhub_testcase_retrieve` Retrieves test cases from Quality Hub by user story or metadata component. Invokes `sf provar quality-hub testcase retrieve`. @@ -1182,7 +1188,7 @@ Retrieves test cases from Quality Hub by user story or metadata component. 
Invok --- -### `provar.automation.setup` +### `provar_automation_setup` Detects existing Provar Automation installations on the machine. If found, returns the install path so you can set `provarHome` in your properties file — without downloading anything. If no installation is found, invokes `sf provar automation setup` to download and install the binaries. @@ -1210,15 +1216,15 @@ Checks in this order: | `version` | string \| null | Detected or installed version | | `message` | string | Human-readable summary | -After a successful setup, update `provarHome` in your `provardx-properties.json` using `provar.properties.set`. +After a successful setup, update `provarHome` in your `provardx-properties.json` using `provar_properties_set`. **Error codes:** `AUTOMATION_SETUP_FAILED`, `SF_NOT_FOUND` --- -### `provar.automation.testrun` +### `provar_automation_testrun` -Triggers a Provar Automation test run using the currently loaded properties file. Invokes `sf provar automation test run`. This is the **LOCAL Execute** step of the AI loop — for grid-managed runs use `provar.qualityhub.testrun`. +Triggers a Provar Automation test run using the currently loaded properties file. Invokes `sf provar automation test run`. This is the **LOCAL Execute** step of the AI loop — for grid-managed runs use `provar_qualityhub_testrun`. **Input** @@ -1245,7 +1251,7 @@ Each entry represents one test case. `status` is `"pass"`, `"fail"`, or `"skip"` --- -### `provar.automation.compile` +### `provar_automation_compile` Compiles PageObject and PageControl Java source files. Invokes `sf provar automation project compile`. Run this after generating or modifying Page Objects, before triggering a test run. @@ -1261,7 +1267,7 @@ Compiles PageObject and PageControl Java source files. Invokes `sf provar automa --- -### `provar.automation.metadata.download` +### `provar_automation_metadata_download` Downloads Salesforce metadata into the Provar project cache. 
Invokes `sf provar automation metadata download`. Run this when you need up-to-date org metadata for Page Object generation or test execution. @@ -1277,7 +1283,7 @@ Downloads Salesforce metadata into the Provar project cache. Invokes `sf provar --- -### `provar.qualityhub.defect.create` +### `provar_qualityhub_defect_create` Creates `Defect__c` records in Quality Hub for every failed test execution in a given test run. For each failure, creates a `Defect__c` (with description, step, browser, environment, and tester populated), then links it via `Test_Case_Defect__c` and `Test_Execution_Defect__c` junction records. If Jira or ADO sync is configured in the Quality Hub org, defects automatically sync to those systems. @@ -1285,7 +1291,7 @@ Creates `Defect__c` records in Quality Hub for every failed test execution in a | Parameter | Type | Required | Description | | -------------- | -------- | -------- | ------------------------------------------------------------------------------------------------------- | -| `run_id` | string | yes | Test run `Tracking_Id__c` returned by `provar.qualityhub.testrun` | +| `run_id` | string | yes | Test run `Tracking_Id__c` returned by `provar_qualityhub_testrun` | | `target_org` | string | yes | SF CLI org alias or username for the Quality Hub org | | `failed_tests` | string[] | no | Optional filter — list of `Test_Case__c` ID substrings to restrict defect creation to specific failures | @@ -1301,9 +1307,9 @@ Creates `Defect__c` records in Quality Hub for every failed test execution in a --- -### `provar.automation.config.load` +### `provar_automation_config_load` -Register a `provardx-properties.json` file as the active Provar configuration. **Required before `provar.automation.compile` or `provar.automation.testrun`** — without this step those commands fail with `MISSING_FILE`. +Register a `provardx-properties.json` file as the active Provar configuration. 
**Required before `provar_automation_compile` or `provar_automation_testrun`** — without this step those commands fail with `MISSING_FILE`. Invokes `sf provar automation config load --properties-file <path>`, writing the path to `~/.sf/config.json` under `PROVARDX_PROPERTIES_FILE_PATH`. @@ -1321,7 +1327,7 @@ Invokes `sf provar automation config load --properties-file <path>`, writing the --- -### `provar.testrun.report.locate` +### `provar_testrun_report_locate` Resolve artifact paths for a completed test run without parsing them. Returns the absolute paths to `JUnit.xml`, `Index.html`, per-test HTML reports, and validation JSONs. @@ -1348,7 +1354,7 @@ Uses a 4-step resolution algorithm (explicit path → `~/.sf/config.json` → `p --- -### `provar.testrun.rca` +### `provar_testrun_rca` Analyse a completed test run and return a structured Root Cause Analysis report. Reads `JUnit.xml`, classifies each failure into a root cause category, extracts page object and operation names, and flags pre-existing failures across prior Increment runs. @@ -1406,7 +1412,7 @@ Salesforce DML error categories (`SALESFORCE_*`) represent test-data failures --- -### `provar.testcase.step.edit` +### `provar_testcase_step_edit` Atomically add or remove a single step (``) in a Provar XML test case file. Writes a `.bak` backup before mutating, runs structural validation after the edit, and automatically restores the backup if validation fails. @@ -1463,7 +1469,7 @@ Prerequisites: the test case file must exist and be valid XML with a ` --- -### `provar.testplan.add-instance` +### `provar_testplan_add-instance` Wire a test case into a plan suite by writing a `.testinstance` file.
Handles UU --- -### `provar.testplan.create-suite` +### `provar_testplan_create-suite` Create a new test suite directory with a `.planitem` file inside an existing plan. The plan directory and its `.planitem` must already exist. @@ -1510,7 +1516,7 @@ Create a new test suite directory with a `.planitem` file inside an existing pla --- -### `provar.testplan.remove-instance` +### `provar_testplan_remove-instance` Remove a `.testinstance` file from a plan suite. Path is validated to stay within the project root. @@ -1535,13 +1541,13 @@ Remove a `.testinstance` file from a plan suite. Path is validated to stay withi NitroX is Provar's **Hybrid Model** for locators. Instead of hand-written Java Page Objects it uses component-based `.po.json` files that map UI elements for any Salesforce component type: LWC, Screen Flow, Industry / OmniStudio, Experience Cloud, and standard HTML5. These files live in `nitroX/` directories inside your Provar project. -The five `provar.nitrox.*` tools let an AI agent discover existing NitroX page objects, read them as training context, validate new ones against the schema, generate fresh components from a description, and apply surgical edits via JSON merge-patch. +The five `provar_nitrox_*` tools let an AI agent discover existing NitroX page objects, read them as training context, validate new ones against the schema, generate fresh components from a description, and apply surgical edits via JSON merge-patch. > **Note:** NitroX page objects are read and written directly from disk using the standard file-system path policy (`--allowed-paths`). No `sf` subprocess is involved. --- -### `provar.nitrox.discover` +### `provar_nitrox_discover` Scan a set of directories for Provar projects (identified by a `.testproject` marker file) and inventory each project's `nitroX/` and `nitroXPackages/` directories. Useful as a first step before reading or generating files. 
@@ -1573,7 +1579,7 @@ Directories named `node_modules`, `.git`, or any hidden directory (`.`-prefixed) --- -### `provar.nitrox.read` +### `provar_nitrox_read` Read one or more NitroX `.po.json` files and return their parsed content for context or training. Provide specific `file_paths` or a `project_path` to read all files from a project's `nitroX/` directory. @@ -1595,7 +1601,7 @@ Path policy is enforced per-file. A missing or unparseable file returns an `erro --- -### `provar.nitrox.validate` +### `provar_nitrox_validate` Validate a NitroX `.po.json` (Hybrid Model component page object) against the FACT schema rules. Returns a quality score (0–100) and a list of issues. @@ -1633,7 +1639,7 @@ Score formula: `100 − (20 × errors) − (5 × warnings) − (1 × infos)`, mi --- -### `provar.nitrox.generate` +### `provar_nitrox_generate` Generate a new NitroX `.po.json` from a component description. All `componentId` fields are assigned fresh UUIDs. Returns the JSON content; writes to disk only when `dry_run=false`. @@ -1667,7 +1673,7 @@ Applicable to any component type: LWC, Screen Flow, Industry Components, Experie --- -### `provar.nitrox.patch` +### `provar_nitrox_patch` Apply a [JSON merge-patch (RFC 7396)](https://www.rfc-editor.org/rfc/rfc7396) to an existing `.po.json` file. Reads the file, merges the patch, optionally validates the result, and writes back. Use `dry_run=true` (default) to preview changes before committing. @@ -1698,9 +1704,9 @@ When `validate_after=true` and the merged content has errors, the write is block These tools call the Quality Hub HTTP API directly (no `sf` subprocess). They require a Provar API key set via `sf provar auth login`. -### `provar.qualityhub.examples.retrieve` +### `provar_qualityhub_examples_retrieve` -Retrieve N similar Provar test case examples from the Quality Hub corpus (1000+ tests indexed in Bedrock). Use this **before** `provar.testcase.generate` to provide few-shot grounding examples. 
+Retrieve N similar Provar test case examples from the Quality Hub corpus (1000+ tests indexed in Bedrock). Use this **before** `provar_testcase_generate` to provide few-shot grounding examples. If retrieval fails for any reason (no key, invalid key, rate limit, network error), the tool returns `{ examples: [], count: 0, warning: "..." }` with `isError: false` so the generation workflow can continue without grounding. It **never** hard-errors on API failure. @@ -1737,7 +1743,7 @@ Each element in `examples`: ### Org metadata via Salesforce Hosted MCP -Provar MCP does not include a built-in org introspection tool. Instead, connect the **Salesforce Hosted MCP Server** (`platform/sobject-reads`) alongside Provar MCP and call `getObjectSchema` to retrieve sObject field metadata. Pass the result as additional context in your `provar.qualityhub.examples.retrieve` query. +Provar MCP does not include a built-in org introspection tool. Instead, connect the **Salesforce Hosted MCP Server** (`platform/sobject-reads`) alongside Provar MCP and call `getObjectSchema` to retrieve sObject field metadata. Pass the result as additional context in your `provar_qualityhub_examples_retrieve` query. | Endpoint | URL | | ---------- | --------------------------------------------------------------------------- | @@ -1746,7 +1752,7 @@ Provar MCP does not include a built-in org introspection tool. Instead, connect The SF Hosted MCP uses per-user OAuth 2.0, respects field-level security and sharing rules automatically, and is maintained by Salesforce. See [Salesforce Hosted MCP Server docs](https://developer.salesforce.com/docs/platform/hosted-mcp-servers/guide/sobject-reads.html) for setup. -**Fallback (no SF MCP configured):** append key field API names directly to your `provar.qualityhub.examples.retrieve` query. Example: `"... 
[Opportunity: CloseDate (Date), Amount (Currency), StageName (Picklist), CustomField__c (Text)]"` +**Fallback (no SF MCP configured):** append key field API names directly to your `provar_qualityhub_examples_retrieve` query. Example: `"... [Opportunity: CloseDate (Date), Amount (Currency), StageName (Picklist), CustomField__c (Text)]"` --- @@ -1760,10 +1766,10 @@ The Provar MCP server registers **7 MCP prompts** that pre-wire the tool chain i These prompts convert tests from other frameworks into Provar XML. Each prompt: -1. Calls `provar.qualityhub.examples.retrieve` with keywords from the source test to load few-shot grounding examples. +1. Calls `provar_qualityhub_examples_retrieve` with keywords from the source test to load few-shot grounding examples. 2. Generates a Provar XML test case using those examples as structural context. 3. Writes the file to the target project. -4. Calls `provar.testcase.validate` and iterates until the output is clean. +4. Calls `provar_testcase_validate` and iterates until the output is clean. --- @@ -1839,7 +1845,7 @@ Fix a failing Provar test case using RCA output. Reads the current XML, interpre | Parameter | Type | Required | Description | | -------------- | ------ | -------- | ---------------------------------------------------------------------- | | `testcasePath` | string | yes | Absolute path to the `.testcase` file to fix. | -| `rcaOutput` | string | yes | The failure message or RCA output from `provar.testrun.rca`. | +| `rcaOutput` | string | yes | The failure message or RCA output from `provar_testrun_rca`. | | `projectPath` | string | no | Absolute path to the Provar project root (used for context if needed). | --- @@ -1867,7 +1873,7 @@ Analyse coverage gaps for a Salesforce object or feature area. 
Inspects the proj | ------------- | ------ | -------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | | `objectName` | string | yes | Primary Salesforce object to check coverage for (e.g. `"Opportunity"`, `"Lead"`). | | `projectPath` | string | yes | Absolute path to the Provar project root. | -| `targetOrg` | string | no | Salesforce org alias or username. When provided, existing Quality Hub test cases for this object are retrieved via `provar.qualityhub.testcase.retrieve` before the coverage gap analysis. | +| `targetOrg` | string | no | Salesforce org alias or username. When provided, existing Quality Hub test cases for this object are retrieved via `provar_qualityhub_testcase_retrieve` before the coverage gap analysis. | --- @@ -1915,40 +1921,40 @@ The resource content is the same as `docs/PROVAR_TEST_STEP_REFERENCE.md` in this The automation tools are designed to support an **AI-driven fix loop**: an agent can iteratively improve test quality without leaving the chat session. 
``` -provar.project.inspect → understand what's in the project, find uncovered tests +provar_project_inspect → understand what's in the project, find uncovered tests [SF MCP] getObjectSchema → retrieve org field metadata (Salesforce Hosted MCP — optional but recommended) -provar.qualityhub.examples.retrieve → fetch few-shot grounding examples from the corpus -provar.testcase.validate → find quality issues in a test case -provar.testcase.generate → regenerate or fix the test case XML -provar.testplan.add-instance → wire a new/fixed test case into a plan suite -provar.testplan.create-suite → create a suite to organise new tests -provar.ant.generate → generate (or regenerate) the ANT build.xml for CI -provar.ant.validate → validate an existing build.xml before committing -provar.automation.config.load → register the properties file (required before compile/testrun) -provar.automation.compile → compile Page Objects after changes -provar.automation.testrun → execute tests locally against the real org -provar.testrun.rca → diagnose failures: classify root cause, extract page objects -provar.project.validate → re-score the full project +provar_qualityhub_examples_retrieve → fetch few-shot grounding examples from the corpus +provar_testcase_validate → find quality issues in a test case +provar_testcase_generate → regenerate or fix the test case XML +provar_testplan_add-instance → wire a new/fixed test case into a plan suite +provar_testplan_create-suite → create a suite to organise new tests +provar_ant_generate → generate (or regenerate) the ANT build.xml for CI +provar_ant_validate → validate an existing build.xml before committing +provar_automation_config_load → register the properties file (required before compile/testrun) +provar_automation_compile → compile Page Objects after changes +provar_automation_testrun → execute tests locally against the real org +provar_testrun_rca → diagnose failures: classify root cause, extract page objects +provar_project_validate → 
re-score the full project ``` Combined with Quality Hub (grid-managed runs): ``` -provar.qualityhub.connect → authenticate -provar.qualityhub.testrun → start a Quality Hub-managed grid run -provar.qualityhub.testrun.report → poll until complete -provar.qualityhub.testcase.retrieve → pull test cases scoped to a user story -provar.qualityhub.defect.create → file defects for failures automatically +provar_qualityhub_connect → authenticate +provar_qualityhub_testrun → start a Quality Hub-managed grid run +provar_qualityhub_testrun_report → poll until complete +provar_qualityhub_testcase_retrieve → pull test cases scoped to a user story +provar_qualityhub_defect_create → file defects for failures automatically ``` NitroX (Hybrid Model) component page object loop: ``` -provar.nitrox.discover → find all NitroX projects and .po.json files on the machine -provar.nitrox.read → load existing page objects as AI training context -provar.nitrox.validate → check a generated or edited .po.json for schema issues -provar.nitrox.generate → create a new .po.json from a component description -provar.nitrox.patch → apply targeted edits to an existing .po.json (RFC 7396) +provar_nitrox_discover → find all NitroX projects and .po.json files on the machine +provar_nitrox_read → load existing page objects as AI training context +provar_nitrox_validate → check a generated or edited .po.json for schema issues +provar_nitrox_generate → create a new .po.json from a component description +provar_nitrox_patch → apply targeted edits to an existing .po.json (RFC 7396) ``` -> **Note:** `provar.automation.*` and `provar.qualityhub.*` tools invoke `sf` CLI subprocesses. The Salesforce CLI must be installed and in `PATH`, or pass `sf_path` pointing to the executable directly (e.g. `~/.nvm/versions/node/v22.0.0/bin/sf`). A missing `sf` binary returns the error code `SF_NOT_FOUND` with an installation hint. +> **Note:** `provar_automation_*` and `provar_qualityhub_*` tools invoke `sf` CLI subprocesses. 
The Salesforce CLI must be installed and in `PATH`, or pass `sf_path` pointing to the executable directly (e.g. `~/.nvm/versions/node/v22.0.0/bin/sf`). A missing `sf` binary returns the error code `SF_NOT_FOUND` with an installation hint. diff --git a/docs/provar-mcp-public-docs.md b/docs/provar-mcp-public-docs.md index 5e82bf11..18726fb7 100644 --- a/docs/provar-mcp-public-docs.md +++ b/docs/provar-mcp-public-docs.md @@ -187,7 +187,7 @@ Restart Cursor after saving. The Provar tools will appear under **Settings → M Once your AI client is configured, ask it: -> "Call provardx.ping with message 'hello'" +> "Call provardx_ping with message 'hello'" Expected response: @@ -207,7 +207,7 @@ Get an instant inventory of your Provar project — file counts, coverage gaps, **Prompt:** -> "Use provar.project.inspect on my project at `/workspace/MyProvarProject` and tell me what you find — how many test cases are there, and which ones aren't covered by any test plan?" +> "Use provar_project_inspect on my project at `/workspace/MyProvarProject` and tell me what you find — how many test cases are there, and which ones aren't covered by any test plan?" **What you get back:** @@ -299,10 +299,10 @@ Ask the AI to run your local Provar Automation test suite and report results. **The AI will chain:** -1. `provar.automation.config.load` — registers the properties file -2. `provar.automation.compile` — compiles Page Objects -3. `provar.automation.testrun` — executes the test run -4. `provar.testrun.report.locate` — finds the JUnit/HTML report paths +1. `provar_automation_config_load` — registers the properties file +2. `provar_automation_compile` — compiles Page Objects +3. `provar_automation_testrun` — executes the test run +4. `provar_testrun_report_locate` — finds the JUnit/HTML report paths --- @@ -323,9 +323,9 @@ sf provar quality-hub connect -o MyQHOrg **The AI will chain:** -1. `provar.qualityhub.connect` — connects to the org -2. `provar.qualityhub.testrun` — triggers the run -3. 
`provar.qualityhub.testrun.report` — polls status in a loop +1. `provar_qualityhub_connect` — connects to the org +2. `provar_qualityhub_testrun` — triggers the run +3. `provar_qualityhub_testrun_report` — polls status in a loop 4. Reports final pass/fail status and a summary of results --- @@ -360,38 +360,38 @@ Turn a failed test execution directly into a Quality Hub defect, without leaving | Tool | What it does | | ------------------------------------- | ---------------------------------------------------------------- | -| `provardx.ping` | Sanity check — verifies the server is running | -| `provar.project.inspect` | Inventory project artefacts and surface coverage gaps | -| `provar.project.validate` | Full project quality validation from disk | -| `provar.pageobject.generate` | Generate a Java Page Object skeleton | -| `provar.pageobject.validate` | Validate Page Object quality (30+ rules) | -| `provar.testcase.generate` | Generate an XML test case skeleton | -| `provar.testcase.validate` | Validate test case XML (schema + best-practices scores) | -| `provar.testsuite.validate` | Validate a test suite hierarchy | -| `provar.testplan.validate` | Validate a test plan with metadata completeness checks | -| `provar.testplan.add-instance` | Wire a test case into a plan suite | -| `provar.testplan.create-suite` | Create a new test suite inside a plan | -| `provar.testplan.remove-instance` | Remove a test instance from a plan suite | -| `provar.properties.generate` | Generate a `provardx-properties.json` from the standard template | -| `provar.properties.read` | Read and parse a `provardx-properties.json` | -| `provar.properties.set` | Update fields in a `provardx-properties.json` | -| `provar.properties.validate` | Validate a `provardx-properties.json` against the schema | -| `provar.ant.generate` | Generate an ANT `build.xml` for CI/CD pipeline execution | -| `provar.ant.validate` | Validate an ANT `build.xml` | -| `provar.automation.setup` | Detect or 
download/install Provar Automation binaries | -| `provar.automation.config.load` | Register a properties file as the active config | -| `provar.automation.compile` | Compile Page Objects after changes | -| `provar.automation.metadata.download` | Download Salesforce metadata into the project | -| `provar.automation.testrun` | Trigger a local Provar Automation test run | -| `provar.qualityhub.connect` | Connect to a Quality Hub org | -| `provar.qualityhub.display` | Display connected Quality Hub org info | -| `provar.qualityhub.testrun` | Trigger a Quality Hub managed test run | -| `provar.qualityhub.testrun.report` | Poll test run status | -| `provar.qualityhub.testrun.abort` | Abort an in-progress test run | -| `provar.qualityhub.testcase.retrieve` | Retrieve test cases by user story or component | -| `provar.qualityhub.defect.create` | Create Quality Hub defects from failed executions | -| `provar.testrun.report.locate` | Resolve JUnit/HTML report paths after a run | -| `provar.testrun.rca` | Classify failures and detect regressions | +| `provardx_ping` | Sanity check — verifies the server is running | +| `provar_project_inspect` | Inventory project artefacts and surface coverage gaps | +| `provar_project_validate` | Full project quality validation from disk | +| `provar_pageobject_generate` | Generate a Java Page Object skeleton | +| `provar_pageobject_validate` | Validate Page Object quality (30+ rules) | +| `provar_testcase_generate` | Generate an XML test case skeleton | +| `provar_testcase_validate` | Validate test case XML (schema + best-practices scores) | +| `provar_testsuite_validate` | Validate a test suite hierarchy | +| `provar_testplan_validate` | Validate a test plan with metadata completeness checks | +| `provar_testplan_add-instance` | Wire a test case into a plan suite | +| `provar_testplan_create-suite` | Create a new test suite inside a plan | +| `provar_testplan_remove-instance` | Remove a test instance from a plan suite | +| 
`provar_properties_generate` | Generate a `provardx-properties.json` from the standard template | +| `provar_properties_read` | Read and parse a `provardx-properties.json` | +| `provar_properties_set` | Update fields in a `provardx-properties.json` | +| `provar_properties_validate` | Validate a `provardx-properties.json` against the schema | +| `provar_ant_generate` | Generate an ANT `build.xml` for CI/CD pipeline execution | +| `provar_ant_validate` | Validate an ANT `build.xml` | +| `provar_automation_setup` | Detect or download/install Provar Automation binaries | +| `provar_automation_config_load` | Register a properties file as the active config | +| `provar_automation_compile` | Compile Page Objects after changes | +| `provar_automation_metadata_download` | Download Salesforce metadata into the project | +| `provar_automation_testrun` | Trigger a local Provar Automation test run | +| `provar_qualityhub_connect` | Connect to a Quality Hub org | +| `provar_qualityhub_display` | Display connected Quality Hub org info | +| `provar_qualityhub_testrun` | Trigger a Quality Hub managed test run | +| `provar_qualityhub_testrun_report` | Poll test run status | +| `provar_qualityhub_testrun_abort` | Abort an in-progress test run | +| `provar_qualityhub_testcase_retrieve` | Retrieve test cases by user story or component | +| `provar_qualityhub_defect_create` | Create Quality Hub defects from failed executions | +| `provar_testrun_report_locate` | Resolve JUnit/HTML report paths after a run | +| `provar_testrun_rca` | Classify failures and detect regressions | --- diff --git a/docs/university-of-provar-mcp-course.md b/docs/university-of-provar-mcp-course.md index 14af15dd..25f8ecf0 100644 --- a/docs/university-of-provar-mcp-course.md +++ b/docs/university-of-provar-mcp-course.md @@ -126,7 +126,7 @@ You should see a list of flags and tool descriptions. If you see an error, confi 3. Fully quit and reopen Claude Desktop. -4. 
In a new conversation, look for Provar tools in the tool list. You should see entries like `provar.project.inspect`, `provar.testcase.validate`, etc. +4. In a new conversation, look for Provar tools in the tool list. You should see entries like `provar_project_inspect`, `provar_testcase_validate`, etc. > **macOS note:** If `sf` is not found, use the full path. Find it with `which sf` in your terminal, then use that path as the `"command"` value. @@ -155,7 +155,7 @@ Alternatively, run this inside a Claude Code session: In your AI client, type: -> "Call provardx.ping with message 'hello'" +> "Call provardx_ping with message 'hello'" Expected response: @@ -191,7 +191,7 @@ You should see `API key configured` with a source of `~/.provar/credentials.json 1. After editing `claude_desktop_config.json`, what do you need to do for the changes to take effect? _(Fully quit and reopen Claude Desktop — closing the window is not enough)_ -2. What does `provardx.ping` tell you when it responds successfully? +2. What does `provardx_ping` tell you when it responds successfully? _(That the MCP server is running, the client is connected, and the server version)_ 3. You get a `LICENSE_NOT_FOUND` error when the server starts. What is the most likely cause and how do you fix it? 
_(Provar Automation IDE license is not activated on this machine — open Provar Automation IDE, go to Help → Manage License, activate the license, then retry)_ @@ -204,13 +204,13 @@ You should see `API key configured` with a source of `~/.provar/credentials.json **Learning objectives** -- Use `provar.project.inspect` to get a full inventory of a Provar project +- Use `provar_project_inspect` to get a full inventory of a Provar project - Identify test coverage gaps from inspection output - Understand what project context the AI uses when reasoning about your tests ### 3.1 — What inspection tells you -`provar.project.inspect` reads your entire project directory and returns: +`provar_project_inspect` reads your entire project directory and returns: | What | Why it matters | | -------------------------------------- | --------------------------------------------------------------- | @@ -225,7 +225,7 @@ You should see `API key configured` with a source of `~/.provar/credentials.json Point the AI at your project and ask for a summary: -> "Use provar.project.inspect on `/path/to/your/provar/project` and give me a summary: how many test cases, suites, and plans are there? Which test cases aren't in any plan?" +> "Use provar_project_inspect on `/path/to/your/provar/project` and give me a summary: how many test cases, suites, and plans are there? Which test cases aren't in any plan?" 
**What to observe:** @@ -273,7 +273,7 @@ A file can be valid (no schema errors) but have a low quality score (missing des ### 4.2 — Two validation modes -The `provar.testcase.validate` tool operates in one of two modes depending on whether a Quality Hub API key is configured: +The `provar_testcase_validate` tool operates in one of two modes depending on whether a Quality Hub API key is configured: | Mode | `validation_source` | Rules | Requires | | ------------------- | ------------------- | -------------------------------- | ----------------------------- | @@ -342,7 +342,7 @@ Run a project-wide quality scan: ### 5.1 — Page Object generation -Provar Page Objects are Java classes annotated with `@Page` or `@SalesforcePage`. The `provar.pageobject.generate` tool creates a skeleton with correct structure, package declaration, and `@FindBy` field stubs — ready for you to refine and complete. +Provar Page Objects are Java classes annotated with `@Page` or `@SalesforcePage`. The `provar_pageobject_generate` tool creates a skeleton with correct structure, package declaration, and `@FindBy` field stubs — ready for you to refine and complete. ### Lab 5.1 — Generate a Salesforce Page Object @@ -384,7 +384,7 @@ If the quality score is below 80, ask: _(Both return the content without writing to disk, but `dry_run: true` makes the intent explicit and works even if an `output_path` is provided — the path is ignored. Omitting `output_path` simply means there is nowhere to write)_ 2. Why should you validate a generated test case immediately after generation? _(Generated artefacts may be missing best-practice fields — like a test case description or step metadata — that drop the quality score below the 80-point threshold required for plan coverage to count in Quality Hub)_ -3. What annotation does `provar.pageobject.generate` use for Salesforce pages vs non-Salesforce pages? +3. What annotation does `provar_pageobject_generate` use for Salesforce pages vs non-Salesforce pages? 
_(`@SalesforcePage` for Salesforce pages; `@Page` for standard web pages)_ --- @@ -414,11 +414,11 @@ This is the configuration file that tells the Provar DX CLI how and where to run > "Update the `environment.testEnvironment` field in `/path/to/project/provardx-properties.json` to `QA`." -The AI uses `provar.properties.set` to make a targeted update without touching the rest of the file. +The AI uses `provar_properties_set` to make a targeted update without touching the rest of the file. ### Knowledge check -1. What does `provar.automation.config.load` do, and why is it required before triggering a test run? +1. What does `provar_automation_config_load` do, and why is it required before triggering a test run? _(It validates and registers a `provardx-properties.json` as the active configuration in the current session. The compile and testrun tools depend on this loaded state — without it they don't know which project, Provar home, or test cases to use)_ 2. What happens if `provardx-properties.json` still contains `${PLACEHOLDER}` values when you try to run tests? _(The config load step will surface validation warnings for each unresolved placeholder. The run may still attempt to start but will likely fail when Provar Automation encounters the literal placeholder string instead of a real value)_ @@ -439,10 +439,10 @@ The AI uses `provar.properties.set` to make a targeted update without touching t **The AI chains these tools:** -1. `provar.automation.config.load` — registers the properties file -2. `provar.automation.compile` — compiles Java Page Objects -3. `provar.automation.testrun` — executes the run -4. `provar.testrun.report.locate` — finds the report artefacts +1. `provar_automation_config_load` — registers the properties file +2. `provar_automation_compile` — compiles Java Page Objects +3. `provar_automation_testrun` — executes the run +4. 
`provar_testrun_report_locate` — finds the report artefacts **What to observe:** @@ -473,15 +473,15 @@ Then in your AI client: > "Find the JUnit XML results for the run that just completed and summarise any failures." -The AI uses `provar.testrun.report.locate` to resolve the artefact paths, then reads the JUnit XML to extract failure details. +The AI uses `provar_testrun_report_locate` to resolve the artefact paths, then reads the JUnit XML to extract failure details. ### Knowledge check -1. What does `provar.automation.compile` do, and when is it necessary? +1. What does `provar_automation_compile` do, and when is it necessary? _(It compiles Java Page Object and Page Control source files into class files. It is necessary after any Page Object is created or modified — Provar Automation executes the compiled `.class` files, not the `.java` source)_ 2. Why does a Quality Hub test run use a polling loop rather than waiting synchronously? - _(Quality Hub runs are executed on a remote grid and can take minutes to hours. The MCP tools invoke `sf` CLI subprocesses synchronously, so a long-running run would block the entire AI conversation. Polling with `provar.qualityhub.testrun.report` lets the AI check in periodically and report status without blocking)_ -3. Where does `provar.testrun.report.locate` look for report artefacts? + _(Quality Hub runs are executed on a remote grid and can take minutes to hours. The MCP tools invoke `sf` CLI subprocesses synchronously, so a long-running run would block the entire AI conversation. Polling with `provar_qualityhub_testrun_report` lets the AI check in periodically and report status without blocking)_ +3. Where does `provar_testrun_report_locate` look for report artefacts? 
_(It searches the project's `Results/` directory for the most recent JUnit XML and HTML report files written by the last Provar Automation test run)_ --- @@ -490,7 +490,7 @@ The AI uses `provar.testrun.report.locate` to resolve the artefact paths, then r **Learning objectives** -- Use `provar.testrun.rca` to classify test failures +- Use `provar_testrun_rca` to classify test failures - Distinguish pre-existing issues from new regressions - Create a Quality Hub defect from a failed test execution @@ -510,14 +510,14 @@ After a test run with failures: > "The test 'LoginTest' failed with an assertion error on the Account Name field. Create a defect in Quality Hub for it, tagged to the 'Regression' test project." -The AI uses `provar.qualityhub.defect.create` to raise the defect without you leaving the chat session. +The AI uses `provar_qualityhub_defect_create` to raise the defect without you leaving the chat session. ### Knowledge check -1. What information does `provar.testrun.rca` use to classify failures as pre-existing vs new? +1. What information does `provar_testrun_rca` use to classify failures as pre-existing vs new? _(It reads the JUnit XML results from the completed run, analyses failure messages and stack traces, and cross-references them against the test case history and Page Objects involved to identify patterns that suggest a pre-existing flake vs a newly introduced failure)_ 2. What is required in Quality Hub before you can create a defect from an MCP tool call? 
- _(The Quality Hub org must be connected via `provar.qualityhub.connect` (or `sf provar quality-hub connect`) in the current session, and the test project you're filing against must already exist in Quality Hub)_ + _(The Quality Hub org must be connected via `provar_qualityhub_connect` (or `sf provar quality-hub connect`) in the current session, and the test project you're filing against must already exist in Quality Hub)_ --- @@ -535,7 +535,7 @@ After generating a new test case in Module 5: > "Add the test case at `/path/to/project/tests/smoke/CreateNewContact.testcase` to the test plan 'Smoke Tests', under the suite 'Contact Management'. Create the instance file at `/path/to/project/plans/SmokeTests/ContactManagement/CreateNewContact.testinstance`." -The AI uses `provar.testplan.add-instance` to write the `.testinstance` file with the correct `testCasePath` attribute. +The AI uses `provar_testplan_add-instance` to write the `.testinstance` file with the correct `testCasePath` attribute. ### Lab 9.2 — Create a new suite in a plan @@ -547,9 +547,9 @@ The AI uses `provar.testplan.add-instance` to write the `.testinstance` file wit ### Knowledge check -1. What file type does `provar.testplan.add-instance` create, and what key attribute does it contain? +1. What file type does `provar_testplan_add-instance` create, and what key attribute does it contain? _(A `.testinstance` file. The key attribute is `testCasePath`, which holds the relative path to the `.testcase` file being wired into the plan)_ -2. After adding test instances to a plan, how does that affect the `coverage_percent` reported by `provar.project.inspect`? +2. After adding test instances to a plan, how does that affect the `coverage_percent` reported by `provar_project_inspect`? 
_(The newly wired test cases move from `uncovered_test_case_paths` to `covered_test_case_paths`, increasing the `coverage_percent` value)_ --- @@ -588,13 +588,13 @@ You have now covered the full Provar MCP feature set: | Area | Key tools | | ------------------ | ----------------------------------------------------------------------------------------------------------------- | -| Project awareness | `provar.project.inspect`, `provar.project.validate` | -| Quality validation | `provar.testcase.validate`, `provar.pageobject.validate`, `provar.testsuite.validate`, `provar.testplan.validate` | -| Test authoring | `provar.pageobject.generate`, `provar.testcase.generate` | -| Run configuration | `provar.properties.generate`, `provar.properties.set`, `provar.automation.config.load` | -| Test execution | `provar.automation.testrun`, `provar.qualityhub.testrun`, `provar.testrun.report.locate` | -| Failure analysis | `provar.testrun.rca`, `provar.qualityhub.defect.create` | -| Plan management | `provar.testplan.add-instance`, `provar.testplan.create-suite`, `provar.testplan.remove-instance` | +| Project awareness | `provar_project_inspect`, `provar_project_validate` | +| Quality validation | `provar_testcase_validate`, `provar_pageobject_validate`, `provar_testsuite_validate`, `provar_testplan_validate` | +| Test authoring | `provar_pageobject_generate`, `provar_testcase_generate` | +| Run configuration | `provar_properties_generate`, `provar_properties_set`, `provar_automation_config_load` | +| Test execution | `provar_automation_testrun`, `provar_qualityhub_testrun`, `provar_testrun_report_locate` | +| Failure analysis | `provar_testrun_rca`, `provar_qualityhub_defect_create` | +| Plan management | `provar_testplan_add-instance`, `provar_testplan_create-suite`, `provar_testplan_remove-instance` | ## Frequently asked questions @@ -602,7 +602,7 @@ You have now covered the full Provar MCP feature set: No. Your existing Provar Automation license covers MCP usage. 
The MCP server reads your IDE license automatically. **Can I use Provar MCP without an existing Provar project?** -The AI can generate new artefacts (Page Objects, test cases, properties files) from scratch, but project-level tools like `provar.project.inspect` and `provar.project.validate` require a project directory with at least a `.testproject` file. We recommend starting from an existing project. +The AI can generate new artefacts (Page Objects, test cases, properties files) from scratch, but project-level tools like `provar_project_inspect` and `provar_project_validate` require a project directory with at least a `.testproject` file. We recommend starting from an existing project. **Will the AI send my project files to Provar?** No. The MCP server runs entirely on your local machine. File contents pass between the server and your AI client only (e.g. Claude Desktop, which runs locally). No data is sent to Provar's servers. diff --git a/messages/sf.provar.auth.clear.md b/messages/sf.provar.auth.clear.md index b15a5a5e..2bac7a12 100644 --- a/messages/sf.provar.auth.clear.md +++ b/messages/sf.provar.auth.clear.md @@ -5,7 +5,7 @@ Remove the stored Provar API key. # description Deletes the API key stored at ~/.provar/credentials.json. After clearing, the -provar.testcase.validate MCP tool falls back to local validation (structural rules only, +provar_testcase_validate MCP tool falls back to local validation (structural rules only, no Quality Hub quality scoring). The PROVAR_API_KEY environment variable is not affected by this command. diff --git a/messages/sf.provar.mcp.start.md b/messages/sf.provar.mcp.start.md index eb8f0b32..cddb7c6e 100644 --- a/messages/sf.provar.mcp.start.md +++ b/messages/sf.provar.mcp.start.md @@ -1,71 +1,88 @@ # summary + Start a local MCP server for Provar tools over stdio transport. 
# description + Launches a stateless MCP (Model Context Protocol) server that exposes Provar tools to AI assistants (Claude Desktop, Claude Code, Cursor) via stdio transport. All MCP JSON-RPC communication happens over stdout; all internal logging goes to stderr. Available tools: - Project & inspection: - - provar.project.inspect — inspect project folder inventory - - provar.project.validate — validate full project from disk: coverage, quality scores - - Page Object: - - provar.pageobject.generate — generate a Java Page Object skeleton - - provar.pageobject.validate — validate Page Object quality and naming - - Test Case: - - provar.testcase.generate — generate an XML test case skeleton - - provar.testcase.validate — validate test case XML (validity + best-practices scores) - - Test Suite / Plan: - - provar.testsuite.validate — validate test suite hierarchy - - provar.testplan.validate — validate test plan metadata completeness - - provar.testplan.create-suite — create a test suite under a plan - - provar.testplan.add-instance — add a test instance to a plan - - provar.testplan.remove-instance — remove a test instance from a plan - - Properties files: - - provar.properties.read — read a Provar properties file - - provar.properties.set — set a key in a Provar properties file - - provar.properties.validate — validate a properties file structure - - provar.properties.generate — generate a properties file skeleton - - Quality Hub (sf provar quality-hub wrappers): - - provar.qualityhub.connect — connect to a Quality Hub org - - provar.qualityhub.display — display connected org info - - provar.qualityhub.testrun — trigger a Quality Hub test run - - provar.qualityhub.testrun.report — poll test run status - - provar.qualityhub.testrun.abort — abort a running test run - - provar.qualityhub.testcase.retrieve — retrieve test case results - - provar.qualityhub.defect.create — create defects for failed test executions - - Automation (sf provar automation wrappers): - - 
provar.automation.setup — set up the Provar Automation runtime - - provar.automation.metadata.download — download Salesforce metadata - - provar.automation.compile — compile Provar test assets - - provar.automation.testrun — run Provar tests - - provar.automation.config.load — load a Provar configuration - - ANT build: - - provar.ant.generate — generate an ANT build.xml - - provar.ant.validate — validate an ANT build.xml - - Test result analysis: - - provar.testrun.rca — root cause analysis on a test result - - provar.testrun.report.locate — locate a test result report +Project & inspection: + +- provar_project_inspect — inspect project folder inventory +- provar_project_validate — validate full project from disk: coverage, quality scores +- provar_connection_list — list connections and named environments from the project + +Page Object: + +- provar_pageobject_generate — generate a Java Page Object skeleton +- provar_pageobject_validate — validate Page Object quality and naming + +Test Case: + +- provar_testcase_generate — generate an XML test case skeleton +- provar_testcase_validate — validate test case XML (validity + best-practices scores) +- provar_testcase_step_edit — atomically add or remove a single step in a test case + +Test Suite / Plan: + +- provar_testsuite_validate — validate test suite hierarchy +- provar_testplan_validate — validate test plan metadata completeness +- provar_testplan_create-suite — create a test suite under a plan +- provar_testplan_add-instance — add a test instance to a plan +- provar_testplan_remove-instance — remove a test instance from a plan + +Properties files: + +- provar_properties_read — read a Provar properties file +- provar_properties_set — set a key in a Provar properties file +- provar_properties_validate — validate a properties file structure +- provar_properties_generate — generate a properties file skeleton + +Quality Hub (sf provar quality-hub wrappers): + +- provar_qualityhub_connect — connect to a Quality Hub org 
+- provar_qualityhub_display — display connected org info +- provar_qualityhub_testrun — trigger a Quality Hub test run +- provar_qualityhub_testrun_report — poll test run status +- provar_qualityhub_testrun_abort — abort a running test run +- provar_qualityhub_testcase_retrieve — retrieve test case results +- provar_qualityhub_defect_create — create defects for failed test executions +- provar_qualityhub_examples_retrieve — retrieve corpus examples for test generation grounding + +Automation (sf provar automation wrappers): + +- provar_automation_setup — set up the Provar Automation runtime +- provar_automation_metadata_download — download Salesforce metadata +- provar_automation_compile — compile Provar test assets +- provar_automation_testrun — run Provar tests +- provar_automation_config_load — load a Provar configuration + +ANT build: + +- provar_ant_generate — generate an ANT build.xml +- provar_ant_validate — validate an ANT build.xml + +Test result analysis: + +- provar_testrun_rca — root cause analysis on a test result +- provar_testrun_report_locate — locate a test result report For full tool documentation see docs/mcp.md in this repository. # flags.allowed-paths.summary + Allowed base directory paths for file operations. Defaults to current directory. # flags.auto-defects.summary + When enabled, testrun.report suggestions will prompt defect creation on failures. 
# examples + - Start MCP server (accepts stdio connections from Claude Desktop / Cursor): <%= config.bin %> <%= command.id %> - Start with explicit allowed paths: diff --git a/package.json b/package.json index c9ccd879..72041167 100644 --- a/package.json +++ b/package.json @@ -1,7 +1,7 @@ { "name": "@provartesting/provardx-cli", "description": "A plugin for the Salesforce CLI to orchestrate testing activities and report quality metrics to Provar Quality Hub", - "version": "1.5.0-beta.13", + "version": "1.5.0-beta.15", "mcpName": "io.github.ProvarTesting/provar", "license": "BSD-3-Clause", "plugins": [ diff --git a/scripts/mcp-smoke.cjs b/scripts/mcp-smoke.cjs index 39bdf46f..0bbbd392 100644 --- a/scripts/mcp-smoke.cjs +++ b/scripts/mcp-smoke.cjs @@ -9,7 +9,7 @@ // Env flags: // SMOKE_REQUEST_TIMEOUT_MS Per-request timeout in ms (default: 30000) // SMOKE_OVERALL_TIMEOUT_MS Hard deadline for the whole run in ms (default: 120000) -// SMOKE_INCLUDE_SETUP Set to "1" to include provar.automation.setup (may download +// SMOKE_INCLUDE_SETUP Set to "1" to include provar_automation_setup (may download // binaries if no Provar install is found — disabled by default) const { spawn } = require('child_process'); @@ -115,174 +115,174 @@ async function runTests() { // ── 1. tools/list ───────────────────────────────────────────────────────── await send('tools/list', {}); - // ── 2. provardx.ping ────────────────────────────────────────────────────── - await callTool('provardx.ping', { message: 'smoke-test' }); + // ── 2. provardx_ping ────────────────────────────────────────────────────── + await callTool('provardx_ping', { message: 'smoke-test' }); - // ── 3. provar.project.inspect ───────────────────────────────────────────── + // ── 3. 
provar_project_inspect ───────────────────────────────────────────── // TMP has no .testproject → structured "not a Provar project" response - await callTool('provar.project.inspect', { project_path: TMP }); + await callTool('provar_project_inspect', { project_path: TMP }); - // ── 4. provar.pageobject.generate (dry_run) ─────────────────────────────── - await callTool('provar.pageobject.generate', { + // ── 4. provar_pageobject_generate (dry_run) ─────────────────────────────── + await callTool('provar_pageobject_generate', { class_name: 'AccountDetailPage', package_name: 'pageobjects.accounts', page_type: 'standard', dry_run: true, }); - // ── 5. provar.pageobject.validate ───────────────────────────────────────── - await callTool('provar.pageobject.validate', { + // ── 5. provar_pageobject_validate ───────────────────────────────────────── + await callTool('provar_pageobject_validate', { content: 'public class AccountDetailPage {}', }); - // ── 6. provar.testcase.generate (dry_run) ───────────────────────────────── - await callTool('provar.testcase.generate', { + // ── 6. provar_testcase_generate (dry_run) ───────────────────────────────── + await callTool('provar_testcase_generate', { test_case_name: 'Smoke Test Case', dry_run: true, }); - // ── 7. provar.testcase.validate ─────────────────────────────────────────── - await callTool('provar.testcase.validate', { content: '' }); + // ── 7. provar_testcase_validate ─────────────────────────────────────────── + await callTool('provar_testcase_validate', { content: '' }); - // ── 8. provar.testsuite.validate ────────────────────────────────────────── - await callTool('provar.testsuite.validate', { suite_name: 'SmokeTestSuite' }); + // ── 8. provar_testsuite_validate ────────────────────────────────────────── + await callTool('provar_testsuite_validate', { suite_name: 'SmokeTestSuite' }); - // ── 9. 
provar.testplan.validate ─────────────────────────────────────────── - await callTool('provar.testplan.validate', { plan_name: 'SmokeTestPlan' }); + // ── 9. provar_testplan_validate ─────────────────────────────────────────── + await callTool('provar_testplan_validate', { plan_name: 'SmokeTestPlan' }); - // ── 10. provar.project.validate ─────────────────────────────────────────── + // ── 10. provar_project_validate ─────────────────────────────────────────── // TMP is not a Provar project → PATH_NOT_FOUND or NOT_A_PROJECT result - await callTool('provar.project.validate', { project_path: TMP }); + await callTool('provar_project_validate', { project_path: TMP }); - // ── 11. provar.properties.generate (dry_run) ────────────────────────────── - await callTool('provar.properties.generate', { + // ── 11. provar_properties_generate (dry_run) ────────────────────────────── + await callTool('provar_properties_generate', { output_path: path.join(TMP, 'smoke-props.json'), dry_run: true, }); - // ── 12. provar.properties.read ──────────────────────────────────────────── + // ── 12. provar_properties_read ──────────────────────────────────────────── // Non-existent file → FILE_NOT_FOUND result - await callTool('provar.properties.read', { + await callTool('provar_properties_read', { file_path: path.join(TMP, 'nonexistent-props.json'), }); - // ── 13. provar.properties.set ───────────────────────────────────────────── + // ── 13. provar_properties_set ───────────────────────────────────────────── // Non-existent file → FILE_NOT_FOUND result - await callTool('provar.properties.set', { + await callTool('provar_properties_set', { file_path: path.join(TMP, 'nonexistent-props.json'), updates: { stopOnError: true }, }); - // ── 14. provar.properties.validate ─────────────────────────────────────── + // ── 14. 
provar_properties_validate ─────────────────────────────────────── // Empty JSON → validation issues about missing required fields - await callTool('provar.properties.validate', { content: '{}' }); + await callTool('provar_properties_validate', { content: '{}' }); - // ── 15. provar.ant.generate (dry_run) ───────────────────────────────────── - await callTool('provar.ant.generate', { + // ── 15. provar_ant_generate (dry_run) ───────────────────────────────────── + await callTool('provar_ant_generate', { provar_home: path.join(TMP, 'provar'), filesets: [{ dir: '../tests' }], dry_run: true, }); - // ── 16. provar.ant.validate ─────────────────────────────────────────────── + // ── 16. provar_ant_validate ─────────────────────────────────────────────── // Minimal XML — will have validation issues but not crash - await callTool('provar.ant.validate', { content: '' }); + await callTool('provar_ant_validate', { content: '' }); - // ── 17. provar.qualityhub.connect ───────────────────────────────────────── + // ── 17. provar_qualityhub_connect ───────────────────────────────────────── // No real org → SF_NOT_FOUND or auth error result - await callTool('provar.qualityhub.connect', { target_org: 'smoke-test-org' }); + await callTool('provar_qualityhub_connect', { target_org: 'smoke-test-org' }); - // ── 18. provar.qualityhub.display ───────────────────────────────────────── - await callTool('provar.qualityhub.display', {}); + // ── 18. provar_qualityhub_display ───────────────────────────────────────── + await callTool('provar_qualityhub_display', {}); - // ── 19. provar.qualityhub.testrun ───────────────────────────────────────── - await callTool('provar.qualityhub.testrun', { target_org: 'smoke-test-org' }); + // ── 19. provar_qualityhub_testrun ───────────────────────────────────────── + await callTool('provar_qualityhub_testrun', { target_org: 'smoke-test-org' }); - // ── 20. 
provar.qualityhub.testrun.report ────────────────────────────────── - await callTool('provar.qualityhub.testrun.report', { + // ── 20. provar_qualityhub_testrun_report ────────────────────────────────── + await callTool('provar_qualityhub_testrun_report', { target_org: 'smoke-test-org', run_id: 'fake-run-id-000', }); - // ── 21. provar.qualityhub.testrun.abort ─────────────────────────────────── - await callTool('provar.qualityhub.testrun.abort', { + // ── 21. provar_qualityhub_testrun_abort ─────────────────────────────────── + await callTool('provar_qualityhub_testrun_abort', { target_org: 'smoke-test-org', run_id: 'fake-run-id-000', }); - // ── 22. provar.qualityhub.testcase.retrieve ─────────────────────────────── - await callTool('provar.qualityhub.testcase.retrieve', { target_org: 'smoke-test-org' }); + // ── 22. provar_qualityhub_testcase_retrieve ─────────────────────────────── + await callTool('provar_qualityhub_testcase_retrieve', { target_org: 'smoke-test-org' }); - // ── 23. provar.qualityhub.defect.create ─────────────────────────────────── - await callTool('provar.qualityhub.defect.create', { + // ── 23. provar_qualityhub_defect_create ─────────────────────────────────── + await callTool('provar_qualityhub_defect_create', { run_id: 'fake-run-id-000', target_org: 'smoke-test-org', }); - // ── 24. provar.automation.setup ─────────────────────────────────────────── + // ── 24. provar_automation_setup ─────────────────────────────────────────── // Skipped by default: when no Provar installation is found on the CI runner, // this tool downloads the full Provar binary (~200 MB), which is a destructive // side effect in a smoke test. Enable with SMOKE_INCLUDE_SETUP=1. if (INCLUDE_SETUP) { - await callTool('provar.automation.setup', {}); + await callTool('provar_automation_setup', {}); } - // ── 25. provar.automation.metadata.download ─────────────────────────────── - await callTool('provar.automation.metadata.download', {}); + // ── 25. 
provar_automation_metadata_download ─────────────────────────────── + await callTool('provar_automation_metadata_download', {}); - // ── 26. provar.automation.compile ───────────────────────────────────────── - await callTool('provar.automation.compile', {}); + // ── 26. provar_automation_compile ───────────────────────────────────────── + await callTool('provar_automation_compile', {}); - // ── 27. provar.automation.testrun ───────────────────────────────────────── - await callTool('provar.automation.testrun', {}); + // ── 27. provar_automation_testrun ───────────────────────────────────────── + await callTool('provar_automation_testrun', {}); - // ── 28. provar.automation.config.load ───────────────────────────────────── - await callTool('provar.automation.config.load', { + // ── 28. provar_automation_config_load ───────────────────────────────────── + await callTool('provar_automation_config_load', { properties_path: path.join(TMP, 'nonexistent-props.json'), }); - // ── 29. provar.testrun.report.locate ───────────────────────────────────── + // ── 29. provar_testrun_report_locate ───────────────────────────────────── // TMP is not a Provar project → RESULTS_NOT_CONFIGURED result - await callTool('provar.testrun.report.locate', { project_path: TMP }); + await callTool('provar_testrun_report_locate', { project_path: TMP }); - // ── 30. provar.testrun.rca ─────────────────────────────────────────────── - await callTool('provar.testrun.rca', { project_path: TMP }); + // ── 30. provar_testrun_rca ─────────────────────────────────────────────── + await callTool('provar_testrun_rca', { project_path: TMP }); - // ── 31. provar.testplan.create ──────────────────────────────────────────── + // ── 31. provar_testplan_create ──────────────────────────────────────────── // TMP is not a Provar project → NOT_A_PROJECT result - await callTool('provar.testplan.create', { + await callTool('provar_testplan_create', { project_path: TMP, plan_name: 'SmokePlan', }); - // ── 32. 
provar.testplan.add-instance ───────────────────────────────────── + // ── 32. provar_testplan_add-instance ───────────────────────────────────── // TMP is not a Provar project → NOT_A_PROJECT result - await callTool('provar.testplan.add-instance', { + await callTool('provar_testplan_add-instance', { project_path: TMP, test_case_path: 'tests/Smoke/SmokeTest.testcase', plan_name: 'SmokePlan', }); - // ── 33. provar.testplan.create-suite ───────────────────────────────────── - await callTool('provar.testplan.create-suite', { + // ── 33. provar_testplan_create-suite ───────────────────────────────────── + await callTool('provar_testplan_create-suite', { project_path: TMP, plan_name: 'SmokePlan', suite_name: 'SmokeSuite', }); - // ── 34. provar.testplan.remove-instance ────────────────────────────────── - await callTool('provar.testplan.remove-instance', { + // ── 34. provar_testplan_remove-instance ────────────────────────────────── + await callTool('provar_testplan_remove-instance', { project_path: TMP, instance_path: 'plans/SmokePlan/SmokeSuite/smoke.testinstance', }); - // ── 35. provar.nitrox.discover ──────────────────────────────────────────── + // ── 35. provar_nitrox_discover ──────────────────────────────────────────── // TMP has no .testproject → empty projects list, no crash - await callTool('provar.nitrox.discover', { search_roots: [TMP] }); + await callTool('provar_nitrox_discover', { search_roots: [TMP] }); - // ── 36. provar.nitrox.validate ──────────────────────────────────────────── + // ── 36. provar_nitrox_validate ──────────────────────────────────────────── // Minimal valid root component → score 100 - await callTool('provar.nitrox.validate', { + await callTool('provar_nitrox_validate', { content: JSON.stringify({ componentId: '550e8400-e29b-41d4-a716-446655440000', name: '/com/smoke/SmokeComponent', @@ -292,29 +292,29 @@ async function runTests() { }), }); - // ── 36. 
provar.nitrox.generate (dry_run) ───────────────────────────────── - await callTool('provar.nitrox.generate', { + // ── 36. provar_nitrox_generate (dry_run) ───────────────────────────────── + await callTool('provar_nitrox_generate', { name: '/com/smoke/SmokeComponent', tag_name: 'c-smoke', dry_run: true, }); - // ── 37. provar.nitrox.read ──────────────────────────────────────────────── + // ── 37. provar_nitrox_read ──────────────────────────────────────────────── // Non-existent file → FILE_NOT_FOUND result (not a protocol error) - await callTool('provar.nitrox.read', { + await callTool('provar_nitrox_read', { file_paths: [path.join(TMP, 'nonexistent.po.json')], }); - // ── 38. provar.nitrox.patch ─────────────────────────────────────────────── + // ── 38. provar_nitrox_patch ─────────────────────────────────────────────── // Non-existent file → FILE_NOT_FOUND result (not a protocol error) - await callTool('provar.nitrox.patch', { + await callTool('provar_nitrox_patch', { file_path: path.join(TMP, 'nonexistent.po.json'), patch: { name: '/com/smoke/Patched' }, }); - // ── 39. provar.qualityhub.examples.retrieve ─────────────────────────────── + // ── 39. provar_qualityhub_examples_retrieve ─────────────────────────────── // No API key in CI → graceful degrade with warning, empty examples (isError: false) - await callTool('provar.qualityhub.examples.retrieve', { + await callTool('provar_qualityhub_examples_retrieve', { query: 'As a sales rep I want to create an Opportunity in Salesforce', n: 3, }); @@ -363,13 +363,13 @@ async function runTests() { arguments: { story: 'Verify Users table has at least one Active record after Salesforce flow runs' }, }); - // ── 49. provar.connection.list ──────────────────────────────────────────── + // ── 49. 
provar_connection_list ──────────────────────────────────────────── // TMP has no .testproject → CONNECTION_FILE_NOT_FOUND result (not a protocol error) - await callTool('provar.connection.list', { project_path: TMP }); + await callTool('provar_connection_list', { project_path: TMP }); - // ── 50. provar.testcase.step.edit ───────────────────────────────────────── + // ── 50. provar_testcase_step_edit ───────────────────────────────────────── // TMP/nonexistent.testcase does not exist → FILE_NOT_FOUND result - await callTool('provar.testcase.step.edit', { + await callTool('provar_testcase_step_edit', { test_case_path: path.join(TMP, 'nonexistent.testcase'), mode: 'remove', test_item_id: '1', diff --git a/server.json b/server.json index 96d72334..3b203868 100644 --- a/server.json +++ b/server.json @@ -14,12 +14,12 @@ "url": "https://github.com/ProvarTesting/provardx-cli", "source": "github" }, - "version": "1.5.0-beta.13", + "version": "1.5.0-beta.15", "packages": [ { "registryType": "npm", "identifier": "@provartesting/provardx-cli", - "version": "1.5.0-beta.13", + "version": "1.5.0-beta.15", "transport": { "type": "stdio" }, diff --git a/src/mcp/prompts/loopPrompts.ts b/src/mcp/prompts/loopPrompts.ts index 7d74e99b..2153a6c7 100644 --- a/src/mcp/prompts/loopPrompts.ts +++ b/src/mcp/prompts/loopPrompts.ts @@ -21,7 +21,7 @@ function projectHint(projectPath: string | undefined): string { export function registerLoopGeneratePrompt(server: McpServer): void { server.prompt( 'provar.loop.generate', - 'Generate a Provar XML test case from a user story or acceptance criteria. Retrieves corpus examples for grounding, generates the test, writes it to the project, then validates it with provar.testcase.validate.', + 'Generate a Provar XML test case from a user story or acceptance criteria. 
Retrieves corpus examples for grounding, generates the test, writes it to the project, then validates it with provar_testcase_validate.', { story: z .string() @@ -62,7 +62,7 @@ Follow these steps in order: 1. **Extract keywords** — identify the Salesforce object, the action (create/update/close/delete/view), and key scenario details from the story. Use these as the query for step 2. -2. **Get corpus examples** — call \`provar.qualityhub.examples.retrieve\` with the keywords you extracted +2. **Get corpus examples** — call \`provar_qualityhub_examples_retrieve\` with the keywords you extracted (e.g. "close opportunity" or "create lead"). Use the returned XML examples as the sole reference for Provar step structure and argument patterns. Do not invent XML structure from prior knowledge. If the response has \`"count": 0\` with a \`"warning"\` field (API unavailable or not configured), @@ -85,7 +85,7 @@ Follow these steps in order: ${projectHint(projectPath)} ${testName ? `Target file name: ${testName}.testcase` : 'Infer the file name from the story (snake_case).'} -6. **Validate** — call \`provar.testcase.validate\` on the saved file. If it reports errors, fix them +6. **Validate** — call \`provar_testcase_validate\` on the saved file. If it reports errors, fix them and re-validate until the file passes clean. 7. **Report** — summarise: @@ -99,7 +99,7 @@ ${story} ${objectName ? `Primary object: ${objectName}` : ''} -Begin with step 1: extract keywords, then call provar.qualityhub.examples.retrieve.`, +Begin with step 1: extract keywords, then call provar_qualityhub_examples_retrieve.`, }, }, ], @@ -112,7 +112,7 @@ Begin with step 1: extract keywords, then call provar.qualityhub.examples.retrie export function registerLoopFixPrompt(server: McpServer): void { server.prompt( 'provar.loop.fix', - 'Fix a failing Provar test case using the output from provar.testrun.rca. 
Reads the current XML, retrieves corpus examples for the failing step type, applies targeted fixes, then re-validates.', + 'Fix a failing Provar test case using the output from provar_testrun_rca. Reads the current XML, retrieves corpus examples for the failing step type, applies targeted fixes, then re-validates.', { testcasePath: z .string() @@ -122,7 +122,7 @@ export function registerLoopFixPrompt(server: McpServer): void { rcaOutput: z .string() .describe( - 'The RCA report text from provar.testrun.rca, or a raw failure message from a test run. Include the full error text — step name, error type, and message. The more detail, the better the fix.' + 'The RCA report text from provar_testrun_rca, or a raw failure message from a test run. Include the full error text — step name, error type, and message. The more detail, the better the fix.' ), projectPath: z .string() @@ -160,7 +160,7 @@ Follow these steps in order: - The failure category (e.g. element not found, assertion mismatch, connection error, XML structure error) - The specific error message -3. **Get corpus examples** — call \`provar.qualityhub.examples.retrieve\` with keywords describing the +3. **Get corpus examples** — call \`provar_qualityhub_examples_retrieve\` with keywords describing the failing step's scenario (e.g. "close opportunity UiDoAction" or "assert field value UiAssert"). Use the returned examples to verify the correct structure for the failing step type. If the response has \`"count": 0\` with a \`"warning"\` field, fall back: read the @@ -179,7 +179,7 @@ Follow these steps in order: 5. **Apply the fix** — rewrite only the failing step(s). Preserve all other steps unchanged. Write the updated XML back to the same file path. -6. **Validate** — call \`provar.testcase.validate\` on the updated file. If new errors appear, fix them +6. **Validate** — call \`provar_testcase_validate\` on the updated file. If new errors appear, fix them and re-validate until the file passes clean. 7. 
**Report** — summarise: @@ -226,13 +226,13 @@ ${projectPath ? `Provar project root: ${projectPath}` : ''} Follow these steps in order: -1. **Validate** — call \`provar.testcase.validate\` on the file. Note all errors and warnings. Do not stop +1. **Validate** — call \`provar_testcase_validate\` on the file. Note all errors and warnings. Do not stop here even if the file is valid — continue the review. 2. **Read the file** — read the XML to understand the test structure: what object is being tested, what actions are performed, and what is being asserted. -3. **Get corpus examples** — call \`provar.qualityhub.examples.retrieve\` with keywords describing the +3. **Get corpus examples** — call \`provar_qualityhub_examples_retrieve\` with keywords describing the test scenario (e.g. "create opportunity", "close opportunity"). Use the returned examples as a quality baseline. If the response has \`"count": 0\` with a \`"warning"\` field, fall back: read the \`provar://docs/step-reference\` MCP resource for step type schemas, then continue the review @@ -276,7 +276,7 @@ Follow these steps in order: ### Suggested improvements Any non-blocking suggestions (e.g. parameterising a hardcoded value that appears more than once). -Begin with step 1: call provar.testcase.validate on the file at: ${testcasePath}`, +Begin with step 1: call provar_testcase_validate on the file at: ${testcasePath}`, }, }, ], @@ -305,7 +305,7 @@ export function registerLoopCoveragePrompt(server: McpServer): void { .string() .optional() .describe( - 'SF org alias or username. If provided, also queries provar.qualityhub.testcase.retrieve to include Quality Hub tests in the coverage analysis.' + 'SF org alias or username. If provided, also queries provar_qualityhub_testcase_retrieve to include Quality Hub tests in the coverage analysis.' ), }, ({ objectName, projectPath, targetOrg }) => ({ @@ -335,15 +335,15 @@ Follow these steps in order: ${ targetOrg - ? `2. 
**Query Quality Hub** — call \`provar.qualityhub.testcase.retrieve\` with target_org="${targetOrg}" + ? `2. **Query Quality Hub** — call \`provar_qualityhub_testcase_retrieve\` with target_org="${targetOrg}" to retrieve test cases linked to the "${objectName}" object from Quality Hub. Add these to the coverage inventory. -3. **Get corpus examples** — call \`provar.qualityhub.examples.retrieve\` with "${objectName.toLowerCase()}" +3. **Get corpus examples** — call \`provar_qualityhub_examples_retrieve\` with "${objectName.toLowerCase()}" as the query to understand what test patterns exist in the corpus for this object. If the response has \`"count": 0\` with a \`"warning"\` field, fall back: read the \`provar://docs/step-reference\` MCP resource for step type schemas, then continue.` - : `2. **Get corpus examples** — call \`provar.qualityhub.examples.retrieve\` with "${objectName.toLowerCase()}" + : `2. **Get corpus examples** — call \`provar_qualityhub_examples_retrieve\` with "${objectName.toLowerCase()}" as the query to understand what test patterns exist in the corpus for this object. If the response has \`"count": 0\` with a \`"warning"\` field, fall back: read the \`provar://docs/step-reference\` MCP resource for step type schemas, then continue.` @@ -436,7 +436,7 @@ This is a **database test**, NOT a Salesforce UI or Apex test. Do not use UiConn Follow these steps in order: -1. **Get corpus examples** — call \`provar.qualityhub.examples.retrieve\` with a query that includes "database DbConnect SqlQuery" plus keywords from the story (e.g. "database SQL Server verify record count"). Use the returned XML examples as the reference for correct step structure. +1. **Get corpus examples** — call \`provar_qualityhub_examples_retrieve\` with a query that includes "database DbConnect SqlQuery" plus keywords from the story (e.g. "database SQL Server verify record count"). Use the returned XML examples as the reference for correct step structure. 
If the response has \`"count": 0\` with a \`"warning"\` field (API unavailable or not configured), fall back: read the \`provar://docs/step-reference\` MCP resource — specifically the Database Steps section — for the correct attribute schema, then continue. 2. **Generate the test case** — produce valid Provar XML. Apply these database-specific rules: @@ -480,7 +480,7 @@ Follow these steps in order: ${projectHint(projectPath)} ${testName ? `Target file name: ${testName}.testcase` : 'Infer the file name from the story (snake_case).'} -4. **Validate** — call \`provar.testcase.validate\` on the saved file. If it reports errors, fix them and re-validate until the file passes clean. +4. **Validate** — call \`provar_testcase_validate\` on the saved file. If it reports errors, fix them and re-validate until the file passes clean. 5. **Report** — summarise: - Which query/assertion was implemented @@ -492,7 +492,7 @@ Follow these steps in order: ${story} -Begin with step 1: call provar.qualityhub.examples.retrieve with "database DbConnect SqlQuery" plus keywords from the story above.`, +Begin with step 1: call provar_qualityhub_examples_retrieve with "database DbConnect SqlQuery" plus keywords from the story above.`, }, }, ], diff --git a/src/mcp/prompts/migrationPrompts.ts b/src/mcp/prompts/migrationPrompts.ts index db36abc8..37c7f53c 100644 --- a/src/mcp/prompts/migrationPrompts.ts +++ b/src/mcp/prompts/migrationPrompts.ts @@ -10,8 +10,8 @@ import { McpServer } from '@modelcontextprotocol/sdk/server/mcp.js'; // Source-framework context injected into each migration prompt. // Lightweight: just enough to interpret the source. Format knowledge lives -// in the corpus (provar.qualityhub.examples.retrieve) and the validator -// (provar.testcase.validate) — not hardcoded here. +// in the corpus (provar_qualityhub_examples_retrieve) and the validator +// (provar_testcase_validate) — not hardcoded here. 
const CRT_CONTEXT = ` CRT (Copado Robotic Testing) is a keyword-driven framework built on Robot Framework. @@ -65,7 +65,7 @@ function migrationOrchestration(projectPath: string | undefined): string { Follow these steps in order: -1. **Get corpus examples** — call \`provar.qualityhub.examples.retrieve\` with keywords that +1. **Get corpus examples** — call \`provar_qualityhub_examples_retrieve\` with keywords that describe the source test's main scenario (e.g. "create opportunity", "close case", "convert lead"). Use the returned examples as few-shot grounding for the Provar XML format and step patterns. If the response has \`"count": 0\` with a \`"warning"\` field (API unavailable or not configured), @@ -80,7 +80,7 @@ Follow these steps in order: 3. **Write the file** — save the generated XML to the appropriate \`tests/\` subdirectory inside the Provar project. ${projectHint} -4. **Validate** — call \`provar.testcase.validate\` on the saved file. If it reports errors, +4. **Validate** — call \`provar_testcase_validate\` on the saved file. If it reports errors, fix them and re-validate until the file passes clean. 5. **Report** — summarise what was migrated, any steps that could not be mapped (add them @@ -93,7 +93,7 @@ Follow these steps in order: export function registerCrtMigrationPrompt(server: McpServer): void { server.prompt( 'provar.migrate.crt', - 'Convert a Copado Robotic Testing (CRT) test — either a QWord step sequence or a Robot Framework .robot file — into a Provar XML test case. Retrieves corpus examples for grounding, generates the test case, then validates it with provar.testcase.validate.', + 'Convert a Copado Robotic Testing (CRT) test — either a QWord step sequence or a Robot Framework .robot file — into a Provar XML test case. Retrieves corpus examples for grounding, generates the test case, then validates it with provar_testcase_validate.', { source: z .string() @@ -133,7 +133,7 @@ ${source} ${testName ? 
`Target test case name: ${testName}` : 'Infer the test case name from the source content.'} -Begin with step 1: call provar.qualityhub.examples.retrieve.`, +Begin with step 1: call provar_qualityhub_examples_retrieve.`, }, }, ], @@ -146,7 +146,7 @@ Begin with step 1: call provar.qualityhub.examples.retrieve.`, export function registerSeleniumMigrationPrompt(server: McpServer): void { server.prompt( 'provar.migrate.selenium', - 'Convert a Selenium WebDriver test (Java, Python, or JavaScript) that tests a Salesforce org into a Provar XML test case. Retrieves corpus examples for grounding, generates the test case, then validates it with provar.testcase.validate.', + 'Convert a Selenium WebDriver test (Java, Python, or JavaScript) that tests a Salesforce org into a Provar XML test case. Retrieves corpus examples for grounding, generates the test case, then validates it with provar_testcase_validate.', { source: z .string() @@ -188,7 +188,7 @@ ${source} ${testName ? `Target test case name: ${testName}` : 'Infer the test case name from the class and method name.'} -Begin with step 1: call provar.qualityhub.examples.retrieve.`, +Begin with step 1: call provar_qualityhub_examples_retrieve.`, }, }, ], @@ -201,7 +201,7 @@ Begin with step 1: call provar.qualityhub.examples.retrieve.`, export function registerPlaywrightMigrationPrompt(server: McpServer): void { server.prompt( 'provar.migrate.playwright', - 'Convert a Playwright test (TypeScript or JavaScript) that tests a Salesforce org into a Provar XML test case. Retrieves corpus examples for grounding, generates the test case, then validates it with provar.testcase.validate.', + 'Convert a Playwright test (TypeScript or JavaScript) that tests a Salesforce org into a Provar XML test case. Retrieves corpus examples for grounding, generates the test case, then validates it with provar_testcase_validate.', { source: z .string() @@ -243,7 +243,7 @@ ${source} ${testName ? 
`Target test case name: ${testName}` : 'Infer the test case name from the test() block description.'} -Begin with step 1: call provar.qualityhub.examples.retrieve.`, +Begin with step 1: call provar_qualityhub_examples_retrieve.`, }, }, ], diff --git a/src/mcp/server.ts b/src/mcp/server.ts index 27b889fb..08c7cb5f 100644 --- a/src/mcp/server.ts +++ b/src/mcp/server.ts @@ -50,7 +50,7 @@ export function createProvarMcpServer(config: ServerConfig): McpServer { // ── Sanity-check tool ──────────────────────────────────────────────────────── server.tool( - 'provardx.ping', + 'provardx_ping', 'Sanity-check tool. Echoes back a message with a timestamp. Use this to verify the MCP server is reachable before calling other tools.', { message: z.string().optional().default('ping').describe('Optional message to echo back'), diff --git a/src/mcp/tools/antTools.ts b/src/mcp/tools/antTools.ts index 17801734..0d5eb506 100644 --- a/src/mcp/tools/antTools.ts +++ b/src/mcp/tools/antTools.ts @@ -67,7 +67,7 @@ const AttachmentPropertiesSchema = z.object({ export function registerAntGenerate(server: McpServer, config: ServerConfig): void { server.tool( - 'provar.ant.generate', + 'provar_ant_generate', [ 'Generate a Provar ANT build.xml file.', 'Produces the standard skeleton with Provar-Compile and Run-Test-Case tasks.', @@ -218,7 +218,7 @@ export function registerAntGenerate(server: McpServer, config: ServerConfig): vo }, (input) => { const requestId = makeRequestId(); - log('info', 'provar.ant.generate', { + log('info', 'provar_ant_generate', { requestId, output_path: input.output_path, dry_run: input.dry_run, @@ -253,7 +253,7 @@ export function registerAntGenerate(server: McpServer, config: ServerConfig): vo fs.mkdirSync(path.dirname(filePath), { recursive: true }); fs.writeFileSync(filePath, xmlContent, 'utf-8'); written = true; - log('info', 'provar.ant.generate: wrote file', { requestId, filePath }); + log('info', 'provar_ant_generate: wrote file', { requestId, filePath }); } const 
result = { @@ -275,7 +275,7 @@ export function registerAntGenerate(server: McpServer, config: ServerConfig): vo requestId, false ); - log('error', 'provar.ant.generate failed', { requestId, error: error.message }); + log('error', 'provar_ant_generate failed', { requestId, error: error.message }); return { isError: true, content: [{ type: 'text' as const, text: JSON.stringify(errResult) }] }; } } @@ -286,7 +286,7 @@ export function registerAntGenerate(server: McpServer, config: ServerConfig): vo export function registerAntValidate(server: McpServer, config: ServerConfig): void { server.tool( - 'provar.ant.validate', + 'provar_ant_validate', [ 'Validate a Provar ANT build.xml for structural correctness.', 'Checks XML well-formedness, required declarations, step,', @@ -299,7 +299,7 @@ export function registerAntValidate(server: McpServer, config: ServerConfig): vo }, ({ content, file_path }) => { const requestId = makeRequestId(); - log('info', 'provar.ant.validate', { requestId, has_content: !!content, file_path }); + log('info', 'provar_ant_validate', { requestId, has_content: !!content, file_path }); try { let source = content; @@ -333,7 +333,7 @@ export function registerAntValidate(server: McpServer, config: ServerConfig): vo requestId, false ); - log('error', 'provar.ant.validate failed', { requestId, error: error.message }); + log('error', 'provar_ant_validate failed', { requestId, error: error.message }); return { isError: true, content: [{ type: 'text' as const, text: JSON.stringify(errResult) }] }; } } diff --git a/src/mcp/tools/automationTools.ts b/src/mcp/tools/automationTools.ts index 42b4038b..e3406bd5 100644 --- a/src/mcp/tools/automationTools.ts +++ b/src/mcp/tools/automationTools.ts @@ -216,16 +216,16 @@ function handleSpawnError( }; } -// ── Tool: provar.automation.config.load ────────────────────────────────────── +// ── Tool: provar_automation_config_load ────────────────────────────────────── export function registerAutomationConfigLoad(server: 
McpServer, config: ServerConfig): void { server.tool( - 'provar.automation.config.load', + 'provar_automation_config_load', [ 'Register a provardx-properties.json file as the active Provar configuration.', 'Invokes `sf provar automation config load --properties-file `, writing the path to ~/.sf/config.json.', - 'REQUIRED before provar.automation.compile or provar.automation.testrun — without this step those commands fail with MISSING_FILE.', - 'Typical workflow: provar.automation.config.load → provar.automation.compile → provar.automation.testrun.', + 'REQUIRED before provar_automation_compile or provar_automation_testrun — without this step those commands fail with MISSING_FILE.', + 'Typical workflow: provar_automation_config_load → provar_automation_compile → provar_automation_testrun.', ].join(' '), { properties_path: z @@ -238,7 +238,7 @@ export function registerAutomationConfigLoad(server: McpServer, config: ServerCo }, ({ properties_path, sf_path }) => { const requestId = makeRequestId(); - log('info', 'provar.automation.config.load', { requestId, properties_path }); + log('info', 'provar_automation_config_load', { requestId, properties_path }); try { assertPathAllowed(properties_path, config.allowedPaths); @@ -278,7 +278,7 @@ export function registerAutomationConfigLoad(server: McpServer, config: ServerCo ], }; } - return handleSpawnError(err, requestId, 'provar.automation.config.load'); + return handleSpawnError(err, requestId, 'provar_automation_config_load'); } } ); @@ -317,7 +317,7 @@ export function filterTestRunOutput(raw: string): { filtered: string; suppressed let filtered = kept.join('\n'); if (suppressed > 0) { - filtered += `\n[testrun: ${suppressed} lines suppressed (schema validator / logger noise) — use provar.testrun.rca for full results]`; + filtered += `\n[testrun: ${suppressed} lines suppressed (schema validator / logger noise) — use provar_testrun_rca for full results]`; } return { filtered, suppressed }; } @@ -384,17 +384,17 @@ function 
readResultsPathFromSfConfig(config: ServerConfig): string | null { } } -// ── Tool: provar.automation.testrun ─────────────────────────────────────────── +// ── Tool: provar_automation_testrun ─────────────────────────────────────────── export function registerAutomationTestRun(server: McpServer, config: ServerConfig): void { server.tool( - 'provar.automation.testrun', + 'provar_automation_testrun', [ 'Trigger a LOCAL Provar automation test run using installed Provar binaries. Invokes `sf provar automation test run`.', - 'PREREQUISITE: Run provar.automation.config.load first to register a provardx-properties.json — without this the command fails with MISSING_FILE.', + 'PREREQUISITE: Run provar_automation_config_load first to register a provardx-properties.json — without this the command fails with MISSING_FILE.', 'Requires Provar to be installed locally and provarHome set correctly in the properties file.', - 'Use provar.automation.setup first if Provar is not yet installed.', - 'For grid/CI execution via Provar Quality Hub instead of running locally, use provar.qualityhub.testrun.', + 'Use provar_automation_setup first if Provar is not yet installed.', + 'For grid/CI execution via Provar Quality Hub instead of running locally, use provar_qualityhub_testrun.', 'Typical local AI loop: config.load → compile → testrun → inspect results.', ].join(' '), { @@ -410,7 +410,7 @@ export function registerAutomationTestRun(server: McpServer, config: ServerConfi }, ({ flags, sf_path }) => { const requestId = makeRequestId(); - log('info', 'provar.automation.testrun', { requestId }); + log('info', 'provar_automation_testrun', { requestId }); try { const result = runSfCommand(['provar', 'automation', 'test', 'run', ...flags], sf_path); @@ -435,7 +435,7 @@ export function registerAutomationTestRun(server: McpServer, config: ServerConfi errBody['details'] = { warning: junitWarning ?? - 'Could not locate results directory — step-level output unavailable. 
Run provar.automation.config.load first.', + 'Could not locate results directory — step-level output unavailable. Run provar_automation_config_load first.', }; } return { isError: true as const, content: [{ type: 'text' as const, text: JSON.stringify(errBody) }] }; @@ -452,20 +452,20 @@ export function registerAutomationTestRun(server: McpServer, config: ServerConfi if (junitWarning) response['details'] = { warning: junitWarning }; return { content: [{ type: 'text' as const, text: JSON.stringify(response) }], structuredContent: response }; } catch (err) { - return handleSpawnError(err, requestId, 'provar.automation.testrun'); + return handleSpawnError(err, requestId, 'provar_automation_testrun'); } } ); } -// ── Tool: provar.automation.compile ─────────────────────────────────────────── +// ── Tool: provar_automation_compile ─────────────────────────────────────────── export function registerAutomationCompile(server: McpServer): void { server.tool( - 'provar.automation.compile', + 'provar_automation_compile', [ 'Compile a Provar automation project. 
Invokes `sf provar automation project compile`.', - 'PREREQUISITE: Run provar.automation.config.load first to register a provardx-properties.json — without this the command fails with MISSING_FILE.', + 'PREREQUISITE: Run provar_automation_config_load first to register a provardx-properties.json — without this the command fails with MISSING_FILE.', 'Run this before triggering a test run after modifying test cases.', ].join(' '), { @@ -481,7 +481,7 @@ export function registerAutomationCompile(server: McpServer): void { }, ({ flags, sf_path }) => { const requestId = makeRequestId(); - log('info', 'provar.automation.compile', { requestId }); + log('info', 'provar_automation_compile', { requestId }); try { const result = runSfCommand(['provar', 'automation', 'project', 'compile', ...flags], sf_path); @@ -501,13 +501,13 @@ export function registerAutomationCompile(server: McpServer): void { return { content: [{ type: 'text' as const, text: JSON.stringify(response) }], structuredContent: response }; } catch (err) { - return handleSpawnError(err, requestId, 'provar.automation.compile'); + return handleSpawnError(err, requestId, 'provar_automation_compile'); } } ); } -// ── Tool: provar.automation.metadata.download ───────────────────────────────── +// ── Tool: provar_automation_metadata_download ───────────────────────────────── const DOWNLOAD_ERROR_SUGGESTION = 'A [DOWNLOAD_ERROR] almost always means a Salesforce authentication failure for the connection being used. 
' + @@ -518,11 +518,11 @@ const DOWNLOAD_ERROR_SUGGESTION = export function registerAutomationMetadataDownload(server: McpServer): void { server.tool( - 'provar.automation.metadata.download', + 'provar_automation_metadata_download', [ 'Download Salesforce metadata for one or more connections into a Provar project.', 'Invokes `sf provar automation metadata download`.', - 'PREREQUISITE: Call provar.automation.config.load first — without it the command fails with MISSING_FILE.', + 'PREREQUISITE: Call provar_automation_config_load first — without it the command fails with MISSING_FILE.', 'Use the -c flag to specify connections: flags: ["-c", "ConnectionName1,ConnectionName2"].', 'Connection names are case-sensitive and must match the names defined in the Provar project.', 'If the download fails with [DOWNLOAD_ERROR], this is almost always a Salesforce authentication issue —', @@ -543,7 +543,7 @@ export function registerAutomationMetadataDownload(server: McpServer): void { }, ({ flags, sf_path }) => { const requestId = makeRequestId(); - log('info', 'provar.automation.metadata.download', { requestId }); + log('info', 'provar_automation_metadata_download', { requestId }); try { const result = runSfCommand(['provar', 'automation', 'metadata', 'download', ...flags], sf_path); @@ -568,13 +568,13 @@ export function registerAutomationMetadataDownload(server: McpServer): void { const response = { requestId, exitCode: result.exitCode, stdout: result.stdout, stderr: result.stderr }; return { content: [{ type: 'text' as const, text: JSON.stringify(response) }], structuredContent: response }; } catch (err) { - return handleSpawnError(err, requestId, 'provar.automation.metadata.download'); + return handleSpawnError(err, requestId, 'provar_automation_metadata_download'); } } ); } -// ── Tool: provar.automation.setup ───────────────────────────────────────────── +// ── Tool: provar_automation_setup ───────────────────────────────────────────── /** Known system-level Provar install 
paths per platform. */ const SYSTEM_INSTALL_BASES: Record = { @@ -663,7 +663,7 @@ function findExistingInstallations(): ProvarInstall[] { export function registerAutomationSetup(server: McpServer): void { server.tool( - 'provar.automation.setup', + 'provar_automation_setup', [ 'Download and install Provar Automation binaries locally. Invokes `sf provar automation setup`.', 'Before downloading, checks for existing Provar installations in:', @@ -672,7 +672,7 @@ export function registerAutomationSetup(server: McpServer): void { ' • C:\\Program Files\\Provar* (Windows system installs)', ' • /Applications/Provar* (macOS app installs)', 'If an existing installation is found, returns its path so you can set provarHome in the properties file — skipping the download unless force is true.', - 'After a successful install, update the provarHome property in provardx-properties.json to the returned install_path using provar.properties.set.', + 'After a successful install, update the provarHome property in provardx-properties.json to the returned install_path using provar_properties_set.', ].join(' '), { version: z @@ -691,7 +691,7 @@ export function registerAutomationSetup(server: McpServer): void { }, ({ version, force, sf_path }) => { const requestId = makeRequestId(); - log('info', 'provar.automation.setup', { requestId, version, force }); + log('info', 'provar_automation_setup', { requestId, version, force }); try { // ── 1. 
Check for existing installations ────────────────────────────── @@ -750,7 +750,7 @@ export function registerAutomationSetup(server: McpServer): void { version: detectedVersion, message: [ `Provar Automation installed successfully at: ${installPath}.`, - 'Update provarHome in your provardx-properties.json to this path using provar.properties.set.', + 'Update provarHome in your provardx-properties.json to this path using provar_properties_set.', ].join(' '), }; return { @@ -758,7 +758,7 @@ export function registerAutomationSetup(server: McpServer): void { structuredContent: response, }; } catch (err) { - return handleSpawnError(err, requestId, 'provar.automation.setup'); + return handleSpawnError(err, requestId, 'provar_automation_setup'); } } ); diff --git a/src/mcp/tools/connectionTools.ts b/src/mcp/tools/connectionTools.ts index c500e75f..43d5b600 100644 --- a/src/mcp/tools/connectionTools.ts +++ b/src/mcp/tools/connectionTools.ts @@ -132,12 +132,12 @@ function parseEnvironmentList(content: string): EnvironmentEntry[] { export function registerConnectionList(server: McpServer, config: ServerConfig): void { server.tool( - 'provar.connection.list', + 'provar_connection_list', [ 'List all connections and named environments defined in the .testproject file.', 'Use this before generating test cases or page objects to get the correct connection names.', 'Returns connections (name, type, url, sso_configured) and environments (name, connection, url).', - 'Prerequisite: the project must have a .testproject file — run provar.project.validate first if unsure.', + 'Prerequisite: the project must have a .testproject file — run provar_project_validate first if unsure.', 'Security: only connection names, types, and URLs are returned — credential values from .secrets are never included.', ].join(' '), { @@ -147,7 +147,7 @@ export function registerConnectionList(server: McpServer, config: ServerConfig): }, ({ project_path }) => { const requestId = makeRequestId(); - log('info', 
'provar.connection.list', { requestId, project_path }); + log('info', 'provar_connection_list', { requestId, project_path }); try { const resolvedPath = path.resolve(project_path); @@ -157,10 +157,10 @@ export function registerConnectionList(server: McpServer, config: ServerConfig): if (!fs.existsSync(testProjectPath)) { const err = makeError( 'CONNECTION_FILE_NOT_FOUND', - `No .testproject file found at: ${testProjectPath}. Run provar.project.validate first to confirm the project structure.`, + `No .testproject file found at: ${testProjectPath}. Run provar_project_validate first to confirm the project structure.`, requestId, false, - { suggestion: 'Run provar.project.validate with the project_path to confirm the project root, then retry.' } + { suggestion: 'Run provar_project_validate with the project_path to confirm the project root, then retry.' } ); return { isError: true, content: [{ type: 'text' as const, text: JSON.stringify(err) }] }; } @@ -205,7 +205,7 @@ export function registerConnectionList(server: McpServer, config: ServerConfig): requestId, false ); - log('error', 'provar.connection.list failed', { requestId, error: error.message }); + log('error', 'provar_connection_list failed', { requestId, error: error.message }); return { isError: true, content: [{ type: 'text' as const, text: JSON.stringify(errResult) }] }; } } diff --git a/src/mcp/tools/defectTools.ts b/src/mcp/tools/defectTools.ts index 9dc9685a..a8fd76b7 100644 --- a/src/mcp/tools/defectTools.ts +++ b/src/mcp/tools/defectTools.ts @@ -59,9 +59,12 @@ function formatSfCommandError(action: string, exitCode: number, stderr: string, function runQuery(soql: string, targetOrg: string): SfQueryResponse { const { stdout, stderr, exitCode } = runSfArgs([ - 'data', 'query', - '--query', soql, - '--target-org', targetOrg, + 'data', + 'query', + '--query', + soql, + '--target-org', + targetOrg, '--json', ]); if (exitCode !== 0) { @@ -72,10 +75,15 @@ function runQuery(soql: string, targetOrg: string): 
SfQueryResponse { function createRecord(sobject: string, values: string, targetOrg: string): string { const { stdout, stderr, exitCode } = runSfArgs([ - 'data', 'create', 'record', - '--sobject', sobject, - '--values', values, - '--target-org', targetOrg, + 'data', + 'create', + 'record', + '--sobject', + sobject, + '--values', + values, + '--target-org', + targetOrg, '--json', ]); if (exitCode !== 0) { @@ -91,10 +99,7 @@ function createRecord(sobject: string, values: string, targetOrg: string): strin /** Strip characters unsafe for sf --values double-quoted strings and truncate. */ function safeText(value: unknown, maxLen = 200): string { if (value === null || value === undefined) return ''; - return String(value) - .replace(/"/g, "'") - .replace(/\n|\r/g, ' ') - .substring(0, maxLen); + return String(value).replace(/"/g, "'").replace(/\n|\r/g, ' ').substring(0, maxLen); } // ── Core defect creation logic (exported for auto-defects use) ───────────────── @@ -159,7 +164,9 @@ export function createDefectsForRun( // if needed; here we filter by TC record ID substring for flexibility) if (failedTestFilter && failedTestFilter.length > 0) { executions = executions.filter((e) => - failedTestFilter.some((f) => String(e['provar__Test_Case__c']).includes(f) || f.includes(String(e['provar__Test_Case__c']))) + failedTestFilter.some( + (f) => String(e['provar__Test_Case__c']).includes(f) || f.includes(String(e['provar__Test_Case__c'])) + ) ); } @@ -236,19 +243,14 @@ export function createDefectsForRun( `provar__Test_Execution__c="${executionId}"` + (stepExecutionId ? 
` provar__Test_Step_Execution__c="${stepExecutionId}"` : ''); - const execDefectId = createRecord( - 'provar__Test_Execution_Defect__c', - execDefectValues, - targetOrg - ); + const execDefectId = createRecord('provar__Test_Execution_Defect__c', execDefectValues, targetOrg); log('info', 'defect created', { defectId, tcDefectId, execDefectId, executionId }); created.push({ defectId, tcDefectId, execDefectId, executionId, testCaseId }); } - const syncNote = - 'If Jira or ADO sync is enabled in your Quality Hub org, these defects will sync automatically.'; + const syncNote = 'If Jira or ADO sync is enabled in your Quality Hub org, these defects will sync automatically.'; return { created, skipped: execQuery.result.totalSize - created.length, @@ -260,7 +262,7 @@ export function createDefectsForRun( export function registerQualityHubDefectCreate(server: McpServer): void { server.tool( - 'provar.qualityhub.defect.create', + 'provar_qualityhub_defect_create', [ 'Create Defect__c records in Quality Hub for failed test executions in a given run.', 'Queries the run by Tracking_Id__c, finds failed Test_Execution__c records, creates a', @@ -269,9 +271,7 @@ export function registerQualityHubDefectCreate(server: McpServer): void { 'If Jira or ADO sync is configured in Quality Hub, defects sync to those systems automatically.', ].join(' '), { - run_id: z - .string() - .describe('Test run Tracking_Id__c value returned by provar.qualityhub.testrun'), + run_id: z.string().describe('Test run Tracking_Id__c value returned by provar_qualityhub_testrun'), target_org: z.string().describe('SF org alias or username for the Quality Hub org'), failed_tests: z .array(z.string()) @@ -282,7 +282,7 @@ export function registerQualityHubDefectCreate(server: McpServer): void { }, ({ run_id, target_org, failed_tests }) => { const requestId = makeRequestId(); - log('info', 'provar.qualityhub.defect.create', { requestId, run_id, target_org }); + log('info', 'provar_qualityhub_defect_create', { 
requestId, run_id, target_org }); try { const result = createDefectsForRun(run_id, target_org, failed_tests); @@ -293,7 +293,7 @@ export function registerQualityHubDefectCreate(server: McpServer): void { }; } catch (err) { const error = err as Error & { code?: string }; - log('error', 'provar.qualityhub.defect.create failed', { + log('error', 'provar_qualityhub_defect_create failed', { requestId, error: error.message, }); @@ -302,9 +302,7 @@ export function registerQualityHubDefectCreate(server: McpServer): void { content: [ { type: 'text' as const, - text: JSON.stringify( - makeError(error.code ?? 'DEFECT_CREATE_FAILED', error.message, requestId, false) - ), + text: JSON.stringify(makeError(error.code ?? 'DEFECT_CREATE_FAILED', error.message, requestId, false)), }, ], }; diff --git a/src/mcp/tools/hierarchyValidate.ts b/src/mcp/tools/hierarchyValidate.ts index 80b051f7..b8a742de 100644 --- a/src/mcp/tools/hierarchyValidate.ts +++ b/src/mcp/tools/hierarchyValidate.ts @@ -54,7 +54,7 @@ export interface ProjectContext { secretsPasswordSet?: boolean; /** * Number of keys in the .secrets file whose values are NOT wrapped in ENC1(). - * Populated by provar.project.inspect → secrets_validation.unencrypted_key_count. + * Populated by provar_project_inspect → secrets_validation.unencrypted_key_count. * Any value > 0 triggers PROJ-ENC-001. */ unencrypted_secret_count?: number; @@ -150,33 +150,116 @@ export interface HierarchySummary { // ── Rule registry (ported from batch_validator/handler.py) ─────────────────── const RULE_REGISTRY: Record = { - 'SUITE-EMPTY-001': { name: 'Empty Test Suite', description: 'A test suite must contain at least one test case or child suite.' }, - 'SUITE-DUP-001': { name: 'Duplicate Test Case Name in Suite', description: 'Test case names within a suite must be unique.' }, - 'SUITE-DUP-002': { name: 'Duplicate Child Suite Name', description: 'Child suite names within a parent suite must be unique.' 
}, - 'SUITE-SIZE-001': { name: 'Oversized Test Suite', description: 'A test suite should contain no more than 75 test cases.' }, - 'SUITE-NAMING-001': { name: 'Inconsistent Child Suite Naming', description: 'All child suite names should follow a consistent naming convention.' }, - 'SUITE-NAMING-002': { name: 'Inconsistent Test Case Naming', description: 'All test case names within a suite should follow a consistent naming convention.' }, - 'PLAN-EMPTY-001': { name: 'Empty Test Plan', description: 'A test plan must contain at least one test suite.' }, - 'PLAN-DUP-001': { name: 'Duplicate Suite Name in Plan', description: 'Suite names within a test plan must be unique.' }, - 'PLAN-META-001': { name: 'Missing Plan Objectives', description: 'Test plans should define clear testing objectives. This field is configured in the Provar Quality Hub app (not stored in local project files).' }, - 'PLAN-META-002': { name: 'Missing In-Scope Definition', description: 'Test plans should specify which features are in scope. This field is configured in the Provar Quality Hub app (not stored in local project files).' }, - 'PLAN-META-003': { name: 'Missing Testing Methodology', description: 'Test plans should document the testing methodology. This field is configured in the Provar Quality Hub app (not stored in local project files).' }, - 'PLAN-META-004': { name: 'Missing Acceptance Criteria', description: 'Test plans should define acceptance criteria or an acceptable pass rate. This field is configured in the Provar Quality Hub app (not stored in local project files).' }, - 'PLAN-META-005': { name: 'Missing Test Environments', description: 'Test plans should specify the target test environments. This field is configured in the Provar Quality Hub app (not stored in local project files).' }, - 'PLAN-META-006': { name: 'Missing Test Data Strategy', description: 'Test plans should document how test data will be prepared and cleaned up. 
This field is configured in the Provar Quality Hub app (not stored in local project files).' }, - 'PLAN-META-007': { name: 'Missing Risk Assessment', description: 'Test plans should identify potential risks and mitigations. This field is configured in the Provar Quality Hub app (not stored in local project files).' }, - 'PLAN-SIZE-001': { name: 'Oversized Test Plan', description: 'A test plan should contain no more than 20 test suites.' }, - 'PLAN-NAMING-001': { name: 'Inconsistent Suite Naming in Plan', description: 'All suite names within a test plan should follow a consistent naming convention.' }, - 'PROJ-EMPTY-001': { name: 'Empty Project', description: 'A project must contain at least one test plan.' }, - 'PROJ-DUP-001': { name: 'Duplicate Test Case Name Across Project', description: 'Test case names must be unique across the entire project.' }, - 'PROJ-DUP-002': { name: 'Duplicate Plan Name in Project', description: 'Test plan names within a project must be unique.' }, - 'PROJ-CALLABLE-001': { name: 'Unresolved caseCall Reference', description: 'All caseCall references must point to existing test cases in the project.' }, - 'PROJ-CALLABLE-002': { name: 'Missing Callable Test Case', description: 'All callable tests referenced by caseCall must exist in the project.' }, - 'PROJ-CONN-001': { name: 'Undefined Connection Name', description: 'All connection names used in test cases must be defined in the project context.' }, - 'PROJ-ENV-001': { name: 'Duplicate Environment Name', description: 'Environment names within a project must be unique.' }, - 'PROJ-ENV-002': { name: 'Invalid Environment Name', description: 'Environment names must contain only letters, digits, and underscores.' }, - 'PROJ-SECRET-001': { name: 'Missing Secrets Password', description: 'A Provar Secrets Password must be configured to protect sensitive test data.' 
}, - 'PROJ-ENC-001': { name: 'Unencrypted Credentials in .secrets', description: 'All values in the .secrets file must be wrapped in ENC1() encryption. Plaintext credentials are a critical security risk.' }, + 'SUITE-EMPTY-001': { + name: 'Empty Test Suite', + description: 'A test suite must contain at least one test case or child suite.', + }, + 'SUITE-DUP-001': { + name: 'Duplicate Test Case Name in Suite', + description: 'Test case names within a suite must be unique.', + }, + 'SUITE-DUP-002': { + name: 'Duplicate Child Suite Name', + description: 'Child suite names within a parent suite must be unique.', + }, + 'SUITE-SIZE-001': { + name: 'Oversized Test Suite', + description: 'A test suite should contain no more than 75 test cases.', + }, + 'SUITE-NAMING-001': { + name: 'Inconsistent Child Suite Naming', + description: 'All child suite names should follow a consistent naming convention.', + }, + 'SUITE-NAMING-002': { + name: 'Inconsistent Test Case Naming', + description: 'All test case names within a suite should follow a consistent naming convention.', + }, + 'PLAN-EMPTY-001': { name: 'Empty Test Plan', description: 'A test plan must contain at least one test suite.' }, + 'PLAN-DUP-001': { + name: 'Duplicate Suite Name in Plan', + description: 'Suite names within a test plan must be unique.', + }, + 'PLAN-META-001': { + name: 'Missing Plan Objectives', + description: + 'Test plans should define clear testing objectives. This field is configured in the Provar Quality Hub app (not stored in local project files).', + }, + 'PLAN-META-002': { + name: 'Missing In-Scope Definition', + description: + 'Test plans should specify which features are in scope. This field is configured in the Provar Quality Hub app (not stored in local project files).', + }, + 'PLAN-META-003': { + name: 'Missing Testing Methodology', + description: + 'Test plans should document the testing methodology. 
This field is configured in the Provar Quality Hub app (not stored in local project files).', + }, + 'PLAN-META-004': { + name: 'Missing Acceptance Criteria', + description: + 'Test plans should define acceptance criteria or an acceptable pass rate. This field is configured in the Provar Quality Hub app (not stored in local project files).', + }, + 'PLAN-META-005': { + name: 'Missing Test Environments', + description: + 'Test plans should specify the target test environments. This field is configured in the Provar Quality Hub app (not stored in local project files).', + }, + 'PLAN-META-006': { + name: 'Missing Test Data Strategy', + description: + 'Test plans should document how test data will be prepared and cleaned up. This field is configured in the Provar Quality Hub app (not stored in local project files).', + }, + 'PLAN-META-007': { + name: 'Missing Risk Assessment', + description: + 'Test plans should identify potential risks and mitigations. This field is configured in the Provar Quality Hub app (not stored in local project files).', + }, + 'PLAN-SIZE-001': { + name: 'Oversized Test Plan', + description: 'A test plan should contain no more than 20 test suites.', + }, + 'PLAN-NAMING-001': { + name: 'Inconsistent Suite Naming in Plan', + description: 'All suite names within a test plan should follow a consistent naming convention.', + }, + 'PROJ-EMPTY-001': { name: 'Empty Project', description: 'A project must contain at least one test plan.' 
}, + 'PROJ-DUP-001': { + name: 'Duplicate Test Case Name Across Project', + description: 'Test case names must be unique across the entire project.', + }, + 'PROJ-DUP-002': { + name: 'Duplicate Plan Name in Project', + description: 'Test plan names within a project must be unique.', + }, + 'PROJ-CALLABLE-001': { + name: 'Unresolved caseCall Reference', + description: 'All caseCall references must point to existing test cases in the project.', + }, + 'PROJ-CALLABLE-002': { + name: 'Missing Callable Test Case', + description: 'All callable tests referenced by caseCall must exist in the project.', + }, + 'PROJ-CONN-001': { + name: 'Undefined Connection Name', + description: 'All connection names used in test cases must be defined in the project context.', + }, + 'PROJ-ENV-001': { + name: 'Duplicate Environment Name', + description: 'Environment names within a project must be unique.', + }, + 'PROJ-ENV-002': { + name: 'Invalid Environment Name', + description: 'Environment names must contain only letters, digits, and underscores.', + }, + 'PROJ-SECRET-001': { + name: 'Missing Secrets Password', + description: 'A Provar Secrets Password must be configured to protect sensitive test data.', + }, + 'PROJ-ENC-001': { + name: 'Unencrypted Credentials in .secrets', + description: + 'All values in the .secrets file must be wrapped in ENC1() encryption. 
Plaintext credentials are a critical security risk.', + }, }; // ── Violation builder ───────────────────────────────────────────────────────── @@ -210,7 +293,10 @@ function makeViolation( // ── Scoring helpers ─────────────────────────────────────────────────────────── const SEVERITY_MULTIPLIER: Record = { - critical: 1.0, major: 0.75, minor: 0.5, info: 0.25, + critical: 1.0, + major: 0.75, + minor: 0.5, + info: 0.25, }; export function computeViolationDeduction(violations: HierarchyViolation[]): number { @@ -264,20 +350,22 @@ export function checkNamingConsistency( .map(([s, items]) => `${s}: "${items[0]}"`) .join(', '); - return [makeViolation( - ruleId, 'NamingConventions', 'info', 2, - `Inconsistent naming conventions in ${contextLabel}: ${styleSummary}`, - `Adopt a single naming convention across all items. Examples found: ${examples}`, - [level], - )]; + return [ + makeViolation( + ruleId, + 'NamingConventions', + 'info', + 2, + `Inconsistent naming conventions in ${contextLabel}: ${styleSummary}`, + `Adopt a single naming convention across all items. Examples found: ${examples}`, + [level] + ), + ]; } // ── Test case validation (adapts existing validator to hierarchy result) ─────── -export function validateHierarchyTestCase( - tc: TestCaseInput, - qualityThreshold: number -): TestCaseResult { +export function validateHierarchyTestCase(tc: TestCaseInput, qualityThreshold: number): TestCaseResult { // Accept both `xml` (API standard) and `xml_content` (MCP original); prefer `xml` const xmlSource = tc.xml ?? tc.xml_content ?? 
''; try { @@ -307,12 +395,14 @@ export function validateHierarchyTestCase( error_count: 1, warning_count: 0, step_count: 0, - issues: [{ - rule_id: 'TC_000', - severity: 'ERROR', - message: `Unexpected error: ${(e as Error).message}`, - applies_to: 'document', - }], + issues: [ + { + rule_id: 'TC_000', + severity: 'ERROR', + message: `Unexpected error: ${(e as Error).message}`, + applies_to: 'document', + }, + ], best_practices_violations: [], }; } @@ -327,12 +417,17 @@ function checkSuiteStructure( ): HierarchyViolation[] { const violations: HierarchyViolation[] = []; if (!childSuites.length && !childCases.length) { - violations.push(makeViolation( - 'SUITE-EMPTY-001', 'SuiteStructure', 'major', 5, - `Test suite "${suiteName}" contains no test cases or child suites`, - `Add at least one test case or child suite to "${suiteName}"`, - ['TestSuite'], - )); + violations.push( + makeViolation( + 'SUITE-EMPTY-001', + 'SuiteStructure', + 'major', + 5, + `Test suite "${suiteName}" contains no test cases or child suites`, + `Add at least one test case or child suite to "${suiteName}"`, + ['TestSuite'] + ) + ); } return violations; } @@ -348,13 +443,18 @@ function checkSuiteDuplicates( for (const tc of childCases) { const base = path.basename(tc.name, path.extname(tc.name)).toLowerCase(); if (seenCases[base]) { - violations.push(makeViolation( - 'SUITE-DUP-001', 'Maintainability', 'major', 5, - `Duplicate test case name "${base}" in suite "${suiteName}"`, - 'Ensure all test case names within a suite are unique', - ['TestSuite'], - [seenCases[base], tc.name], - )); + violations.push( + makeViolation( + 'SUITE-DUP-001', + 'Maintainability', + 'major', + 5, + `Duplicate test case name "${base}" in suite "${suiteName}"`, + 'Ensure all test case names within a suite are unique', + ['TestSuite'], + [seenCases[base], tc.name] + ) + ); } else { seenCases[base] = tc.name; } @@ -364,12 +464,17 @@ function checkSuiteDuplicates( for (const cs of childSuites) { const lower = 
cs.name.toLowerCase(); if (seenSuites.has(lower)) { - violations.push(makeViolation( - 'SUITE-DUP-002', 'Maintainability', 'major', 5, - `Duplicate child suite name "${cs.name}" in suite "${suiteName}"`, - 'Ensure all child suite names are unique within a parent suite', - ['TestSuite'], - )); + violations.push( + makeViolation( + 'SUITE-DUP-002', + 'Maintainability', + 'major', + 5, + `Duplicate child suite name "${cs.name}" in suite "${suiteName}"`, + 'Ensure all child suite names are unique within a parent suite', + ['TestSuite'] + ) + ); } seenSuites.add(lower); } @@ -378,22 +483,24 @@ function checkSuiteDuplicates( function checkSuiteSize(suiteName: string, data: TestSuiteInput): HierarchyViolation[] { const explicit = data.test_case_count; - const count = explicit ?? (data.test_cases?.length ?? 0); + const count = explicit ?? data.test_cases?.length ?? 0; if (count > 75) { - return [makeViolation( - 'SUITE-SIZE-001', 'SuiteStructure', 'minor', 2, - `Test suite "${suiteName}" contains ${count} test cases (recommended maximum: 75)`, - 'Break large suites into smaller, focused child suites', - ['TestSuite'], - )]; + return [ + makeViolation( + 'SUITE-SIZE-001', + 'SuiteStructure', + 'minor', + 2, + `Test suite "${suiteName}" contains ${count} test cases (recommended maximum: 75)`, + 'Break large suites into smaller, focused child suites', + ['TestSuite'] + ), + ]; } return []; } -export function validateSuite( - data: TestSuiteInput, - qualityThreshold: number -): SuiteResult { +export function validateSuite(data: TestSuiteInput, qualityThreshold: number): SuiteResult { const suiteName = data.name || 'Unnamed Suite'; const childSuites = data.test_suites ?? []; const childCases = data.test_cases ?? 
[]; @@ -402,17 +509,24 @@ export function validateSuite( ...checkSuiteStructure(suiteName, childSuites, childCases), ...checkSuiteDuplicates(suiteName, childSuites, childCases), ...checkSuiteSize(suiteName, data), - ...checkNamingConsistency(childSuites.map((cs) => cs.name), `suite "${suiteName}" child suites`, 'TestSuite', 'SUITE-NAMING-001'), - ...checkNamingConsistency(childCases.map((tc) => tc.name), `suite "${suiteName}" test cases`, 'TestSuite', 'SUITE-NAMING-002'), + ...checkNamingConsistency( + childSuites.map((cs) => cs.name), + `suite "${suiteName}" child suites`, + 'TestSuite', + 'SUITE-NAMING-001' + ), + ...checkNamingConsistency( + childCases.map((tc) => tc.name), + `suite "${suiteName}" test cases`, + 'TestSuite', + 'SUITE-NAMING-002' + ), ]; const suiteResults = childSuites.map((cs) => validateSuite(cs, qualityThreshold)); const caseResults = childCases.map((tc) => validateHierarchyTestCase(tc, qualityThreshold)); - const scores = [ - ...caseResults.map((r) => r.quality_score), - ...suiteResults.map((r) => r.quality_score), - ]; + const scores = [...caseResults.map((r) => r.quality_score), ...suiteResults.map((r) => r.quality_score)]; const quality_score = scores.length ? 
Math.max(0, Math.round((childAvg(scores) - computeViolationDeduction(violations)) * 100) / 100) : 0; @@ -431,20 +545,94 @@ export function validateSuite( function checkPlanMetadata(planName: string, meta: PlanMetadata): HierarchyViolation[] { const v: HierarchyViolation[] = []; - if (!meta.objectives) v.push(makeViolation('PLAN-META-001', 'PlanCompleteness', 'info', 1, `Test plan "${planName}" has no objectives defined`, 'Add testing objectives via the Provar Quality Hub app', ['TestPlan'])); - if (!meta.in_scope) v.push(makeViolation('PLAN-META-002', 'PlanCompleteness', 'info', 1, `Test plan "${planName}" has no in-scope definition`, 'Specify in-scope features via the Provar Quality Hub app', ['TestPlan'])); - if (!meta.testing_methodology) v.push(makeViolation('PLAN-META-003', 'PlanCompleteness', 'info', 1, `Test plan "${planName}" has no testing methodology defined`, 'Document the testing methodology via the Provar Quality Hub app', ['TestPlan'])); - if (!meta.acceptance_criteria && meta.acceptable_pass_rate === undefined) v.push(makeViolation('PLAN-META-004', 'PlanCompleteness', 'info', 1, `Test plan "${planName}" has no acceptance criteria or acceptable pass rate`, 'Set acceptance criteria or an acceptable pass rate via the Provar Quality Hub app', ['TestPlan'])); - if (!meta.environments?.length) v.push(makeViolation('PLAN-META-005', 'PlanCompleteness', 'info', 1, `Test plan "${planName}" has no test environments defined`, 'Specify target environments (e.g., QA, Staging, UAT) via the Provar Quality Hub app', ['TestPlan'])); - if (!meta.test_data_strategy) v.push(makeViolation('PLAN-META-006', 'PlanCompleteness', 'minor', 2, `Test plan "${planName}" has no test data strategy`, 'Document the test data strategy via the Provar Quality Hub app', ['TestPlan'])); - if (!meta.risks) v.push(makeViolation('PLAN-META-007', 'PlanCompleteness', 'info', 1, `Test plan "${planName}" has no risks identified`, 'Identify risks and mitigations via the Provar Quality Hub app', 
['TestPlan'])); + if (!meta.objectives) + v.push( + makeViolation( + 'PLAN-META-001', + 'PlanCompleteness', + 'info', + 1, + `Test plan "${planName}" has no objectives defined`, + 'Add testing objectives via the Provar Quality Hub app', + ['TestPlan'] + ) + ); + if (!meta.in_scope) + v.push( + makeViolation( + 'PLAN-META-002', + 'PlanCompleteness', + 'info', + 1, + `Test plan "${planName}" has no in-scope definition`, + 'Specify in-scope features via the Provar Quality Hub app', + ['TestPlan'] + ) + ); + if (!meta.testing_methodology) + v.push( + makeViolation( + 'PLAN-META-003', + 'PlanCompleteness', + 'info', + 1, + `Test plan "${planName}" has no testing methodology defined`, + 'Document the testing methodology via the Provar Quality Hub app', + ['TestPlan'] + ) + ); + if (!meta.acceptance_criteria && meta.acceptable_pass_rate === undefined) + v.push( + makeViolation( + 'PLAN-META-004', + 'PlanCompleteness', + 'info', + 1, + `Test plan "${planName}" has no acceptance criteria or acceptable pass rate`, + 'Set acceptance criteria or an acceptable pass rate via the Provar Quality Hub app', + ['TestPlan'] + ) + ); + if (!meta.environments?.length) + v.push( + makeViolation( + 'PLAN-META-005', + 'PlanCompleteness', + 'info', + 1, + `Test plan "${planName}" has no test environments defined`, + 'Specify target environments (e.g., QA, Staging, UAT) via the Provar Quality Hub app', + ['TestPlan'] + ) + ); + if (!meta.test_data_strategy) + v.push( + makeViolation( + 'PLAN-META-006', + 'PlanCompleteness', + 'minor', + 2, + `Test plan "${planName}" has no test data strategy`, + 'Document the test data strategy via the Provar Quality Hub app', + ['TestPlan'] + ) + ); + if (!meta.risks) + v.push( + makeViolation( + 'PLAN-META-007', + 'PlanCompleteness', + 'info', + 1, + `Test plan "${planName}" has no risks identified`, + 'Identify risks and mitigations via the Provar Quality Hub app', + ['TestPlan'] + ) + ); return v; } -export function validatePlan( - data: TestPlanInput, - 
qualityThreshold: number -): PlanResult { +export function validatePlan(data: TestPlanInput, qualityThreshold: number): PlanResult { const planName = data.name || 'Unnamed Plan'; const childSuites = data.test_suites ?? []; const childCases = data.test_cases ?? []; @@ -452,19 +640,34 @@ export function validatePlan( const violations: HierarchyViolation[] = []; if (!childSuites.length && !childCases.length) { - violations.push(makeViolation( - 'PLAN-EMPTY-001', 'PlanStructure', 'major', 5, - `Test plan "${planName}" contains no test suites or test cases`, - `Add at least one test suite or test case to "${planName}"`, - ['TestPlan'], - )); + violations.push( + makeViolation( + 'PLAN-EMPTY-001', + 'PlanStructure', + 'major', + 5, + `Test plan "${planName}" contains no test suites or test cases`, + `Add at least one test suite or test case to "${planName}"`, + ['TestPlan'] + ) + ); } const seenSuites = new Set(); for (const s of childSuites) { const lower = s.name.toLowerCase(); if (seenSuites.has(lower)) { - violations.push(makeViolation('PLAN-DUP-001', 'Maintainability', 'major', 5, `Duplicate suite name "${s.name}" in plan "${planName}"`, 'Ensure all suite names within a plan are unique', ['TestPlan'])); + violations.push( + makeViolation( + 'PLAN-DUP-001', + 'Maintainability', + 'major', + 5, + `Duplicate suite name "${s.name}" in plan "${planName}"`, + 'Ensure all suite names within a plan are unique', + ['TestPlan'] + ) + ); } seenSuites.add(lower); } @@ -474,18 +677,32 @@ export function validatePlan( const suiteCount = data.test_suite_count ?? 
childSuites.length; if (suiteCount > 20) { - violations.push(makeViolation('PLAN-SIZE-001', 'PlanStructure', 'minor', 2, `Test plan "${planName}" contains ${suiteCount} test suites (recommended maximum: 20)`, 'Split the plan into multiple focused test plans', ['TestPlan'])); + violations.push( + makeViolation( + 'PLAN-SIZE-001', + 'PlanStructure', + 'minor', + 2, + `Test plan "${planName}" contains ${suiteCount} test suites (recommended maximum: 20)`, + 'Split the plan into multiple focused test plans', + ['TestPlan'] + ) + ); } - violations.push(...checkNamingConsistency(childSuites.map((s) => s.name), `plan "${planName}"`, 'TestPlan', 'PLAN-NAMING-001')); + violations.push( + ...checkNamingConsistency( + childSuites.map((s) => s.name), + `plan "${planName}"`, + 'TestPlan', + 'PLAN-NAMING-001' + ) + ); const suiteResults = childSuites.map((s) => validateSuite(s, qualityThreshold)); const caseResults = childCases.map((tc) => validateHierarchyTestCase(tc, qualityThreshold)); - const scores = [ - ...suiteResults.map((r) => r.quality_score), - ...caseResults.map((r) => r.quality_score), - ]; + const scores = [...suiteResults.map((r) => r.quality_score), ...caseResults.map((r) => r.quality_score)]; const quality_score = scores.length ? 
Math.max(0, Math.round((childAvg(scores) - computeViolationDeduction(violations)) * 100) / 100) : 0; @@ -529,7 +746,18 @@ function checkDuplicateNames(registry: Record): HierarchyViolati for (const name of Object.keys(registry)) { const base = path.basename(name, path.extname(name)).toLowerCase(); if (seen[base]) { - violations.push(makeViolation('PROJ-DUP-001', 'Maintainability', 'major', 5, `Duplicate test case name detected: "${base}"`, 'Ensure all test case names are unique across the project', ['Project'], [seen[base], name])); + violations.push( + makeViolation( + 'PROJ-DUP-001', + 'Maintainability', + 'major', + 5, + `Duplicate test case name detected: "${base}"`, + 'Ensure all test case names are unique across the project', + ['Project'], + [seen[base], name] + ) + ); } else { seen[base] = name; } @@ -537,12 +765,18 @@ function checkDuplicateNames(registry: Record): HierarchyViolati return violations; } -function checkCaseCalls(registry: Record, diskTestCaseNames: Set = new Set()): HierarchyViolation[] { +function checkCaseCalls( + registry: Record, + diskTestCaseNames: Set = new Set() +): HierarchyViolation[] { const violations: HierarchyViolation[] = []; // Include all on-disk test cases (callables have no .testinstance so they're absent from // the plan-built registry — merging diskTestCaseNames prevents false PROJ-CALLABLE-001/002) const available = new Set([...Object.keys(registry), ...diskTestCaseNames]); - const availableBase = new Set([...Object.keys(registry).map((n) => path.basename(n, path.extname(n))), ...diskTestCaseNames]); + const availableBase = new Set([ + ...Object.keys(registry).map((n) => path.basename(n, path.extname(n))), + ...diskTestCaseNames, + ]); const calledPaths = new Set(); // Group callers by missing callee — avoids N violations for the same missing test @@ -559,7 +793,9 @@ function checkCaseCalls(registry: Record, diskTestCaseNames: Set const steps = tc['steps'] as Record | undefined; if (!steps) continue; const rawCalls = 
steps['caseCall']; - const calls = rawCalls ? (Array.isArray(rawCalls) ? rawCalls : [rawCalls]) as Array> : []; + const calls = rawCalls + ? ((Array.isArray(rawCalls) ? rawCalls : [rawCalls]) as Array>) + : []; for (const call of calls) { const called = (call['@_testCasePath'] as string | undefined)?.trim() ?? ''; if (!called) continue; @@ -577,18 +813,33 @@ function checkCaseCalls(registry: Record, diskTestCaseNames: Set // One violation per unique missing callee (affected_files lists all callers) for (const [called, callers] of missingCalleeCallers) { const n = callers.size; - violations.push(makeViolation( - 'PROJ-CALLABLE-001', 'ReusabilityAndCallables', 'major', 5, - `caseCall references non-existent test: "${called}" (referenced in ${n} test case${n > 1 ? 's' : ''})`, - `Ensure "${called}" exists in the project or update the reference`, - ['Project'], - [...callers].sort() - )); + violations.push( + makeViolation( + 'PROJ-CALLABLE-001', + 'ReusabilityAndCallables', + 'major', + 5, + `caseCall references non-existent test: "${called}" (referenced in ${n} test case${n > 1 ? 
's' : ''})`, + `Ensure "${called}" exists in the project or update the reference`, + ['Project'], + [...callers].sort() + ) + ); } for (const called of calledPaths) { if (!availableBase.has(called)) { - violations.push(makeViolation('PROJ-CALLABLE-002', 'ReusabilityAndCallables', 'critical', 10, `Callable test "${called}" is referenced but does not exist in project`, `Add "${called}.testcase" to the project or remove the caseCall references`, ['Project'])); + violations.push( + makeViolation( + 'PROJ-CALLABLE-002', + 'ReusabilityAndCallables', + 'critical', + 10, + `Callable test "${called}" is referenced but does not exist in project`, + `Add "${called}.testcase" to the project or remove the caseCall references`, + ['Project'] + ) + ); } } @@ -598,7 +849,7 @@ function checkCaseCalls(registry: Record, diskTestCaseNames: Set function extractConnFromCall(call: Record): string | undefined { if (call['@_apiId'] !== 'com.provar.plugins.forcedotcom.core.testapis.ApexConnect') return undefined; const args = call['argument']; - const argArr = args ? (Array.isArray(args) ? args : [args]) as Array> : []; + const argArr = args ? ((Array.isArray(args) ? args : [args]) as Array>) : []; for (const arg of argArr) { if (arg['@_id'] !== 'connectionName') continue; const val = arg['value'] as Record | undefined; @@ -624,7 +875,9 @@ function checkConnectionConsistency(registry: Record, ctx: Proje const steps = tc['steps'] as Record | undefined; if (!steps) continue; const rawCalls = steps['apiCall']; - const calls = rawCalls ? (Array.isArray(rawCalls) ? rawCalls : [rawCalls]) as Array> : []; + const calls = rawCalls + ? ((Array.isArray(rawCalls) ? 
rawCalls : [rawCalls]) as Array>) + : []; for (const call of calls) { const conn = extractConnFromCall(call); if (conn && !defined.has(conn)) { @@ -637,7 +890,18 @@ function checkConnectionConsistency(registry: Record, ctx: Proje } for (const [conn, files] of Object.entries(undefinedConns)) { - violations.push(makeViolation('PROJ-CONN-001', 'ConnectionsAndEnvironments', 'major', 5, `Connection "${conn}" used but not defined in project`, `Ensure connection "${conn}" is defined in your Provar project`, ['Project'], files)); + violations.push( + makeViolation( + 'PROJ-CONN-001', + 'ConnectionsAndEnvironments', + 'major', + 5, + `Connection "${conn}" used but not defined in project`, + `Ensure connection "${conn}" is defined in your Provar project`, + ['Project'], + files + ) + ); } return violations; } @@ -652,11 +916,31 @@ function checkEnvironments(ctx: ProjectContext): HierarchyViolation[] { for (const env of envs) { const lower = env.toLowerCase(); if (seen.has(lower)) { - violations.push(makeViolation('PROJ-ENV-001', 'ConnectionsAndEnvironments', 'major', 5, `Duplicate environment name: "${env}"`, 'Remove or rename duplicate environments', ['Project'])); + violations.push( + makeViolation( + 'PROJ-ENV-001', + 'ConnectionsAndEnvironments', + 'major', + 5, + `Duplicate environment name: "${env}"`, + 'Remove or rename duplicate environments', + ['Project'] + ) + ); } seen.add(lower); if (!envPattern.test(env)) { - violations.push(makeViolation('PROJ-ENV-002', 'ConnectionsAndEnvironments', 'major', 5, `Environment name "${env}" contains invalid characters`, 'Use only letters, digits, and underscores for environment names', ['Project'])); + violations.push( + makeViolation( + 'PROJ-ENV-002', + 'ConnectionsAndEnvironments', + 'major', + 5, + `Environment name "${env}" contains invalid characters`, + 'Use only letters, digits, and underscores for environment names', + ['Project'] + ) + ); } } return violations; @@ -676,14 +960,34 @@ export function validateProject(data: 
ProjectInput, qualityThreshold: number): P const violations: HierarchyViolation[] = []; if (!testPlans.length && !testSuites.length && !testCases.length) { - violations.push(makeViolation('PROJ-EMPTY-001', 'ProjectStructure', 'major', 5, `Project "${projectName}" contains no test plans, test suites, or test cases`, `Add at least one test plan, test suite, or test case to "${projectName}"`, ['Project'])); + violations.push( + makeViolation( + 'PROJ-EMPTY-001', + 'ProjectStructure', + 'major', + 5, + `Project "${projectName}" contains no test plans, test suites, or test cases`, + `Add at least one test plan, test suite, or test case to "${projectName}"`, + ['Project'] + ) + ); } const seenPlans = new Set(); for (const p of testPlans) { const lower = p.name.toLowerCase(); if (seenPlans.has(lower)) { - violations.push(makeViolation('PROJ-DUP-002', 'Maintainability', 'major', 5, `Duplicate plan name "${p.name}" in project "${projectName}"`, 'Ensure all test plan names within a project are unique', ['Project'])); + violations.push( + makeViolation( + 'PROJ-DUP-002', + 'Maintainability', + 'major', + 5, + `Duplicate plan name "${p.name}" in project "${projectName}"`, + 'Ensure all test plan names within a project are unique', + ['Project'] + ) + ); } seenPlans.add(lower); } @@ -696,19 +1000,31 @@ export function validateProject(data: ProjectInput, qualityThreshold: number): P violations.push(...checkEnvironments(ctx)); if (!ctx.secretsPasswordSet) { - violations.push(makeViolation('PROJ-SECRET-001', 'ConnectionsAndEnvironments', 'major', 5, 'Provar Secrets Password is not configured for this project', 'Set a Secrets Password in the Provar project settings to encrypt sensitive test data', ['Project'])); + violations.push( + makeViolation( + 'PROJ-SECRET-001', + 'ConnectionsAndEnvironments', + 'major', + 5, + 'Provar Secrets Password is not configured for this project', + 'Set a Secrets Password in the Provar project settings to encrypt sensitive test data', + ['Project'] + 
) + ); } if (ctx.unencrypted_secret_count !== undefined && ctx.unencrypted_secret_count > 0) { - violations.push(makeViolation( - 'PROJ-ENC-001', - 'ConnectionsAndEnvironments', - 'critical', - 20, - `${ctx.unencrypted_secret_count} credential(s) in .secrets are stored as plaintext (missing ENC1() wrapper)`, - 'Re-configure the Provar Secrets Password and re-save all connections so credentials are re-encrypted', - ['Project'] - )); + violations.push( + makeViolation( + 'PROJ-ENC-001', + 'ConnectionsAndEnvironments', + 'critical', + 20, + `${ctx.unencrypted_secret_count} credential(s) in .secrets are stored as plaintext (missing ENC1() wrapper)`, + 'Re-configure the Provar Secrets Password and re-save all connections so credentials are re-encrypted', + ['Project'] + ) + ); } const scores = [ @@ -734,9 +1050,7 @@ export function validateProject(data: ProjectInput, qualityThreshold: number): P // ── Summary builder ─────────────────────────────────────────────────────────── -export function buildHierarchySummary( - result: SuiteResult | PlanResult | ProjectResult -): HierarchySummary { +export function buildHierarchySummary(result: SuiteResult | PlanResult | ProjectResult): HierarchySummary { const stats: HierarchySummary = { total_test_cases: 0, test_cases_valid: 0, @@ -755,7 +1069,8 @@ export function buildHierarchySummary( const nodeLevel = node.level; const nodeViolations = node.level === 'test_case' ? 
[] : node.violations; stats.total_violations += nodeViolations.length; - if (nodeLevel in stats.violations_by_level) stats.violations_by_level[nodeLevel as keyof typeof stats.violations_by_level] += nodeViolations.length; + if (nodeLevel in stats.violations_by_level) + stats.violations_by_level[nodeLevel as keyof typeof stats.violations_by_level] += nodeViolations.length; for (const v of nodeViolations) { if (v.severity in stats.violations_by_severity) stats.violations_by_severity[v.severity]++; } diff --git a/src/mcp/tools/nitroXTools.ts b/src/mcp/tools/nitroXTools.ts index fba1b888..7166ac9f 100644 --- a/src/mcp/tools/nitroXTools.ts +++ b/src/mcp/tools/nitroXTools.ts @@ -110,15 +110,19 @@ function validateRootProperties(obj: JsonObj, issues: NitroXIssue[]): void { // NX001: componentId must be present and a valid UUID if (obj['componentId'] === undefined || obj['componentId'] === null) { issues.push({ - rule_id: 'NX001', severity: 'ERROR', + rule_id: 'NX001', + severity: 'ERROR', message: 'componentId is required.', - applies_to: 'root', field: 'componentId', + applies_to: 'root', + field: 'componentId', }); } else if (typeof obj['componentId'] !== 'string' || !UUID_RE.test(obj['componentId'])) { issues.push({ - rule_id: 'NX001', severity: 'ERROR', + rule_id: 'NX001', + severity: 'ERROR', message: `componentId must be a valid UUID, got: "${String(obj['componentId'])}".`, - applies_to: 'root', field: 'componentId', + applies_to: 'root', + field: 'componentId', }); } @@ -128,9 +132,11 @@ function validateRootProperties(obj: JsonObj, issues: NitroXIssue[]): void { for (const field of ['name', 'type', 'pageStructureElement', 'fieldDetailsElement'] as const) { if (obj[field] === undefined || obj[field] === null) { issues.push({ - rule_id: 'NX002', severity: 'ERROR', + rule_id: 'NX002', + severity: 'ERROR', message: `Root component requires "${field}".`, - applies_to: 'root', field, + applies_to: 'root', + field, suggestion: `Add a "${field}" property to the root 
component object.`, }); } @@ -140,9 +146,11 @@ function validateRootProperties(obj: JsonObj, issues: NitroXIssue[]): void { // NX003: tagName must not contain whitespace if (typeof obj['tagName'] === 'string' && /\s/.test(obj['tagName'])) { issues.push({ - rule_id: 'NX003', severity: 'ERROR', + rule_id: 'NX003', + severity: 'ERROR', message: 'tagName should not contain spaces.', - applies_to: 'root', field: 'tagName', + applies_to: 'root', + field: 'tagName', suggestion: 'Remove whitespace from tagName.', }); } @@ -150,9 +158,11 @@ function validateRootProperties(obj: JsonObj, issues: NitroXIssue[]): void { // NX010: bodyTagName (if present) must not contain whitespace if (typeof obj['bodyTagName'] === 'string' && /\s/.test(obj['bodyTagName'])) { issues.push({ - rule_id: 'NX010', severity: 'INFO', + rule_id: 'NX010', + severity: 'INFO', message: 'bodyTagName should not contain spaces.', - applies_to: 'root', field: 'bodyTagName', + applies_to: 'root', + field: 'bodyTagName', suggestion: 'Remove whitespace from bodyTagName.', }); } @@ -204,7 +214,8 @@ function validateElement(el: JsonObj, issues: NitroXIssue[]): void { // NX007: Element should have type if (!el['type']) { issues.push({ - rule_id: 'NX007', severity: 'WARNING', + rule_id: 'NX007', + severity: 'WARNING', message: 'Element is missing required "type".', applies_to: 'element', suggestion: 'Add a "type" field to the element (e.g. 
"content" or "component::UUID").', @@ -238,17 +249,21 @@ function validateInteraction(interaction: JsonObj, context: string, issues: Nitr for (const field of ['defaultInteraction', 'interactionType', 'name', 'testStepTitlePattern', 'title'] as const) { if (interaction[field] === undefined || interaction[field] === null) { issues.push({ - rule_id: 'NX004', severity: 'ERROR', + rule_id: 'NX004', + severity: 'ERROR', message: `Interaction in ${context} missing required field "${field}".`, - applies_to: 'interaction', field, + applies_to: 'interaction', + field, }); } } if (!Array.isArray(interaction['implementations']) || interaction['implementations'].length === 0) { issues.push({ - rule_id: 'NX004', severity: 'ERROR', + rule_id: 'NX004', + severity: 'ERROR', message: `Interaction in ${context} must have at least one implementation.`, - applies_to: 'interaction', field: 'implementations', + applies_to: 'interaction', + field: 'implementations', }); } else { for (const impl of interaction['implementations']) { @@ -259,9 +274,11 @@ function validateInteraction(interaction: JsonObj, context: string, issues: Nitr // NX009: name should match ^[A-Za-z0-9\s]*$ if (typeof interaction['name'] === 'string' && !INTERACTION_NAME_RE.test(interaction['name'])) { issues.push({ - rule_id: 'NX009', severity: 'INFO', + rule_id: 'NX009', + severity: 'INFO', message: `Interaction name "${interaction['name']}" should contain only alphanumeric characters and spaces.`, - applies_to: 'interaction', field: 'name', + applies_to: 'interaction', + field: 'name', suggestion: 'Remove special characters from the interaction name.', }); } @@ -271,9 +288,11 @@ function validateImplementation(impl: JsonObj, context: string, issues: NitroXIs // NX005: must have javaScriptSnippet if (!impl['javaScriptSnippet']) { issues.push({ - rule_id: 'NX005', severity: 'ERROR', + rule_id: 'NX005', + severity: 'ERROR', message: `Implementation in ${context} missing required "javaScriptSnippet".`, - applies_to: 
'implementation', field: 'javaScriptSnippet', + applies_to: 'implementation', + field: 'javaScriptSnippet', }); } } @@ -282,9 +301,11 @@ function validateSelector(sel: JsonObj, issues: NitroXIssue[]): void { // NX006: must have xpath if (!sel['xpath']) { issues.push({ - rule_id: 'NX006', severity: 'ERROR', + rule_id: 'NX006', + severity: 'ERROR', message: 'Selector missing required "xpath".', - applies_to: 'selector', field: 'xpath', + applies_to: 'selector', + field: 'xpath', suggestion: 'Add an "xpath" property to the selector.', }); } @@ -294,9 +315,13 @@ function validateParameter(param: JsonObj, context: string, issues: NitroXIssue[ // NX008: comparisonType must be one of valid enum values if (param['comparisonType'] !== undefined && !VALID_COMPARISON_TYPES.includes(String(param['comparisonType']))) { issues.push({ - rule_id: 'NX008', severity: 'WARNING', - message: `Parameter in ${context} has invalid comparisonType "${String(param['comparisonType'])}". Must be one of: ${VALID_COMPARISON_TYPES.join(', ')}.`, - applies_to: 'parameter', field: 'comparisonType', + rule_id: 'NX008', + severity: 'WARNING', + message: `Parameter in ${context} has invalid comparisonType "${String( + param['comparisonType'] + )}". 
Must be one of: ${VALID_COMPARISON_TYPES.join(', ')}.`, + applies_to: 'parameter', + field: 'comparisonType', suggestion: `Use one of: ${VALID_COMPARISON_TYPES.join(', ')}`, }); } @@ -402,34 +427,25 @@ function applyMergePatch(target: JsonObj, patch: JsonObj): JsonObj { export function registerNitroXDiscover(server: McpServer): void { server.tool( - 'provar.nitrox.discover', + 'provar_nitrox_discover', [ 'Discover Provar projects containing NitroX (Hybrid Model) page objects.', 'Scans directories for .testproject marker files, then inventories nitroX/ and nitroXPackages/ directories.', - 'NitroX is Provar\'s Hybrid Model for locators — component-based page objects for LWC,', + "NitroX is Provar's Hybrid Model for locators — component-based page objects for LWC,", 'Screen Flow, Industry Components, Experience Cloud, and HTML5 components.', - 'Results provide file paths and package info for use with provar.nitrox.read, validate, and generate.', + 'Results provide file paths and package info for use with provar_nitrox_read, validate, and generate.', ].join(' '), { search_roots: z .array(z.string()) .optional() .describe('Directories to scan (default: cwd; if empty, falls back to ~/git and ~/Provar)'), - max_depth: z - .number() - .int() - .min(1) - .max(20) - .default(6) - .describe('Maximum directory depth for .testproject search'), - include_packages: z - .boolean() - .default(true) - .describe('Include nitroXPackages/ package.json metadata in results'), + max_depth: z.number().int().min(1).max(20).default(6).describe('Maximum directory depth for .testproject search'), + include_packages: z.boolean().default(true).describe('Include nitroXPackages/ package.json metadata in results'), }, ({ search_roots, max_depth, include_packages }) => { const requestId = makeRequestId(); - log('info', 'provar.nitrox.discover', { requestId, search_roots, max_depth }); + log('info', 'provar_nitrox_discover', { requestId, search_roots, max_depth }); try { let roots = search_roots && 
search_roots.length > 0 ? search_roots : [process.cwd()]; @@ -437,10 +453,7 @@ export function registerNitroXDiscover(server: McpServer): void { // If no .testproject found in cwd, widen to home-dir defaults if (projects.length === 0 && (!search_roots || search_roots.length === 0)) { - const fallbackRoots = [ - path.join(os.homedir(), 'git'), - path.join(os.homedir(), 'Provar'), - ]; + const fallbackRoots = [path.join(os.homedir(), 'git'), path.join(os.homedir(), 'Provar')]; const fallbackProjects = findProvarProjects(fallbackRoots, max_depth); if (fallbackProjects.length > 0) { projects = fallbackProjects; @@ -495,7 +508,7 @@ export function registerNitroXDiscover(server: McpServer): void { } catch (err: unknown) { const error = err as Error; const errResult = makeError('DISCOVER_ERROR', error.message, requestId, false); - log('error', 'provar.nitrox.discover failed', { requestId, error: error.message }); + log('error', 'provar_nitrox_discover failed', { requestId, error: error.message }); return { isError: true, content: [{ type: 'text' as const, text: JSON.stringify(errResult) }] }; } } @@ -504,11 +517,11 @@ export function registerNitroXDiscover(server: McpServer): void { export function registerNitroXRead(server: McpServer, config: ServerConfig): void { server.tool( - 'provar.nitrox.read', + 'provar_nitrox_read', [ 'Read one or more NitroX .po.json (Hybrid Model page object) files and return their parsed content.', 'Use this to load examples before generating or validating.', - 'Provide file_paths for specific files, or project_path to read all .po.json files from a project\'s nitroX/ directory.', + "Provide file_paths for specific files, or project_path to read all .po.json files from a project's nitroX/ directory.", ].join(' '), { file_paths: z.array(z.string()).optional().describe('Specific .po.json file paths to read'), @@ -526,7 +539,7 @@ export function registerNitroXRead(server: McpServer, config: ServerConfig): voi }, ({ file_paths, project_path, 
max_files }) => { const requestId = makeRequestId(); - log('info', 'provar.nitrox.read', { + log('info', 'provar_nitrox_read', { requestId, file_count: file_paths?.length, project_path, @@ -551,11 +564,7 @@ export function registerNitroXRead(server: McpServer, config: ServerConfig): voi } const nitroxDir = path.join(resolved, 'nitroX'); if (!fs.existsSync(nitroxDir)) { - const err = makeError( - 'FILE_NOT_FOUND', - `No nitroX/ directory found in: ${resolved}`, - requestId - ); + const err = makeError('FILE_NOT_FOUND', `No nitroX/ directory found in: ${resolved}`, requestId); return { isError: true, content: [{ type: 'text' as const, text: JSON.stringify(err) }] }; } targets = collectPoJsonFiles(nitroxDir); @@ -597,7 +606,7 @@ export function registerNitroXRead(server: McpServer, config: ServerConfig): voi requestId, false ); - log('error', 'provar.nitrox.read failed', { requestId, error: error.message }); + log('error', 'provar_nitrox_read failed', { requestId, error: error.message }); return { isError: true, content: [{ type: 'text' as const, text: JSON.stringify(errResult) }] }; } } @@ -606,7 +615,7 @@ export function registerNitroXRead(server: McpServer, config: ServerConfig): voi export function registerNitroXValidate(server: McpServer, config: ServerConfig): void { server.tool( - 'provar.nitrox.validate', + 'provar_nitrox_validate', [ 'Validate a NitroX .po.json (Hybrid Model component page object) against schema rules.', 'Works for any NitroX-mapped component type: LWC, Screen Flow, Industry Components, Experience Cloud, HTML5.', @@ -619,7 +628,7 @@ export function registerNitroXValidate(server: McpServer, config: ServerConfig): }, ({ content, file_path }) => { const requestId = makeRequestId(); - log('info', 'provar.nitrox.validate', { requestId, has_content: !!content, file_path }); + log('info', 'provar_nitrox_validate', { requestId, has_content: !!content, file_path }); try { let source = content; @@ -666,7 +675,7 @@ export function 
registerNitroXValidate(server: McpServer, config: ServerConfig): requestId, false ); - log('error', 'provar.nitrox.validate failed', { requestId, error: error.message }); + log('error', 'provar_nitrox_validate failed', { requestId, error: error.message }); return { isError: true, content: [{ type: 'text' as const, text: JSON.stringify(errResult) }] }; } } @@ -682,9 +691,7 @@ const ParameterInputSchema = z.object({ const ElementInputSchema = z.object({ label: z.string().describe('Human-readable element label'), - type_ref: z - .string() - .describe('Component type reference (e.g. "component::UUID" or "content")'), + type_ref: z.string().describe('Component type reference (e.g. "component::UUID" or "content")'), tag_name: z.string().optional().describe('Optional HTML/LWC tag name override'), parameters: z.array(ParameterInputSchema).optional(), selector_xpath: z.string().optional().describe('XPath selector for this element'), @@ -692,45 +699,29 @@ const ElementInputSchema = z.object({ export function registerNitroXGenerate(server: McpServer, config: ServerConfig): void { server.tool( - 'provar.nitrox.generate', + 'provar_nitrox_generate', [ 'Generate a new NitroX .po.json (Hybrid Model page object) from a component description.', - 'Applicable to any component type supported by Provar\'s Hybrid Model:', + "Applicable to any component type supported by Provar's Hybrid Model:", 'LWC, Screen Flow, Industry Components, Experience Cloud, HTML5.', 'All componentId fields are assigned fresh UUIDs. Returns JSON content;', 'writes to disk only when dry_run=false.', ].join(' '), { - name: z - .string() - .describe('Path-like component name, e.g. /com/force/myapp/ButtonComponent'), - tag_name: z - .string() - .describe('LWC or HTML tag name, e.g. lightning-button or c-my-component'), + name: z.string().describe('Path-like component name, e.g. /com/force/myapp/ButtonComponent'), + tag_name: z.string().describe('LWC or HTML tag name, e.g. 
lightning-button or c-my-component'), type: z.enum(['Block', 'Page']).default('Block').describe('Component type'), - page_structure_element: z - .boolean() - .default(true) - .describe('Whether this is a page structure element'), - field_details_element: z - .boolean() - .default(false) - .describe('Whether this is a field details element'), + page_structure_element: z.boolean().default(true).describe('Whether this is a page structure element'), + field_details_element: z.boolean().default(false).describe('Whether this is a field details element'), parameters: z.array(ParameterInputSchema).optional().describe('Component parameters/qualifiers'), elements: z.array(ElementInputSchema).optional().describe('Child elements'), - output_path: z - .string() - .optional() - .describe('File path to write (requires dry_run=false)'), + output_path: z.string().optional().describe('File path to write (requires dry_run=false)'), overwrite: z.boolean().default(false).describe('Overwrite if output_path already exists'), - dry_run: z - .boolean() - .default(true) - .describe('Return JSON without writing to disk (default)'), + dry_run: z.boolean().default(true).describe('Return JSON without writing to disk (default)'), }, (input) => { const requestId = makeRequestId(); - log('info', 'provar.nitrox.generate', { requestId, name: input.name, dry_run: input.dry_run }); + log('info', 'provar_nitrox_generate', { requestId, name: input.name, dry_run: input.dry_run }); try { const generated = buildNitroXJson({ @@ -760,7 +751,7 @@ export function registerNitroXGenerate(server: McpServer, config: ServerConfig): fs.mkdirSync(path.dirname(filePath), { recursive: true }); fs.writeFileSync(filePath, content, 'utf-8'); written = true; - log('info', 'provar.nitrox.generate: wrote file', { requestId, filePath }); + log('info', 'provar_nitrox_generate: wrote file', { requestId, filePath }); } const result = { requestId, content, file_path: filePath, written, dry_run: input.dry_run }; @@ -771,12 +762,12 
@@ export function registerNitroXGenerate(server: McpServer, config: ServerConfig): } catch (err: unknown) { const error = err as Error & { code?: string }; const errResult = makeError( - error instanceof PathPolicyError ? error.code : (error.code ?? 'GENERATE_ERROR'), + error instanceof PathPolicyError ? error.code : error.code ?? 'GENERATE_ERROR', error.message, requestId, false ); - log('error', 'provar.nitrox.generate failed', { requestId, error: error.message }); + log('error', 'provar_nitrox_generate failed', { requestId, error: error.message }); return { isError: true, content: [{ type: 'text' as const, text: JSON.stringify(errResult) }] }; } } @@ -785,7 +776,7 @@ export function registerNitroXGenerate(server: McpServer, config: ServerConfig): export function registerNitroXPatch(server: McpServer, config: ServerConfig): void { server.tool( - 'provar.nitrox.patch', + 'provar_nitrox_patch', [ 'Apply a JSON merge-patch (RFC 7396) to an existing NitroX .po.json file.', 'Reads the file, merges the patch (null values remove keys, other values replace or recurse into objects),', @@ -796,13 +787,8 @@ export function registerNitroXPatch(server: McpServer, config: ServerConfig): vo file_path: z.string().describe('Path to the existing .po.json file to patch'), patch: z .record(z.unknown()) - .describe( - 'JSON merge-patch to apply (RFC 7396: null removes key, any other value replaces)' - ), - dry_run: z - .boolean() - .default(true) - .describe('Return merged result without writing to disk (default)'), + .describe('JSON merge-patch to apply (RFC 7396: null removes key, any other value replaces)'), + dry_run: z.boolean().default(true).describe('Return merged result without writing to disk (default)'), validate_after: z .boolean() .default(true) @@ -810,7 +796,7 @@ export function registerNitroXPatch(server: McpServer, config: ServerConfig): vo }, ({ file_path, patch, dry_run, validate_after }) => { const requestId = makeRequestId(); - log('info', 'provar.nitrox.patch', 
{ requestId, file_path, dry_run }); + log('info', 'provar_nitrox_patch', { requestId, file_path, dry_run }); try { assertPathAllowed(file_path, config.allowedPaths); @@ -857,7 +843,7 @@ export function registerNitroXPatch(server: McpServer, config: ServerConfig): vo if (!dry_run) { fs.writeFileSync(resolved, content, 'utf-8'); written = true; - log('info', 'provar.nitrox.patch: wrote file', { requestId, filePath: resolved }); + log('info', 'provar_nitrox_patch: wrote file', { requestId, filePath: resolved }); } const result = { @@ -875,12 +861,12 @@ export function registerNitroXPatch(server: McpServer, config: ServerConfig): vo } catch (err: unknown) { const error = err as Error & { code?: string }; const errResult = makeError( - error instanceof PathPolicyError ? error.code : (error.code ?? 'PATCH_ERROR'), + error instanceof PathPolicyError ? error.code : error.code ?? 'PATCH_ERROR', error.message, requestId, false ); - log('error', 'provar.nitrox.patch failed', { requestId, error: error.message }); + log('error', 'provar_nitrox_patch failed', { requestId, error: error.message }); return { isError: true, content: [{ type: 'text' as const, text: JSON.stringify(errResult) }] }; } } diff --git a/src/mcp/tools/pageObjectGenerate.ts b/src/mcp/tools/pageObjectGenerate.ts index c1c13667..4d877374 100644 --- a/src/mcp/tools/pageObjectGenerate.ts +++ b/src/mcp/tools/pageObjectGenerate.ts @@ -92,17 +92,17 @@ function preflightAndWrite( } fs.mkdirSync(path.dirname(filePath), { recursive: true }); fs.writeFileSync(filePath, javaSource, 'utf-8'); - log('info', 'provar.pageobject.generate: wrote file', { requestId, filePath }); + log('info', 'provar_pageobject_generate: wrote file', { requestId, filePath }); if (ssoSource && ssoFilePath) { fs.writeFileSync(ssoFilePath, ssoSource, 'utf-8'); - log('info', 'provar.pageobject.generate: wrote SSO stub', { requestId, ssoFilePath }); + log('info', 'provar_pageobject_generate: wrote SSO stub', { requestId, ssoFilePath }); } return 
null; } export function registerPageObjectGenerate(server: McpServer, config: ServerConfig): void { server.tool( - 'provar.pageobject.generate', + 'provar_pageobject_generate', [ 'Generate a Provar Java Page Object skeleton with @Page/@SalesforcePage annotation, standard imports, and @FindBy WebElement fields.', 'Returns Java source. Writes to disk only when dry_run=false.', @@ -145,7 +145,7 @@ export function registerPageObjectGenerate(server: McpServer, config: ServerConf }, (input) => { const requestId = makeRequestId(); - log('info', 'provar.pageobject.generate', { + log('info', 'provar_pageobject_generate', { requestId, class_name: input.class_name, dry_run: input.dry_run, @@ -202,7 +202,7 @@ export function registerPageObjectGenerate(server: McpServer, config: ServerConf requestId, false ); - log('error', 'provar.pageobject.generate failed', { requestId, error: error.message }); + log('error', 'provar_pageobject_generate failed', { requestId, error: error.message }); return { isError: true, content: [{ type: 'text' as const, text: JSON.stringify(errResult) }] }; } } diff --git a/src/mcp/tools/pageObjectValidate.ts b/src/mcp/tools/pageObjectValidate.ts index 56514765..e98c5831 100644 --- a/src/mcp/tools/pageObjectValidate.ts +++ b/src/mcp/tools/pageObjectValidate.ts @@ -18,7 +18,7 @@ import { log } from '../logging/logger.js'; export function registerPageObjectValidate(server: McpServer, config: ServerConfig): void { server.tool( - 'provar.pageobject.validate', + 'provar_pageobject_validate', 'Validate a Provar Java Page Object against naming conventions, locator best practices, and structural requirements. 
Returns quality score (0–100) and list of issues.', { content: z.string().optional().describe('Java source code to validate directly'), @@ -30,7 +30,7 @@ export function registerPageObjectValidate(server: McpServer, config: ServerConf }, ({ content, file_path, expected_class_name }) => { const requestId = makeRequestId(); - log('info', 'provar.pageobject.validate', { requestId, has_content: !!content, file_path }); + log('info', 'provar_pageobject_validate', { requestId, has_content: !!content, file_path }); try { let source = content; @@ -68,7 +68,7 @@ export function registerPageObjectValidate(server: McpServer, config: ServerConf requestId, false ); - log('error', 'provar.pageobject.validate failed', { requestId, error: error.message }); + log('error', 'provar_pageobject_validate failed', { requestId, error: error.message }); return { isError: true, content: [{ type: 'text' as const, text: JSON.stringify(errResult) }] }; } } @@ -78,22 +78,81 @@ export function registerPageObjectValidate(server: McpServer, config: ServerConf // ── Validator (ported from quality-hub-agents/lambda/src/validator/page_object_validator.py) ── const VALID_LOCATOR_STRATEGIES = new Set([ - 'xpath', 'id', 'css', 'name', 'className', 'tagName', - 'linkText', 'partialLinkText', 'visualforce', 'label', + 'xpath', + 'id', + 'css', + 'name', + 'className', + 'tagName', + 'linkText', + 'partialLinkText', + 'visualforce', + 'label', ]); const VALID_ELEMENT_TYPES = new Set([ - 'TextType', 'ButtonType', 'LinkType', 'ChoiceListType', - 'RadioType', 'FileType', 'DateType', 'RichTextType', 'BooleanType', + 'TextType', + 'ButtonType', + 'LinkType', + 'ChoiceListType', + 'RadioType', + 'FileType', + 'DateType', + 'RichTextType', + 'BooleanType', ]); const PASCAL_CASE_RE = /^[A-Z][A-Za-z0-9]*$/; const VALID_JAVA_IDENT_RE = /^[A-Za-z_$][A-Za-z0-9_$]*$/; const JAVA_RESERVED = new Set([ - 'abstract', 'assert', 'boolean', 'break', 'byte', 'case', 'catch', 'char', 'class', - 'const', 'continue', 'default', 
'do', 'double', 'else', 'enum', 'extends', 'final', - 'finally', 'float', 'for', 'goto', 'if', 'implements', 'import', 'instanceof', 'int', - 'interface', 'long', 'native', 'new', 'package', 'private', 'protected', 'public', - 'return', 'short', 'static', 'strictfp', 'super', 'switch', 'synchronized', 'this', - 'throw', 'throws', 'transient', 'try', 'void', 'volatile', 'while', + 'abstract', + 'assert', + 'boolean', + 'break', + 'byte', + 'case', + 'catch', + 'char', + 'class', + 'const', + 'continue', + 'default', + 'do', + 'double', + 'else', + 'enum', + 'extends', + 'final', + 'finally', + 'float', + 'for', + 'goto', + 'if', + 'implements', + 'import', + 'instanceof', + 'int', + 'interface', + 'long', + 'native', + 'new', + 'package', + 'private', + 'protected', + 'public', + 'return', + 'short', + 'static', + 'strictfp', + 'super', + 'switch', + 'synchronized', + 'this', + 'throw', + 'throws', + 'transient', + 'try', + 'void', + 'volatile', + 'while', ]); const SF_DYNAMIC_ATTRS_RE = /data-aura-rendered-by|data-aura-class|aura-id|data-component-id/i; const INDEXED_XPATH_RE = /\[\d+\]/; @@ -114,10 +173,7 @@ let poRulePenaltiesCache: Record | null = null; function getRulePenalties(): Record { if (!poRulePenaltiesCache) { try { - const raw = fs.readFileSync( - join(poRulesDirPath, '..', 'rules', 'page_object_validation_rules.json'), - 'utf-8' - ); + const raw = fs.readFileSync(join(poRulesDirPath, '..', 'rules', 'page_object_validation_rules.json'), 'utf-8'); const rules = JSON.parse(raw) as PoRule[]; poRulePenaltiesCache = Object.fromEntries(rules.map((r) => [r.id, r.penalty])); } catch { @@ -141,10 +197,7 @@ export interface PageObjectValidationResult { } /** Pure function — exported for unit testing */ -export function validatePageObject( - source: string, - expectedClassName?: string -): PageObjectValidationResult { +export function validatePageObject(source: string, expectedClassName?: string): PageObjectValidationResult { const issues: ValidationIssue[] = []; 
const stripped = stripComments(source); @@ -153,14 +206,16 @@ export function validatePageObject( const packageName = packageMatch ? packageMatch[1] : null; if (!packageMatch) { issues.push({ - rule_id: 'PO_001', severity: 'ERROR', + rule_id: 'PO_001', + severity: 'ERROR', message: 'Missing package declaration.', applies_to: 'class', suggestion: "Add 'package pageobjects;' at the top of the file.", }); } else if (!/^[a-z][a-z0-9_]*(\.[a-z][a-z0-9_]*)*$/.test(packageName!)) { issues.push({ - rule_id: 'PO_002', severity: 'ERROR', + rule_id: 'PO_002', + severity: 'ERROR', message: `Invalid package name: "${packageName}".`, applies_to: 'class', suggestion: 'Package names should be valid lower-case Java identifiers separated by dots.', @@ -175,7 +230,8 @@ export function validatePageObject( const closeBraces = (stripped.match(/\}/g) ?? []).length; if (openBraces !== closeBraces) { issues.push({ - rule_id: 'PO_060', severity: 'ERROR', + rule_id: 'PO_060', + severity: 'ERROR', message: `Mismatched braces: ${openBraces} opening vs ${closeBraces} closing.`, applies_to: 'class', suggestion: 'Check for missing or extra curly braces.', @@ -185,7 +241,8 @@ export function validatePageObject( // ── Imports ────────────────────────────────────────────────────────────────── if (!stripped.includes('import com.provar.core.testapi.annotations')) { issues.push({ - rule_id: 'PO_012', severity: 'WARNING', + rule_id: 'PO_012', + severity: 'WARNING', message: 'Missing import for Provar annotations.', applies_to: 'class', suggestion: "Add 'import com.provar.core.testapi.annotations.*;'", @@ -201,7 +258,8 @@ export function validatePageObject( const fields = extractFields(source); if (fields.length === 0 && !issues.some((i) => i.rule_id === 'PO_003')) { issues.push({ - rule_id: 'PO_030', severity: 'WARNING', + rule_id: 'PO_030', + severity: 'WARNING', message: 'No WebElement or WebComponent fields found.', applies_to: 'class', suggestion: 'Add at least one WebElement field with @FindBy 
locator and type annotation.', @@ -215,7 +273,8 @@ export function validatePageObject( // ── Commented code ─────────────────────────────────────────────────────────── if (/\/\/\s*(public|private|protected|@FindBy|WebElement|@\w+Type)/.test(source)) { issues.push({ - rule_id: 'PO_080', severity: 'INFO', + rule_id: 'PO_080', + severity: 'INFO', message: 'Commented-out code detected.', applies_to: 'class', suggestion: 'Remove commented code. Use version control to track history.', @@ -252,7 +311,8 @@ function checkClassDeclaration( const className = classMatch ? classMatch[1] : null; if (!classMatch) { issues.push({ - rule_id: 'PO_003', severity: 'ERROR', + rule_id: 'PO_003', + severity: 'ERROR', message: 'Missing public class declaration.', applies_to: 'class', suggestion: "Ensure the file has a 'public class ClassName' declaration.", @@ -260,7 +320,8 @@ function checkClassDeclaration( } else { if (!PASCAL_CASE_RE.test(className!)) { issues.push({ - rule_id: 'PO_004', severity: 'WARNING', + rule_id: 'PO_004', + severity: 'WARNING', message: `Class name "${className}" is not PascalCase.`, applies_to: 'class', suggestion: "Rename to PascalCase (e.g., 'MyPageObject').", @@ -268,7 +329,8 @@ function checkClassDeclaration( } if (!VALID_JAVA_IDENT_RE.test(className!)) { issues.push({ - rule_id: 'PO_005', severity: 'ERROR', + rule_id: 'PO_005', + severity: 'ERROR', message: `Class name "${className}" is not a valid Java identifier.`, applies_to: 'class', suggestion: 'Class names must start with a letter, $, or _ and contain only letters, digits, $, or _.', @@ -276,7 +338,8 @@ function checkClassDeclaration( } if (expectedClassName && className !== expectedClassName) { issues.push({ - rule_id: 'PO_006', severity: 'ERROR', + rule_id: 'PO_006', + severity: 'ERROR', message: `Class name "${className}" does not match expected "${expectedClassName}".`, applies_to: 'class', suggestion: 'Class name must match the filename.', @@ -291,7 +354,8 @@ function checkAnnotations(stripped: 
string, issues: ValidationIssue[]): boolean const hasSalesforceAnnotation = /@SalesforcePage\s*\(/.test(stripped); if (!hasPageAnnotation && !hasSalesforceAnnotation) { issues.push({ - rule_id: 'PO_020', severity: 'WARNING', + rule_id: 'PO_020', + severity: 'WARNING', message: 'Missing @Page or @SalesforcePage annotation.', applies_to: 'annotation', suggestion: 'Add @Page annotation before the class declaration.', @@ -300,7 +364,8 @@ function checkAnnotations(stripped: string, issues: ValidationIssue[]): boolean const m = /@Page\s*\(([^)]*)\)/.exec(stripped); if (m && !m[1].includes('title')) { issues.push({ - rule_id: 'PO_021', severity: 'WARNING', + rule_id: 'PO_021', + severity: 'WARNING', message: '@Page annotation missing title attribute.', applies_to: 'annotation', suggestion: 'Add title attribute to @Page annotation.', @@ -311,7 +376,8 @@ function checkAnnotations(stripped: string, issues: ValidationIssue[]): boolean if (m) { if (!m[1].includes('title') || !m[1].includes('connection')) { issues.push({ - rule_id: 'PO_022', severity: 'ERROR', + rule_id: 'PO_022', + severity: 'ERROR', message: '@SalesforcePage missing required title or connection attribute.', applies_to: 'annotation', suggestion: 'Add required attributes to @SalesforcePage annotation.', @@ -320,7 +386,8 @@ function checkAnnotations(stripped: string, issues: ValidationIssue[]): boolean const pageTypes = ['page', 'auraComponent', 'object', 'lightningWebComponent']; if (!pageTypes.some((t) => m[1].includes(t))) { issues.push({ - rule_id: 'PO_023', severity: 'WARNING', + rule_id: 'PO_023', + severity: 'WARNING', message: '@SalesforcePage should specify page type attribute.', applies_to: 'annotation', suggestion: 'Add one of: page, auraComponent, object, or lightningWebComponent.', @@ -331,16 +398,13 @@ function checkAnnotations(stripped: string, issues: ValidationIssue[]): boolean return hasSalesforceAnnotation; } -function validateFields( - fields: FieldInfo[], - hasSalesforceAnnotation: boolean, 
- issues: ValidationIssue[] -): void { +function validateFields(fields: FieldInfo[], hasSalesforceAnnotation: boolean, issues: ValidationIssue[]): void { const fieldNames = new Set(); for (const field of fields) { if (fieldNames.has(field.name)) { issues.push({ - rule_id: 'PO_031', severity: 'ERROR', + rule_id: 'PO_031', + severity: 'ERROR', message: `Duplicate field name: "${field.name}".`, applies_to: 'field', suggestion: 'Rename one of the duplicate fields.', @@ -350,7 +414,8 @@ function validateFields( } if (!VALID_JAVA_IDENT_RE.test(field.name)) { issues.push({ - rule_id: 'PO_032', severity: 'ERROR', + rule_id: 'PO_032', + severity: 'ERROR', message: `Invalid field name: "${field.name}".`, applies_to: 'field', suggestion: 'Field names must be valid Java identifiers.', @@ -358,7 +423,8 @@ function validateFields( } if (JAVA_RESERVED.has(field.name)) { issues.push({ - rule_id: 'PO_033', severity: 'ERROR', + rule_id: 'PO_033', + severity: 'ERROR', message: `Field name "${field.name}" is a Java reserved word.`, applies_to: 'field', suggestion: 'Rename field to avoid Java reserved words.', @@ -366,7 +432,8 @@ function validateFields( } if (field.locatorStrategy && !VALID_LOCATOR_STRATEGIES.has(field.locatorStrategy)) { issues.push({ - rule_id: 'PO_034', severity: 'ERROR', + rule_id: 'PO_034', + severity: 'ERROR', message: `Invalid locator strategy: "${field.locatorStrategy}".`, applies_to: 'field', suggestion: @@ -375,7 +442,8 @@ function validateFields( } if (field.elementType && !VALID_ELEMENT_TYPES.has(field.elementType)) { issues.push({ - rule_id: 'PO_036', severity: 'WARNING', + rule_id: 'PO_036', + severity: 'WARNING', message: `Invalid element type: "@${field.elementType}". 
(CheckboxType is not valid — use BooleanType.)`, applies_to: 'field', suggestion: @@ -386,17 +454,14 @@ function validateFields( } } -function checkLocatorQuality( - field: FieldInfo, - hasSalesforceAnnotation: boolean, - issues: ValidationIssue[] -): void { +function checkLocatorQuality(field: FieldInfo, hasSalesforceAnnotation: boolean, issues: ValidationIssue[]): void { const lv = field.locatorValue ?? ''; const strat = field.locatorStrategy ?? ''; if (!lv) return; if ((strat === 'xpath' || strat === '') && /^\/html|^\/body/i.test(lv)) { issues.push({ - rule_id: 'PO_071', severity: 'ERROR', + rule_id: 'PO_071', + severity: 'ERROR', message: `Absolute XPath for field "${field.name}".`, applies_to: 'field', suggestion: 'Use relative XPath starting with // or .//', @@ -404,7 +469,8 @@ function checkLocatorQuality( } if (SF_DYNAMIC_ATTRS_RE.test(lv)) { issues.push({ - rule_id: 'PO_073', severity: 'ERROR', + rule_id: 'PO_073', + severity: 'ERROR', message: `Salesforce dynamic attribute in locator for "${field.name}".`, applies_to: 'field', suggestion: 'Use stable identifiers like labels, data-testid, or semantic selectors.', @@ -412,7 +478,8 @@ function checkLocatorQuality( } if (strat === 'id' && hasSalesforceAnnotation) { issues.push({ - rule_id: 'PO_070', severity: 'WARNING', + rule_id: 'PO_070', + severity: 'WARNING', message: `ID-based locator on Salesforce page for "${field.name}". IDs may be dynamic.`, applies_to: 'field', suggestion: 'Prefer xpath/css with stable attributes like data-testid, aria-label, name.', @@ -420,7 +487,8 @@ function checkLocatorQuality( } if (INDEXED_XPATH_RE.test(lv)) { issues.push({ - rule_id: 'PO_072', severity: 'WARNING', + rule_id: 'PO_072', + severity: 'WARNING', message: `Indexed XPath [n] for field "${field.name}".`, applies_to: 'field', suggestion: 'Prefer attribute-based selection over positional indexes.', @@ -430,7 +498,8 @@ function checkLocatorQuality( const segments = (lv.match(/\/{2}/g) ?? 
[]).length; if (segments > 4) { issues.push({ - rule_id: 'PO_076', severity: 'WARNING', + rule_id: 'PO_076', + severity: 'WARNING', message: `Complex XPath (${segments} descent operators) for "${field.name}".`, applies_to: 'field', suggestion: 'Simplify with a more direct path or data-testid attributes.', @@ -439,7 +508,8 @@ function checkLocatorQuality( } if (POSITION_FN_RE.test(lv)) { issues.push({ - rule_id: 'PO_079', severity: 'WARNING', + rule_id: 'PO_079', + severity: 'WARNING', message: `Position function (last/first/position) in XPath for "${field.name}".`, applies_to: 'field', suggestion: 'Use unique identifiers instead of position-based selection.', @@ -447,7 +517,8 @@ function checkLocatorQuality( } if (strat === 'css' && /[a-z0-9]+-[a-z0-9]+-[a-z0-9]+/.test(lv)) { issues.push({ - rule_id: 'PO_075', severity: 'INFO', + rule_id: 'PO_075', + severity: 'INFO', message: `Possibly autogenerated CSS class pattern for "${field.name}".`, applies_to: 'field', suggestion: 'Prefer stable attributes over autogenerated CSS classes.', @@ -455,7 +526,8 @@ function checkLocatorQuality( } if (lv.length > 200) { issues.push({ - rule_id: 'PO_078', severity: 'INFO', + rule_id: 'PO_078', + severity: 'INFO', message: `Very long locator (${lv.length} chars) for "${field.name}".`, applies_to: 'field', suggestion: 'Consider a shorter, more maintainable locator.', @@ -467,8 +539,8 @@ function checkLocatorQuality( function stripComments(source: string): string { return source - .replace(/\/\*[\s\S]*?\*\//g, '') // block comments - .replace(/\/\/[^\n]*/g, ''); // line comments + .replace(/\/\*[\s\S]*?\*\//g, '') // block comments + .replace(/\/\/[^\n]*/g, ''); // line comments } interface FieldInfo { diff --git a/src/mcp/tools/projectInspect.ts b/src/mcp/tools/projectInspect.ts index ff01c815..40ebff79 100644 --- a/src/mcp/tools/projectInspect.ts +++ b/src/mcp/tools/projectInspect.ts @@ -17,7 +17,7 @@ import { log } from '../logging/logger.js'; export function 
registerProjectInspect(server: McpServer, config: ServerConfig): void { server.tool( - 'provar.project.inspect', + 'provar_project_inspect', [ 'Inspect a Provar project folder and return a structured inventory.', 'Returns: provardx-properties.json config files (for ProvarDX CLI runs),', @@ -31,13 +31,11 @@ export function registerProjectInspect(server: McpServer, config: ServerConfig): '(Salesforce, UI Testing, Web Services, Quality Hub, Database, and other connection types).', ].join(' '), { - project_path: z - .string() - .describe('Absolute or relative path to the Provar project root directory'), + project_path: z.string().describe('Absolute or relative path to the Provar project root directory'), }, ({ project_path }) => { const requestId = makeRequestId(); - log('info', 'provar.project.inspect', { requestId, project_path }); + log('info', 'provar_project_inspect', { requestId, project_path }); try { assertPathAllowed(project_path, config.allowedPaths); @@ -56,12 +54,12 @@ export function registerProjectInspect(server: McpServer, config: ServerConfig): } catch (err: unknown) { const error = err as Error & { code?: string }; const errResult = makeError( - error instanceof PathPolicyError ? error.code : (error.code ?? 'INSPECT_ERROR'), + error instanceof PathPolicyError ? error.code : error.code ?? 
'INSPECT_ERROR', error.message, requestId, false ); - log('error', 'provar.project.inspect failed', { requestId, error: error.message }); + log('error', 'provar_project_inspect failed', { requestId, error: error.message }); return { isError: true, content: [{ type: 'text' as const, text: JSON.stringify(errResult) }] }; } } @@ -71,19 +69,19 @@ export function registerProjectInspect(server: McpServer, config: ServerConfig): // ─── types ──────────────────────────────────────────────────────────────────── interface PageObjectDir { - path: string; // relative path from project root + path: string; // relative path from project root java_file_count: number; } interface SecretsValidation { - secrets_file: string; // relative path (from secureStoragePath in .testproject) - found: boolean; // whether the file exists + secrets_file: string; // relative path (from secureStoragePath in .testproject) + found: boolean; // whether the file exists encryptor_check_present: boolean; // Provar Secrets Password has been configured - all_values_encrypted: boolean; // every non-comment entry has an ENC1() value - total_entries: number; // total key=value entries (excluding comments) - encrypted_count: number; // entries with ENC1() values - unencrypted_key_count: number; // entries WITHOUT ENC1() — pass to PROJ-ENC-001 - unencrypted_keys: string[]; // key names only, never values — e.g. ["uuid.password"] + all_values_encrypted: boolean; // every non-comment entry has an ENC1() value + total_entries: number; // total key=value entries (excluding comments) + encrypted_count: number; // entries with ENC1() values + unencrypted_key_count: number; // entries WITHOUT ENC1() — pass to PROJ-ENC-001 + unencrypted_keys: string[]; // key names only, never values — e.g. 
["uuid.password"] } interface SfConnection { @@ -143,8 +141,8 @@ function buildProjectInventory(projectPath: string, requestId: string): Record(); // uncapped — used for coverage + const testCaseFilesDisplay: string[] = []; // capped at 500 for API display + const allTestCasePaths = new Set(); // uncapped — used for coverage let customTestStepFileCount = 0; let dataSourceFileCount = 0; const dataSourceDirs: string[] = []; @@ -315,7 +313,9 @@ function validateSecretsFile(projectPath: string, testProject: TestProjectInfo): const raw = fs.readFileSync(path.join(projectPath, '.testproject'), 'utf-8'); const match = raw.match(/([^<]+)<\/secureStoragePath>/); if (match?.[1]) secretsRelPath = match[1].trim(); - } catch { /* use default */ } + } catch { + /* use default */ + } } const notFound: SecretsValidation = { @@ -425,12 +425,12 @@ function parseConnectionClasses(content: string): ConnectionOverview { } // Build summary totals - const sfCommunities = result.salesforce.filter((c) => c.sub_type === 'communities').length; - const sfPortal = result.salesforce.filter((c) => c.sub_type === 'portal').length; - const sfLoginAs = result.salesforce.filter((c) => c.auth_method === 'login-as').length; - const sfOAuth = result.salesforce.filter((c) => c.auth_method === 'oauth').length; - const sfBasic = result.salesforce.filter((c) => c.auth_method === 'basic').length; - const sfDirectLogin = result.salesforce.filter((c) => c.is_direct_login).length; + const sfCommunities = result.salesforce.filter((c) => c.sub_type === 'communities').length; + const sfPortal = result.salesforce.filter((c) => c.sub_type === 'portal').length; + const sfLoginAs = result.salesforce.filter((c) => c.auth_method === 'login-as').length; + const sfOAuth = result.salesforce.filter((c) => c.auth_method === 'oauth').length; + const sfBasic = result.salesforce.filter((c) => c.auth_method === 'basic').length; + const sfDirectLogin = result.salesforce.filter((c) => c.is_direct_login).length; result.summary = { 
salesforce: result.salesforce.length, salesforce_standard: result.salesforce.length - sfCommunities - sfPortal, @@ -439,7 +439,7 @@ function parseConnectionClasses(content: string): ConnectionOverview { salesforce_auth_login_as: sfLoginAs, salesforce_auth_oauth: sfOAuth, salesforce_auth_basic: sfBasic, - salesforce_direct_login: sfDirectLogin, // review these — may be admin users + salesforce_direct_login: sfDirectLogin, // review these — may be admin users ui_testing: result.ui_testing.length, quality_hub: result.quality_hub.length, web_service_rest: result.web_service_rest.length, @@ -468,12 +468,7 @@ function parseConnectionClasses(content: string): ConnectionOverview { } // eslint-disable-next-line complexity -function categoriseConnection( - result: ConnectionOverview, - className: string, - name: string, - urls: string[] -): void { +function categoriseConnection(result: ConnectionOverview, className: string, name: string, urls: string[]): void { // Use the first URL for type inference; env-override URLs share the same class const primaryUrl = urls[0] ?? ''; @@ -485,8 +480,8 @@ function categoriseConnection( const subType: 'standard' | 'communities' | 'portal' = isCommunities ? 'communities' : isPortal - ? 'portal' - : 'standard'; + ? 'portal' + : 'standard'; // ── auth method ─────────────────────────────────────────────────────── const logonAsMatch = primaryUrl.match(/logonAsConnection=([^;]+)/); @@ -496,19 +491,22 @@ function categoriseConnection( const authMethod: SfConnection['auth_method'] = isLogonAs ? 'login-as' : isOAuth - ? 'oauth' - : isBasic - ? 'basic' - : 'unknown'; + ? 'oauth' + : isBasic + ? 'basic' + : 'unknown'; // ── SF environment (org type) ───────────────────────────────────────── const envMatch = primaryUrl.match(/(?:^|;)environment=([^;]+)/); const envValue = envMatch?.[1]?.toUpperCase(); const sfEnvironment: SfConnection['sf_environment'] = - envValue === 'SANDBOX' ? 'sandbox' : - envValue === 'PROD_DEV' ? 
'production-developer' : - envValue ? 'other' : - null; + envValue === 'SANDBOX' + ? 'sandbox' + : envValue === 'PROD_DEV' + ? 'production-developer' + : envValue + ? 'other' + : null; result.salesforce.push({ name, @@ -539,8 +537,8 @@ function categoriseConnection( case 'google': result.google.push(name); break; - case 'msexc': // Microsoft Exchange / Outlook (EWS) - case 'microsoft': // kept as fallback in case variant exists + case 'msexc': // Microsoft Exchange / Outlook (EWS) + case 'microsoft': // kept as fallback in case variant exists result.microsoft.push(name); break; case 'zephyr': @@ -591,8 +589,9 @@ function buildProjectTestCaseIdMap(projectPath: string): Map { for (const entry of fs.readdirSync(dir, { withFileTypes: true })) { if (entry.name.startsWith('.') || entry.name === 'node_modules') continue; const fullPath = path.join(dir, entry.name); - if (entry.isDirectory()) { walk(fullPath); } - else if (entry.name.endsWith('.testcase')) { + if (entry.isDirectory()) { + walk(fullPath); + } else if (entry.name.endsWith('.testcase')) { try { const content = fs.readFileSync(fullPath, 'utf-8'); const rel = path.relative(projectPath, fullPath).replace(/\\/g, '/'); @@ -600,10 +599,14 @@ function buildProjectTestCaseIdMap(projectPath: string): Map { const m = content.match(new RegExp(`${attr}=["']([^"']+)["']`)); if (m?.[1] && !idMap.has(m[1])) idMap.set(m[1], rel); } - } catch { /* skip */ } + } catch { + /* skip */ + } } } - } catch { /* skip */ } + } catch { + /* skip */ + } } walk(testsDir); return idMap; @@ -668,7 +671,9 @@ function buildPlanCoverage(projectPath: string, allTestCasePaths: Set): const resolvedPath = testCaseIdMap.get(idMatch[1]); if (resolvedPath) referencedPaths.add(resolvedPath); } - } catch { /* skip */ } + } catch { + /* skip */ + } return true; } @@ -687,8 +692,7 @@ function buildPlanCoverage(projectPath: string, allTestCasePaths: Set): test_instance_count: testInstanceCount, covered_test_case_paths: coveredPaths, 
uncovered_test_case_paths: uncoveredPaths, - coverage_percent: - allTestCasePaths.size > 0 ? Math.round((coveredPaths.length / allTestCasePaths.size) * 100) : 0, + coverage_percent: allTestCasePaths.size > 0 ? Math.round((coveredPaths.length / allTestCasePaths.size) * 100) : 0, }; } @@ -710,7 +714,9 @@ function detectProvarHome( if (typeof props['provarHome'] === 'string') { return { provarHome: props['provarHome'], provarHomeSource: `provardx-properties.json (${rel})` }; } - } catch { /* skip */ } + } catch { + /* skip */ + } } // 3. ANT build.xml — @@ -722,7 +728,9 @@ function detectProvarHome( content.match(/name=["']provarHome["'][^/]*value=["']([^"']+)["']/i) ?? content.match(/value=["']([^"']+)["'][^/]*name=["']provarHome["']/i); if (match?.[1]) return { provarHome: match[1], provarHomeSource: `ANT build file (${rel})` }; - } catch { /* skip */ } + } catch { + /* skip */ + } } return { provarHome: null, provarHomeSource: null }; @@ -740,7 +748,9 @@ function getTopLevelTestSuites(projectPath: string): string[] { for (const entry of entries) { if (entry.isDirectory() && !entry.name.startsWith('.')) suites.push(`${candidate}/${entry.name}`); } - } catch { /* skip */ } + } catch { + /* skip */ + } break; } return suites; @@ -769,7 +779,9 @@ function countFilesRecursive(dir: string, filter: (name: string) => boolean): nu count++; } } - } catch { /* skip */ } + } catch { + /* skip */ + } return count; } @@ -777,11 +789,7 @@ function countFilesRecursive(dir: string, filter: (name: string) => boolean): nu * General project walker — skips dot-files and node_modules. * Return false from visitor to skip recursion into a directory. 
*/ -function walkDir( - dir: string, - visitor: (filePath: string, isDir: boolean, name: string) => boolean, - depth = 0 -): void { +function walkDir(dir: string, visitor: (filePath: string, isDir: boolean, name: string) => boolean, depth = 0): void { if (depth > 10) return; let entries: fs.Dirent[]; try { diff --git a/src/mcp/tools/projectValidateFromPath.ts b/src/mcp/tools/projectValidateFromPath.ts index 5151f3cf..c1dc16ab 100644 --- a/src/mcp/tools/projectValidateFromPath.ts +++ b/src/mcp/tools/projectValidateFromPath.ts @@ -32,8 +32,7 @@ interface ViolationSummary { } function buildPlanSummary(plan: ValidatedPlan): PlanSummary { - const test_case_count = plan.suites.reduce((n, s) => n + s.test_cases.length, 0) - + plan.unplanned_test_cases.length; + const test_case_count = plan.suites.reduce((n, s) => n + s.test_cases.length, 0) + plan.unplanned_test_cases.length; return { name: plan.name, quality_score: plan.quality_score, @@ -69,9 +68,7 @@ function shapeResponse( const coverage = { ...coverageRest, uncovered_test_cases: uncovered_shown, - ...(uncovered_truncated - ? { uncovered_truncated: true, uncovered_total: uncovered_test_cases.length } - : {}), + ...(uncovered_truncated ? 
{ uncovered_truncated: true, uncovered_total: uncovered_test_cases.length } : {}), }; if (includePlanDetails) { @@ -107,7 +104,7 @@ function shapeResponse( export function registerProjectValidateFromPath(server: McpServer, config: ServerConfig): void { server.tool( - 'provar.project.validate', + 'provar_project_validate', [ 'Validate a Provar project directly from its directory on disk.', 'Reads the plan/suite/testinstance hierarchy from the plans/ directory,', @@ -124,17 +121,32 @@ export function registerProjectValidateFromPath(server: McpServer, config: Serve 'Pass a project_path and let this tool handle all file reading.', ].join(' '), { - project_path: z.string().describe('Absolute path to the Provar project root (the directory containing the .testproject file)'), - quality_threshold: z.number().min(0).max(100).optional().default(80).describe('Minimum quality score for a test case to be considered valid (default: 80)'), - save_results: z.boolean().optional().default(true).describe('Write a QH-compatible JSON report to provardx/validation/ (default: true)'), - results_dir: z.string().optional().describe('Override the output directory for the saved report (default: {project_path}/provardx/validation)'), + project_path: z + .string() + .describe('Absolute path to the Provar project root (the directory containing the .testproject file)'), + quality_threshold: z + .number() + .min(0) + .max(100) + .optional() + .default(80) + .describe('Minimum quality score for a test case to be considered valid (default: 80)'), + save_results: z + .boolean() + .optional() + .default(true) + .describe('Write a QH-compatible JSON report to provardx/validation/ (default: true)'), + results_dir: z + .string() + .optional() + .describe('Override the output directory for the saved report (default: {project_path}/provardx/validation)'), include_plan_details: z .boolean() .optional() .default(false) .describe( 'When true, include full per-suite and per-test-case violation data in the 
response. ' + - 'Default false to keep response small. Use only when you need to inspect specific test case failures.' + 'Default false to keep response small. Use only when you need to inspect specific test case failures.' ), max_uncovered: z .number() @@ -142,18 +154,30 @@ export function registerProjectValidateFromPath(server: McpServer, config: Serve .min(0) .optional() .default(20) - .describe('Maximum number of uncovered test case paths to include in the response (default: 20). Set to 0 for none, or a large number for all.'), + .describe( + 'Maximum number of uncovered test case paths to include in the response (default: 20). Set to 0 for none, or a large number for all.' + ), max_violations: z .number() .int() .min(0) .optional() .default(50) - .describe('When include_plan_details:true, caps project_violations returned (default: 50). Ignored in slim mode where violations are grouped by rule_id instead.'), + .describe( + 'When include_plan_details:true, caps project_violations returned (default: 50). Ignored in slim mode where violations are grouped by rule_id instead.' 
+ ), }, - ({ project_path, quality_threshold, save_results, results_dir, include_plan_details, max_uncovered, max_violations }) => { + ({ + project_path, + quality_threshold, + save_results, + results_dir, + include_plan_details, + max_uncovered, + max_violations, + }) => { const requestId = makeRequestId(); - log('info', 'provar.project.validate', { requestId, project_path, include_plan_details }); + log('info', 'provar_project_validate', { requestId, project_path, include_plan_details }); try { assertPathAllowed(project_path, config.allowedPaths); @@ -167,7 +191,7 @@ export function registerProjectValidateFromPath(server: McpServer, config: Serve }); if (result.save_error) { - log('warn', 'provar.project.validate: could not save results', { requestId, error: result.save_error }); + log('warn', 'provar_project_validate: could not save results', { requestId, error: result.save_error }); } const shaped = shapeResponse(result, include_plan_details, max_uncovered, max_violations); @@ -179,14 +203,15 @@ export function registerProjectValidateFromPath(server: McpServer, config: Serve }; } catch (err: unknown) { const error = err as Error & { code?: string }; - const code = error instanceof PathPolicyError - ? error.code - : error instanceof ProjectValidationError + const code = + error instanceof PathPolicyError + ? error.code + : error instanceof ProjectValidationError ? error.code - : (error.code ?? 'VALIDATE_ERROR'); + : error.code ?? 
'VALIDATE_ERROR'; const isUserError = error instanceof PathPolicyError || error instanceof ProjectValidationError; const errResult = makeError(code, error.message, requestId, !isUserError); - log('error', 'provar.project.validate failed', { requestId, error: error.message }); + log('error', 'provar_project_validate failed', { requestId, error: error.message }); return { isError: true, content: [{ type: 'text' as const, text: JSON.stringify(errResult) }] }; } } diff --git a/src/mcp/tools/propertiesTools.ts b/src/mcp/tools/propertiesTools.ts index 7dd6cc2c..fbc57db0 100644 --- a/src/mcp/tools/propertiesTools.ts +++ b/src/mcp/tools/propertiesTools.ts @@ -156,16 +156,16 @@ function deepMerge(target: Record, source: Record { const requestId = makeRequestId(); - log('info', 'provar.properties.generate', { requestId, output_path }); + log('info', 'provar_properties_generate', { requestId, output_path }); try { assertPathAllowed(output_path, config.allowedPaths); @@ -231,8 +231,8 @@ export function registerPropertiesGenerate(server: McpServer, config: ServerConf } const nextSteps = dry_run - ? 'Review the content, write to disk, then run provar.automation.config.load to register this file before compiling or running tests.' - : `Run provar.automation.config.load with properties_path "${resolved}" to register this configuration. Required before provar.automation.compile or provar.automation.testrun will work.`; + ? 'Review the content, write to disk, then run provar_automation_config_load to register this file before compiling or running tests.' + : `Run provar_automation_config_load with properties_path "${resolved}" to register this configuration. Required before provar_automation_compile or provar_automation_testrun will work.`; const response = { requestId, @@ -312,22 +312,22 @@ function buildDivergenceWarning( .join(', '); return ( `The file you read (${diskPath}) differs from the active sf config (${activePath}) on: ${details}. 
` + - 'Test runs use the active config values — run provar.automation.config.load with the correct file to sync.' + 'Test runs use the active config values — run provar_automation_config_load with the correct file to sync.' ); } -// ── provar.properties.read ──────────────────────────────────────────────────── +// ── provar_properties_read ──────────────────────────────────────────────────── export function registerPropertiesRead(server: McpServer, config: ServerConfig): void { server.tool( - 'provar.properties.read', - 'Read and parse a provardx-properties.json file. Returns the parsed content so you can inspect current settings before making changes with provar.properties.set.', + 'provar_properties_read', + 'Read and parse a provardx-properties.json file. Returns the parsed content so you can inspect current settings before making changes with provar_properties_set.', { file_path: z.string().describe('Path to the provardx-properties.json file'), }, ({ file_path }) => { const requestId = makeRequestId(); - log('info', 'provar.properties.read', { requestId, file_path }); + log('info', 'provar_properties_read', { requestId, file_path }); try { assertPathAllowed(file_path, config.allowedPaths); @@ -342,7 +342,7 @@ export function registerPropertiesRead(server: McpServer, config: ServerConfig): text: JSON.stringify( makeError( 'PROPERTIES_FILE_NOT_FOUND', - `Properties file not found: ${resolved}. Use provar.properties.generate to create it.`, + `Properties file not found: ${resolved}. 
Use provar_properties_generate to create it.`, requestId ) ), @@ -410,7 +410,7 @@ export function registerPropertiesRead(server: McpServer, config: ServerConfig): ); } -// ── provar.properties.set ───────────────────────────────────────────────────── +// ── provar_properties_set ───────────────────────────────────────────────────── const updatesSchema = z .object({ @@ -456,7 +456,7 @@ const updatesSchema = z .array(z.string()) .optional() .describe( - 'Specific test case file paths to run (relative to projectPath/tests/). NOTE: data-driven iteration does NOT work in this mode — data table variables resolve as null. To run data-driven tests, add the test case to a plan with provar.testplan.add-instance and run via testPlan instead.' + 'Specific test case file paths to run (relative to projectPath/tests/). NOTE: data-driven iteration does NOT work in this mode — data table variables resolve as null. To run data-driven tests, add the test case to a plan with provar_testplan_add-instance and run via testPlan instead.' 
), testPlan: z.array(z.string()).optional().describe('Test plan names to run (wildcards permitted)'), connectionOverride: z @@ -473,13 +473,13 @@ const updatesSchema = z export function registerPropertiesSet(server: McpServer, config: ServerConfig): void { server.tool( - 'provar.properties.set', + 'provar_properties_set', [ 'Update one or more fields in a provardx-properties.json file.', 'Only the provided fields are changed — all other fields are preserved.', 'Object fields (environment, metadata) are deep-merged.', 'Array fields (testCase, testPlan, connectionOverride) replace the existing value entirely.', - 'Use provar.properties.read first to inspect the current state.', + 'Use provar_properties_read first to inspect the current state.', ].join(' '), { file_path: z.string().describe('Path to the provardx-properties.json file to update'), @@ -487,7 +487,7 @@ export function registerPropertiesSet(server: McpServer, config: ServerConfig): }, ({ file_path, updates }) => { const requestId = makeRequestId(); - log('info', 'provar.properties.set', { requestId, file_path }); + log('info', 'provar_properties_set', { requestId, file_path }); try { assertPathAllowed(file_path, config.allowedPaths); @@ -502,7 +502,7 @@ export function registerPropertiesSet(server: McpServer, config: ServerConfig): text: JSON.stringify( makeError( 'PROPERTIES_FILE_NOT_FOUND', - `File not found: ${resolved}. Use provar.properties.generate to create it first.`, + `File not found: ${resolved}. 
Use provar_properties_generate to create it first.`, requestId ) ), @@ -559,11 +559,11 @@ export function registerPropertiesSet(server: McpServer, config: ServerConfig): ); } -// ── provar.properties.validate ──────────────────────────────────────────────── +// ── provar_properties_validate ──────────────────────────────────────────────── export function registerPropertiesValidate(server: McpServer, config: ServerConfig): void { server.tool( - 'provar.properties.validate', + 'provar_properties_validate', [ 'Validate a provardx-properties.json file against the ProvarDX schema.', 'Checks required fields, valid enum values, and warns about unfilled placeholder values.', @@ -575,7 +575,7 @@ export function registerPropertiesValidate(server: McpServer, config: ServerConf }, ({ file_path, content }) => { const requestId = makeRequestId(); - log('info', 'provar.properties.validate', { requestId, file_path }); + log('info', 'provar_properties_validate', { requestId, file_path }); if (!file_path && !content) { return { diff --git a/src/mcp/tools/qualityHubApiTools.ts b/src/mcp/tools/qualityHubApiTools.ts index 5e9f2f94..3321c205 100644 --- a/src/mcp/tools/qualityHubApiTools.ts +++ b/src/mcp/tools/qualityHubApiTools.ts @@ -42,14 +42,14 @@ const CORPUS_UNREACHABLE_WARNING = 'Check your network connection or try again later.\n' + CORPUS_FALLBACK_HINT; -// ── Tool: provar.qualityhub.examples.retrieve ───────────────────────────────── +// ── Tool: provar_qualityhub_examples_retrieve ───────────────────────────────── export function registerCorpusExamplesRetrieve(server: McpServer): void { server.tool( - 'provar.qualityhub.examples.retrieve', + 'provar_qualityhub_examples_retrieve', [ 'Retrieve N similar Provar test case examples from the Quality Hub corpus (1000+ tests in Bedrock KB).', - 'Use this BEFORE writing any Provar .testcase XML — whether via provar.testcase.generate, Write, or Edit.', + 'Use this BEFORE writing any Provar .testcase XML — whether via 
provar_testcase_generate, Write, or Edit.', 'Pass a user story, requirement, source test file content, or step type keywords as the query.', 'Returns up to N example Provar XML test cases ordered by similarity score.', 'If retrieval fails (no auth, network error, rate limit), returns empty examples with a warning — the', @@ -91,7 +91,7 @@ export function registerCorpusExamplesRetrieve(server: McpServer): void { }, async ({ query, n, app_filter, prefer_high_quality }) => { const requestId = makeRequestId(); - log('info', 'provar.qualityhub.examples.retrieve', { requestId, query_length: query.length, n, app_filter }); + log('info', 'provar_qualityhub_examples_retrieve', { requestId, query_length: query.length, n, app_filter }); if (!query || query.trim().length === 0) { return { @@ -108,7 +108,7 @@ export function registerCorpusExamplesRetrieve(server: McpServer): void { const apiKey = credentialsService.resolveApiKey(); if (!apiKey) { - log('warn', 'provar.qualityhub.examples.retrieve: no api key', { requestId }); + log('warn', 'provar_qualityhub_examples_retrieve: no api key', { requestId }); const result = { requestId, examples: [], @@ -129,10 +129,10 @@ export function registerCorpusExamplesRetrieve(server: McpServer): void { }); if (response.query_truncated) { - log('warn', 'provar.qualityhub.examples.retrieve: query truncated', { requestId }); + log('warn', 'provar_qualityhub_examples_retrieve: query truncated', { requestId }); } - log('info', 'provar.qualityhub.examples.retrieve: success', { + log('info', 'provar_qualityhub_examples_retrieve: success', { requestId, retrieval_id: response.retrieval_id, count: response.count, @@ -145,14 +145,14 @@ export function registerCorpusExamplesRetrieve(server: McpServer): void { let warning: string; if (err instanceof QualityHubAuthError) { warning = CORPUS_AUTH_WARNING; - log('warn', 'provar.qualityhub.examples.retrieve: auth error', { requestId }); + log('warn', 'provar_qualityhub_examples_retrieve: auth error', { 
requestId }); } else if (err instanceof QualityHubRateLimitError) { warning = CORPUS_RATE_LIMIT_WARNING; - log('warn', 'provar.qualityhub.examples.retrieve: rate limited', { requestId }); + log('warn', 'provar_qualityhub_examples_retrieve: rate limited', { requestId }); } else { warning = CORPUS_UNREACHABLE_WARNING; const errMsg = (err as Error).message.slice(0, 200); - log('warn', 'provar.qualityhub.examples.retrieve: api error', { requestId, error: errMsg }); + log('warn', 'provar_qualityhub_examples_retrieve: api error', { requestId, error: errMsg }); } // Degrade gracefully — never isError:true. The LLM continues without grounding. diff --git a/src/mcp/tools/qualityHubTools.ts b/src/mcp/tools/qualityHubTools.ts index a1b81201..ad0c40eb 100644 --- a/src/mcp/tools/qualityHubTools.ts +++ b/src/mcp/tools/qualityHubTools.ts @@ -30,11 +30,11 @@ function handleSpawnError( }; } -// ── Tool: provar.qualityhub.connect ─────────────────────────────────────────── +// ── Tool: provar_qualityhub_connect ─────────────────────────────────────────── export function registerQualityHubConnect(server: McpServer): void { server.tool( - 'provar.qualityhub.connect', + 'provar_qualityhub_connect', 'Connect to a Provar Quality Hub org. 
Invokes `sf provar quality-hub connect` with the supplied flags.', { target_org: z.string().describe('SF org alias or username to connect as'), @@ -46,7 +46,7 @@ export function registerQualityHubConnect(server: McpServer): void { }, ({ target_org, flags }) => { const requestId = makeRequestId(); - log('info', 'provar.qualityhub.connect', { requestId, target_org }); + log('info', 'provar_qualityhub_connect', { requestId, target_org }); try { const result = runSfCommand(['provar', 'quality-hub', 'connect', '--target-org', target_org, ...flags]); @@ -66,17 +66,17 @@ export function registerQualityHubConnect(server: McpServer): void { return { content: [{ type: 'text' as const, text: JSON.stringify(response) }], structuredContent: response }; } catch (err) { - return handleSpawnError(err, requestId, 'provar.qualityhub.connect'); + return handleSpawnError(err, requestId, 'provar_qualityhub_connect'); } } ); } -// ── Tool: provar.qualityhub.display ────────────────────────────────────────── +// ── Tool: provar_qualityhub_display ────────────────────────────────────────── export function registerQualityHubDisplay(server: McpServer): void { server.tool( - 'provar.qualityhub.display', + 'provar_qualityhub_display', 'Display connected Quality Hub org info. 
Invokes `sf provar quality-hub display`.', { target_org: z.string().optional().describe('SF org alias or username (uses default if omitted)'), @@ -84,7 +84,7 @@ export function registerQualityHubDisplay(server: McpServer): void { }, ({ target_org, flags }) => { const requestId = makeRequestId(); - log('info', 'provar.qualityhub.display', { requestId, target_org }); + log('info', 'provar_qualityhub_display', { requestId, target_org }); try { const args = ['provar', 'quality-hub', 'display', ...flags]; @@ -107,13 +107,13 @@ export function registerQualityHubDisplay(server: McpServer): void { return { content: [{ type: 'text' as const, text: JSON.stringify(response) }], structuredContent: response }; } catch (err) { - return handleSpawnError(err, requestId, 'provar.qualityhub.display'); + return handleSpawnError(err, requestId, 'provar_qualityhub_display'); } } ); } -// ── Tool: provar.qualityhub.testrun ────────────────────────────────────────── +// ── Tool: provar_qualityhub_testrun ────────────────────────────────────────── function detectWildcardFlags(flags: string[]): string | undefined { for (let i = 0; i < flags.length - 1; i++) { @@ -133,7 +133,7 @@ function detectWildcardFlags(flags: string[]): string | undefined { export function registerQualityHubTestRun(server: McpServer): void { server.tool( - 'provar.qualityhub.testrun', + 'provar_qualityhub_testrun', 'Trigger a Quality Hub test run. Invokes `sf provar quality-hub test run`. ' + 'Warning: wildcard characters (* or ?) 
in flag values will cause QH plan-level reporting to be skipped — use exact plan names.', { @@ -148,7 +148,7 @@ export function registerQualityHubTestRun(server: McpServer): void { }, ({ target_org, flags }) => { const requestId = makeRequestId(); - log('info', 'provar.qualityhub.testrun', { requestId, target_org }); + log('info', 'provar_qualityhub_testrun', { requestId, target_org }); try { const wildcardWarning = detectWildcardFlags(flags); @@ -178,26 +178,26 @@ export function registerQualityHubTestRun(server: McpServer): void { return { content: [{ type: 'text' as const, text: JSON.stringify(response) }], structuredContent: response }; } catch (err) { - return handleSpawnError(err, requestId, 'provar.qualityhub.testrun'); + return handleSpawnError(err, requestId, 'provar_qualityhub_testrun'); } } ); } -// ── Tool: provar.qualityhub.testrun.report ──────────────────────────────────── +// ── Tool: provar_qualityhub_testrun_report ──────────────────────────────────── export function registerQualityHubTestRunReport(server: McpServer): void { server.tool( - 'provar.qualityhub.testrun.report', + 'provar_qualityhub_testrun_report', 'Poll the status of a Quality Hub test run. 
Invokes `sf provar quality-hub test run report`.', { target_org: z.string().describe('SF org alias or username'), - run_id: z.string().describe('Test run ID returned by provar.qualityhub.testrun'), + run_id: z.string().describe('Test run ID returned by provar_qualityhub_testrun'), flags: z.array(z.string()).optional().default([]).describe('Additional raw CLI flags'), }, ({ target_org, run_id, flags }) => { const requestId = makeRequestId(); - log('info', 'provar.qualityhub.testrun.report', { requestId, target_org, run_id }); + log('info', 'provar_qualityhub_testrun_report', { requestId, target_org, run_id }); try { const result = runSfCommand([ @@ -238,7 +238,7 @@ export function registerQualityHubTestRunReport(server: McpServer): void { hasFailures = normalizedStatus !== undefined && failureStatuses.has(normalizedStatus); } const suggestion = hasFailures - ? 'Failures detected. Use provar.qualityhub.defect.create with run_id and target_org to automatically create Defect__c records for each failure (syncs to Jira/ADO if configured).' + ? 'Failures detected. Use provar_qualityhub_defect_create with run_id and target_org to automatically create Defect__c records for each failure (syncs to Jira/ADO if configured).' 
: ''; const responseWithSuggestion = { ...response, suggestion: suggestion || undefined }; @@ -247,17 +247,17 @@ export function registerQualityHubTestRunReport(server: McpServer): void { structuredContent: responseWithSuggestion, }; } catch (err) { - return handleSpawnError(err, requestId, 'provar.qualityhub.testrun.report'); + return handleSpawnError(err, requestId, 'provar_qualityhub_testrun_report'); } } ); } -// ── Tool: provar.qualityhub.testrun.abort ───────────────────────────────────── +// ── Tool: provar_qualityhub_testrun_abort ───────────────────────────────────── export function registerQualityHubTestRunAbort(server: McpServer): void { server.tool( - 'provar.qualityhub.testrun.abort', + 'provar_qualityhub_testrun_abort', 'Abort an in-progress Quality Hub test run. Invokes `sf provar quality-hub test run abort`.', { target_org: z.string().describe('SF org alias or username'), @@ -266,7 +266,7 @@ export function registerQualityHubTestRunAbort(server: McpServer): void { }, ({ target_org, run_id, flags }) => { const requestId = makeRequestId(); - log('info', 'provar.qualityhub.testrun.abort', { requestId, target_org, run_id }); + log('info', 'provar_qualityhub_testrun_abort', { requestId, target_org, run_id }); try { const result = runSfCommand([ @@ -297,17 +297,17 @@ export function registerQualityHubTestRunAbort(server: McpServer): void { return { content: [{ type: 'text' as const, text: JSON.stringify(response) }], structuredContent: response }; } catch (err) { - return handleSpawnError(err, requestId, 'provar.qualityhub.testrun.abort'); + return handleSpawnError(err, requestId, 'provar_qualityhub_testrun_abort'); } } ); } -// ── Tool: provar.qualityhub.testcase.retrieve ───────────────────────────────── +// ── Tool: provar_qualityhub_testcase_retrieve ───────────────────────────────── export function registerQualityHubTestcaseRetrieve(server: McpServer): void { server.tool( - 'provar.qualityhub.testcase.retrieve', + 
'provar_qualityhub_testcase_retrieve', 'Retrieve Quality Hub test cases by user story or component. Invokes `sf provar quality-hub testcase retrieve`.', { target_org: z.string().describe('SF org alias or username'), @@ -319,7 +319,7 @@ export function registerQualityHubTestcaseRetrieve(server: McpServer): void { }, ({ target_org, flags }) => { const requestId = makeRequestId(); - log('info', 'provar.qualityhub.testcase.retrieve', { requestId, target_org }); + log('info', 'provar_qualityhub_testcase_retrieve', { requestId, target_org }); try { const result = runSfCommand([ @@ -347,7 +347,7 @@ export function registerQualityHubTestcaseRetrieve(server: McpServer): void { return { content: [{ type: 'text' as const, text: JSON.stringify(response) }], structuredContent: response }; } catch (err) { - return handleSpawnError(err, requestId, 'provar.qualityhub.testcase.retrieve'); + return handleSpawnError(err, requestId, 'provar_qualityhub_testcase_retrieve'); } } ); diff --git a/src/mcp/tools/rcaTools.ts b/src/mcp/tools/rcaTools.ts index 564b3362..1b4e4813 100644 --- a/src/mcp/tools/rcaTools.ts +++ b/src/mcp/tools/rcaTools.ts @@ -121,7 +121,7 @@ const RCA_RULES: RcaRule[] = [ category: 'MISSING_CALLABLE', pattern: /caseCall.*cannot.*resolv|callable.*not.*found/i, summary: 'caseCall references unresolvable callable', - recommendation: 'Use provar.project.validate to diagnose PROJ-CALLABLE violations', + recommendation: 'Use provar_project_validate to diagnose PROJ-CALLABLE violations', }, { category: 'METADATA_CACHE', @@ -133,7 +133,7 @@ const RCA_RULES: RcaRule[] = [ category: 'PAGE_OBJECT_COMPILE', pattern: /ClassNotFoundException|CompilationException/i, summary: 'Page object class not compiled', - recommendation: 'Run provar.automation.compile before testrun', + recommendation: 'Run provar_automation_compile before testrun', }, { category: 'CONNECTION_REFUSED', @@ -388,11 +388,11 @@ function resolveResultsLocation( }; } -// ── provar.testrun.report.locate tool 
───────────────────────────────────────── +// ── provar_testrun_report_locate tool ───────────────────────────────────────── export function registerTestRunLocate(server: McpServer): void { server.tool( - 'provar.testrun.report.locate', + 'provar_testrun_report_locate', [ 'Resolve exactly where Provar test run artifacts were written, without parsing them.', 'Returns the results directory, paths to JUnit.xml and Index.html if they exist,', @@ -414,7 +414,7 @@ export function registerTestRunLocate(server: McpServer): void { }, (input) => { const requestId = makeRequestId(); - log('info', 'provar.testrun.report.locate', { requestId }); + log('info', 'provar_testrun_report_locate', { requestId }); try { const resolved = resolveResultsLocation(input.project_path, input.results_path, input.run_index); @@ -472,7 +472,7 @@ export function registerTestRunLocate(server: McpServer): void { } catch (err: unknown) { const error = err as Error; const errResult = makeError('LOCATE_ERROR', error.message, requestId); - log('error', 'provar.testrun.report.locate failed', { requestId, error: error.message }); + log('error', 'provar_testrun_report_locate failed', { requestId, error: error.message }); return { isError: true, content: [{ type: 'text' as const, text: JSON.stringify(errResult) }] }; } } @@ -665,11 +665,11 @@ function buildFailureReports( return reports; } -// ── provar.testrun.rca tool ─────────────────────────────────────────────────── +// ── provar_testrun_rca tool ─────────────────────────────────────────────────── export function registerTestRunRca(server: McpServer, config: ServerConfig): void { server.tool( - 'provar.testrun.rca', + 'provar_testrun_rca', [ 'Parse a completed Provar test run and produce a structured Root Cause Analysis (RCA) report.', 'Resolves the results directory, parses JUnit.xml, classifies each failure by category,', @@ -706,7 +706,7 @@ export function registerTestRunRca(server: McpServer, config: ServerConfig): voi }, (input) => { const 
requestId = makeRequestId(); - log('info', 'provar.testrun.rca', { requestId, locate_only: input.locate_only, mode: input.mode }); + log('info', 'provar_testrun_rca', { requestId, locate_only: input.locate_only, mode: input.mode }); try { // ── Path policy ────────────────────────────────────────────────────── @@ -853,7 +853,7 @@ export function registerTestRunRca(server: McpServer, config: ServerConfig): voi } catch (err: unknown) { const error = err as Error; const errResult = makeError('RCA_ERROR', error.message, requestId); - log('error', 'provar.testrun.rca failed', { requestId, error: error.message }); + log('error', 'provar_testrun_rca failed', { requestId, error: error.message }); return { isError: true, content: [{ type: 'text' as const, text: JSON.stringify(errResult) }] }; } } diff --git a/src/mcp/tools/testCaseGenerate.ts b/src/mcp/tools/testCaseGenerate.ts index d7dff526..28df1c87 100644 --- a/src/mcp/tools/testCaseGenerate.ts +++ b/src/mcp/tools/testCaseGenerate.ts @@ -127,7 +127,7 @@ const TOOL_DESCRIPTION = [ 'Shorthand XML attributes on are silently ignored at runtime; always supply arguments via the attributes map.', 'Data-driven note: only iterates rows when the test case runs via a test plan instance (.testinstance).', 'Running directly via the provardx testCase property resolves all data table variables as null.', - 'Use provar.testplan.add-instance to wire into a plan for data-driven execution.', + 'Use provar_testplan_add-instance to wire into a plan for data-driven execution.', 'ApexReadObject requires field names in attributes; omitting them produces MALFORMED_QUERY. 
Prefer ApexSoqlQuery.', 'AssertValues on SOQL results: index paths like "ResultList[0].Field" are not supported.', 'Use ForEach to iterate the result list, or SetValues to extract a field into a variable first.', @@ -140,12 +140,12 @@ const TOOL_DESCRIPTION = [ 'locator argument (UiDoAction/UiAssert): pass the URI value; emitted as class="uiLocator" uri="...".', 'Cleanup warning: ApexDeleteObject steps near end of test will be skipped if an earlier step fails (stopOnError=false). Use a TearDown callable.', 'Validation: when validate_after_edit=true (default) the response includes a validation field and returns TESTCASE_INVALID if the generated XML fails structural checks.', - 'Grounding: call provar.qualityhub.examples.retrieve before generating to get corpus examples for the scenario — correct XML structure for the step types you need.', + 'Grounding: call provar_qualityhub_examples_retrieve before generating to get corpus examples for the scenario — correct XML structure for the step types you need.', ].join(' '); export function registerTestCaseGenerate(server: McpServer, config: ServerConfig): void { server.tool( - 'provar.testcase.generate', + 'provar_testcase_generate', TOOL_DESCRIPTION, { test_case_name: z.string().describe('Test case name (human-readable label)'), @@ -175,7 +175,7 @@ export function registerTestCaseGenerate(server: McpServer, config: ServerConfig }, (input) => { const requestId = makeRequestId(); - log('info', 'provar.testcase.generate', { + log('info', 'provar_testcase_generate', { requestId, test_case_name: input.test_case_name, dry_run: input.dry_run, @@ -202,7 +202,7 @@ export function registerTestCaseGenerate(server: McpServer, config: ServerConfig fs.mkdirSync(path.dirname(filePath), { recursive: true }); fs.writeFileSync(filePath, xmlContent, 'utf-8'); written = true; - log('info', 'provar.testcase.generate: wrote file', { requestId, filePath }); + log('info', 'provar_testcase_generate: wrote file', { requestId, filePath }); } const 
warnings = buildStepWarnings(input.steps); @@ -236,7 +236,7 @@ export function registerTestCaseGenerate(server: McpServer, config: ServerConfig false, { validation: validationSlim } ); - log('warn', 'provar.testcase.generate: TESTCASE_INVALID', { requestId }); + log('warn', 'provar_testcase_generate: TESTCASE_INVALID', { requestId }); return { isError: true, content: [{ type: 'text' as const, text: JSON.stringify(errResult) }] }; } const result = { ...baseResult, validation: validationSlim }; @@ -258,7 +258,7 @@ export function registerTestCaseGenerate(server: McpServer, config: ServerConfig requestId, false ); - log('error', 'provar.testcase.generate failed', { requestId, error: error.message }); + log('error', 'provar_testcase_generate failed', { requestId, error: error.message }); return { isError: true, content: [{ type: 'text' as const, text: JSON.stringify(errResult) }] }; } } @@ -299,11 +299,7 @@ function buildArgumentsXml(attributes: Record, baseIndent = ' const argLines = entries .map(([k, v]) => { const valueXml = buildArgumentValue(k, v, `${baseIndent} `, false, apiId); - return ( - `${baseIndent}\n` + - valueXml + '\n' + - `${baseIndent}` - ); + return `${baseIndent}\n` + valueXml + '\n' + `${baseIndent}`; }) .join('\n'); return `\n${baseIndent}\n${argLines}\n${baseIndent}\n${baseIndent.slice(0, -2)}`; @@ -325,7 +321,8 @@ function buildSetValuesXml(attributes: Record, baseIndent: strin `${i(0)}\n` + `${i(1)}\n` + `${i(2)}\n` + - namedValueLines + '\n' + + namedValueLines + + '\n' + `${i(2)}\n` + `${i(1)}\n` + `${i(0)}\n` + @@ -379,9 +376,10 @@ function buildTestCaseXml(input: { } return ( - '\n' + + '\n' + `\n` + + ' \n' + ' \n' + stepLines + '\n \n' + @@ -406,7 +404,11 @@ function buildUiWithScreenXml( ' \n '; return ( ` ${buildArgumentsXml({ target: targetUri }, ' ', wrapperApiId).trimEnd()}${clausesXml}` + ` name="With page" testItemId="1">${buildArgumentsXml( + { target: targetUri }, + ' ', + wrapperApiId + ).trimEnd()}${clausesXml}` ); } diff --git 
a/src/mcp/tools/testCaseStepTools.ts b/src/mcp/tools/testCaseStepTools.ts index 7ba6e080..46ca11f1 100644 --- a/src/mcp/tools/testCaseStepTools.ts +++ b/src/mcp/tools/testCaseStepTools.ts @@ -83,7 +83,7 @@ function parseNewStep(stepXml: string): { step: ApiCallNode } | { error: string export function registerTestCaseStepEdit(server: McpServer, config: ServerConfig): void { server.tool( - 'provar.testcase.step.edit', + 'provar_testcase_step_edit', [ 'Add or remove a single step (apiCall) in a Provar XML test case file.', 'Uses write-to-temp-then-rename to minimise partial-write risk.', @@ -118,11 +118,11 @@ export function registerTestCaseStepEdit(server: McpServer, config: ServerConfig .boolean() .optional() .default(true) - .describe('Run provar.testcase.validate after the mutation; restores backup on failure (default: true)'), + .describe('Run provar_testcase_validate after the mutation; restores backup on failure (default: true)'), }, (input) => { const requestId = makeRequestId(); - log('info', 'provar.testcase.step.edit', { requestId, mode: input.mode, test_item_id: input.test_item_id }); + log('info', 'provar_testcase_step_edit', { requestId, mode: input.mode, test_item_id: input.test_item_id }); try { const resolvedPath = path.resolve(input.test_case_path); @@ -255,7 +255,7 @@ export function registerTestCaseStepEdit(server: McpServer, config: ServerConfig error.message, requestId ); - log('error', 'provar.testcase.step.edit failed', { requestId, error: error.message }); + log('error', 'provar_testcase_step_edit failed', { requestId, error: error.message }); return { isError: true, content: [{ type: 'text' as const, text: JSON.stringify(errResult) }] }; } } diff --git a/src/mcp/tools/testCaseValidate.ts b/src/mcp/tools/testCaseValidate.ts index 34a6691f..35035cf4 100644 --- a/src/mcp/tools/testCaseValidate.ts +++ b/src/mcp/tools/testCaseValidate.ts @@ -43,7 +43,7 @@ const UNREACHABLE_WARNING = export function registerTestCaseValidate(server: McpServer, 
config: ServerConfig): void { server.tool( - 'provar.testcase.validate', + 'provar_testcase_validate', 'Validate a Provar XML test case for structural correctness and quality. Checks XML declaration, root element, required attributes (guid UUID v4, testItemId integer), presence, and applies best-practice rules. When a Provar API key is configured (via sf provar auth login or PROVAR_API_KEY env var), calls the Quality Hub API for full 170-rule scoring. Falls back to local validation if no key is set or the API is unavailable. Returns validity_score (schema compliance), quality_score (best practices, 0–100), and validation_source indicating which ruleset was applied.', { content: z.string().optional().describe('XML content to validate directly (alias: xml)'), @@ -52,7 +52,7 @@ export function registerTestCaseValidate(server: McpServer, config: ServerConfig }, async ({ content, xml, file_path }) => { const requestId = makeRequestId(); - log('info', 'provar.testcase.validate', { requestId, has_content: !!(content ?? xml), file_path }); + log('info', 'provar_testcase_validate', { requestId, has_content: !!(content ?? 
xml), file_path }); try { // Resolve xml alias: the batch validation API uses "xml", MCP originally used "content" @@ -90,7 +90,7 @@ export function registerTestCaseValidate(server: McpServer, config: ServerConfig test_case_name: localMeta.test_case_name, validation_source: 'quality_hub' as const, }; - log('info', 'provar.testcase.validate: quality_hub', { requestId }); + log('info', 'provar_testcase_validate: quality_hub', { requestId }); return { content: [{ type: 'text' as const, text: JSON.stringify(result) }], structuredContent: result, @@ -100,13 +100,13 @@ export function registerTestCaseValidate(server: McpServer, config: ServerConfig let warning: string; if (apiErr instanceof QualityHubAuthError) { warning = AUTH_WARNING; - log('warn', 'provar.testcase.validate: auth error, falling back', { requestId }); + log('warn', 'provar_testcase_validate: auth error, falling back', { requestId }); } else if (apiErr instanceof QualityHubRateLimitError) { warning = RATE_LIMIT_WARNING; - log('warn', 'provar.testcase.validate: rate limited, falling back', { requestId }); + log('warn', 'provar_testcase_validate: rate limited, falling back', { requestId }); } else { warning = UNREACHABLE_WARNING; - log('warn', 'provar.testcase.validate: api unreachable, falling back', { requestId }); + log('warn', 'provar_testcase_validate: api unreachable, falling back', { requestId }); } const localResult = { requestId, @@ -140,7 +140,7 @@ export function registerTestCaseValidate(server: McpServer, config: ServerConfig requestId, false ); - log('error', 'provar.testcase.validate failed', { requestId, error: error.message }); + log('error', 'provar_testcase_validate failed', { requestId, error: error.message }); return { isError: true, content: [{ type: 'text' as const, text: JSON.stringify(errResult) }] }; } } @@ -323,7 +323,11 @@ export function validateTestCase(xmlContent: string, testName?: string): TestCas severity: 'WARNING', message: `Argument value "{${varMatch[1]}}" looks like a 
variable reference but is stored as a plain string — Provar will not resolve it at runtime.`, applies_to: 'argument', - suggestion: `Replace with . In provar.testcase.generate, use the {VarName} syntax in the attributes object — the generator converts it automatically.`, + suggestion: `Replace with . In provar_testcase_generate, use the {VarName} syntax in the attributes object — the generator converts it automatically.`, }); } @@ -403,7 +407,12 @@ function validateApiCall(call: Record, issues: ValidationIssue[ if (apiId) validateApiCallArgs(call, apiId, name, issues); } -function checkUiTarget(call: Record, apiId: string, stepName: string, issues: ValidationIssue[]): void { +function checkUiTarget( + call: Record, + apiId: string, + stepName: string, + issues: ValidationIssue[] +): void { const targetArg = getArgList(call).find((a) => (a['@_id'] as string | undefined) === 'target'); if (!targetArg) return; const valueNode = targetArg['value'] as Record | undefined; @@ -414,11 +423,13 @@ function checkUiTarget(call: Record, apiId: string, stepName: s issues.push({ rule_id: 'UI-TARGET-001', severity: 'ERROR', - message: `${apiLabel} step "${stepName}" target argument uses class="${valClass ?? '(missing)'}" — must be class="uiTarget".`, + message: `${apiLabel} step "${stepName}" target argument uses class="${ + valClass ?? '(missing)' + }" — must be class="uiTarget".`, applies_to: 'apiCall', suggestion: 'Emit the target as: or uri="ui:pageobject:target?pageId=...". ' + - 'In provar.testcase.generate the "target" attribute is converted automatically.', + 'In provar_testcase_generate the "target" attribute is converted automatically.', }); } } @@ -450,11 +461,13 @@ function validateApiCallArgs( issues.push({ rule_id: 'UI-LOCATOR-001', severity: 'ERROR', - message: `"${stepName}" locator argument uses class="${valClass ?? '(missing)'}" — must be class="uiLocator".`, + message: `"${stepName}" locator argument uses class="${ + valClass ?? 
'(missing)' + }" — must be class="uiLocator".`, applies_to: 'apiCall', suggestion: 'Emit the locator as: . ' + - 'In provar.testcase.generate the "locator" attribute is converted automatically.', + 'In provar_testcase_generate the "locator" attribute is converted automatically.', }); } } @@ -474,12 +487,14 @@ function validateApiCallArgs( issues.push({ rule_id: 'SETVALUES-STRUCTURE-001', severity: 'ERROR', - message: `SetValues step "${stepName}" values argument uses class="${valClass ?? '(missing)'}" — must use class="valueList" with children.`, + message: `SetValues step "${stepName}" values argument uses class="${ + valClass ?? '(missing)' + }" — must use class="valueList" with children.`, applies_to: 'apiCall', suggestion: 'Wrap variable assignments in: ' + 'value' + - '. In provar.testcase.generate pass each variable as a flat key/value pair ' + + '. In provar_testcase_generate pass each variable as a flat key/value pair ' + 'in attributes — the generator builds the valueList structure automatically.', }); } diff --git a/src/mcp/tools/testPlanTools.ts b/src/mcp/tools/testPlanTools.ts index d165c4ca..2c567392 100644 --- a/src/mcp/tools/testPlanTools.ts +++ b/src/mcp/tools/testPlanTools.ts @@ -57,14 +57,14 @@ function buildPlanItemXml(guid: string): string { ].join('\n'); } -// ── provar.testplan.create ──────────────────────────────────────────────────── +// ── provar_testplan_create ──────────────────────────────────────────────────── export function registerTestPlanCreate(server: McpServer, config: ServerConfig): void { server.tool( - 'provar.testplan.create', + 'provar_testplan_create', [ 'Create a new Provar test plan: makes the plans/{plan_name}/ directory and writes the root .planitem file.', - 'Use this before provar.testplan.create-suite or provar.testplan.add-instance, which both require the plan to already exist.', + 'Use this before provar_testplan_create-suite or provar_testplan_add-instance, which both require the plan to already exist.', 'Returns 
the guid assigned to the new plan, the plan directory path, and the .planitem path written.', ].join(' '), { @@ -83,7 +83,7 @@ export function registerTestPlanCreate(server: McpServer, config: ServerConfig): }, ({ project_path, plan_name, overwrite, dry_run }) => { const requestId = makeRequestId(); - log('info', 'provar.testplan.create', { requestId, project_path, plan_name }); + log('info', 'provar_testplan_create', { requestId, project_path, plan_name }); try { assertPathAllowed(project_path, config.allowedPaths); @@ -99,7 +99,9 @@ export function registerTestPlanCreate(server: McpServer, config: ServerConfig): content: [ { type: 'text' as const, - text: JSON.stringify(makeError('NOT_A_PROJECT', `No .testproject file found in ${projectRoot}`, requestId)), + text: JSON.stringify( + makeError('NOT_A_PROJECT', `No .testproject file found in ${projectRoot}`, requestId) + ), }, ], }; @@ -111,7 +113,13 @@ export function registerTestPlanCreate(server: McpServer, config: ServerConfig): content: [ { type: 'text' as const, - text: JSON.stringify(makeError('INVALID_PLAN_NAME', `plan_name must start with a letter or digit and contain only letters, digits, underscores, hyphens, or spaces: "${plan_name}"`, requestId)), + text: JSON.stringify( + makeError( + 'INVALID_PLAN_NAME', + `plan_name must start with a letter or digit and contain only letters, digits, underscores, hyphens, or spaces: "${plan_name}"`, + requestId + ) + ), }, ], }; @@ -154,8 +162,8 @@ export function registerTestPlanCreate(server: McpServer, config: ServerConfig): dry_run: dry_run ?? false, created: !dry_run, next_steps: dry_run - ? 'Review the plan structure, then call provar.testplan.create with dry_run=false to write to disk.' - : `Plan created at ${planDir}. Use provar.testplan.create-suite to add suites, then provar.testplan.add-instance to wire test cases into the plan.`, + ? 'Review the plan structure, then call provar_testplan_create with dry_run=false to write to disk.' 
+ : `Plan created at ${planDir}. Use provar_testplan_create-suite to add suites, then provar_testplan_add-instance to wire test cases into the plan.`, }; return { content: [{ type: 'text' as const, text: JSON.stringify(response) }], @@ -170,7 +178,7 @@ export function registerTestPlanCreate(server: McpServer, config: ServerConfig): type: 'text' as const, text: JSON.stringify( makeError( - error instanceof PathPolicyError ? error.code : (error.code ?? 'CREATE_PLAN_ERROR'), + error instanceof PathPolicyError ? error.code : error.code ?? 'CREATE_PLAN_ERROR', error.message, requestId ) @@ -183,11 +191,11 @@ export function registerTestPlanCreate(server: McpServer, config: ServerConfig): ); } -// ── provar.testplan.add-instance ────────────────────────────────────────────── +// ── provar_testplan_add-instance ────────────────────────────────────────────── export function registerTestPlanAddInstance(server: McpServer, config: ServerConfig): void { server.tool( - 'provar.testplan.add-instance', + 'provar_testplan_add-instance', [ 'Add a .testinstance file to an existing Provar test plan suite directory.', 'The plan directory and suite directory must already exist.', @@ -197,15 +205,28 @@ export function registerTestPlanAddInstance(server: McpServer, config: ServerCon ].join(' '), { project_path: z.string().describe('Absolute path to the Provar project root'), - test_case_path: z.string().describe('Path to the .testcase file, relative to project root (e.g. "tests/MyTest.testcase")'), + test_case_path: z + .string() + .describe('Path to the .testcase file, relative to project root (e.g. "tests/MyTest.testcase")'), plan_name: z.string().describe('Name of the test plan (directory under plans/)'), - suite_path: z.string().optional().describe('Path within the plan to place the instance (e.g. 
"MySuite" or "MySuite/SubSuite")'), - overwrite: z.boolean().optional().default(false).describe('Overwrite the .testinstance file if it already exists (default: false)'), - dry_run: z.boolean().optional().default(false).describe('Return what would be written without writing to disk (default: false)'), + suite_path: z + .string() + .optional() + .describe('Path within the plan to place the instance (e.g. "MySuite" or "MySuite/SubSuite")'), + overwrite: z + .boolean() + .optional() + .default(false) + .describe('Overwrite the .testinstance file if it already exists (default: false)'), + dry_run: z + .boolean() + .optional() + .default(false) + .describe('Return what would be written without writing to disk (default: false)'), }, ({ project_path, test_case_path, plan_name, suite_path, overwrite, dry_run }) => { const requestId = makeRequestId(); - log('info', 'provar.testplan.add-instance', { requestId, project_path, test_case_path, plan_name }); + log('info', 'provar_testplan_add-instance', { requestId, project_path, test_case_path, plan_name }); try { assertPathAllowed(project_path, config.allowedPaths); @@ -219,7 +240,14 @@ export function registerTestPlanAddInstance(server: McpServer, config: ServerCon if (testProjectFiles.length === 0) { return { isError: true, - content: [{ type: 'text' as const, text: JSON.stringify(makeError('NOT_A_PROJECT', `No .testproject file found in ${projectRoot}`, requestId)) }], + content: [ + { + type: 'text' as const, + text: JSON.stringify( + makeError('NOT_A_PROJECT', `No .testproject file found in ${projectRoot}`, requestId) + ), + }, + ], }; } @@ -228,13 +256,25 @@ export function registerTestPlanAddInstance(server: McpServer, config: ServerCon if (!fs.existsSync(absoluteTestCasePath)) { return { isError: true, - content: [{ type: 'text' as const, text: JSON.stringify(makeError('FILE_NOT_FOUND', `Test case not found: ${absoluteTestCasePath}`, requestId)) }], + content: [ + { + type: 'text' as const, + text: JSON.stringify( + 
makeError('FILE_NOT_FOUND', `Test case not found: ${absoluteTestCasePath}`, requestId) + ), + }, + ], }; } if (!test_case_path.endsWith('.testcase')) { return { isError: true, - content: [{ type: 'text' as const, text: JSON.stringify(makeError('INVALID_PATH', 'test_case_path must end with .testcase', requestId)) }], + content: [ + { + type: 'text' as const, + text: JSON.stringify(makeError('INVALID_PATH', 'test_case_path must end with .testcase', requestId)), + }, + ], }; } @@ -244,7 +284,18 @@ export function registerTestPlanAddInstance(server: McpServer, config: ServerCon if (!testCaseId) { return { isError: true, - content: [{ type: 'text' as const, text: JSON.stringify(makeError('NO_TEST_CASE_ID', `Cannot extract registryId, id, or guid from ${absoluteTestCasePath}`, requestId)) }], + content: [ + { + type: 'text' as const, + text: JSON.stringify( + makeError( + 'NO_TEST_CASE_ID', + `Cannot extract registryId, id, or guid from ${absoluteTestCasePath}`, + requestId + ) + ), + }, + ], }; } @@ -256,7 +307,18 @@ export function registerTestPlanAddInstance(server: McpServer, config: ServerCon if (!fs.existsSync(instanceDir)) { return { isError: true, - content: [{ type: 'text' as const, text: JSON.stringify(makeError('DIR_NOT_FOUND', `Suite directory does not exist: ${instanceDir}. Create it with provar.testplan.create-suite first.`, requestId)) }], + content: [ + { + type: 'text' as const, + text: JSON.stringify( + makeError( + 'DIR_NOT_FOUND', + `Suite directory does not exist: ${instanceDir}. Create it with provar_testplan_create-suite first.`, + requestId + ) + ), + }, + ], }; } @@ -267,7 +329,18 @@ export function registerTestPlanAddInstance(server: McpServer, config: ServerCon if (!overwrite && fs.existsSync(instanceFilePath)) { return { isError: true, - content: [{ type: 'text' as const, text: JSON.stringify(makeError('FILE_EXISTS', `Instance file already exists: ${instanceFilePath}. 
Set overwrite: true to replace it.`, requestId)) }], + content: [ + { + type: 'text' as const, + text: JSON.stringify( + makeError( + 'FILE_EXISTS', + `Instance file already exists: ${instanceFilePath}. Set overwrite: true to replace it.`, + requestId + ) + ), + }, + ], }; } @@ -297,18 +370,29 @@ export function registerTestPlanAddInstance(server: McpServer, config: ServerCon const error = err as Error & { code?: string }; return { isError: true, - content: [{ type: 'text' as const, text: JSON.stringify(makeError(error instanceof PathPolicyError ? error.code : (error.code ?? 'ADD_INSTANCE_ERROR'), error.message, requestId)) }], + content: [ + { + type: 'text' as const, + text: JSON.stringify( + makeError( + error instanceof PathPolicyError ? error.code : error.code ?? 'ADD_INSTANCE_ERROR', + error.message, + requestId + ) + ), + }, + ], }; } } ); } -// ── provar.testplan.create-suite ────────────────────────────────────────────── +// ── provar_testplan_create-suite ────────────────────────────────────────────── export function registerTestPlanCreateSuite(server: McpServer, config: ServerConfig): void { server.tool( - 'provar.testplan.create-suite', + 'provar_testplan_create-suite', [ 'Create a new suite directory inside a Provar test plan.', 'The plan directory must already exist with a .planitem file at its root.', @@ -319,12 +403,19 @@ export function registerTestPlanCreateSuite(server: McpServer, config: ServerCon project_path: z.string().describe('Absolute path to the Provar project root'), plan_name: z.string().describe('Name of the test plan (directory under plans/)'), suite_name: z.string().describe('Name of the new suite directory to create'), - parent_suite_path: z.string().optional().describe('Path of the parent suite within the plan (e.g. "MySuite"). 
Omit to create at plan root.'), - dry_run: z.boolean().optional().default(false).describe('Return what would be created without writing to disk (default: false)'), + parent_suite_path: z + .string() + .optional() + .describe('Path of the parent suite within the plan (e.g. "MySuite"). Omit to create at plan root.'), + dry_run: z + .boolean() + .optional() + .default(false) + .describe('Return what would be created without writing to disk (default: false)'), }, ({ project_path, plan_name, suite_name, parent_suite_path, dry_run }) => { const requestId = makeRequestId(); - log('info', 'provar.testplan.create-suite', { requestId, project_path, plan_name, suite_name }); + log('info', 'provar_testplan_create-suite', { requestId, project_path, plan_name, suite_name }); try { assertPathAllowed(project_path, config.allowedPaths); @@ -338,7 +429,14 @@ export function registerTestPlanCreateSuite(server: McpServer, config: ServerCon if (testProjectFiles.length === 0) { return { isError: true, - content: [{ type: 'text' as const, text: JSON.stringify(makeError('NOT_A_PROJECT', `No .testproject file found in ${projectRoot}`, requestId)) }], + content: [ + { + type: 'text' as const, + text: JSON.stringify( + makeError('NOT_A_PROJECT', `No .testproject file found in ${projectRoot}`, requestId) + ), + }, + ], }; } @@ -347,7 +445,14 @@ export function registerTestPlanCreateSuite(server: McpServer, config: ServerCon if (!fs.existsSync(planDir)) { return { isError: true, - content: [{ type: 'text' as const, text: JSON.stringify(makeError('DIR_NOT_FOUND', `Plan directory does not exist: ${planDir}`, requestId)) }], + content: [ + { + type: 'text' as const, + text: JSON.stringify( + makeError('DIR_NOT_FOUND', `Plan directory does not exist: ${planDir}`, requestId) + ), + }, + ], }; } @@ -356,7 +461,14 @@ export function registerTestPlanCreateSuite(server: McpServer, config: ServerCon if (!fs.existsSync(planItemPath)) { return { isError: true, - content: [{ type: 'text' as const, text: 
JSON.stringify(makeError('FILE_NOT_FOUND', `Plan .planitem file does not exist: ${planItemPath}`, requestId)) }], + content: [ + { + type: 'text' as const, + text: JSON.stringify( + makeError('FILE_NOT_FOUND', `Plan .planitem file does not exist: ${planItemPath}`, requestId) + ), + }, + ], }; } @@ -369,7 +481,12 @@ export function registerTestPlanCreateSuite(server: McpServer, config: ServerCon if (fs.existsSync(suiteDir)) { return { isError: true, - content: [{ type: 'text' as const, text: JSON.stringify(makeError('DIR_EXISTS', `Suite directory already exists: ${suiteDir}`, requestId)) }], + content: [ + { + type: 'text' as const, + text: JSON.stringify(makeError('DIR_EXISTS', `Suite directory already exists: ${suiteDir}`, requestId)), + }, + ], }; } @@ -398,18 +515,29 @@ export function registerTestPlanCreateSuite(server: McpServer, config: ServerCon const error = err as Error & { code?: string }; return { isError: true, - content: [{ type: 'text' as const, text: JSON.stringify(makeError(error instanceof PathPolicyError ? error.code : (error.code ?? 'CREATE_SUITE_ERROR'), error.message, requestId)) }], + content: [ + { + type: 'text' as const, + text: JSON.stringify( + makeError( + error instanceof PathPolicyError ? error.code : error.code ?? 
'CREATE_SUITE_ERROR', + error.message, + requestId + ) + ), + }, + ], }; } } ); } -// ── provar.testplan.remove-instance ────────────────────────────────────────── +// ── provar_testplan_remove-instance ────────────────────────────────────────── export function registerTestPlanRemoveInstance(server: McpServer, config: ServerConfig): void { server.tool( - 'provar.testplan.remove-instance', + 'provar_testplan_remove-instance', [ 'Remove a .testinstance file from a Provar test plan.', 'instance_path is relative to the project root.', @@ -418,11 +546,15 @@ export function registerTestPlanRemoveInstance(server: McpServer, config: Server { project_path: z.string().describe('Absolute path to the Provar project root'), instance_path: z.string().describe('Path to the .testinstance file, relative to project root'), - dry_run: z.boolean().optional().default(false).describe('Return what would be removed without deleting (default: false)'), + dry_run: z + .boolean() + .optional() + .default(false) + .describe('Return what would be removed without deleting (default: false)'), }, ({ project_path, instance_path, dry_run }) => { const requestId = makeRequestId(); - log('info', 'provar.testplan.remove-instance', { requestId, project_path, instance_path }); + log('info', 'provar_testplan_remove-instance', { requestId, project_path, instance_path }); try { assertPathAllowed(project_path, config.allowedPaths); @@ -436,7 +568,14 @@ export function registerTestPlanRemoveInstance(server: McpServer, config: Server if (!resolvedAbsolute.startsWith(resolvedProjectRoot + path.sep) && resolvedAbsolute !== resolvedProjectRoot) { return { isError: true, - content: [{ type: 'text' as const, text: JSON.stringify(makeError('PATH_TRAVERSAL', `Path traversal detected: ${instance_path}`, requestId)) }], + content: [ + { + type: 'text' as const, + text: JSON.stringify( + makeError('PATH_TRAVERSAL', `Path traversal detected: ${instance_path}`, requestId) + ), + }, + ], }; } @@ -444,7 +583,12 @@ export 
function registerTestPlanRemoveInstance(server: McpServer, config: Server if (!instance_path.endsWith('.testinstance')) { return { isError: true, - content: [{ type: 'text' as const, text: JSON.stringify(makeError('INVALID_PATH', 'instance_path must end with .testinstance', requestId)) }], + content: [ + { + type: 'text' as const, + text: JSON.stringify(makeError('INVALID_PATH', 'instance_path must end with .testinstance', requestId)), + }, + ], }; } @@ -452,7 +596,14 @@ export function registerTestPlanRemoveInstance(server: McpServer, config: Server if (!fs.existsSync(absolutePath)) { return { isError: true, - content: [{ type: 'text' as const, text: JSON.stringify(makeError('FILE_NOT_FOUND', `Instance file not found: ${absolutePath}`, requestId)) }], + content: [ + { + type: 'text' as const, + text: JSON.stringify( + makeError('FILE_NOT_FOUND', `Instance file not found: ${absolutePath}`, requestId) + ), + }, + ], }; } @@ -474,7 +625,18 @@ export function registerTestPlanRemoveInstance(server: McpServer, config: Server const error = err as Error & { code?: string }; return { isError: true, - content: [{ type: 'text' as const, text: JSON.stringify(makeError(error instanceof PathPolicyError ? error.code : (error.code ?? 'REMOVE_INSTANCE_ERROR'), error.message, requestId)) }], + content: [ + { + type: 'text' as const, + text: JSON.stringify( + makeError( + error instanceof PathPolicyError ? error.code : error.code ?? 'REMOVE_INSTANCE_ERROR', + error.message, + requestId + ) + ), + }, + ], }; } } diff --git a/src/mcp/tools/testPlanValidate.ts b/src/mcp/tools/testPlanValidate.ts index a272b024..cdb62d64 100644 --- a/src/mcp/tools/testPlanValidate.ts +++ b/src/mcp/tools/testPlanValidate.ts @@ -14,14 +14,15 @@ import { validatePlan, buildHierarchySummary, type TestPlanInput } from './hiera // ── Zod schemas ─────────────────────────────────────────────────────────────── -const testCaseSchema = z.object({ - name: z.string().describe('Test case filename (e.g. 
CreateAccount.testcase)'), - xml_content: z.string().optional().describe('Full XML content of the test case file'), - xml: z.string().optional().describe('Full XML content (API-compatible alias for xml_content)'), -}).refine( - (d) => d.xml_content !== undefined || d.xml !== undefined, - { message: 'Either xml_content or xml must be provided' } -); +const testCaseSchema = z + .object({ + name: z.string().describe('Test case filename (e.g. CreateAccount.testcase)'), + xml_content: z.string().optional().describe('Full XML content of the test case file'), + xml: z.string().optional().describe('Full XML content (API-compatible alias for xml_content)'), + }) + .refine((d) => d.xml_content !== undefined || d.xml !== undefined, { + message: 'Either xml_content or xml must be provided', + }); const innerSuiteSchema = z.object({ name: z.string().describe('Suite name'), @@ -36,32 +37,64 @@ const suiteSchema = z.object({ test_case_count: z.number().int().min(0).optional().describe('Explicit test case count for size check'), }); -const metadataSchema = z.object({ - objectives: z.string().optional().describe('Testing objectives for this plan (configured in Provar Quality Hub)'), - in_scope: z.string().optional().describe('Features and areas in scope (configured in Provar Quality Hub)'), - testing_methodology: z.string().optional().describe('Testing approach, e.g. risk-based, regression, exploratory (configured in Provar Quality Hub)'), - acceptance_criteria: z.string().optional().describe('Criteria to determine when testing is complete (configured in Provar Quality Hub)'), - acceptable_pass_rate: z.number().min(0).max(100).optional().describe('Minimum pass rate 0-100 for the plan to be considered successful (configured in Provar Quality Hub)'), - environments: z.array(z.string()).optional().describe('Target environments, e.g. 
["QA", "Staging", "UAT"] (configured in Provar Quality Hub)'), - test_data_strategy: z.string().optional().describe('How test data will be prepared and cleaned up (configured in Provar Quality Hub)'), - risks: z.string().optional().describe('Identified risks and mitigations (configured in Provar Quality Hub)'), -}).optional().describe('Plan completeness metadata — these fields are configured in the Provar Quality Hub app, not in local project files'); +const metadataSchema = z + .object({ + objectives: z.string().optional().describe('Testing objectives for this plan (configured in Provar Quality Hub)'), + in_scope: z.string().optional().describe('Features and areas in scope (configured in Provar Quality Hub)'), + testing_methodology: z + .string() + .optional() + .describe('Testing approach, e.g. risk-based, regression, exploratory (configured in Provar Quality Hub)'), + acceptance_criteria: z + .string() + .optional() + .describe('Criteria to determine when testing is complete (configured in Provar Quality Hub)'), + acceptable_pass_rate: z + .number() + .min(0) + .max(100) + .optional() + .describe('Minimum pass rate 0-100 for the plan to be considered successful (configured in Provar Quality Hub)'), + environments: z + .array(z.string()) + .optional() + .describe('Target environments, e.g. 
["QA", "Staging", "UAT"] (configured in Provar Quality Hub)'), + test_data_strategy: z + .string() + .optional() + .describe('How test data will be prepared and cleaned up (configured in Provar Quality Hub)'), + risks: z.string().optional().describe('Identified risks and mitigations (configured in Provar Quality Hub)'), + }) + .optional() + .describe( + 'Plan completeness metadata — these fields are configured in the Provar Quality Hub app, not in local project files' + ); export function registerTestPlanValidate(server: McpServer): void { server.tool( - 'provar.testplan.validate', + 'provar_testplan_validate', 'Validate a Provar test plan: checks for empty plans, duplicate suite names, oversized plans (>20 suites), plan completeness (objectives, scope, methodology, environments, acceptance criteria, test data strategy, risk assessment), and naming consistency. Recursively validates child suites and test cases. Returns quality score, plan-level violations, and full hierarchy results.', { plan_name: z.string().describe('Name of the test plan'), test_suites: z.array(suiteSchema).optional().describe('Test suites belonging to this plan'), test_cases: z.array(testCaseSchema).optional().describe('Test cases directly in this plan (not in a suite)'), - test_suite_count: z.number().int().min(0).optional().describe('Explicit suite count for size check (overrides counting test_suites)'), + test_suite_count: z + .number() + .int() + .min(0) + .optional() + .describe('Explicit suite count for size check (overrides counting test_suites)'), metadata: metadataSchema, - quality_threshold: z.number().min(0).max(100).optional().describe('Minimum quality score for a test case to be considered valid (default: 80)'), + quality_threshold: z + .number() + .min(0) + .max(100) + .optional() + .describe('Minimum quality score for a test case to be considered valid (default: 80)'), }, ({ plan_name, test_suites, test_cases, test_suite_count, metadata, quality_threshold }) => { const requestId 
= makeRequestId(); - log('info', 'provar.testplan.validate', { requestId, plan_name }); + log('info', 'provar_testplan_validate', { requestId, plan_name }); try { const threshold = quality_threshold ?? 80; @@ -84,7 +117,7 @@ export function registerTestPlanValidate(server: McpServer): void { } catch (err: unknown) { const error = err as Error; const errResult = makeError('VALIDATE_ERROR', error.message, requestId, false); - log('error', 'provar.testplan.validate failed', { requestId, error: error.message }); + log('error', 'provar_testplan_validate failed', { requestId, error: error.message }); return { isError: true, content: [{ type: 'text' as const, text: JSON.stringify(errResult) }] }; } } diff --git a/src/mcp/tools/testSuiteValidate.ts b/src/mcp/tools/testSuiteValidate.ts index e1ceea41..71efe879 100644 --- a/src/mcp/tools/testSuiteValidate.ts +++ b/src/mcp/tools/testSuiteValidate.ts @@ -14,19 +14,25 @@ import { validateSuite, buildHierarchySummary, type TestSuiteInput } from './hie // ── Zod schemas ─────────────────────────────────────────────────────────────── -const testCaseSchema = z.object({ - name: z.string().describe('Test case filename (e.g. CreateAccount.testcase)'), - xml_content: z.string().optional().describe('Full XML content of the test case file'), - xml: z.string().optional().describe('Full XML content (API-compatible alias for xml_content)'), -}).refine( - (d) => d.xml_content !== undefined || d.xml !== undefined, - { message: 'Either xml_content or xml must be provided' } -); +const testCaseSchema = z + .object({ + name: z.string().describe('Test case filename (e.g. 
CreateAccount.testcase)'), + xml_content: z.string().optional().describe('Full XML content of the test case file'), + xml: z.string().optional().describe('Full XML content (API-compatible alias for xml_content)'), + }) + .refine((d) => d.xml_content !== undefined || d.xml !== undefined, { + message: 'Either xml_content or xml must be provided', + }); const innerSuiteSchema = z.object({ name: z.string().describe('Child suite name'), test_cases: z.array(testCaseSchema).optional().describe('Test cases in this child suite'), - test_case_count: z.number().int().min(0).optional().describe('Explicit test case count (overrides counting test_cases)'), + test_case_count: z + .number() + .int() + .min(0) + .optional() + .describe('Explicit test case count (overrides counting test_cases)'), }); const childSuiteSchema = z.object({ @@ -38,18 +44,31 @@ const childSuiteSchema = z.object({ export function registerTestSuiteValidate(server: McpServer): void { server.tool( - 'provar.testsuite.validate', + 'provar_testsuite_validate', 'Validate a Provar test suite: checks for empty suites, duplicate names, oversized suites (>75 tests), and naming convention consistency. Recursively validates child suites and individual test case XML. 
Returns quality score, suite-level violations, and per-test-case results.', { suite_name: z.string().describe('Name of the test suite'), test_cases: z.array(testCaseSchema).optional().describe('Test cases directly in this suite'), - child_suites: z.array(childSuiteSchema).optional().describe('Child test suites (supports up to 2 levels of nesting)'), - test_case_count: z.number().int().min(0).optional().describe('Explicit total test case count for size check (overrides counting test_cases)'), - quality_threshold: z.number().min(0).max(100).optional().describe('Minimum quality score for a test case to be considered valid (default: 80)'), + child_suites: z + .array(childSuiteSchema) + .optional() + .describe('Child test suites (supports up to 2 levels of nesting)'), + test_case_count: z + .number() + .int() + .min(0) + .optional() + .describe('Explicit total test case count for size check (overrides counting test_cases)'), + quality_threshold: z + .number() + .min(0) + .max(100) + .optional() + .describe('Minimum quality score for a test case to be considered valid (default: 80)'), }, ({ suite_name, test_cases, child_suites, test_case_count, quality_threshold }) => { const requestId = makeRequestId(); - log('info', 'provar.testsuite.validate', { requestId, suite_name }); + log('info', 'provar_testsuite_validate', { requestId, suite_name }); try { const threshold = quality_threshold ?? 
80; @@ -71,7 +90,7 @@ export function registerTestSuiteValidate(server: McpServer): void { } catch (err: unknown) { const error = err as Error; const errResult = makeError('VALIDATE_ERROR', error.message, requestId, false); - log('error', 'provar.testsuite.validate failed', { requestId, error: error.message }); + log('error', 'provar_testsuite_validate failed', { requestId, error: error.message }); return { isError: true, content: [{ type: 'text' as const, text: JSON.stringify(errResult) }] }; } } diff --git a/src/services/projectValidation.ts b/src/services/projectValidation.ts index e54ccc97..df40ece1 100644 --- a/src/services/projectValidation.ts +++ b/src/services/projectValidation.ts @@ -40,8 +40,8 @@ export class ProjectValidationError extends Error { export interface ProjectValidationOptions { project_path: string; quality_threshold?: number; // default 80 - save_results?: boolean; // default true (any value !== false means save) - results_dir?: string; // default '{project_path}/provardx/validation' + save_results?: boolean; // default true (any value !== false means save) + results_dir?: string; // default '{project_path}/provardx/validation' } export interface ValidatedTestCase { @@ -101,6 +101,8 @@ export interface ProjectValidationResult { uncovered_test_cases: string[]; }; saved_to: string | null; + /** Directories within plans/ that are missing a .planitem file and will be silently ignored by the Provar runner. */ + plan_integrity_warnings?: string[]; /** Set when save_results was requested but the write failed (disk full, permissions, etc.). 
*/ save_error?: string; } @@ -153,7 +155,9 @@ export function resolveProjectRoot(givenPath: string): { root: string; candidate const sub = path.join(givenPath, entry.name); if (fs.existsSync(path.join(sub, '.testproject'))) candidates.push(sub); } - } catch { /* skip */ } + } catch { + /* skip */ + } if (candidates.length === 1) return { root: candidates[0], candidates: [] }; return { root: givenPath, candidates }; // caller handles 0 or multiple @@ -198,7 +202,8 @@ export function readProjectContext(projectPath: string): { const secretsFullPath = path.resolve(path.join(projectPath, secretsRelPath)); const projectPathResolved = path.resolve(projectPath); // Bounds check: only read secrets file if it's within the project directory - const secretsInBounds = secretsFullPath === projectPathResolved || secretsFullPath.startsWith(projectPathResolved + path.sep); + const secretsInBounds = + secretsFullPath === projectPathResolved || secretsFullPath.startsWith(projectPathResolved + path.sep); if (secretsInBounds && fs.existsSync(secretsFullPath)) { try { const secretsContent = fs.readFileSync(secretsFullPath, 'utf-8'); @@ -212,7 +217,9 @@ export function readProjectContext(projectPath: string): { const value = trimmed.slice(eqIdx + 1).trim(); if (value && !value.startsWith('ENC1(')) unencryptedSecretCount++; } - } catch { /* skip */ } + } catch { + /* skip */ + } } return { @@ -256,16 +263,17 @@ function resolveTestInstanceFull(instancePath: string, projectPath: string): Tes // Bounds check: only read test case files within the project directory const tcInBounds = tcFullPath === projResolved || tcFullPath.startsWith(projResolved + path.sep); // Derive name from the bounds-checked resolved path to prevent injection via crafted testCasePath - const tcName = tcInBounds - ? path.basename(tcFullPath, '.testcase') - : path.basename(testCasePath, '.testcase'); + const tcName = tcInBounds ? 
path.basename(tcFullPath, '.testcase') : path.basename(testCasePath, '.testcase'); if (tcInBounds && fs.existsSync(tcFullPath)) { try { xml_content = fs.readFileSync(tcFullPath, 'utf-8'); - } catch { /* xml_content stays undefined */ } + } catch { + /* xml_content stays undefined */ + } } - return { testCase: { name: tcName, xml_content }, testCasePath, testCaseId }; + // Only expose testCasePath when in-bounds — out-of-bounds paths must not affect coverage totals + return { testCase: { name: tcName, xml_content }, testCasePath: tcInBounds ? testCasePath : null, testCaseId }; } catch { return { testCase: null, testCasePath: null, testCaseId: null }; } @@ -283,7 +291,7 @@ function accumulateCoveredPath( testCasePath: string | null, testCaseId: string | null, coveredPaths: Set, - idMap: Map, + idMap: Map ): void { if (testCasePath) coveredPaths.add(testCasePath); if (testCaseId) { @@ -299,6 +307,7 @@ export function readSuiteDirectory( depth = 0, coveredPaths?: Set, idMap?: Map, + planIntegrityWarnings?: string[] ): TestSuiteInput { const testCases: TestCaseInput[] = []; const testSuites: TestSuiteInput[] = []; @@ -311,14 +320,31 @@ export function readSuiteDirectory( if (entry.name === 'node_modules') continue; const fullPath = path.join(dirPath, entry.name); if (entry.isDirectory() && !entry.name.startsWith('.')) { - testSuites.push(readSuiteDirectory(fullPath, entry.name, projectPath, depth + 1, coveredPaths, idMap)); + const hasPlanItem = fs.existsSync(path.join(fullPath, '.planitem')); + if (!hasPlanItem && planIntegrityWarnings) { + planIntegrityWarnings.push(path.relative(projectPath, fullPath).replace(/\\/g, '/')); + } + // Always recurse for display; only forward coverage state when .planitem is present + testSuites.push( + readSuiteDirectory( + fullPath, + entry.name, + projectPath, + depth + 1, + hasPlanItem ? coveredPaths : undefined, + hasPlanItem ? 
idMap : undefined, + planIntegrityWarnings + ) + ); } else if (entry.name.endsWith('.testinstance')) { const { testCase, testCasePath, testCaseId } = resolveTestInstanceFull(fullPath, projectPath); if (testCase) testCases.push(testCase); if (coveredPaths && idMap) accumulateCoveredPath(testCasePath, testCaseId, coveredPaths, idMap); } } - } catch { /* skip */ } + } catch { + /* skip */ + } return { name, test_cases: testCases, test_suites: testSuites }; } @@ -329,6 +355,7 @@ export function readPlanDirectory( projectPath: string, coveredPaths?: Set, idMap?: Map, + planIntegrityWarnings?: string[] ): TestPlanInput { const testCases: TestCaseInput[] = []; const testSuites: TestSuiteInput[] = []; @@ -339,22 +366,43 @@ export function readPlanDirectory( if (entry.name === 'node_modules') continue; const fullPath = path.join(planPath, entry.name); if (entry.isDirectory() && !entry.name.startsWith('.')) { - testSuites.push(readSuiteDirectory(fullPath, entry.name, projectPath, 0, coveredPaths, idMap)); + const hasPlanItem = fs.existsSync(path.join(fullPath, '.planitem')); + if (!hasPlanItem && planIntegrityWarnings) { + planIntegrityWarnings.push(path.relative(projectPath, fullPath).replace(/\\/g, '/')); + } + testSuites.push( + readSuiteDirectory( + fullPath, + entry.name, + projectPath, + 0, + hasPlanItem ? coveredPaths : undefined, + hasPlanItem ? 
idMap : undefined, + planIntegrityWarnings + ) + ); } else if (entry.name.endsWith('.testinstance')) { const { testCase, testCasePath, testCaseId } = resolveTestInstanceFull(fullPath, projectPath); if (testCase) testCases.push(testCase); if (coveredPaths && idMap) accumulateCoveredPath(testCasePath, testCaseId, coveredPaths, idMap); } } - } catch { /* skip */ } + } catch { + /* skip */ + } return { name, test_cases: testCases, test_suites: testSuites }; } -export function readPlansDir(projectPath: string): { plans: TestPlanInput[]; coveredPaths: Set } { +export function readPlansDir(projectPath: string): { + plans: TestPlanInput[]; + coveredPaths: Set; + planIntegrityWarnings: string[]; +} { const plansDir = path.join(projectPath, 'plans'); const coveredPaths = new Set(); - if (!fs.existsSync(plansDir)) return { plans: [], coveredPaths }; + const planIntegrityWarnings: string[] = []; + if (!fs.existsSync(plansDir)) return { plans: [], coveredPaths, planIntegrityWarnings }; // Build UUID→path map once so the plan walk can resolve testCaseId fallbacks // without a separate pass over the tests/ directory later. 
@@ -366,11 +414,13 @@ export function readPlansDir(projectPath: string): { plans: TestPlanInput[]; cov for (const entry of entries) { if (!entry.isDirectory() || entry.name.startsWith('.') || entry.name === 'node_modules') continue; const planPath = path.join(plansDir, entry.name); - plans.push(readPlanDirectory(planPath, entry.name, projectPath, coveredPaths, idMap)); + plans.push(readPlanDirectory(planPath, entry.name, projectPath, coveredPaths, idMap, planIntegrityWarnings)); } - } catch { /* skip */ } + } catch { + /* skip */ + } - return { plans, coveredPaths }; + return { plans, coveredPaths, planIntegrityWarnings }; } /** @@ -388,19 +438,24 @@ function buildTestCaseIdMap(projectPath: string): Map { for (const entry of fs.readdirSync(dir, { withFileTypes: true })) { if (entry.name.startsWith('.') || entry.name === 'node_modules') continue; const fullPath = path.join(dir, entry.name); - if (entry.isDirectory()) { walk(fullPath); } - else if (entry.name.endsWith('.testcase')) { + if (entry.isDirectory()) { + walk(fullPath); + } else if (entry.name.endsWith('.testcase')) { try { const content = fs.readFileSync(fullPath, 'utf-8'); const rel = path.relative(projectPath, fullPath).replace(/\\/g, '/'); for (const attr of ['registryId', 'id', 'guid'] as const) { const m = content.match(new RegExp(`${attr}=["']([^"']+)["']`)); - if (m?.[1] && !idMap.has(m[1])) idMap.set(m[1], rel); + if (m?.[1] && m[1] !== '1' && !idMap.has(m[1])) idMap.set(m[1], rel); } - } catch { /* skip */ } + } catch { + /* skip */ + } } } - } catch { /* skip */ } + } catch { + /* skip */ + } } walk(testsDir); return idMap; @@ -420,10 +475,13 @@ export function collectAllTestCaseNames(projectPath: string): string[] { try { for (const entry of fs.readdirSync(dir, { withFileTypes: true })) { if (entry.name.startsWith('.') || entry.name === 'node_modules') continue; - if (entry.isDirectory()) { walk(path.join(dir, entry.name)); } - else if (entry.name.endsWith('.testcase')) 
names.push(path.basename(entry.name, '.testcase')); + if (entry.isDirectory()) { + walk(path.join(dir, entry.name)); + } else if (entry.name.endsWith('.testcase')) names.push(path.basename(entry.name, '.testcase')); } - } catch { /* skip */ } + } catch { + /* skip */ + } } walk(testsDir); return names; @@ -446,8 +504,9 @@ export function collectCoveredPathsFromDisk(projectPath: string): Set { for (const entry of fs.readdirSync(dir, { withFileTypes: true })) { if (entry.name === 'node_modules') continue; const fullPath = path.join(dir, entry.name); - if (entry.isDirectory()) { walk(fullPath); } - else if (entry.name.endsWith('.testinstance')) { + if (entry.isDirectory()) { + walk(fullPath); + } else if (entry.name.endsWith('.testinstance')) { try { const content = fs.readFileSync(fullPath, 'utf-8'); // Primary: path-based match @@ -459,10 +518,14 @@ export function collectCoveredPathsFromDisk(projectPath: string): Set { const resolvedPath = idMap.get(idM[1]); if (resolvedPath) covered.add(resolvedPath); } - } catch { /* skip */ } + } catch { + /* skip */ + } } } - } catch { /* skip */ } + } catch { + /* skip */ + } } walk(plansDir); return covered; @@ -478,13 +541,16 @@ export function findUncoveredTestCases(projectPath: string, coveredPaths: Set, projectName: string ): string { - const targetDir = resultsDir - ? path.resolve(resultsDir) - : path.join(projectPath, 'provardx', 'validation'); + const targetDir = resultsDir ? path.resolve(resultsDir) : path.join(projectPath, 'provardx', 'validation'); fs.mkdirSync(targetDir, { recursive: true }); @@ -719,9 +789,7 @@ export function saveResults( * ambiguous project, not a Provar project directory). Lets unexpected I/O * errors propagate as-is. 
*/ -export function validateProjectFromPath( - options: ProjectValidationOptions -): ProjectValidationResult { +export function validateProjectFromPath(options: ProjectValidationOptions): ProjectValidationResult { const { project_path, quality_threshold, save_results, results_dir } = options; const resolved = path.resolve(project_path); @@ -734,7 +802,9 @@ export function validateProjectFromPath( if (candidates.length > 1) { throw new ProjectValidationError( 'AMBIGUOUS_PROJECT', - `Multiple Provar projects found under "${resolved}". Specify the exact project directory: ${candidates.map((c) => path.basename(c)).join(', ')}` + `Multiple Provar projects found under "${resolved}". Specify the exact project directory: ${candidates + .map((c) => path.basename(c)) + .join(', ')}` ); } @@ -752,7 +822,7 @@ export function validateProjectFromPath( // 2. Read plan hierarchy from plans/ directory; covered paths are computed // as a byproduct of the walk — no second traversal needed. - const { plans: testPlans, coveredPaths } = readPlansDir(projectRoot); + const { plans: testPlans, coveredPaths, planIntegrityWarnings } = readPlansDir(projectRoot); // 3. Validate const input: ProjectInput = { @@ -837,6 +907,7 @@ export function validateProjectFromPath( uncovered_test_cases: uncoveredTestCases, }, saved_to: savedTo, + ...(planIntegrityWarnings.length > 0 ? 
{ plan_integrity_warnings: planIntegrityWarnings } : {}), save_error: saveError, }; } diff --git a/test/unit/mcp/antTools.test.ts b/test/unit/mcp/antTools.test.ts index 65fdce4e..3b76f5f4 100644 --- a/test/unit/mcp/antTools.test.ts +++ b/test/unit/mcp/antTools.test.ts @@ -104,12 +104,12 @@ afterEach(() => { fs.rmSync(tmpDir, { recursive: true, force: true }); }); -// ── provar.ant.generate ──────────────────────────────────────────────────────── +// ── provar_ant_generate ──────────────────────────────────────────────────────── -describe('provar.ant.generate', () => { +describe('provar_ant_generate', () => { describe('dry_run', () => { it('returns xml_content without writing to disk', () => { - const result = server.call('provar.ant.generate', minimalInput()); + const result = server.call('provar_ant_generate', minimalInput()); assert.equal(isError(result), false); const body = parseText(result); @@ -120,7 +120,7 @@ describe('provar.ant.generate', () => { it('does NOT write a file even when output_path is provided', () => { const outPath = path.join(tmpDir, 'build.xml'); - server.call('provar.ant.generate', minimalInput({ output_path: outPath, dry_run: true })); + server.call('provar_ant_generate', minimalInput({ output_path: outPath, dry_run: true })); assert.equal(fs.existsSync(outPath), false, 'file must not be written in dry_run mode'); }); @@ -128,7 +128,7 @@ describe('provar.ant.generate', () => { describe('generated XML structure', () => { function getXml(overrides: Record = {}): string { - const result = server.call('provar.ant.generate', minimalInput(overrides)); + const result = server.call('provar_ant_generate', minimalInput(overrides)); return parseText(result)['xml_content'] as string; } @@ -337,7 +337,7 @@ describe('provar.ant.generate', () => { describe('writing to disk', () => { it('writes file when dry_run=false and output_path provided', () => { const outPath = path.join(tmpDir, 'build.xml'); - const result = server.call('provar.ant.generate', 
minimalInput({ output_path: outPath, dry_run: false })); + const result = server.call('provar_ant_generate', minimalInput({ output_path: outPath, dry_run: false })); assert.equal(isError(result), false); assert.equal(fs.existsSync(outPath), true, 'file should be written'); @@ -346,14 +346,14 @@ describe('provar.ant.generate', () => { it('written file contains valid XML with root', () => { const outPath = path.join(tmpDir, 'build.xml'); - server.call('provar.ant.generate', minimalInput({ output_path: outPath, dry_run: false })); + server.call('provar_ant_generate', minimalInput({ output_path: outPath, dry_run: false })); const content = fs.readFileSync(outPath, 'utf-8'); assert.ok(content.includes(''); }); it('does NOT write when dry_run=false but no output_path', () => { - const result = server.call('provar.ant.generate', minimalInput({ dry_run: false, output_path: undefined })); + const result = server.call('provar_ant_generate', minimalInput({ dry_run: false, output_path: undefined })); assert.equal(isError(result), false); assert.equal(parseText(result)['written'], false); @@ -364,7 +364,7 @@ describe('provar.ant.generate', () => { fs.writeFileSync(outPath, '', 'utf-8'); const result = server.call( - 'provar.ant.generate', + 'provar_ant_generate', minimalInput({ output_path: outPath, dry_run: false, overwrite: false }) ); @@ -377,7 +377,7 @@ describe('provar.ant.generate', () => { fs.writeFileSync(outPath, '', 'utf-8'); const result = server.call( - 'provar.ant.generate', + 'provar_ant_generate', minimalInput({ output_path: outPath, dry_run: false, overwrite: true }) ); @@ -388,7 +388,7 @@ describe('provar.ant.generate', () => { it('creates parent directories as needed', () => { const outPath = path.join(tmpDir, 'ANT', 'build.xml'); - server.call('provar.ant.generate', minimalInput({ output_path: outPath, dry_run: false })); + server.call('provar_ant_generate', minimalInput({ output_path: outPath, dry_run: false })); assert.equal(fs.existsSync(outPath), true, 
'nested directory should be created'); }); @@ -411,7 +411,7 @@ describe('provar.ant.generate', () => { registerAntGenerate(strictServer as never, { allowedPaths: [tmpDir] }); const result = strictServer.call( - 'provar.ant.generate', + 'provar_ant_generate', strictInput({ output_path: path.join(os.tmpdir(), 'evil-build.xml'), dry_run: false, @@ -431,7 +431,7 @@ describe('provar.ant.generate', () => { // Input paths are within tmpDir; only the output_path is outside — but dry_run // skips the write so the output_path should not be validated. const result = strictServer.call( - 'provar.ant.generate', + 'provar_ant_generate', strictInput({ output_path: '/etc/evil-build.xml', dry_run: true }) ); @@ -443,7 +443,7 @@ describe('provar.ant.generate', () => { registerAntGenerate(strictServer as never, { allowedPaths: [tmpDir] }); const result = strictServer.call( - 'provar.ant.generate', + 'provar_ant_generate', strictInput({ provar_home: path.join(os.tmpdir(), 'evil-provar'), dry_run: true }) ); @@ -456,7 +456,7 @@ describe('provar.ant.generate', () => { const strictServer = new MockMcpServer(); registerAntGenerate(strictServer as never, { allowedPaths: [tmpDir] }); - const result = strictServer.call('provar.ant.generate', strictInput({ project_path: '../evil', dry_run: true })); + const result = strictServer.call('provar_ant_generate', strictInput({ project_path: '../evil', dry_run: true })); assert.equal(isError(result), true); assert.equal(parseText(result)['error_code'], 'PATH_TRAVERSAL'); @@ -467,7 +467,7 @@ describe('provar.ant.generate', () => { registerAntGenerate(strictServer as never, { allowedPaths: [tmpDir] }); const result = strictServer.call( - 'provar.ant.generate', + 'provar_ant_generate', strictInput({ results_path: path.join(os.tmpdir(), 'evil-results'), dry_run: true }) ); @@ -481,7 +481,7 @@ describe('provar.ant.generate', () => { registerAntGenerate(strictServer as never, { allowedPaths: [tmpDir] }); const result = strictServer.call( - 
'provar.ant.generate', + 'provar_ant_generate', strictInput({ license_path: path.join(os.tmpdir(), 'evil-licenses'), dry_run: true }) ); @@ -769,7 +769,7 @@ describe('validateAntXml', () => { describe('round-trip: generate then validate', () => { it('XML produced by the generator passes validation', () => { // Use the mock server to generate, then validate the output - const result = server.call('provar.ant.generate', minimalInput()); + const result = server.call('provar_ant_generate', minimalInput()); const xml = parseText(result)['xml_content'] as string; const validation = validateAntXml(xml); diff --git a/test/unit/mcp/automationTools.test.ts b/test/unit/mcp/automationTools.test.ts index 110da3ff..48979008 100644 --- a/test/unit/mcp/automationTools.test.ts +++ b/test/unit/mcp/automationTools.test.ts @@ -90,12 +90,12 @@ describe('automationTools', () => { sinon.restore(); }); - // ── provar.automation.testrun ───────────────────────────────────────────── + // ── provar_automation_testrun ───────────────────────────────────────────── - describe('provar.automation.testrun', () => { + describe('provar_automation_testrun', () => { it('calls sf with correct args and returns stdout', () => { spawnStub.returns(makeSpawnResult('tests passed', '', 0)); - const result = server.call('provar.automation.testrun', { flags: [] }); + const result = server.call('provar_automation_testrun', { flags: [] }); const body = parseBody(result); assert.equal(body.exitCode, 0); assert.equal(body.stdout, 'tests passed'); @@ -106,7 +106,7 @@ describe('automationTools', () => { it('forwards extra flags to sf', () => { spawnStub.returns(makeSpawnResult('ok', '', 0)); - server.call('provar.automation.testrun', { flags: ['--project-path', '/my/project'] }); + server.call('provar_automation_testrun', { flags: ['--project-path', '/my/project'] }); const args = spawnStub.firstCall.args[1] as string[]; assert.ok(args.includes('--project-path')); assert.ok(args.includes('/my/project')); @@ -114,7 
+114,7 @@ describe('automationTools', () => { it('returns isError and AUTOMATION_TESTRUN_FAILED on non-zero exit', () => { spawnStub.returns(makeSpawnResult('', 'compilation error', 1)); - const result = server.call('provar.automation.testrun', { flags: [] }); + const result = server.call('provar_automation_testrun', { flags: [] }); assert.ok(isError(result)); const body = parseBody(result); assert.equal(body.error_code, 'AUTOMATION_TESTRUN_FAILED'); @@ -123,14 +123,14 @@ describe('automationTools', () => { it('uses stdout as message when stderr is empty', () => { spawnStub.returns(makeSpawnResult('test failed: assertion error', '', 1)); - const result = server.call('provar.automation.testrun', { flags: [] }); + const result = server.call('provar_automation_testrun', { flags: [] }); const body = parseBody(result); assert.equal(body.message, 'test failed: assertion error'); }); it('returns SF_NOT_FOUND on ENOENT with actionable message', () => { spawnStub.returns(makeEnoentResult()); - const result = server.call('provar.automation.testrun', { flags: [] }); + const result = server.call('provar_automation_testrun', { flags: [] }); assert.ok(isError(result)); const body = parseBody(result); assert.equal(body.error_code, 'SF_NOT_FOUND'); @@ -144,7 +144,7 @@ describe('automationTools', () => { 'Tests: 5 passed, 0 failed', ].join('\n'); spawnStub.returns(makeSpawnResult(noisy, '', 0)); - const result = server.call('provar.automation.testrun', { flags: [] }); + const result = server.call('provar_automation_testrun', { flags: [] }); const body = parseBody(result); assert.ok(!(body.stdout as string).includes('networknt'), 'Filtered stdout should not contain schema noise'); assert.ok((body.stdout as string).includes('Tests: 5 passed'), 'Real output should remain'); @@ -153,7 +153,7 @@ describe('automationTools', () => { it('does not set output_lines_suppressed when stdout has no noise', () => { spawnStub.returns(makeSpawnResult('Tests: 3 passed, 0 failed', '', 0)); - const 
result = server.call('provar.automation.testrun', { flags: [] }); + const result = server.call('provar_automation_testrun', { flags: [] }); const body = parseBody(result); assert.equal(body.output_lines_suppressed, undefined, 'output_lines_suppressed should be absent'); }); @@ -175,7 +175,7 @@ describe('automationTools', () => { try { spawnStub.returns(makeSpawnResult('tests done', '', 0)); - const result = server.call('provar.automation.testrun', { flags: [] }); + const result = server.call('provar_automation_testrun', { flags: [] }); const body = parseBody(result); assert.ok(Array.isArray(body.steps), 'steps should be an array'); const steps = body.steps as Array<{ testItemId: string; title: string; status: string; errorMessage?: string }>; @@ -199,7 +199,7 @@ describe('automationTools', () => { try { spawnStub.returns(makeSpawnResult('tests done', '', 0)); - const result = server.call('provar.automation.testrun', { flags: [] }); + const result = server.call('provar_automation_testrun', { flags: [] }); const body = parseBody(result); assert.equal(body.steps, undefined, 'steps should be absent when no XML found'); assert.ok(body.details, 'details should be present'); @@ -218,7 +218,7 @@ describe('automationTools', () => { try { spawnStub.returns(makeSpawnResult('tests done', '', 0)); - const result = server.call('provar.automation.testrun', { flags: [] }); + const result = server.call('provar_automation_testrun', { flags: [] }); const body = parseBody(result); assert.equal(body.steps, undefined, 'steps should be absent when XML is malformed'); assert.ok(body.details, 'details should be present with warning'); @@ -229,12 +229,12 @@ describe('automationTools', () => { }); }); - // ── provar.automation.compile ───────────────────────────────────────────── + // ── provar_automation_compile ───────────────────────────────────────────── - describe('provar.automation.compile', () => { + describe('provar_automation_compile', () => { it('calls sf with project compile 
args', () => { spawnStub.returns(makeSpawnResult('compiled ok', '', 0)); - const result = server.call('provar.automation.compile', { flags: [] }); + const result = server.call('provar_automation_compile', { flags: [] }); const body = parseBody(result); assert.equal(body.exitCode, 0); const args = spawnStub.firstCall.args[1] as string[]; @@ -243,28 +243,28 @@ describe('automationTools', () => { it('forwards project-path flag', () => { spawnStub.returns(makeSpawnResult('ok', '', 0)); - server.call('provar.automation.compile', { flags: ['--project-path', '/my/project'] }); + server.call('provar_automation_compile', { flags: ['--project-path', '/my/project'] }); const args = spawnStub.firstCall.args[1] as string[]; assert.ok(args.includes('/my/project')); }); it('returns AUTOMATION_COMPILE_FAILED on non-zero exit', () => { spawnStub.returns(makeSpawnResult('', 'syntax error in TestCase.testcase', 1)); - const result = server.call('provar.automation.compile', { flags: [] }); + const result = server.call('provar_automation_compile', { flags: [] }); assert.ok(isError(result)); assert.equal(parseBody(result).error_code, 'AUTOMATION_COMPILE_FAILED'); }); it('returns SF_NOT_FOUND on ENOENT', () => { spawnStub.returns(makeEnoentResult()); - const result = server.call('provar.automation.compile', { flags: [] }); + const result = server.call('provar_automation_compile', { flags: [] }); assert.equal(parseBody(result).error_code, 'SF_NOT_FOUND'); }); }); - // ── provar.automation.setup ─────────────────────────────────────────────── + // ── provar_automation_setup ─────────────────────────────────────────────── - describe('provar.automation.setup', () => { + describe('provar_automation_setup', () => { let existsStub: sinon.SinonStub; let readdirStub: sinon.SinonStub; let readFileStub: sinon.SinonStub; @@ -302,7 +302,7 @@ describe('automationTools', () => { it('returns already_installed with install path when local ProvarHome exists', () => { makeValidInstall(localProvarHome); - 
const result = server.call('provar.automation.setup', { force: false }); + const result = server.call('provar_automation_setup', { force: false }); const body = parseBody(result); assert.ok(!isError(result)); @@ -316,7 +316,7 @@ describe('automationTools', () => { makeValidInstall(localProvarHome); readFileStub.withArgs(path.join(path.resolve(localProvarHome), 'version.txt'), 'utf-8').returns('2.12.0\n'); - const body = parseBody(server.call('provar.automation.setup', { force: false })); + const body = parseBody(server.call('provar_automation_setup', { force: false })); assert.equal(body.version, '2.12.0'); }); @@ -325,7 +325,7 @@ describe('automationTools', () => { makeValidInstall(localProvarHome); // readFileStub already throws for all paths by default - const body = parseBody(server.call('provar.automation.setup', { force: false })); + const body = parseBody(server.call('provar_automation_setup', { force: false })); assert.equal(body.version, null); }); @@ -335,7 +335,7 @@ describe('automationTools', () => { process.env['PROVAR_HOME'] = envPath; makeValidInstall(envPath); - const body = parseBody(server.call('provar.automation.setup', { force: false })); + const body = parseBody(server.call('provar_automation_setup', { force: false })); const installs = body.installations as Array<{ source: string; path: string }>; assert.equal(body.already_installed, true); @@ -346,7 +346,7 @@ describe('automationTools', () => { process.env['PROVAR_HOME'] = localProvarHome; // same as CWD-relative ProvarHome makeValidInstall(localProvarHome); - const body = parseBody(server.call('provar.automation.setup', { force: false })); + const body = parseBody(server.call('provar_automation_setup', { force: false })); const installs = body.installations as unknown[]; assert.equal(installs.length, 1); @@ -358,7 +358,7 @@ describe('automationTools', () => { makeValidInstall(envPath); makeValidInstall(localProvarHome); - const body = parseBody(server.call('provar.automation.setup', { force: 
false })); + const body = parseBody(server.call('provar_automation_setup', { force: false })); const installs = body.installations as unknown[]; assert.equal(installs.length, 2); @@ -368,7 +368,7 @@ describe('automationTools', () => { makeValidInstall(localProvarHome); spawnStub.returns(makeSpawnResult('setup complete', '', 0)); - const body = parseBody(server.call('provar.automation.setup', { force: true })); + const body = parseBody(server.call('provar_automation_setup', { force: true })); assert.equal(body.already_installed, false); assert.equal(body.forced, true); @@ -392,7 +392,7 @@ describe('automationTools', () => { return makeSpawnResult('Provar downloaded successfully', '', 0); }); - const result = server.call('provar.automation.setup', {}); + const result = server.call('provar_automation_setup', {}); const body = parseBody(result); assert.ok(!isError(result)); @@ -405,7 +405,7 @@ describe('automationTools', () => { it('forwards --version flag to sf when version is specified', () => { spawnStub.returns(makeSpawnResult('ok', '', 0)); - server.call('provar.automation.setup', { version: '2.10.0' }); + server.call('provar_automation_setup', { version: '2.10.0' }); const args = spawnStub.firstCall.args[1] as string[]; assert.ok(args.includes('--version')); @@ -415,7 +415,7 @@ describe('automationTools', () => { it('does not forward --version flag when version is omitted', () => { spawnStub.returns(makeSpawnResult('ok', '', 0)); - server.call('provar.automation.setup', {}); + server.call('provar_automation_setup', {}); const args = spawnStub.firstCall.args[1] as string[]; assert.ok(!args.includes('--version')); @@ -424,7 +424,7 @@ describe('automationTools', () => { it('returns AUTOMATION_SETUP_FAILED when sf exits non-zero', () => { spawnStub.returns(makeSpawnResult('', 'Provided version is not a valid version.', 1)); - const result = server.call('provar.automation.setup', { version: '0.0.0' }); + const result = server.call('provar_automation_setup', { version: 
'0.0.0' }); assert.ok(isError(result)); assert.equal(parseBody(result).error_code, 'AUTOMATION_SETUP_FAILED'); @@ -434,7 +434,7 @@ describe('automationTools', () => { it('uses stdout as error message when stderr is empty', () => { spawnStub.returns(makeSpawnResult('Network timeout', '', 1)); - const body = parseBody(server.call('provar.automation.setup', {})); + const body = parseBody(server.call('provar_automation_setup', {})); assert.equal(body.message, 'Network timeout'); }); @@ -442,7 +442,7 @@ describe('automationTools', () => { it('returns SF_NOT_FOUND when sf CLI is not installed', () => { spawnStub.returns(makeEnoentResult()); - const result = server.call('provar.automation.setup', {}); + const result = server.call('provar_automation_setup', {}); assert.ok(isError(result)); const body = parseBody(result); @@ -467,7 +467,7 @@ describe('automationTools', () => { readdirStub.withArgs(base).returns(['Provar 2.12.0', 'SomeOtherApp']); makeValidInstall(provarDir); - const body = parseBody(server.call('provar.automation.setup', { force: false })); + const body = parseBody(server.call('provar_automation_setup', { force: false })); const installs = body.installations as Array<{ source: string; path: string }>; assert.ok( @@ -490,19 +490,19 @@ describe('automationTools', () => { readdirStub.withArgs(base).returns(['Chrome', 'Firefox', 'NodeJS']); // None of these have provardx.jar - const result = server.call('provar.automation.setup', { force: false }); + const result = server.call('provar_automation_setup', { force: false }); // No existing installs found → sf should be called assert.ok(spawnStub.calledOnce || isError(result)); // either calls sf or errors (ENOENT if sf not found) }); }); - // ── provar.automation.metadata.download ────────────────────────────────── + // ── provar_automation_metadata_download ────────────────────────────────── - describe('provar.automation.metadata.download', () => { + describe('provar_automation_metadata_download', () => { 
it('calls sf with metadata download args', () => { spawnStub.returns(makeSpawnResult('downloaded', '', 0)); - const result = server.call('provar.automation.metadata.download', { flags: [] }); + const result = server.call('provar_automation_metadata_download', { flags: [] }); const body = parseBody(result); assert.equal(body.exitCode, 0); const args = spawnStub.firstCall.args[1] as string[]; @@ -511,7 +511,7 @@ describe('automationTools', () => { it('forwards --target-org flag', () => { spawnStub.returns(makeSpawnResult('ok', '', 0)); - server.call('provar.automation.metadata.download', { flags: ['--target-org', 'myorg'] }); + server.call('provar_automation_metadata_download', { flags: ['--target-org', 'myorg'] }); const args = spawnStub.firstCall.args[1] as string[]; assert.ok(args.includes('--target-org')); assert.ok(args.includes('myorg')); @@ -519,20 +519,20 @@ describe('automationTools', () => { it('returns AUTOMATION_METADATA_FAILED on non-zero exit', () => { spawnStub.returns(makeSpawnResult('', 'auth failed', 1)); - const result = server.call('provar.automation.metadata.download', { flags: [] }); + const result = server.call('provar_automation_metadata_download', { flags: [] }); assert.ok(isError(result)); assert.equal(parseBody(result).error_code, 'AUTOMATION_METADATA_FAILED'); }); it('returns SF_NOT_FOUND on ENOENT', () => { spawnStub.returns(makeEnoentResult()); - const result = server.call('provar.automation.metadata.download', { flags: [] }); + const result = server.call('provar_automation_metadata_download', { flags: [] }); assert.equal(parseBody(result).error_code, 'SF_NOT_FOUND'); }); it('includes suggestion in details when [DOWNLOAD_ERROR] is in the message', () => { spawnStub.returns(makeSpawnResult('', 'Error (1): [DOWNLOAD_ERROR] ERROR\n', 1)); - const result = server.call('provar.automation.metadata.download', { flags: ['-c', 'MyOrg'] }); + const result = server.call('provar_automation_metadata_download', { flags: ['-c', 'MyOrg'] }); 
assert.ok(isError(result)); const body = parseBody(result); assert.equal(body.error_code, 'AUTOMATION_METADATA_FAILED'); @@ -544,7 +544,7 @@ describe('automationTools', () => { it('does NOT include suggestion for other failure messages', () => { spawnStub.returns(makeSpawnResult('', 'Error (2): Nonexistent flag: --properties-file\n', 1)); - const result = server.call('provar.automation.metadata.download', { flags: [] }); + const result = server.call('provar_automation_metadata_download', { flags: [] }); assert.ok(isError(result)); const body = parseBody(result); assert.ok( @@ -554,12 +554,12 @@ describe('automationTools', () => { }); }); - // ── provar.automation.config.load ───────────────────────────────────────── + // ── provar_automation_config_load ───────────────────────────────────────── - describe('provar.automation.config.load', () => { + describe('provar_automation_config_load', () => { it('calls sf with config load args and the given properties_path', () => { spawnStub.returns(makeSpawnResult('', '', 0)); - server.call('provar.automation.config.load', { properties_path: '/my/project/provardx-properties.json' }); + server.call('provar_automation_config_load', { properties_path: '/my/project/provardx-properties.json' }); const [cmd, args] = spawnStub.firstCall.args as [string, string[]]; assert.equal(cmd, 'sf'); assert.deepEqual(args, [ @@ -574,7 +574,7 @@ describe('automationTools', () => { it('returns properties_path in the response', () => { spawnStub.returns(makeSpawnResult('Config loaded', '', 0)); - const result = server.call('provar.automation.config.load', { + const result = server.call('provar_automation_config_load', { properties_path: '/my/project/provardx-properties.json', }); assert.ok(!isError(result)); @@ -584,14 +584,14 @@ describe('automationTools', () => { it('returns AUTOMATION_CONFIG_LOAD_FAILED on non-zero exit', () => { spawnStub.returns(makeSpawnResult('', 'INVALID_PATH', 1)); - const result = 
server.call('provar.automation.config.load', { properties_path: '/missing.json' }); + const result = server.call('provar_automation_config_load', { properties_path: '/missing.json' }); assert.ok(isError(result)); assert.equal(parseBody(result).error_code, 'AUTOMATION_CONFIG_LOAD_FAILED'); }); it('returns SF_NOT_FOUND on ENOENT', () => { spawnStub.returns(makeEnoentResult()); - const result = server.call('provar.automation.config.load', { + const result = server.call('provar_automation_config_load', { properties_path: '/my/project/provardx-properties.json', }); assert.equal(parseBody(result).error_code, 'SF_NOT_FOUND'); @@ -599,7 +599,7 @@ describe('automationTools', () => { it('uses the explicit sf_path when provided', () => { spawnStub.returns(makeSpawnResult('', '', 0)); - server.call('provar.automation.config.load', { properties_path: '/proj/props.json', sf_path: '/custom/bin/sf' }); + server.call('provar_automation_config_load', { properties_path: '/proj/props.json', sf_path: '/custom/bin/sf' }); const [cmd] = spawnStub.firstCall.args as [string, string[]]; assert.equal(cmd, '/custom/bin/sf'); }); @@ -618,7 +618,7 @@ describe('automationTools', () => { }); it('rejects properties_path outside allowed paths', () => { - const result = restrictedServer.call('provar.automation.config.load', { + const result = restrictedServer.call('provar_automation_config_load', { properties_path: '/etc/passwd', }); assert.ok(isError(result)); @@ -629,7 +629,7 @@ describe('automationTools', () => { it('rejects properties_path with .. traversal', () => { // Use string concatenation (not path.join) so the ".." segment is preserved // in the raw string that assertPathAllowed inspects. 
- const result = restrictedServer.call('provar.automation.config.load', { + const result = restrictedServer.call('provar_automation_config_load', { properties_path: allowedDir + '/../etc/passwd', }); assert.ok(isError(result)); @@ -640,7 +640,7 @@ describe('automationTools', () => { it('allows properties_path within allowed paths', () => { spawnStub.returns(makeSpawnResult('', '', 0)); const allowed = path.join(allowedDir, 'provardx-properties.json'); - const result = restrictedServer.call('provar.automation.config.load', { + const result = restrictedServer.call('provar_automation_config_load', { properties_path: allowed, }); assert.ok(!isError(result)); @@ -693,7 +693,7 @@ describe('automationTools', () => { it('passes shell: true when sf_path is a .cmd file', () => { spawnStub.returns(makeSpawnResult('ok', '', 0)); - server.call('provar.automation.compile', { flags: [], sf_path: 'C:\\npm\\sf.cmd' }); + server.call('provar_automation_compile', { flags: [], sf_path: 'C:\\npm\\sf.cmd' }); const opts = spawnStub.firstCall.args[2] as { shell: boolean }; assert.ok(opts.shell === true, 'shell should be true for a .cmd executable'); }); @@ -702,21 +702,21 @@ describe('automationTools', () => { // .exe has an explicit extension that is neither .cmd nor .bat, so no // shell is required even on Windows. 
spawnStub.returns(makeSpawnResult('ok', '', 0)); - server.call('provar.automation.compile', { flags: [], sf_path: 'C:\\Program Files\\sf.exe' }); + server.call('provar_automation_compile', { flags: [], sf_path: 'C:\\Program Files\\sf.exe' }); const opts = spawnStub.firstCall.args[2] as { shell: boolean }; assert.ok(opts.shell === false, 'shell should be false for a .exe executable'); }); it('passes shell: false for a .js node script path', () => { spawnStub.returns(makeSpawnResult('ok', '', 0)); - server.call('provar.automation.testrun', { flags: [], sf_path: 'C:\\npm\\sf.js' }); + server.call('provar_automation_testrun', { flags: [], sf_path: 'C:\\npm\\sf.js' }); const opts = spawnStub.firstCall.args[2] as { shell: boolean }; assert.ok(opts.shell === false, 'shell should be false for a .js script'); }); it('passes shell: true when sf_path is a .bat file', () => { spawnStub.returns(makeSpawnResult('ok', '', 0)); - server.call('provar.automation.testrun', { flags: [], sf_path: 'C:\\tools\\sf.bat' }); + server.call('provar_automation_testrun', { flags: [], sf_path: 'C:\\tools\\sf.bat' }); const opts = spawnStub.firstCall.args[2] as { shell: boolean }; assert.ok(opts.shell === true, 'shell should be true for a .bat executable'); }); @@ -739,7 +739,7 @@ describe('automationTools', () => { it('phase 1 uses shell:false for the --version probe', () => { spawnStub.onFirstCall().returns(makeSpawnResult('sf/2.0.0 linux-x64 node-v18', '', 0)); // probe spawnStub.onSecondCall().returns(makeSpawnResult('testrun ok', '', 0)); // actual command - server.call('provar.automation.testrun', { flags: [] }); + server.call('provar_automation_testrun', { flags: [] }); const probeArgs = spawnStub.firstCall.args as [string, string[], { shell: boolean }]; assert.deepEqual(probeArgs[1], ['--version']); assert.equal(probeArgs[2].shell, false); @@ -748,7 +748,7 @@ describe('automationTools', () => { it('phase 1 success — does not attempt phase 2', () => { 
spawnStub.onFirstCall().returns(makeSpawnResult('sf/2.0.0', '', 0)); // probe succeeds spawnStub.onSecondCall().returns(makeSpawnResult('ok', '', 0)); // actual command - server.call('provar.automation.testrun', { flags: [] }); + server.call('provar_automation_testrun', { flags: [] }); // Exactly 2 spawns: phase 1 probe + actual command; no phase 2 retry const versionProbes = Array.from({ length: spawnStub.callCount }, (_, i) => spawnStub.getCall(i)).filter((c) => (c.args[1] as string[]).includes('--version') @@ -761,7 +761,7 @@ describe('automationTools', () => { spawnStub.onFirstCall().returns(makeEnoentResult()); // phase 1 ENOENT spawnStub.onSecondCall().returns(makeSpawnResult('sf/2.0.0 win32', '', 0)); // phase 2 success spawnStub.onThirdCall().returns(makeSpawnResult('testrun ok', '', 0)); // actual command - server.call('provar.automation.testrun', { flags: [] }); + server.call('provar_automation_testrun', { flags: [] }); assert.ok(spawnStub.callCount >= 2, 'phase 2 probe should have been called'); const phase2Args = spawnStub.secondCall.args as [string, string[], { shell: boolean }]; assert.deepEqual(phase2Args[1], ['--version']); @@ -773,7 +773,7 @@ describe('automationTools', () => { spawnStub.onFirstCall().returns(makeEnoentResult()); // probe ENOENT // Safe default for any subsequent calls (e.g. 
if sf is found via common-path fallback) spawnStub.returns(makeSpawnResult('ok', '', 0)); - server.call('provar.automation.testrun', { flags: [] }); + server.call('provar_automation_testrun', { flags: [] }); // Only one --version probe; no shell:true retry on non-win32 const versionProbes = Array.from({ length: spawnStub.callCount }, (_, i) => spawnStub.getCall(i)).filter((c) => (c.args[1] as string[]).includes('--version') @@ -796,21 +796,21 @@ describe('automationTools', () => { it('rejects sf_path with & when the path requires shell (extensionless on win32)', () => { // 'sf&evil' has no extension → needsWindowsShell returns true → assertShellSafePath rejects it - const result = server.call('provar.automation.testrun', { flags: [], sf_path: 'sf&evil' }); + const result = server.call('provar_automation_testrun', { flags: [], sf_path: 'sf&evil' }); assert.ok(isError(result)); assert.equal(parseBody(result).error_code, 'INVALID_SF_PATH'); assert.ok(spawnStub.notCalled, 'spawnSync should not be called when path is rejected'); }); it('rejects sf_path with | when the path requires shell', () => { - const result = server.call('provar.automation.testrun', { flags: [], sf_path: 'sf|evil' }); + const result = server.call('provar_automation_testrun', { flags: [], sf_path: 'sf|evil' }); assert.ok(isError(result)); assert.equal(parseBody(result).error_code, 'INVALID_SF_PATH'); }); it('accepts a clean Windows .cmd path', () => { spawnStub.returns(makeSpawnResult('ok', '', 0)); - const result = server.call('provar.automation.testrun', { flags: [], sf_path: 'C:\\npm\\sf.cmd' }); + const result = server.call('provar_automation_testrun', { flags: [], sf_path: 'C:\\npm\\sf.cmd' }); assert.ok(!isError(result)); }); @@ -818,7 +818,7 @@ describe('automationTools', () => { setSfPlatformForTesting('linux'); spawnStub.returns(makeSpawnResult('ok', '', 0)); // On linux needsWindowsShell returns false → assertShellSafePath is never called - const result = 
server.call('provar.automation.testrun', { flags: [], sf_path: '/usr/bin/sf' }); + const result = server.call('provar_automation_testrun', { flags: [], sf_path: '/usr/bin/sf' }); assert.ok(!isError(result)); }); }); @@ -828,19 +828,19 @@ describe('automationTools', () => { describe('sf_path explicit executable', () => { it('testrun uses sf_path as the executable', () => { spawnStub.returns(makeSpawnResult('ok', '', 0)); - server.call('provar.automation.testrun', { flags: [], sf_path: '/opt/sf/bin/sf' }); + server.call('provar_automation_testrun', { flags: [], sf_path: '/opt/sf/bin/sf' }); assert.equal(spawnStub.firstCall.args[0], '/opt/sf/bin/sf'); }); it('compile uses sf_path as the executable', () => { spawnStub.returns(makeSpawnResult('ok', '', 0)); - server.call('provar.automation.compile', { flags: [], sf_path: '/opt/sf/bin/sf' }); + server.call('provar_automation_compile', { flags: [], sf_path: '/opt/sf/bin/sf' }); assert.equal(spawnStub.firstCall.args[0], '/opt/sf/bin/sf'); }); it('SF_NOT_FOUND message names the explicit path when sf_path is provided and missing', () => { spawnStub.returns(makeEnoentResult()); - const result = server.call('provar.automation.compile', { flags: [], sf_path: '/missing/sf' }); + const result = server.call('provar_automation_compile', { flags: [], sf_path: '/missing/sf' }); const body = parseBody(result); assert.equal(body.error_code, 'SF_NOT_FOUND'); assert.ok((body.message as string).includes('/missing/sf'), 'message should name the explicit path'); @@ -848,7 +848,7 @@ describe('automationTools', () => { it('SF_NOT_FOUND message mentions sf_path option when no explicit path was given', () => { spawnStub.returns(makeEnoentResult()); - const result = server.call('provar.automation.testrun', { flags: [] }); + const result = server.call('provar_automation_testrun', { flags: [] }); const body = parseBody(result); assert.equal(body.error_code, 'SF_NOT_FOUND'); assert.ok((body.message as string).includes('sf_path'), 'message should 
hint at sf_path parameter'); @@ -895,11 +895,11 @@ describe('filterTestRunOutput', () => { assert.equal(suppressed, 3); }); - it('appends suppressed-count note referencing provar.testrun.rca', () => { + it('appends suppressed-count note referencing provar_testrun_rca', () => { const raw = 'com.networknt.schema.SchemaLoader\nINFO Done'; const { filtered } = filterTestRunOutput(raw); assert.ok(filtered.includes('lines suppressed'), 'Should append suppressed note'); - assert.ok(filtered.includes('provar.testrun.rca'), 'Should mention rca tool'); + assert.ok(filtered.includes('provar_testrun_rca'), 'Should mention rca tool'); }); it('does not append note when nothing was suppressed', () => { diff --git a/test/unit/mcp/connectionTools.test.ts b/test/unit/mcp/connectionTools.test.ts index 4da062fb..5021563b 100644 --- a/test/unit/mcp/connectionTools.test.ts +++ b/test/unit/mcp/connectionTools.test.ts @@ -96,13 +96,13 @@ afterEach(() => { fs.rmSync(tmpDir, { recursive: true, force: true }); }); -// ── provar.connection.list ──────────────────────────────────────────────────── +// ── provar_connection_list ──────────────────────────────────────────────────── -describe('provar.connection.list', () => { +describe('provar_connection_list', () => { describe('happy path', () => { it('returns connections array with name, type, and url', () => { writeTestProject(tmpDir, BASIC_TEST_PROJECT); - const result = server.call('provar.connection.list', { project_path: tmpDir }); + const result = server.call('provar_connection_list', { project_path: tmpDir }); assert.equal(isError(result), false); const body = parseText(result); @@ -113,7 +113,7 @@ describe('provar.connection.list', () => { it('maps sf class to Salesforce type', () => { writeTestProject(tmpDir, BASIC_TEST_PROJECT); - const result = server.call('provar.connection.list', { project_path: tmpDir }); + const result = server.call('provar_connection_list', { project_path: tmpDir }); const connections = 
parseText(result)['connections'] as Array>; const sfConns = connections.filter((c) => c['type'] === 'Salesforce'); assert.equal(sfConns.length, 2, 'Expected 2 Salesforce connections'); @@ -122,7 +122,7 @@ describe('provar.connection.list', () => { it('maps ui class to Web type', () => { writeTestProject(tmpDir, BASIC_TEST_PROJECT); - const result = server.call('provar.connection.list', { project_path: tmpDir }); + const result = server.call('provar_connection_list', { project_path: tmpDir }); const connections = parseText(result)['connections'] as Array>; const webConns = connections.filter((c) => c['type'] === 'Web'); assert.equal(webConns.length, 1); @@ -131,7 +131,7 @@ describe('provar.connection.list', () => { it('marks sso class connections as sso_configured=true', () => { writeTestProject(tmpDir, BASIC_TEST_PROJECT); - const result = server.call('provar.connection.list', { project_path: tmpDir }); + const result = server.call('provar_connection_list', { project_path: tmpDir }); const connections = parseText(result)['connections'] as Array>; const ssoConn = connections.find((c) => c['name'] === 'OktaSso'); assert.ok(ssoConn, 'Expected OktaSso connection'); @@ -140,7 +140,7 @@ describe('provar.connection.list', () => { it('marks non-sso connections as sso_configured=false', () => { writeTestProject(tmpDir, BASIC_TEST_PROJECT); - const result = server.call('provar.connection.list', { project_path: tmpDir }); + const result = server.call('provar_connection_list', { project_path: tmpDir }); const connections = parseText(result)['connections'] as Array>; const sfConn = connections.find((c) => c['name'] === 'MyOrg'); assert.ok(sfConn); @@ -149,7 +149,7 @@ describe('provar.connection.list', () => { it('returns environments with name, connection, and url', () => { writeTestProject(tmpDir, BASIC_TEST_PROJECT); - const result = server.call('provar.connection.list', { project_path: tmpDir }); + const result = server.call('provar_connection_list', { project_path: tmpDir 
}); const environments = parseText(result)['environments'] as Array>; assert.ok(Array.isArray(environments)); assert.equal(environments.length, 2); @@ -161,7 +161,7 @@ describe('provar.connection.list', () => { it('returns environment without url when not present', () => { writeTestProject(tmpDir, BASIC_TEST_PROJECT); - const result = server.call('provar.connection.list', { project_path: tmpDir }); + const result = server.call('provar_connection_list', { project_path: tmpDir }); const environments = parseText(result)['environments'] as Array>; const uat = environments.find((e) => e['name'] === 'UAT'); assert.ok(uat); @@ -170,7 +170,7 @@ describe('provar.connection.list', () => { it('returns summary with correct counts', () => { writeTestProject(tmpDir, BASIC_TEST_PROJECT); - const result = server.call('provar.connection.list', { project_path: tmpDir }); + const result = server.call('provar_connection_list', { project_path: tmpDir }); const summary = parseText(result)['summary'] as Record; assert.equal(summary['connection_count'], 4); assert.equal(summary['environment_count'], 2); @@ -178,7 +178,7 @@ describe('provar.connection.list', () => { it('returns empty arrays for project with no connections or environments', () => { writeTestProject(tmpDir, EMPTY_TEST_PROJECT); - const result = server.call('provar.connection.list', { project_path: tmpDir }); + const result = server.call('provar_connection_list', { project_path: tmpDir }); assert.equal(isError(result), false); const body = parseText(result); assert.deepEqual(body['connections'], []); @@ -188,14 +188,14 @@ describe('provar.connection.list', () => { describe('error cases', () => { it('returns CONNECTION_FILE_NOT_FOUND when .testproject is missing', () => { - const result = server.call('provar.connection.list', { project_path: tmpDir }); + const result = server.call('provar_connection_list', { project_path: tmpDir }); assert.equal(isError(result), true); const body = parseText(result); 
assert.equal(body['error_code'], 'CONNECTION_FILE_NOT_FOUND'); }); it('CONNECTION_FILE_NOT_FOUND includes a suggestion', () => { - const result = server.call('provar.connection.list', { project_path: tmpDir }); + const result = server.call('provar_connection_list', { project_path: tmpDir }); const body = parseText(result); const details = body['details'] as Record; assert.ok(details?.['suggestion'], 'Expected suggestion in details'); @@ -204,7 +204,7 @@ describe('provar.connection.list', () => { it('returns PATH_NOT_ALLOWED when project_path is outside allowed paths', () => { const strictServer = new MockMcpServer(); registerConnectionList(strictServer as never, { allowedPaths: [tmpDir] }); - const result = strictServer.call('provar.connection.list', { + const result = strictServer.call('provar_connection_list', { project_path: path.join(os.tmpdir(), 'some-other-project'), }); assert.equal(isError(result), true); @@ -214,7 +214,7 @@ describe('provar.connection.list', () => { it('returns CONNECTION_XML_PARSE_ERROR for malformed .testproject XML', () => { writeTestProject(tmpDir, '; signal: NodeJS.Signals | null }; +type SpawnResult = { + stdout: string; + stderr: string; + status: number | null; + error: Error | undefined; + pid: number | undefined; + output: Array; + signal: NodeJS.Signals | null; +}; function makeSpawnResult(stdout: string, status = 0): SpawnResult { return { stdout, stderr: '', status, error: undefined, pid: 1, output: [null, stdout, ''], signal: null }; @@ -84,26 +92,44 @@ function makeHappyPathStub(stub: sinon.SinonStub): void { // Call 0: job query stub.onCall(0).returns(makeSpawnResult(queryResult([{ Id: JOB_ID }]))); // Call 1: cycle query - stub.onCall(1).returns(makeSpawnResult(queryResult([{ - Id: CYCLE_ID, - provar__Web_Browser__c: 'Chrome', - provar__Browser_Version__c: '120', - provar__Environment_Text__c: 'Production', - }]))); + stub.onCall(1).returns( + makeSpawnResult( + queryResult([ + { + Id: CYCLE_ID, + provar__Web_Browser__c: 
'Chrome', + provar__Browser_Version__c: '120', + provar__Environment_Text__c: 'Production', + }, + ]) + ) + ); // Call 2: failed executions query - stub.onCall(2).returns(makeSpawnResult(queryResult([{ - Id: EXEC_ID, - provar__Test_Case__c: TC_ID, - provar__Tester__c: 'tester@example.com', - }]))); + stub.onCall(2).returns( + makeSpawnResult( + queryResult([ + { + Id: EXEC_ID, + provar__Test_Case__c: TC_ID, + provar__Tester__c: 'tester@example.com', + }, + ]) + ) + ); // Call 3: failed step query - stub.onCall(3).returns(makeSpawnResult(queryResult([{ - Id: STEP_EXEC_ID, - provar__Test_Step__c: STEP_ID, - provar__ActionObs__c: 'Click Login button', - provar__Actual_Result__c: 'Element not found', - provar__Sequence_No__c: 3, - }]))); + stub.onCall(3).returns( + makeSpawnResult( + queryResult([ + { + Id: STEP_EXEC_ID, + provar__Test_Step__c: STEP_ID, + provar__ActionObs__c: 'Click Login button', + provar__Actual_Result__c: 'Element not found', + provar__Sequence_No__c: 3, + }, + ]) + ) + ); // Call 4: create Defect__c stub.onCall(4).returns(makeSpawnResult(createResult(DEFECT_ID))); // Call 5: create Test_Case_Defect__c @@ -149,12 +175,18 @@ describe('createDefectsForRun', () => { it('returns empty result when no failed executions exist', () => { stub.onCall(0).returns(makeSpawnResult(queryResult([{ Id: JOB_ID }]))); - stub.onCall(1).returns(makeSpawnResult(queryResult([{ - Id: CYCLE_ID, - provar__Web_Browser__c: 'Firefox', - provar__Browser_Version__c: '121', - provar__Environment_Text__c: 'Staging', - }]))); + stub.onCall(1).returns( + makeSpawnResult( + queryResult([ + { + Id: CYCLE_ID, + provar__Web_Browser__c: 'Firefox', + provar__Browser_Version__c: '121', + provar__Environment_Text__c: 'Staging', + }, + ]) + ) + ); stub.onCall(2).returns(makeSpawnResult(queryResult([], 0))); const result = createDefectsForRun(RUN_ID, ORG); @@ -186,24 +218,40 @@ describe('createDefectsForRun', () => { // Two failed executions; filter keeps only the one matching TC_ID const 
OTHER_TC = 'a0t000000000OTH'; stub.onCall(0).returns(makeSpawnResult(queryResult([{ Id: JOB_ID }]))); - stub.onCall(1).returns(makeSpawnResult(queryResult([{ - Id: CYCLE_ID, - provar__Web_Browser__c: 'Chrome', - provar__Browser_Version__c: '120', - provar__Environment_Text__c: 'Production', - }]))); - stub.onCall(2).returns(makeSpawnResult(queryResult([ - { Id: EXEC_ID, provar__Test_Case__c: TC_ID, provar__Tester__c: 'tester@example.com' }, - { Id: 'a0s000000000EX2', provar__Test_Case__c: OTHER_TC, provar__Tester__c: 'tester@example.com' }, - ]))); + stub.onCall(1).returns( + makeSpawnResult( + queryResult([ + { + Id: CYCLE_ID, + provar__Web_Browser__c: 'Chrome', + provar__Browser_Version__c: '120', + provar__Environment_Text__c: 'Production', + }, + ]) + ) + ); + stub.onCall(2).returns( + makeSpawnResult( + queryResult([ + { Id: EXEC_ID, provar__Test_Case__c: TC_ID, provar__Tester__c: 'tester@example.com' }, + { Id: 'a0s000000000EX2', provar__Test_Case__c: OTHER_TC, provar__Tester__c: 'tester@example.com' }, + ]) + ) + ); // step query for the one kept execution - stub.onCall(3).returns(makeSpawnResult(queryResult([{ - Id: STEP_EXEC_ID, - provar__Test_Step__c: STEP_ID, - provar__ActionObs__c: 'Click', - provar__Actual_Result__c: 'Failed', - provar__Sequence_No__c: 1, - }]))); + stub.onCall(3).returns( + makeSpawnResult( + queryResult([ + { + Id: STEP_EXEC_ID, + provar__Test_Step__c: STEP_ID, + provar__ActionObs__c: 'Click', + provar__Actual_Result__c: 'Failed', + provar__Sequence_No__c: 1, + }, + ]) + ) + ); stub.onCall(4).returns(makeSpawnResult(createResult(DEFECT_ID))); stub.onCall(5).returns(makeSpawnResult(createResult(TC_DEFECT_ID))); stub.onCall(6).returns(makeSpawnResult(createResult(EXEC_DEFECT_ID))); @@ -216,17 +264,29 @@ describe('createDefectsForRun', () => { it('handles missing step gracefully (no step found for execution)', () => { stub.onCall(0).returns(makeSpawnResult(queryResult([{ Id: JOB_ID }]))); - 
stub.onCall(1).returns(makeSpawnResult(queryResult([{ - Id: CYCLE_ID, - provar__Web_Browser__c: 'Safari', - provar__Browser_Version__c: '17', - provar__Environment_Text__c: 'UAT', - }]))); - stub.onCall(2).returns(makeSpawnResult(queryResult([{ - Id: EXEC_ID, - provar__Test_Case__c: TC_ID, - provar__Tester__c: '', - }]))); + stub.onCall(1).returns( + makeSpawnResult( + queryResult([ + { + Id: CYCLE_ID, + provar__Web_Browser__c: 'Safari', + provar__Browser_Version__c: '17', + provar__Environment_Text__c: 'UAT', + }, + ]) + ) + ); + stub.onCall(2).returns( + makeSpawnResult( + queryResult([ + { + Id: EXEC_ID, + provar__Test_Case__c: TC_ID, + provar__Tester__c: '', + }, + ]) + ) + ); stub.onCall(3).returns(makeSpawnResult(queryResult([], 0))); // no steps stub.onCall(4).returns(makeSpawnResult(createResult(DEFECT_ID))); stub.onCall(5).returns(makeSpawnResult(createResult(TC_DEFECT_ID))); @@ -259,7 +319,7 @@ describe('createDefectsForRun', () => { // ── Tests: MCP tool registration ────────────────────────────────────────────── -describe('provar.qualityhub.defect.create (MCP tool)', () => { +describe('provar_qualityhub_defect_create (MCP tool)', () => { let server: MockMcpServer; let stub: sinon.SinonStub; @@ -276,7 +336,7 @@ describe('provar.qualityhub.defect.create (MCP tool)', () => { it('returns structured content with created defects on success', () => { makeHappyPathStub(stub); - const result = server.call('provar.qualityhub.defect.create', { + const result = server.call('provar_qualityhub_defect_create', { run_id: RUN_ID, target_org: ORG, }); @@ -289,7 +349,7 @@ describe('provar.qualityhub.defect.create (MCP tool)', () => { it('returns isError with DEFECT_CREATE_FAILED on job-not-found error', () => { stub.onCall(0).returns(makeSpawnResult(queryResult([], 0))); - const result = server.call('provar.qualityhub.defect.create', { + const result = server.call('provar_qualityhub_defect_create', { run_id: RUN_ID, target_org: ORG, }); @@ -300,7 +360,7 @@ 
describe('provar.qualityhub.defect.create (MCP tool)', () => { it('returns isError with SF_NOT_FOUND when sf CLI is missing', () => { stub.returns(makeEnoentResult()); - const result = server.call('provar.qualityhub.defect.create', { + const result = server.call('provar_qualityhub_defect_create', { run_id: RUN_ID, target_org: ORG, }); @@ -312,19 +372,31 @@ describe('provar.qualityhub.defect.create (MCP tool)', () => { it('passes failed_tests filter through to createDefectsForRun', () => { // Return empty executions - filtered out stub.onCall(0).returns(makeSpawnResult(queryResult([{ Id: JOB_ID }]))); - stub.onCall(1).returns(makeSpawnResult(queryResult([{ - Id: CYCLE_ID, - provar__Web_Browser__c: 'Edge', - provar__Browser_Version__c: '120', - provar__Environment_Text__c: 'Dev', - }]))); - stub.onCall(2).returns(makeSpawnResult(queryResult([{ - Id: EXEC_ID, - provar__Test_Case__c: TC_ID, - provar__Tester__c: 'qa@example.com', - }]))); - - const result = server.call('provar.qualityhub.defect.create', { + stub.onCall(1).returns( + makeSpawnResult( + queryResult([ + { + Id: CYCLE_ID, + provar__Web_Browser__c: 'Edge', + provar__Browser_Version__c: '120', + provar__Environment_Text__c: 'Dev', + }, + ]) + ) + ); + stub.onCall(2).returns( + makeSpawnResult( + queryResult([ + { + Id: EXEC_ID, + provar__Test_Case__c: TC_ID, + provar__Tester__c: 'qa@example.com', + }, + ]) + ) + ); + + const result = server.call('provar_qualityhub_defect_create', { run_id: RUN_ID, target_org: ORG, failed_tests: ['no-match'], diff --git a/test/unit/mcp/loopPrompts.test.ts b/test/unit/mcp/loopPrompts.test.ts index 1c49a270..a183c54e 100644 --- a/test/unit/mcp/loopPrompts.test.ts +++ b/test/unit/mcp/loopPrompts.test.ts @@ -107,8 +107,8 @@ describe('loopPrompts — provar.loop.generate', () => { it('includes corpus retrieval workflow step', () => { const result = server.call('provar.loop.generate', { story: 'any story' }); const text = getMessageText(result); - 
assert.ok(text.includes('provar.qualityhub.examples.retrieve'), 'should reference corpus tool'); - assert.ok(text.includes('provar.testcase.validate'), 'should reference validator tool'); + assert.ok(text.includes('provar_qualityhub_examples_retrieve'), 'should reference corpus tool'); + assert.ok(text.includes('provar_testcase_validate'), 'should reference validator tool'); }); it('includes objectName in message when provided', () => { @@ -161,8 +161,8 @@ describe('loopPrompts — provar.loop.fix', () => { rcaOutput: 'any failure', }); const text = getMessageText(result); - assert.ok(text.includes('provar.qualityhub.examples.retrieve'), 'should reference corpus tool'); - assert.ok(text.includes('provar.testcase.validate'), 'should reference validator tool'); + assert.ok(text.includes('provar_qualityhub_examples_retrieve'), 'should reference corpus tool'); + assert.ok(text.includes('provar_testcase_validate'), 'should reference validator tool'); }); it('starts workflow by reading the file', () => { @@ -190,13 +190,13 @@ describe('loopPrompts — provar.loop.review', () => { }); const text = getMessageText(result); assert.ok(text.includes('Coverage') || text.includes('UiAssert'), 'message should include quality checklist'); - assert.ok(text.includes('provar.testcase.validate'), 'should reference validator tool'); + assert.ok(text.includes('provar_testcase_validate'), 'should reference validator tool'); }); it('includes corpus retrieval step', () => { const result = server.call('provar.loop.review', { testcasePath: '/any/path.testcase' }); const text = getMessageText(result); - assert.ok(text.includes('provar.qualityhub.examples.retrieve'), 'should reference corpus tool'); + assert.ok(text.includes('provar_qualityhub_examples_retrieve'), 'should reference corpus tool'); }); it('includes projectPath in message when provided', () => { @@ -248,7 +248,7 @@ describe('loopPrompts — provar.loop.coverage', () => { }); const text = getMessageText(result); assert.ok( - 
text.includes('provar.qualityhub.testcase.retrieve') || text.includes('my-org-alias'), + text.includes('provar_qualityhub_testcase_retrieve') || text.includes('my-org-alias'), 'message should include Quality Hub retrieval when targetOrg provided' ); }); @@ -260,7 +260,7 @@ describe('loopPrompts — provar.loop.coverage', () => { }); const text = getMessageText(result); // When no targetOrg, the step 2 should be corpus retrieval, not QH testcase retrieve - assert.ok(text.includes('provar.qualityhub.examples.retrieve'), 'should still include corpus retrieval'); + assert.ok(text.includes('provar_qualityhub_examples_retrieve'), 'should still include corpus retrieval'); }); }); @@ -281,8 +281,8 @@ describe('loopPrompts — provar.loop.db', () => { it('includes corpus retrieval and validate in workflow', () => { const result = server.call('provar.loop.db', { story: 'any db test' }); const text = getMessageText(result); - assert.ok(text.includes('provar.qualityhub.examples.retrieve'), 'should reference corpus tool'); - assert.ok(text.includes('provar.testcase.validate'), 'should reference validator tool'); + assert.ok(text.includes('provar_qualityhub_examples_retrieve'), 'should reference corpus tool'); + assert.ok(text.includes('provar_testcase_validate'), 'should reference validator tool'); }); it('references DbConnect and SqlQuery step types', () => { diff --git a/test/unit/mcp/migrationPrompts.test.ts b/test/unit/mcp/migrationPrompts.test.ts index cb7a8183..9b0a78ea 100644 --- a/test/unit/mcp/migrationPrompts.test.ts +++ b/test/unit/mcp/migrationPrompts.test.ts @@ -99,8 +99,8 @@ describe('migrationPrompts — provar.migrate.crt', () => { it('includes workflow steps in message', () => { const result = server.call('provar.migrate.crt', { source: 'any source' }); const text = getMessageText(result); - assert.ok(text.includes('provar.qualityhub.examples.retrieve'), 'message should reference corpus retrieval tool'); - assert.ok(text.includes('provar.testcase.validate'), 'message 
should reference validator tool'); + assert.ok(text.includes('provar_qualityhub_examples_retrieve'), 'message should reference corpus retrieval tool'); + assert.ok(text.includes('provar_testcase_validate'), 'message should reference validator tool'); }); it('uses provided testName when present', () => { @@ -144,7 +144,7 @@ describe('migrationPrompts — provar.migrate.selenium', () => { it('includes workflow steps', () => { const result = server.call('provar.migrate.selenium', { source: 'any' }); const text = getMessageText(result); - assert.ok(text.includes('provar.qualityhub.examples.retrieve'), 'should reference corpus tool'); + assert.ok(text.includes('provar_qualityhub_examples_retrieve'), 'should reference corpus tool'); }); }); diff --git a/test/unit/mcp/nitroXTools.test.ts b/test/unit/mcp/nitroXTools.test.ts index e290315a..0d80d948 100644 --- a/test/unit/mcp/nitroXTools.test.ts +++ b/test/unit/mcp/nitroXTools.test.ts @@ -77,16 +77,16 @@ describe('nitroXTools', () => { fs.rmSync(tmpDir, { recursive: true, force: true }); }); - // ── provar.nitrox.discover ───────────────────────────────────────────────── + // ── provar_nitrox_discover ───────────────────────────────────────────────── - describe('provar.nitrox.discover', () => { + describe('provar_nitrox_discover', () => { it('finds project when .testproject marker exists', () => { fs.writeFileSync(path.join(tmpDir, '.testproject'), ''); const nitroxDir = path.join(tmpDir, 'nitroX'); fs.mkdirSync(nitroxDir); fs.writeFileSync(path.join(nitroxDir, 'Component.po.json'), JSON.stringify(VALID_ROOT)); - const result = server.call('provar.nitrox.discover', { search_roots: [tmpDir] }); + const result = server.call('provar_nitrox_discover', { search_roots: [tmpDir] }); const body = parseBody(result); const projects = body['projects'] as Array>; assert.ok(Array.isArray(projects)); @@ -96,14 +96,14 @@ describe('nitroXTools', () => { }); it('returns empty projects when no .testproject found', () => { - const result = 
server.call('provar.nitrox.discover', { search_roots: [tmpDir] }); + const result = server.call('provar_nitrox_discover', { search_roots: [tmpDir] }); const body = parseBody(result); const projects = body['projects'] as unknown[]; assert.deepEqual(projects, []); }); it('handles non-existent search root gracefully', () => { - const result = server.call('provar.nitrox.discover', { + const result = server.call('provar_nitrox_discover', { search_roots: [path.join(tmpDir, 'does-not-exist')], }); assert.ok(!isError(result)); @@ -117,7 +117,7 @@ describe('nitroXTools', () => { fs.mkdirSync(nmDir, { recursive: true }); fs.writeFileSync(path.join(nmDir, '.testproject'), ''); - const result = server.call('provar.nitrox.discover', { search_roots: [tmpDir] }); + const result = server.call('provar_nitrox_discover', { search_roots: [tmpDir] }); const body = parseBody(result); assert.deepEqual(body['projects'], []); }); @@ -127,7 +127,7 @@ describe('nitroXTools', () => { fs.mkdirSync(hiddenDir); fs.writeFileSync(path.join(hiddenDir, '.testproject'), ''); - const result = server.call('provar.nitrox.discover', { search_roots: [tmpDir] }); + const result = server.call('provar_nitrox_discover', { search_roots: [tmpDir] }); const body = parseBody(result); assert.deepEqual(body['projects'], []); }); @@ -136,12 +136,9 @@ describe('nitroXTools', () => { fs.writeFileSync(path.join(tmpDir, '.testproject'), ''); const pkgDir = path.join(tmpDir, 'nitroXPackages', 'my-pkg'); fs.mkdirSync(pkgDir, { recursive: true }); - fs.writeFileSync( - path.join(pkgDir, 'package.json'), - JSON.stringify({ name: 'my-pkg', version: '1.0.0' }) - ); + fs.writeFileSync(path.join(pkgDir, 'package.json'), JSON.stringify({ name: 'my-pkg', version: '1.0.0' })); - const result = server.call('provar.nitrox.discover', { + const result = server.call('provar_nitrox_discover', { search_roots: [tmpDir], include_packages: true, }); @@ -154,14 +151,14 @@ describe('nitroXTools', () => { }); }); - // ── provar.nitrox.read 
───────────────────────────────────────────────────── + // ── provar_nitrox_read ───────────────────────────────────────────────────── - describe('provar.nitrox.read', () => { + describe('provar_nitrox_read', () => { it('returns content for a valid .po.json file', () => { const filePath = path.join(tmpDir, 'Component.po.json'); fs.writeFileSync(filePath, JSON.stringify(VALID_ROOT)); - const result = server.call('provar.nitrox.read', { file_paths: [filePath] }); + const result = server.call('provar_nitrox_read', { file_paths: [filePath] }); assert.ok(!isError(result)); const body = parseBody(result); const files = body['files'] as Array>; @@ -172,7 +169,7 @@ describe('nitroXTools', () => { it('returns FILE_NOT_FOUND error for missing file', () => { const missing = path.join(tmpDir, 'missing.po.json'); - const result = server.call('provar.nitrox.read', { file_paths: [missing] }); + const result = server.call('provar_nitrox_read', { file_paths: [missing] }); assert.ok(!isError(result)); // tool-level success, per-file error const body = parseBody(result); const files = body['files'] as Array>; @@ -184,12 +181,8 @@ describe('nitroXTools', () => { fs.writeFileSync(path.join(tmpDir, `c${i}.po.json`), JSON.stringify(VALID_ROOT)); } - const result = server.call('provar.nitrox.read', { - file_paths: [ - path.join(tmpDir, 'c0.po.json'), - path.join(tmpDir, 'c1.po.json'), - path.join(tmpDir, 'c2.po.json'), - ], + const result = server.call('provar_nitrox_read', { + file_paths: [path.join(tmpDir, 'c0.po.json'), path.join(tmpDir, 'c1.po.json'), path.join(tmpDir, 'c2.po.json')], max_files: 2, }); const body = parseBody(result); @@ -206,7 +199,7 @@ describe('nitroXTools', () => { fs.writeFileSync(path.join(nitroxDir, 'A.po.json'), JSON.stringify(VALID_ROOT)); fs.writeFileSync(path.join(nitroxDir, 'B.po.json'), JSON.stringify(VALID_ROOT)); - const result = server.call('provar.nitrox.read', { project_path: tmpDir }); + const result = server.call('provar_nitrox_read', { 
project_path: tmpDir }); assert.ok(!isError(result)); const body = parseBody(result); assert.equal(body['total_found'], 2); @@ -218,7 +211,7 @@ describe('nitroXTools', () => { // Write a real file so it's not FILE_NOT_FOUND fs.writeFileSync(outsidePath, JSON.stringify(VALID_ROOT)); try { - const result = server.call('provar.nitrox.read', { file_paths: [outsidePath] }); + const result = server.call('provar_nitrox_read', { file_paths: [outsidePath] }); const body = parseBody(result); const files = body['files'] as Array>; // Per-file path policy error @@ -229,18 +222,18 @@ describe('nitroXTools', () => { }); it('returns MISSING_INPUT when neither file_paths nor project_path provided', () => { - const result = server.call('provar.nitrox.read', {}); + const result = server.call('provar_nitrox_read', {}); assert.ok(isError(result)); const body = parseBody(result); assert.equal(body['error_code'], 'MISSING_INPUT'); }); }); - // ── provar.nitrox.validate ───────────────────────────────────────────────── + // ── provar_nitrox_validate ───────────────────────────────────────────────── - describe('provar.nitrox.validate', () => { + describe('provar_nitrox_validate', () => { it('scores a fully valid root component as 100', () => { - const result = server.call('provar.nitrox.validate', { + const result = server.call('provar_nitrox_validate', { content: JSON.stringify(VALID_ROOT), }); assert.ok(!isError(result)); @@ -252,7 +245,7 @@ describe('nitroXTools', () => { it('NX001 ERROR: missing componentId', () => { const obj = { name: '/com/test/C', type: 'Block', pageStructureElement: true, fieldDetailsElement: false }; - const result = server.call('provar.nitrox.validate', { content: JSON.stringify(obj) }); + const result = server.call('provar_nitrox_validate', { content: JSON.stringify(obj) }); const body = parseBody(result); const issues = body['issues'] as Array>; assert.ok(issues.some((i) => i['rule_id'] === 'NX001' && i['severity'] === 'ERROR')); @@ -260,7 +253,7 @@ 
describe('nitroXTools', () => { it('NX001 ERROR: invalid UUID format', () => { const obj = { ...VALID_ROOT, componentId: 'not-a-uuid' }; - const result = server.call('provar.nitrox.validate', { content: JSON.stringify(obj) }); + const result = server.call('provar_nitrox_validate', { content: JSON.stringify(obj) }); const body = parseBody(result); const issues = body['issues'] as Array>; assert.ok(issues.some((i) => i['rule_id'] === 'NX001' && i['severity'] === 'ERROR')); @@ -268,7 +261,7 @@ describe('nitroXTools', () => { it('NX002 ERROR: root missing required fields', () => { const obj = { componentId: VALID_UUID }; // no parentId, so root — missing name/type etc - const result = server.call('provar.nitrox.validate', { content: JSON.stringify(obj) }); + const result = server.call('provar_nitrox_validate', { content: JSON.stringify(obj) }); const body = parseBody(result); const issues = body['issues'] as Array>; assert.ok(issues.filter((i) => i['rule_id'] === 'NX002').length >= 4); @@ -276,7 +269,7 @@ describe('nitroXTools', () => { it('NX002 does not fire when parentId is set', () => { const obj = { componentId: VALID_UUID, parentId: VALID_UUID }; // child — NX002 should not fire - const result = server.call('provar.nitrox.validate', { content: JSON.stringify(obj) }); + const result = server.call('provar_nitrox_validate', { content: JSON.stringify(obj) }); const body = parseBody(result); const issues = body['issues'] as Array>; assert.ok(!issues.some((i) => i['rule_id'] === 'NX002')); @@ -284,7 +277,7 @@ describe('nitroXTools', () => { it('NX003 ERROR: tagName contains whitespace', () => { const obj = { ...VALID_ROOT, tagName: 'my tag' }; - const result = server.call('provar.nitrox.validate', { content: JSON.stringify(obj) }); + const result = server.call('provar_nitrox_validate', { content: JSON.stringify(obj) }); const body = parseBody(result); const issues = body['issues'] as Array>; assert.ok(issues.some((i) => i['rule_id'] === 'NX003' && i['severity'] === 
'ERROR')); @@ -293,7 +286,7 @@ describe('nitroXTools', () => { it('NX004 ERROR: interaction missing required fields', () => { const badInteraction = { name: 'Click' }; // missing required fields const obj = { ...VALID_ROOT, interactions: [badInteraction] }; - const result = server.call('provar.nitrox.validate', { content: JSON.stringify(obj) }); + const result = server.call('provar_nitrox_validate', { content: JSON.stringify(obj) }); const body = parseBody(result); const issues = body['issues'] as Array>; assert.ok(issues.some((i) => i['rule_id'] === 'NX004' && i['severity'] === 'ERROR')); @@ -302,7 +295,7 @@ describe('nitroXTools', () => { it('NX005 ERROR: implementation missing javaScriptSnippet', () => { const badInteraction = { ...VALID_INTERACTION, implementations: [{}] }; const obj = { ...VALID_ROOT, interactions: [badInteraction] }; - const result = server.call('provar.nitrox.validate', { content: JSON.stringify(obj) }); + const result = server.call('provar_nitrox_validate', { content: JSON.stringify(obj) }); const body = parseBody(result); const issues = body['issues'] as Array>; assert.ok(issues.some((i) => i['rule_id'] === 'NX005' && i['severity'] === 'ERROR')); @@ -310,7 +303,7 @@ describe('nitroXTools', () => { it('NX006 ERROR: selector missing xpath', () => { const obj = { ...VALID_ROOT, selectors: [{ priority: 1 }] }; - const result = server.call('provar.nitrox.validate', { content: JSON.stringify(obj) }); + const result = server.call('provar_nitrox_validate', { content: JSON.stringify(obj) }); const body = parseBody(result); const issues = body['issues'] as Array>; assert.ok(issues.some((i) => i['rule_id'] === 'NX006' && i['severity'] === 'ERROR')); @@ -318,7 +311,7 @@ describe('nitroXTools', () => { it('NX007 WARNING: element missing type', () => { const obj = { ...VALID_ROOT, elements: [{ label: 'My Field' }] }; - const result = server.call('provar.nitrox.validate', { content: JSON.stringify(obj) }); + const result = 
server.call('provar_nitrox_validate', { content: JSON.stringify(obj) }); const body = parseBody(result); const issues = body['issues'] as Array>; assert.ok(issues.some((i) => i['rule_id'] === 'NX007' && i['severity'] === 'WARNING')); @@ -326,7 +319,7 @@ describe('nitroXTools', () => { it('NX008 WARNING: invalid comparisonType', () => { const obj = { ...VALID_ROOT, parameters: [{ name: 'p', value: 'v', comparisonType: 'startsWith' }] }; - const result = server.call('provar.nitrox.validate', { content: JSON.stringify(obj) }); + const result = server.call('provar_nitrox_validate', { content: JSON.stringify(obj) }); const body = parseBody(result); const issues = body['issues'] as Array>; assert.ok(issues.some((i) => i['rule_id'] === 'NX008' && i['severity'] === 'WARNING')); @@ -334,7 +327,7 @@ describe('nitroXTools', () => { it('NX008 accepts "starts-with" (hyphenated)', () => { const obj = { ...VALID_ROOT, parameters: [{ name: 'p', value: 'v', comparisonType: 'starts-with' }] }; - const result = server.call('provar.nitrox.validate', { content: JSON.stringify(obj) }); + const result = server.call('provar_nitrox_validate', { content: JSON.stringify(obj) }); const body = parseBody(result); const issues = body['issues'] as Array>; assert.ok(!issues.some((i) => i['rule_id'] === 'NX008')); @@ -343,7 +336,7 @@ describe('nitroXTools', () => { it('NX009 INFO: interaction name with special characters', () => { const specialInteraction = { ...VALID_INTERACTION, name: 'Click! 
Now' }; const obj = { ...VALID_ROOT, interactions: [specialInteraction] }; - const result = server.call('provar.nitrox.validate', { content: JSON.stringify(obj) }); + const result = server.call('provar_nitrox_validate', { content: JSON.stringify(obj) }); const body = parseBody(result); const issues = body['issues'] as Array>; assert.ok(issues.some((i) => i['rule_id'] === 'NX009' && i['severity'] === 'INFO')); @@ -351,7 +344,7 @@ describe('nitroXTools', () => { it('NX010 INFO: bodyTagName contains whitespace', () => { const obj = { ...VALID_ROOT, bodyTagName: 'body tag' }; - const result = server.call('provar.nitrox.validate', { content: JSON.stringify(obj) }); + const result = server.call('provar_nitrox_validate', { content: JSON.stringify(obj) }); const body = parseBody(result); const issues = body['issues'] as Array>; assert.ok(issues.some((i) => i['rule_id'] === 'NX010' && i['severity'] === 'INFO')); @@ -360,14 +353,14 @@ describe('nitroXTools', () => { it('score formula: 2 errors = score 60', () => { // Missing componentId (NX001) + missing root fields (NX002 × 4) const obj = { name: '/test' }; // missing componentId + no type/pageStructureElement/fieldDetailsElement - const result = server.call('provar.nitrox.validate', { content: JSON.stringify(obj) }); + const result = server.call('provar_nitrox_validate', { content: JSON.stringify(obj) }); const body = parseBody(result); assert.equal(body['valid'], false); assert.ok((body['score'] as number) < 100); }); it('returns FILE_NOT_FOUND when file_path does not exist', () => { - const result = server.call('provar.nitrox.validate', { + const result = server.call('provar_nitrox_validate', { file_path: path.join(tmpDir, 'missing.po.json'), }); assert.ok(isError(result)); @@ -376,14 +369,14 @@ describe('nitroXTools', () => { }); it('returns MISSING_INPUT when neither content nor file_path provided', () => { - const result = server.call('provar.nitrox.validate', {}); + const result = 
server.call('provar_nitrox_validate', {}); assert.ok(isError(result)); const body = parseBody(result); assert.equal(body['error_code'], 'MISSING_INPUT'); }); it('returns NX000 for invalid JSON content', () => { - const result = server.call('provar.nitrox.validate', { content: 'not json {' }); + const result = server.call('provar_nitrox_validate', { content: 'not json {' }); assert.ok(isError(result)); const body = parseBody(result); assert.equal(body['error_code'], 'NX000'); @@ -401,18 +394,18 @@ describe('nitroXTools', () => { }, ], }; - const result = server.call('provar.nitrox.validate', { content: JSON.stringify(obj) }); + const result = server.call('provar_nitrox_validate', { content: JSON.stringify(obj) }); const body = parseBody(result); const issues = body['issues'] as Array>; assert.ok(issues.some((i) => i['rule_id'] === 'NX006')); }); }); - // ── provar.nitrox.generate ───────────────────────────────────────────────── + // ── provar_nitrox_generate ───────────────────────────────────────────────── - describe('provar.nitrox.generate', () => { + describe('provar_nitrox_generate', () => { it('dry_run=true returns JSON without writing', () => { - const result = server.call('provar.nitrox.generate', { + const result = server.call('provar_nitrox_generate', { name: '/com/test/ButtonComponent', tag_name: 'lightning-button', dry_run: true, @@ -430,7 +423,7 @@ describe('nitroXTools', () => { it('writes file when dry_run=false', () => { const outPath = path.join(tmpDir, 'Button.po.json'); - const result = server.call('provar.nitrox.generate', { + const result = server.call('provar_nitrox_generate', { name: '/com/test/ButtonComponent', tag_name: 'lightning-button', output_path: outPath, @@ -446,7 +439,7 @@ describe('nitroXTools', () => { const outPath = path.join(tmpDir, 'Exists.po.json'); fs.writeFileSync(outPath, '{}'); - const result = server.call('provar.nitrox.generate', { + const result = server.call('provar_nitrox_generate', { name: '/com/test/C', tag_name: 
'c-test', output_path: outPath, @@ -462,7 +455,7 @@ describe('nitroXTools', () => { const outPath = path.join(tmpDir, 'Overwrite.po.json'); fs.writeFileSync(outPath, '{"old": true}'); - const result = server.call('provar.nitrox.generate', { + const result = server.call('provar_nitrox_generate', { name: '/com/test/C', tag_name: 'c-test', output_path: outPath, @@ -478,7 +471,7 @@ describe('nitroXTools', () => { it('returns PATH_NOT_ALLOWED when output_path is outside allowed roots', () => { const outPath = path.join(os.tmpdir(), 'outside-allowed.po.json'); - const result = server.call('provar.nitrox.generate', { + const result = server.call('provar_nitrox_generate', { name: '/com/test/C', tag_name: 'c-test', output_path: outPath, @@ -490,7 +483,7 @@ describe('nitroXTools', () => { }); it('generates elements with parameters and selectors', () => { - const result = server.call('provar.nitrox.generate', { + const result = server.call('provar_nitrox_generate', { name: '/com/test/FormComponent', tag_name: 'c-form', elements: [ @@ -518,7 +511,7 @@ describe('nitroXTools', () => { }); it('assigns unique UUIDs to root and each element', () => { - const result = server.call('provar.nitrox.generate', { + const result = server.call('provar_nitrox_generate', { name: '/com/test/Multi', tag_name: 'c-multi', elements: [ @@ -530,21 +523,17 @@ describe('nitroXTools', () => { const body = parseBody(result); const generated = JSON.parse(body['content'] as string) as Record; const elements = generated['elements'] as Array>; - const ids = [ - generated['componentId'], - elements[0]['componentId'], - elements[1]['componentId'], - ]; + const ids = [generated['componentId'], elements[0]['componentId'], elements[1]['componentId']]; const unique = new Set(ids); assert.equal(unique.size, 3); }); }); - // ── provar.nitrox.patch ──────────────────────────────────────────────────── + // ── provar_nitrox_patch ──────────────────────────────────────────────────── - describe('provar.nitrox.patch', () 
=> { + describe('provar_nitrox_patch', () => { it('returns FILE_NOT_FOUND for missing file', () => { - const result = server.call('provar.nitrox.patch', { + const result = server.call('provar_nitrox_patch', { file_path: path.join(tmpDir, 'missing.po.json'), patch: { name: '/new' }, }); @@ -557,7 +546,7 @@ describe('nitroXTools', () => { const filePath = path.join(tmpDir, 'Component.po.json'); fs.writeFileSync(filePath, JSON.stringify(VALID_ROOT)); - const result = server.call('provar.nitrox.patch', { + const result = server.call('provar_nitrox_patch', { file_path: filePath, patch: { name: '/com/test/Updated' }, dry_run: true, @@ -576,7 +565,7 @@ describe('nitroXTools', () => { const filePath = path.join(tmpDir, 'Component.po.json'); fs.writeFileSync(filePath, JSON.stringify(VALID_ROOT)); - const result = server.call('provar.nitrox.patch', { + const result = server.call('provar_nitrox_patch', { file_path: filePath, patch: { name: '/com/test/Patched' }, dry_run: false, @@ -593,7 +582,7 @@ describe('nitroXTools', () => { const filePath = path.join(tmpDir, 'Component.po.json'); fs.writeFileSync(filePath, JSON.stringify({ ...VALID_ROOT, qualifier: 'some-qualifier' })); - const result = server.call('provar.nitrox.patch', { + const result = server.call('provar_nitrox_patch', { file_path: filePath, patch: { qualifier: null }, dry_run: true, @@ -609,7 +598,7 @@ describe('nitroXTools', () => { fs.writeFileSync(filePath, JSON.stringify(VALID_ROOT)); // Remove componentId via patch — will trigger NX001 error - const result = server.call('provar.nitrox.patch', { + const result = server.call('provar_nitrox_patch', { file_path: filePath, patch: { componentId: null }, dry_run: false, @@ -627,7 +616,7 @@ describe('nitroXTools', () => { const filePath = path.join(tmpDir, 'Component.po.json'); fs.writeFileSync(filePath, JSON.stringify(VALID_ROOT)); - const result = server.call('provar.nitrox.patch', { + const result = server.call('provar_nitrox_patch', { file_path: filePath, patch: { 
name: '/com/test/Updated' }, dry_run: true, diff --git a/test/unit/mcp/pageObjectGenerate.test.ts b/test/unit/mcp/pageObjectGenerate.test.ts index 11d8e36c..094ad4b7 100644 --- a/test/unit/mcp/pageObjectGenerate.test.ts +++ b/test/unit/mcp/pageObjectGenerate.test.ts @@ -62,12 +62,12 @@ afterEach(() => { fs.rmSync(tmpDir, { recursive: true, force: true }); }); -// ── provar.pageobject.generate ───────────────────────────────────────────────── +// ── provar_pageobject_generate ───────────────────────────────────────────────── -describe('provar.pageobject.generate', () => { +describe('provar_pageobject_generate', () => { describe('dry_run', () => { it('returns java_source without writing to disk', () => { - const result = server.call('provar.pageobject.generate', { + const result = server.call('provar_pageobject_generate', { class_name: 'AccountDetailPage', package_name: 'pageobjects', page_type: 'standard', @@ -85,7 +85,7 @@ describe('provar.pageobject.generate', () => { it('does NOT write a file even when output_path is provided', () => { const outPath = path.join(tmpDir, 'AccountDetailPage.java'); - server.call('provar.pageobject.generate', { + server.call('provar_pageobject_generate', { class_name: 'AccountDetailPage', package_name: 'pageobjects', page_type: 'standard', @@ -101,7 +101,7 @@ describe('provar.pageobject.generate', () => { describe('generated Java source content', () => { it('contains correct class name and package', () => { - const result = server.call('provar.pageobject.generate', { + const result = server.call('provar_pageobject_generate', { class_name: 'AccountDetailPage', package_name: 'pageobjects.accounts', page_type: 'standard', @@ -116,7 +116,7 @@ describe('provar.pageobject.generate', () => { }); it('uses @Page annotation for standard page_type', () => { - const result = server.call('provar.pageobject.generate', { + const result = server.call('provar_pageobject_generate', { class_name: 'LoginPage', package_name: 'pageobjects', page_type: 
'standard', @@ -131,7 +131,7 @@ describe('provar.pageobject.generate', () => { }); it('uses @SalesforcePage annotation for salesforce page_type', () => { - const result = server.call('provar.pageobject.generate', { + const result = server.call('provar_pageobject_generate', { class_name: 'AccountPage', package_name: 'pageobjects', page_type: 'salesforce', @@ -147,7 +147,7 @@ describe('provar.pageobject.generate', () => { }); it('includes standard imports', () => { - const result = server.call('provar.pageobject.generate', { + const result = server.call('provar_pageobject_generate', { class_name: 'MyPage', package_name: 'pageobjects', page_type: 'standard', @@ -163,7 +163,7 @@ describe('provar.pageobject.generate', () => { }); it('generates @FindBy field blocks for provided fields', () => { - const result = server.call('provar.pageobject.generate', { + const result = server.call('provar_pageobject_generate', { class_name: 'AccountPage', package_name: 'pageobjects', page_type: 'standard', @@ -193,7 +193,7 @@ describe('provar.pageobject.generate', () => { }); it('emits a TODO comment when no fields are provided', () => { - const result = server.call('provar.pageobject.generate', { + const result = server.call('provar_pageobject_generate', { class_name: 'EmptyPage', package_name: 'pageobjects', page_type: 'standard', @@ -207,7 +207,7 @@ describe('provar.pageobject.generate', () => { }); it('defaults title to class_name when title is omitted', () => { - const result = server.call('provar.pageobject.generate', { + const result = server.call('provar_pageobject_generate', { class_name: 'AccountDetailPage', package_name: 'pageobjects', page_type: 'standard', @@ -224,7 +224,7 @@ describe('provar.pageobject.generate', () => { describe('writing to disk', () => { it('writes file when dry_run=false and output_path provided', () => { const outPath = path.join(tmpDir, 'AccountDetailPage.java'); - const result = server.call('provar.pageobject.generate', { + const result = 
server.call('provar_pageobject_generate', { class_name: 'AccountDetailPage', package_name: 'pageobjects', page_type: 'standard', @@ -240,7 +240,7 @@ describe('provar.pageobject.generate', () => { }); it('does NOT write when dry_run=false but no output_path', () => { - const result = server.call('provar.pageobject.generate', { + const result = server.call('provar_pageobject_generate', { class_name: 'NoPathPage', package_name: 'pageobjects', page_type: 'standard', @@ -257,7 +257,7 @@ describe('provar.pageobject.generate', () => { const outPath = path.join(tmpDir, 'Existing.java'); fs.writeFileSync(outPath, '// old', 'utf-8'); - const result = server.call('provar.pageobject.generate', { + const result = server.call('provar_pageobject_generate', { class_name: 'Existing', package_name: 'pageobjects', page_type: 'standard', @@ -275,7 +275,7 @@ describe('provar.pageobject.generate', () => { const outPath = path.join(tmpDir, 'Existing.java'); fs.writeFileSync(outPath, '// old', 'utf-8'); - const result = server.call('provar.pageobject.generate', { + const result = server.call('provar_pageobject_generate', { class_name: 'Existing', package_name: 'pageobjects', page_type: 'standard', @@ -292,7 +292,7 @@ describe('provar.pageobject.generate', () => { it('creates parent directories as needed', () => { const outPath = path.join(tmpDir, 'src', 'main', 'java', 'MyPage.java'); - server.call('provar.pageobject.generate', { + server.call('provar_pageobject_generate', { class_name: 'MyPage', package_name: 'pageobjects', page_type: 'standard', @@ -311,7 +311,7 @@ describe('provar.pageobject.generate', () => { const strictServer = new MockMcpServer(); registerPageObjectGenerate(strictServer as never, { allowedPaths: [tmpDir] }); - const result = strictServer.call('provar.pageobject.generate', { + const result = strictServer.call('provar_pageobject_generate', { class_name: 'EvilPage', package_name: 'pageobjects', page_type: 'standard', @@ -330,7 +330,7 @@ 
describe('provar.pageobject.generate', () => { const strictServer = new MockMcpServer(); registerPageObjectGenerate(strictServer as never, { allowedPaths: [tmpDir] }); - const result = strictServer.call('provar.pageobject.generate', { + const result = strictServer.call('provar_pageobject_generate', { class_name: 'SafePage', package_name: 'pageobjects', page_type: 'standard', @@ -346,7 +346,7 @@ describe('provar.pageobject.generate', () => { describe('idempotency_key', () => { it('echoes back the provided idempotency_key', () => { - const result = server.call('provar.pageobject.generate', { + const result = server.call('provar_pageobject_generate', { class_name: 'IdempotentPage', package_name: 'pageobjects', page_type: 'standard', @@ -360,7 +360,7 @@ describe('provar.pageobject.generate', () => { }); it('returns undefined idempotency_key when not provided', () => { - const result = server.call('provar.pageobject.generate', { + const result = server.call('provar_pageobject_generate', { class_name: 'NoKeyPage', package_name: 'pageobjects', page_type: 'standard', @@ -375,7 +375,7 @@ describe('provar.pageobject.generate', () => { describe('sso_class — ILoginPage stub generation', () => { it('returns sso_stub_source when sso_class is provided', () => { - const result = server.call('provar.pageobject.generate', { + const result = server.call('provar_pageobject_generate', { class_name: 'LoginPage', package_name: 'pageobjects', page_type: 'standard', @@ -394,7 +394,7 @@ describe('provar.pageobject.generate', () => { }); it('sso stub includes loginAs and logout method stubs', () => { - const result = server.call('provar.pageobject.generate', { + const result = server.call('provar_pageobject_generate', { class_name: 'LoginPage', package_name: 'pageobjects', page_type: 'standard', @@ -410,7 +410,7 @@ describe('provar.pageobject.generate', () => { }); it('uses the correct package in sso stub', () => { - const result = server.call('provar.pageobject.generate', { + const result = 
server.call('provar_pageobject_generate', { class_name: 'LoginPage', package_name: 'pageobjects.auth', page_type: 'standard', @@ -425,7 +425,7 @@ describe('provar.pageobject.generate', () => { }); it('does not include sso fields when sso_class is omitted', () => { - const result = server.call('provar.pageobject.generate', { + const result = server.call('provar_pageobject_generate', { class_name: 'AccountPage', package_name: 'pageobjects', page_type: 'standard', @@ -440,7 +440,7 @@ describe('provar.pageobject.generate', () => { it('writes both page object and SSO stub to disk when dry_run=false', () => { const poPath = path.join(tmpDir, 'LoginPage.java'); - const result = server.call('provar.pageobject.generate', { + const result = server.call('provar_pageobject_generate', { class_name: 'LoginPage', package_name: 'pageobjects', page_type: 'standard', @@ -461,7 +461,7 @@ describe('provar.pageobject.generate', () => { const strictServer = new MockMcpServer(); registerPageObjectGenerate(strictServer as never, { allowedPaths: [tmpDir] }); - const result = strictServer.call('provar.pageobject.generate', { + const result = strictServer.call('provar_pageobject_generate', { class_name: 'LoginPage', package_name: 'pageobjects', page_type: 'standard', @@ -482,7 +482,7 @@ describe('provar.pageobject.generate', () => { const ssoPath = path.join(tmpDir, 'LoginPageSso.java'); fs.writeFileSync(ssoPath, '// existing stub', 'utf-8'); - const result = server.call('provar.pageobject.generate', { + const result = server.call('provar_pageobject_generate', { class_name: 'LoginPage', package_name: 'pageobjects', page_type: 'standard', @@ -502,7 +502,7 @@ describe('provar.pageobject.generate', () => { const ssoPath = path.join(tmpDir, 'LoginPageSso.java'); fs.writeFileSync(ssoPath, '// existing stub', 'utf-8'); - server.call('provar.pageobject.generate', { + server.call('provar_pageobject_generate', { class_name: 'LoginPage', package_name: 'pageobjects', page_type: 'standard', diff --git 
a/test/unit/mcp/projectValidateFromPath.test.ts b/test/unit/mcp/projectValidateFromPath.test.ts index 6d6fa204..aadd603d 100644 --- a/test/unit/mcp/projectValidateFromPath.test.ts +++ b/test/unit/mcp/projectValidateFromPath.test.ts @@ -68,16 +68,14 @@ function writeFile(p: string, content: string): void { const G = { tc1: '550e8400-e29b-41d4-a716-446655440001', - s1: '550e8400-e29b-41d4-a716-446655440011', + s1: '550e8400-e29b-41d4-a716-446655440011', }; /** Build a minimal valid Provar project in the given directory */ function makeProject(root: string, planName = 'smoke', tcName = 'Login'): void { writeFile(path.join(root, '.testproject'), TESTPROJECT_XML); - writeFile(path.join(root, 'tests', `${tcName}.testcase`), - makeXml(G.tc1, G.s1, `tc-${tcName.toLowerCase()}`)); - writeFile(path.join(root, 'plans', planName, `${tcName}.testinstance`), - `testCasePath="tests/${tcName}.testcase"\n`); + writeFile(path.join(root, 'tests', `${tcName}.testcase`), makeXml(G.tc1, G.s1, `tc-${tcName.toLowerCase()}`)); + writeFile(path.join(root, 'plans', planName, `${tcName}.testinstance`), `testCasePath="tests/${tcName}.testcase"\n`); } // ── Test setup ───────────────────────────────────────────────────────────────── @@ -107,13 +105,13 @@ afterEach(() => { } }); -// ── provar.project.validate ──────────────────────────────────────────────────── +// ── provar_project_validate ──────────────────────────────────────────────────── -describe('provar.project.validate (from path)', () => { +describe('provar_project_validate (from path)', () => { describe('happy path', () => { it('returns a result (not an error) for a valid project', () => { makeProject(tmpDir); - const result = server.call('provar.project.validate', { + const result = server.call('provar_project_validate', { project_path: tmpDir, save_results: false, }); @@ -127,7 +125,7 @@ describe('provar.project.validate (from path)', () => { it('quality_score is between 0 and 100', () => { makeProject(tmpDir); - const result = 
server.call('provar.project.validate', { + const result = server.call('provar_project_validate', { project_path: tmpDir, save_results: false, }); @@ -138,7 +136,7 @@ describe('provar.project.validate (from path)', () => { it('returns requestId in the response', () => { makeProject(tmpDir); - const result = server.call('provar.project.validate', { + const result = server.call('provar_project_validate', { project_path: tmpDir, save_results: false, }); @@ -151,7 +149,7 @@ describe('provar.project.validate (from path)', () => { describe('error cases', () => { it('returns NOT_A_PROJECT when .testproject is absent', () => { // Empty temp dir — no .testproject - const result = server.call('provar.project.validate', { + const result = server.call('provar_project_validate', { project_path: tmpDir, save_results: false, }); @@ -162,7 +160,7 @@ describe('provar.project.validate (from path)', () => { it('returns NOT_A_PROJECT even when plans/ exists but .testproject is absent', () => { fs.mkdirSync(path.join(tmpDir, 'plans'), { recursive: true }); - const result = server.call('provar.project.validate', { + const result = server.call('provar_project_validate', { project_path: tmpDir, save_results: false, }); @@ -175,7 +173,7 @@ describe('provar.project.validate (from path)', () => { const strictServer = new MockMcpServer(); registerProjectValidateFromPath(strictServer as never, { allowedPaths: [tmpDir] }); - const result = strictServer.call('provar.project.validate', { + const result = strictServer.call('provar_project_validate', { project_path: '/etc', save_results: false, }); @@ -190,7 +188,7 @@ describe('provar.project.validate (from path)', () => { const strictServer = new MockMcpServer(); registerProjectValidateFromPath(strictServer as never, { allowedPaths: [tmpDir] }); - const result = strictServer.call('provar.project.validate', { + const result = strictServer.call('provar_project_validate', { project_path: tmpDir, save_results: true, results_dir: '/etc/evil-results', @@ 
-205,20 +203,19 @@ describe('provar.project.validate (from path)', () => { describe('save_results', () => { it('does NOT write a results file when save_results=false', () => { makeProject(tmpDir); - server.call('provar.project.validate', { + server.call('provar_project_validate', { project_path: tmpDir, save_results: false, }); const defaultResultsDir = path.join(tmpDir, 'provardx', 'validation'); - const exists = fs.existsSync(defaultResultsDir) && - fs.readdirSync(defaultResultsDir).length > 0; + const exists = fs.existsSync(defaultResultsDir) && fs.readdirSync(defaultResultsDir).length > 0; assert.equal(exists, false, 'No results file should be written when save_results=false'); }); it('writes a results file to default location when save_results=true', () => { makeProject(tmpDir); - server.call('provar.project.validate', { + server.call('provar_project_validate', { project_path: tmpDir, save_results: true, }); @@ -233,7 +230,7 @@ describe('provar.project.validate (from path)', () => { makeProject(tmpDir); const customResultsDir = path.join(tmpDir, 'my-results'); - server.call('provar.project.validate', { + server.call('provar_project_validate', { project_path: tmpDir, save_results: true, results_dir: customResultsDir, @@ -248,7 +245,7 @@ describe('provar.project.validate (from path)', () => { describe('quality_threshold', () => { it('accepts a custom quality_threshold', () => { makeProject(tmpDir); - const result = server.call('provar.project.validate', { + const result = server.call('provar_project_validate', { project_path: tmpDir, quality_threshold: 90, save_results: false, @@ -261,7 +258,7 @@ describe('provar.project.validate (from path)', () => { describe('include_plan_details', () => { it('default response uses plans_summary not plans', () => { makeProject(tmpDir); - const result = server.call('provar.project.validate', { + const result = server.call('provar_project_validate', { project_path: tmpDir, save_results: false, }); @@ -273,7 +270,7 @@ 
describe('provar.project.validate (from path)', () => { it('include_plan_details:true returns full plans array', () => { makeProject(tmpDir); - const result = server.call('provar.project.validate', { + const result = server.call('provar_project_validate', { project_path: tmpDir, save_results: false, include_plan_details: true, @@ -285,7 +282,7 @@ describe('provar.project.validate (from path)', () => { it('plans_summary entries have name, quality_score, suite_count, test_case_count', () => { makeProject(tmpDir); - const result = server.call('provar.project.validate', { project_path: tmpDir, save_results: false }); + const result = server.call('provar_project_validate', { project_path: tmpDir, save_results: false }); const body = parseText(result); const plansSummary = body['plans_summary'] as Array>; assert.ok(Array.isArray(plansSummary)); @@ -310,7 +307,7 @@ describe('provar.project.validate (from path)', () => { } writeFile(path.join(root, 'plans', 'smoke', 'Login.testinstance'), 'testCasePath="tests/Login.testcase"\n'); - const result = server.call('provar.project.validate', { + const result = server.call('provar_project_validate', { project_path: root, save_results: false, max_uncovered: 1, diff --git a/test/unit/mcp/propertiesTools.test.ts b/test/unit/mcp/propertiesTools.test.ts index 0ba6a524..428506e5 100644 --- a/test/unit/mcp/propertiesTools.test.ts +++ b/test/unit/mcp/propertiesTools.test.ts @@ -94,12 +94,12 @@ afterEach(() => { fs.rmSync(tmpDir, { recursive: true, force: true }); }); -// ── provar.properties.generate ──────────────────────────────────────────────── +// ── provar_properties_generate ──────────────────────────────────────────────── -describe('provar.properties.generate', () => { +describe('provar_properties_generate', () => { it('dry_run returns content without writing to disk', () => { const outPath = path.join(tmpDir, 'dry.json'); - const result = server.call('provar.properties.generate', { output_path: outPath, dry_run: true }); + const 
result = server.call('provar_properties_generate', { output_path: outPath, dry_run: true }); assert.equal(isError(result), false); const body = parseText(result); @@ -111,7 +111,7 @@ describe('provar.properties.generate', () => { it('writes file to disk when dry_run is false', () => { const outPath = path.join(tmpDir, 'props.json'); - const result = server.call('provar.properties.generate', { output_path: outPath, dry_run: false }); + const result = server.call('provar_properties_generate', { output_path: outPath, dry_run: false }); assert.equal(isError(result), false); assert.equal(fs.existsSync(outPath), true, 'file should be written'); @@ -121,7 +121,7 @@ describe('provar.properties.generate', () => { it('pre-fills projectPath and provarHome when provided', () => { const outPath = path.join(tmpDir, 'pre-filled.json'); - server.call('provar.properties.generate', { + server.call('provar_properties_generate', { output_path: outPath, project_path: '/my/project', provar_home: '/opt/provar', @@ -137,7 +137,7 @@ describe('provar.properties.generate', () => { const outPath = path.join(tmpDir, 'existing.json'); fs.writeFileSync(outPath, '{}', 'utf-8'); - const result = server.call('provar.properties.generate', { output_path: outPath, overwrite: false }); + const result = server.call('provar_properties_generate', { output_path: outPath, overwrite: false }); assert.equal(isError(result), true); const body = parseText(result); @@ -148,7 +148,7 @@ describe('provar.properties.generate', () => { const outPath = path.join(tmpDir, 'overwrite.json'); fs.writeFileSync(outPath, '{"old":true}', 'utf-8'); - const result = server.call('provar.properties.generate', { output_path: outPath, overwrite: true }); + const result = server.call('provar_properties_generate', { output_path: outPath, overwrite: true }); assert.equal(isError(result), false); const written = JSON.parse(fs.readFileSync(outPath, 'utf-8')) as Record; @@ -157,27 +157,27 @@ describe('provar.properties.generate', () => { 
it('includes next_steps hint after writing the file', () => { const outPath = path.join(tmpDir, 'props-nextsteps.json'); - const result = server.call('provar.properties.generate', { output_path: outPath, dry_run: false }); + const result = server.call('provar_properties_generate', { output_path: outPath, dry_run: false }); assert.equal(isError(result), false); const body = parseText(result); assert.ok(typeof body['next_steps'] === 'string', 'next_steps should be present'); - assert.ok(body['next_steps'].includes('provar.automation.config.load'), 'next_steps should mention config.load'); + assert.ok(body['next_steps'].includes('provar_automation_config_load'), 'next_steps should mention config.load'); }); it('includes next_steps hint even on dry_run', () => { const outPath = path.join(tmpDir, 'props-dry.json'); - const result = server.call('provar.properties.generate', { output_path: outPath, dry_run: true }); + const result = server.call('provar_properties_generate', { output_path: outPath, dry_run: true }); assert.equal(isError(result), false); const body = parseText(result); assert.ok(typeof body['next_steps'] === 'string'); - assert.ok(body['next_steps'].includes('provar.automation.config.load')); + assert.ok(body['next_steps'].includes('provar_automation_config_load')); }); it('returns INVALID_PATH when output_path does not end with .json', () => { const outPath = path.join(tmpDir, 'props.txt'); - const result = server.call('provar.properties.generate', { output_path: outPath, dry_run: true }); + const result = server.call('provar_properties_generate', { output_path: outPath, dry_run: true }); assert.equal(isError(result), true); const body = parseText(result); @@ -192,7 +192,7 @@ describe('provar.properties.generate', () => { registerPropertiesGenerate(strictServer as never, strictConfig); // outPath resolves to os.tmpdir()/escape.json — outside tmpDir - const result = strictServer.call('provar.properties.generate', { output_path: outPath, dry_run: true }); + 
const result = strictServer.call('provar_properties_generate', { output_path: outPath, dry_run: true }); assert.equal(isError(result), true); const body = parseText(result); @@ -203,15 +203,15 @@ describe('provar.properties.generate', () => { }); }); -// ── provar.properties.read ──────────────────────────────────────────────────── +// ── provar_properties_read ──────────────────────────────────────────────────── -describe('provar.properties.read', () => { +describe('provar_properties_read', () => { it('returns parsed content for a valid JSON file', () => { const filePath = path.join(tmpDir, 'props.json'); const props = { provarHome: '/opt/provar', projectPath: '/proj' }; fs.writeFileSync(filePath, JSON.stringify(props), 'utf-8'); - const result = server.call('provar.properties.read', { file_path: filePath }); + const result = server.call('provar_properties_read', { file_path: filePath }); assert.equal(isError(result), false); const body = parseText(result); @@ -221,14 +221,14 @@ describe('provar.properties.read', () => { }); it('returns PROPERTIES_FILE_NOT_FOUND when file does not exist', () => { - const result = server.call('provar.properties.read', { + const result = server.call('provar_properties_read', { file_path: path.join(tmpDir, 'missing.json'), }); assert.equal(isError(result), true); const body = parseText(result); assert.equal(body['error_code'], 'PROPERTIES_FILE_NOT_FOUND'); - assert.ok((body['message'] as string).includes('provar.properties.generate'), 'suggestion should mention generate'); + assert.ok((body['message'] as string).includes('provar_properties_generate'), 'suggestion should mention generate'); }); it('surfaces divergence warning when active sf config points to a different file with different values', () => { @@ -255,7 +255,7 @@ describe('provar.properties.read', () => { setSfConfigDirForTesting(sfDir); try { - const result = server.call('provar.properties.read', { file_path: diskFile }); + const result = 
server.call('provar_properties_read', { file_path: diskFile }); assert.equal(isError(result), false); const body = parseText(result); @@ -287,7 +287,7 @@ describe('provar.properties.read', () => { setSfConfigDirForTesting(sfDir); try { - const result = server.call('provar.properties.read', { file_path: filePath }); + const result = server.call('provar_properties_read', { file_path: filePath }); assert.equal(isError(result), false); const body = parseText(result); @@ -301,7 +301,7 @@ describe('provar.properties.read', () => { const filePath = path.join(tmpDir, 'bad.json'); fs.writeFileSync(filePath, '{ not valid json }', 'utf-8'); - const result = server.call('provar.properties.read', { file_path: filePath }); + const result = server.call('provar_properties_read', { file_path: filePath }); assert.equal(isError(result), true); assert.equal(parseText(result)['error_code'], 'MALFORMED_JSON'); @@ -311,7 +311,7 @@ describe('provar.properties.read', () => { const strictServer = new MockMcpServer(); registerPropertiesRead(strictServer as never, { allowedPaths: [tmpDir] }); - const result = strictServer.call('provar.properties.read', { + const result = strictServer.call('provar_properties_read', { file_path: '/etc/passwd', }); @@ -321,9 +321,9 @@ describe('provar.properties.read', () => { }); }); -// ── provar.properties.set ───────────────────────────────────────────────────── +// ── provar_properties_set ───────────────────────────────────────────────────── -describe('provar.properties.set', () => { +describe('provar_properties_set', () => { it('partial update preserves unmodified fields (deep merge)', () => { const filePath = path.join(tmpDir, 'props.json'); const initial = { @@ -338,7 +338,7 @@ describe('provar.properties.set', () => { }; fs.writeFileSync(filePath, JSON.stringify(initial, null, 2), 'utf-8'); - const result = server.call('provar.properties.set', { + const result = server.call('provar_properties_set', { file_path: filePath, updates: { provarHome: 
'/opt/provar-2' }, }); @@ -362,7 +362,7 @@ describe('provar.properties.set', () => { }; fs.writeFileSync(filePath, JSON.stringify(initial, null, 2), 'utf-8'); - server.call('provar.properties.set', { + server.call('provar_properties_set', { file_path: filePath, updates: { environment: { webBrowser: 'Firefox' } }, }); @@ -381,7 +381,7 @@ describe('provar.properties.set', () => { 'utf-8' ); - server.call('provar.properties.set', { + server.call('provar_properties_set', { file_path: filePath, updates: { testCase: ['new/test2.testcase'] }, }); @@ -391,7 +391,7 @@ describe('provar.properties.set', () => { }); it('returns PROPERTIES_FILE_NOT_FOUND when file does not exist', () => { - const result = server.call('provar.properties.set', { + const result = server.call('provar_properties_set', { file_path: path.join(tmpDir, 'ghost.json'), updates: { provarHome: '/x' }, }); @@ -404,7 +404,7 @@ describe('provar.properties.set', () => { const strictServer = new MockMcpServer(); registerPropertiesSet(strictServer as never, { allowedPaths: [tmpDir] }); - const result = strictServer.call('provar.properties.set', { + const result = strictServer.call('provar_properties_set', { file_path: '/etc/hosts', updates: { provarHome: '/evil' }, }); @@ -415,11 +415,11 @@ describe('provar.properties.set', () => { }); }); -// ── provar.properties.validate ──────────────────────────────────────────────── +// ── provar_properties_validate ──────────────────────────────────────────────── -describe('provar.properties.validate', () => { +describe('provar_properties_validate', () => { it('is_valid=true for a fully-populated valid properties object (inline content)', () => { - const result = server.call('provar.properties.validate', { + const result = server.call('provar_properties_validate', { content: JSON.stringify(validProps()), }); @@ -433,7 +433,7 @@ describe('provar.properties.validate', () => { const props = validProps(); delete props['provarHome']; - const result = 
server.call('provar.properties.validate', { content: JSON.stringify(props) }); + const result = server.call('provar_properties_validate', { content: JSON.stringify(props) }); const body = parseText(result); assert.equal(body['is_valid'], false); @@ -448,7 +448,7 @@ describe('provar.properties.validate', () => { const props = validProps(); props['provarHome'] = '${PROVAR_HOME}'; - const result = server.call('provar.properties.validate', { content: JSON.stringify(props) }); + const result = server.call('provar_properties_validate', { content: JSON.stringify(props) }); const body = parseText(result); assert.ok((body['warning_count'] as number) > 0, 'Expected at least one warning'); @@ -460,7 +460,7 @@ describe('provar.properties.validate', () => { }); it('accepts inline content without file_path', () => { - const result = server.call('provar.properties.validate', { + const result = server.call('provar_properties_validate', { content: JSON.stringify(validProps()), }); assert.equal(isError(result), false); @@ -471,21 +471,21 @@ describe('provar.properties.validate', () => { const filePath = path.join(tmpDir, 'valid.json'); fs.writeFileSync(filePath, JSON.stringify(validProps()), 'utf-8'); - const result = server.call('provar.properties.validate', { file_path: filePath }); + const result = server.call('provar_properties_validate', { file_path: filePath }); assert.equal(isError(result), false); assert.equal((parseText(result) as { is_valid: boolean })['is_valid'], true); }); it('returns MISSING_INPUT when neither file_path nor content is provided', () => { - const result = server.call('provar.properties.validate', {}); + const result = server.call('provar_properties_validate', {}); assert.equal(isError(result), true); assert.equal(parseText(result)['error_code'], 'MISSING_INPUT'); }); it('returns PROPERTIES_FILE_NOT_FOUND when file_path points to a missing file', () => { - const result = server.call('provar.properties.validate', { + const result = 
server.call('provar_properties_validate', { file_path: path.join(tmpDir, 'nope.json'), }); @@ -494,7 +494,7 @@ describe('provar.properties.validate', () => { }); it('returns is_valid=false with root-level error for malformed JSON content', () => { - const result = server.call('provar.properties.validate', { content: '{ broken json' }); + const result = server.call('provar_properties_validate', { content: '{ broken json' }); assert.equal(isError(result), false, 'validate returns a result, not an error response'); const body = parseText(result); @@ -506,7 +506,7 @@ describe('provar.properties.validate', () => { const props = validProps(); (props['environment'] as Record)['webBrowser'] = 'Netscape'; - const result = server.call('provar.properties.validate', { content: JSON.stringify(props) }); + const result = server.call('provar_properties_validate', { content: JSON.stringify(props) }); const body = parseText(result); assert.equal(body['is_valid'], false); @@ -521,7 +521,7 @@ describe('provar.properties.validate', () => { const props = validProps(); (props['metadata'] as Record)['metadataLevel'] = 'Nuke'; - const result = server.call('provar.properties.validate', { content: JSON.stringify(props) }); + const result = server.call('provar_properties_validate', { content: JSON.stringify(props) }); const body = parseText(result); assert.equal(body['is_valid'], false); diff --git a/test/unit/mcp/qualityHubApiTools.test.ts b/test/unit/mcp/qualityHubApiTools.test.ts index 4f595f89..e5b72046 100644 --- a/test/unit/mcp/qualityHubApiTools.test.ts +++ b/test/unit/mcp/qualityHubApiTools.test.ts @@ -61,7 +61,7 @@ const CORPUS_RESPONSE = { query_truncated: false, }; -// ── Tests: provar.qualityhub.examples.retrieve ──────────────────────────────── +// ── Tests: provar_qualityhub_examples_retrieve ──────────────────────────────── describe('qualityHubApiTools', () => { let server: MockMcpServer; @@ -81,11 +81,11 @@ describe('qualityHubApiTools', () => { sinon.restore(); }); - 
describe('provar.qualityhub.examples.retrieve', () => { + describe('provar_qualityhub_examples_retrieve', () => { it('returns examples on happy path', async () => { retrieveStub.resolves(CORPUS_RESPONSE); - const result = await server.call('provar.qualityhub.examples.retrieve', { query: 'Create an Opportunity', n: 1 }); + const result = await server.call('provar_qualityhub_examples_retrieve', { query: 'Create an Opportunity', n: 1 }); assert.equal(isError(result), false); const body = parseBody(result); @@ -99,7 +99,7 @@ describe('qualityHubApiTools', () => { it('passes n, app_filter, prefer_high_quality to the service', async () => { retrieveStub.resolves({ ...CORPUS_RESPONSE, count: 0, examples: [] }); - await server.call('provar.qualityhub.examples.retrieve', { + await server.call('provar_qualityhub_examples_retrieve', { query: 'test query', n: 3, app_filter: 'SalesCloud', @@ -120,7 +120,7 @@ describe('qualityHubApiTools', () => { it('returns empty examples with no isError when API key is not configured', async () => { resolveKeyStub.returns(null); - const result = await server.call('provar.qualityhub.examples.retrieve', { query: 'Create an Opportunity' }); + const result = await server.call('provar_qualityhub_examples_retrieve', { query: 'Create an Opportunity' }); // CRITICAL: must NOT be isError:true — the LLM workflow must continue assert.equal(isError(result), false, 'Must not set isError:true when key missing'); @@ -135,7 +135,7 @@ describe('qualityHubApiTools', () => { it('returns empty examples with no isError on 401 auth error', async () => { retrieveStub.rejects(new QualityHubAuthError('Key invalid')); - const result = await server.call('provar.qualityhub.examples.retrieve', { query: 'Create an Opportunity' }); + const result = await server.call('provar_qualityhub_examples_retrieve', { query: 'Create an Opportunity' }); // CRITICAL: must NOT be isError:true — graceful degrade assert.equal(isError(result), false, 'Must not set isError:true on auth 
failure'); @@ -147,7 +147,7 @@ describe('qualityHubApiTools', () => { it('returns empty examples with no isError on rate limit', async () => { retrieveStub.rejects(new QualityHubRateLimitError('Rate limited')); - const result = await server.call('provar.qualityhub.examples.retrieve', { query: 'Create an Opportunity' }); + const result = await server.call('provar_qualityhub_examples_retrieve', { query: 'Create an Opportunity' }); assert.equal(isError(result), false, 'Must not set isError:true on rate limit'); const body = parseBody(result); @@ -158,7 +158,7 @@ describe('qualityHubApiTools', () => { it('returns empty examples with no isError on network/server error', async () => { retrieveStub.rejects(new Error('ECONNRESET')); - const result = await server.call('provar.qualityhub.examples.retrieve', { query: 'Create an Opportunity' }); + const result = await server.call('provar_qualityhub_examples_retrieve', { query: 'Create an Opportunity' }); assert.equal(isError(result), false, 'Must not set isError:true on network error'); const body = parseBody(result); @@ -166,7 +166,7 @@ describe('qualityHubApiTools', () => { }); it('returns isError:true for empty query', async () => { - const result = await server.call('provar.qualityhub.examples.retrieve', { query: '' }); + const result = await server.call('provar_qualityhub_examples_retrieve', { query: '' }); assert.equal(isError(result), true); const body = parseBody(result); @@ -176,7 +176,7 @@ describe('qualityHubApiTools', () => { it('surfaces query_truncated:true in response', async () => { retrieveStub.resolves({ ...CORPUS_RESPONSE, query_truncated: true }); - const result = await server.call('provar.qualityhub.examples.retrieve', { query: 'A'.repeat(2100) }); + const result = await server.call('provar_qualityhub_examples_retrieve', { query: 'A'.repeat(2100) }); const body = parseBody(result); assert.equal(body.query_truncated, true); @@ -185,7 +185,7 @@ describe('qualityHubApiTools', () => { it('returns empty 
examples array (not an error) when Bedrock returns 0 results', async () => { retrieveStub.resolves({ retrieval_id: 'ret-empty', examples: [], count: 0, query_truncated: false }); - const result = await server.call('provar.qualityhub.examples.retrieve', { query: 'very unusual query' }); + const result = await server.call('provar_qualityhub_examples_retrieve', { query: 'very unusual query' }); assert.equal(isError(result), false); const body = parseBody(result); @@ -197,7 +197,7 @@ describe('qualityHubApiTools', () => { it('includes retrieval_id in all successful responses', async () => { retrieveStub.resolves(CORPUS_RESPONSE); - const result = await server.call('provar.qualityhub.examples.retrieve', { query: 'some query' }); + const result = await server.call('provar_qualityhub_examples_retrieve', { query: 'some query' }); const body = parseBody(result); assert.ok(body.retrieval_id, 'retrieval_id must be present'); }); diff --git a/test/unit/mcp/qualityHubTools.test.ts b/test/unit/mcp/qualityHubTools.test.ts index d80434cb..6989b8a5 100644 --- a/test/unit/mcp/qualityHubTools.test.ts +++ b/test/unit/mcp/qualityHubTools.test.ts @@ -79,13 +79,13 @@ describe('qualityHubTools', () => { sinon.restore(); }); - // ── provar.qualityhub.connect ─────────────────────────────────────────────── + // ── provar_qualityhub_connect ─────────────────────────────────────────────── - describe('provar.qualityhub.connect', () => { + describe('provar_qualityhub_connect', () => { it('passes correct args to sf and returns stdout on success', () => { spawnStub.returns(makeSpawnResult('{"status":0}', '', 0)); - const result = server.call('provar.qualityhub.connect', { target_org: 'myorg', flags: [] }); + const result = server.call('provar_qualityhub_connect', { target_org: 'myorg', flags: [] }); const body = parseBody(result); assert.equal(body.exitCode, 0); @@ -98,14 +98,14 @@ describe('qualityHubTools', () => { it('forwards extra flags', () => { spawnStub.returns(makeSpawnResult('ok', '', 
0)); - server.call('provar.qualityhub.connect', { target_org: 'myorg', flags: ['--json'] }); + server.call('provar_qualityhub_connect', { target_org: 'myorg', flags: ['--json'] }); const args = spawnStub.firstCall.args[1] as string[]; assert.ok(args.includes('--json')); }); it('returns isError when exit code is non-zero', () => { spawnStub.returns(makeSpawnResult('', 'bad credentials', 1)); - const result = server.call('provar.qualityhub.connect', { target_org: 'myorg', flags: [] }); + const result = server.call('provar_qualityhub_connect', { target_org: 'myorg', flags: [] }); assert.ok(isError(result)); const body = parseBody(result); assert.equal(body.error_code, 'QH_CONNECT_FAILED'); @@ -113,7 +113,7 @@ describe('qualityHubTools', () => { it('returns SF_NOT_FOUND when sf is not in PATH', () => { spawnStub.returns(makeEnoentResult()); - const result = server.call('provar.qualityhub.connect', { target_org: 'myorg', flags: [] }); + const result = server.call('provar_qualityhub_connect', { target_org: 'myorg', flags: [] }); assert.ok(isError(result)); const body = parseBody(result); assert.equal(body.error_code, 'SF_NOT_FOUND'); @@ -121,12 +121,12 @@ describe('qualityHubTools', () => { }); }); - // ── provar.qualityhub.display ─────────────────────────────────────────────── + // ── provar_qualityhub_display ─────────────────────────────────────────────── - describe('provar.qualityhub.display', () => { + describe('provar_qualityhub_display', () => { it('calls sf with display args on success', () => { spawnStub.returns(makeSpawnResult('display output', '', 0)); - const result = server.call('provar.qualityhub.display', { target_org: 'myorg', flags: [] }); + const result = server.call('provar_qualityhub_display', { target_org: 'myorg', flags: [] }); const body = parseBody(result); assert.equal(body.exitCode, 0); const args = spawnStub.firstCall.args[1] as string[]; @@ -136,31 +136,31 @@ describe('qualityHubTools', () => { it('omits --target-org when target_org not 
provided', () => { spawnStub.returns(makeSpawnResult('ok', '', 0)); - server.call('provar.qualityhub.display', { target_org: undefined, flags: [] }); + server.call('provar_qualityhub_display', { target_org: undefined, flags: [] }); const args = spawnStub.firstCall.args[1] as string[]; assert.ok(!args.includes('--target-org')); }); it('returns isError on non-zero exit', () => { spawnStub.returns(makeSpawnResult('', 'error', 1)); - const result = server.call('provar.qualityhub.display', { flags: [] }); + const result = server.call('provar_qualityhub_display', { flags: [] }); assert.ok(isError(result)); }); it('returns SF_NOT_FOUND on ENOENT', () => { spawnStub.returns(makeEnoentResult()); - const result = server.call('provar.qualityhub.display', { flags: [] }); + const result = server.call('provar_qualityhub_display', { flags: [] }); const body = parseBody(result); assert.equal(body.error_code, 'SF_NOT_FOUND'); }); }); - // ── provar.qualityhub.testrun ─────────────────────────────────────────────── + // ── provar_qualityhub_testrun ─────────────────────────────────────────────── - describe('provar.qualityhub.testrun', () => { + describe('provar_qualityhub_testrun', () => { it('passes correct args and returns success', () => { spawnStub.returns(makeSpawnResult('run started', '', 0)); - const result = server.call('provar.qualityhub.testrun', { target_org: 'myorg', flags: [] }); + const result = server.call('provar_qualityhub_testrun', { target_org: 'myorg', flags: [] }); const body = parseBody(result); assert.equal(body.exitCode, 0); const args = spawnStub.firstCall.args[1] as string[]; @@ -169,20 +169,20 @@ describe('qualityHubTools', () => { it('returns QH_TESTRUN_FAILED on non-zero exit', () => { spawnStub.returns(makeSpawnResult('', 'run failed', 1)); - const result = server.call('provar.qualityhub.testrun', { target_org: 'myorg', flags: [] }); + const result = server.call('provar_qualityhub_testrun', { target_org: 'myorg', flags: [] }); 
assert.ok(isError(result)); assert.equal(parseBody(result).error_code, 'QH_TESTRUN_FAILED'); }); it('returns SF_NOT_FOUND on ENOENT', () => { spawnStub.returns(makeEnoentResult()); - const result = server.call('provar.qualityhub.testrun', { target_org: 'myorg', flags: [] }); + const result = server.call('provar_qualityhub_testrun', { target_org: 'myorg', flags: [] }); assert.equal(parseBody(result).error_code, 'SF_NOT_FOUND'); }); it('adds wildcard warning when flags contain * glob pattern', () => { spawnStub.returns(makeSpawnResult('run started', '', 0)); - const result = server.call('provar.qualityhub.testrun', { + const result = server.call('provar_qualityhub_testrun', { target_org: 'myorg', flags: ['--plan-name', 'Suite/E2E*'], }); @@ -195,7 +195,7 @@ describe('qualityHubTools', () => { it('adds wildcard warning when flags contain ? pattern', () => { spawnStub.returns(makeSpawnResult('run started', '', 0)); - const result = server.call('provar.qualityhub.testrun', { + const result = server.call('provar_qualityhub_testrun', { target_org: 'myorg', flags: ['--plan-name', 'Suite?Test'], }); @@ -206,7 +206,7 @@ describe('qualityHubTools', () => { it('does not add warning for exact plan name flags', () => { spawnStub.returns(makeSpawnResult('run started', '', 0)); - const result = server.call('provar.qualityhub.testrun', { + const result = server.call('provar_qualityhub_testrun', { target_org: 'myorg', flags: ['--plan-name', 'SmokeTests'], }); @@ -215,12 +215,12 @@ describe('qualityHubTools', () => { }); }); - // ── provar.qualityhub.testrun.report ───────────────────────────────────────── + // ── provar_qualityhub_testrun_report ───────────────────────────────────────── - describe('provar.qualityhub.testrun.report', () => { + describe('provar_qualityhub_testrun_report', () => { it('passes run_id in args', () => { spawnStub.returns(makeSpawnResult('{"status":"running"}', '', 0)); - server.call('provar.qualityhub.testrun.report', { target_org: 'myorg', run_id: 
'abc-123', flags: [] }); + server.call('provar_qualityhub_testrun_report', { target_org: 'myorg', run_id: 'abc-123', flags: [] }); const args = spawnStub.firstCall.args[1] as string[]; assert.ok(args.includes('--run-id')); assert.ok(args.includes('abc-123')); @@ -228,7 +228,7 @@ describe('qualityHubTools', () => { it('returns QH_REPORT_FAILED on non-zero exit', () => { spawnStub.returns(makeSpawnResult('', 'not found', 1)); - const result = server.call('provar.qualityhub.testrun.report', { + const result = server.call('provar_qualityhub_testrun_report', { target_org: 'myorg', run_id: 'abc-123', flags: [], @@ -239,7 +239,7 @@ describe('qualityHubTools', () => { it('returns SF_NOT_FOUND on ENOENT', () => { spawnStub.returns(makeEnoentResult()); - const result = server.call('provar.qualityhub.testrun.report', { + const result = server.call('provar_qualityhub_testrun_report', { target_org: 'myorg', run_id: 'abc-123', flags: [], @@ -250,7 +250,7 @@ describe('qualityHubTools', () => { describe('failure detection', () => { it('sets suggestion when JSON result.status is "FAILED"', () => { spawnStub.returns(makeSpawnResult(JSON.stringify({ result: { status: 'FAILED' } }), '', 0)); - const result = server.call('provar.qualityhub.testrun.report', { + const result = server.call('provar_qualityhub_testrun_report', { target_org: 'myorg', run_id: 'abc-123', flags: [], @@ -261,7 +261,7 @@ describe('qualityHubTools', () => { it('sets suggestion when JSON result.status is "FAIL"', () => { spawnStub.returns(makeSpawnResult(JSON.stringify({ result: { status: 'FAIL' } }), '', 0)); - const result = server.call('provar.qualityhub.testrun.report', { + const result = server.call('provar_qualityhub_testrun_report', { target_org: 'myorg', run_id: 'abc-123', flags: [], @@ -272,7 +272,7 @@ describe('qualityHubTools', () => { it('does NOT set suggestion when status is "RUNNING"', () => { spawnStub.returns(makeSpawnResult(JSON.stringify({ result: { status: 'RUNNING' } }), '', 0)); - const result 
= server.call('provar.qualityhub.testrun.report', { + const result = server.call('provar_qualityhub_testrun_report', { target_org: 'myorg', run_id: 'abc-123', flags: [], @@ -283,7 +283,7 @@ describe('qualityHubTools', () => { it('does NOT set suggestion when status is "PASSED"', () => { spawnStub.returns(makeSpawnResult(JSON.stringify({ result: { status: 'PASSED' } }), '', 0)); - const result = server.call('provar.qualityhub.testrun.report', { + const result = server.call('provar_qualityhub_testrun_report', { target_org: 'myorg', run_id: 'abc-123', flags: [], @@ -298,7 +298,7 @@ describe('qualityHubTools', () => { spawnStub.returns( makeSpawnResult('{"message": "No failure detected in this output", "result": {"status": "PASSED"}}', '', 0) ); - const result = server.call('provar.qualityhub.testrun.report', { + const result = server.call('provar_qualityhub_testrun_report', { target_org: 'myorg', run_id: 'abc-123', flags: [], @@ -310,7 +310,7 @@ describe('qualityHubTools', () => { it('falls back to regex extraction when stdout is not valid JSON', () => { // Non-JSON output with "status": "FAILED" substring spawnStub.returns(makeSpawnResult('"status": "FAILED"', '', 0)); - const result = server.call('provar.qualityhub.testrun.report', { + const result = server.call('provar_qualityhub_testrun_report', { target_org: 'myorg', run_id: 'abc-123', flags: [], @@ -324,12 +324,12 @@ describe('qualityHubTools', () => { }); }); - // ── provar.qualityhub.testrun.abort ────────────────────────────────────────── + // ── provar_qualityhub_testrun_abort ────────────────────────────────────────── - describe('provar.qualityhub.testrun.abort', () => { + describe('provar_qualityhub_testrun_abort', () => { it('passes run_id and abort subcommand', () => { spawnStub.returns(makeSpawnResult('aborted', '', 0)); - server.call('provar.qualityhub.testrun.abort', { target_org: 'myorg', run_id: 'abc-123', flags: [] }); + server.call('provar_qualityhub_testrun_abort', { target_org: 'myorg', run_id: 
'abc-123', flags: [] }); const args = spawnStub.firstCall.args[1] as string[]; assert.ok(args.includes('abort')); assert.ok(args.includes('abc-123')); @@ -337,7 +337,7 @@ describe('qualityHubTools', () => { it('returns QH_ABORT_FAILED on non-zero exit', () => { spawnStub.returns(makeSpawnResult('', 'abort failed', 1)); - const result = server.call('provar.qualityhub.testrun.abort', { + const result = server.call('provar_qualityhub_testrun_abort', { target_org: 'myorg', run_id: 'abc-123', flags: [], @@ -348,7 +348,7 @@ describe('qualityHubTools', () => { it('returns SF_NOT_FOUND on ENOENT', () => { spawnStub.returns(makeEnoentResult()); - const result = server.call('provar.qualityhub.testrun.abort', { + const result = server.call('provar_qualityhub_testrun_abort', { target_org: 'myorg', run_id: 'abc-123', flags: [], @@ -357,12 +357,12 @@ describe('qualityHubTools', () => { }); }); - // ── provar.qualityhub.testcase.retrieve ────────────────────────────────────── + // ── provar_qualityhub_testcase_retrieve ────────────────────────────────────── - describe('provar.qualityhub.testcase.retrieve', () => { + describe('provar_qualityhub_testcase_retrieve', () => { it('calls sf with testcase retrieve args', () => { spawnStub.returns(makeSpawnResult('[]', '', 0)); - const result = server.call('provar.qualityhub.testcase.retrieve', { + const result = server.call('provar_qualityhub_testcase_retrieve', { target_org: 'myorg', flags: ['--user-story', 'US-1'], }); @@ -376,14 +376,14 @@ describe('qualityHubTools', () => { it('returns QH_RETRIEVE_FAILED on non-zero exit', () => { spawnStub.returns(makeSpawnResult('', 'no records', 1)); - const result = server.call('provar.qualityhub.testcase.retrieve', { target_org: 'myorg', flags: [] }); + const result = server.call('provar_qualityhub_testcase_retrieve', { target_org: 'myorg', flags: [] }); assert.ok(isError(result)); assert.equal(parseBody(result).error_code, 'QH_RETRIEVE_FAILED'); }); it('returns SF_NOT_FOUND on ENOENT', () => { 
spawnStub.returns(makeEnoentResult()); - const result = server.call('provar.qualityhub.testcase.retrieve', { target_org: 'myorg', flags: [] }); + const result = server.call('provar_qualityhub_testcase_retrieve', { target_org: 'myorg', flags: [] }); assert.equal(parseBody(result).error_code, 'SF_NOT_FOUND'); }); }); diff --git a/test/unit/mcp/rcaTools.test.ts b/test/unit/mcp/rcaTools.test.ts index 4f24e29c..71a839c4 100644 --- a/test/unit/mcp/rcaTools.test.ts +++ b/test/unit/mcp/rcaTools.test.ts @@ -108,14 +108,14 @@ function makeIncrementDir(base: string, index: number, junit?: string): string { return makeResultsDir(dir, junit); } -// ── provar.testrun.report.locate ─────────────────────────────────────────────── +// ── provar_testrun_report_locate ─────────────────────────────────────────────── -describe('provar.testrun.report.locate', () => { +describe('provar_testrun_report_locate', () => { // Test 1: explicit results_path → returns correct paths it('with explicit results_path returns correct paths', () => { const resultsDir = makeResultsDir(path.join(tmpDir, 'results'), JUNIT_XML); - const result = server.call('provar.testrun.report.locate', { + const result = server.call('provar_testrun_report_locate', { project_path: tmpDir, results_path: resultsDir, }); @@ -135,7 +135,7 @@ describe('provar.testrun.report.locate', () => { makeIncrementDir(resultsBase, 2, JUNIT_XML); makeIncrementDir(resultsBase, 3, JUNIT_XML); - const result = server.call('provar.testrun.report.locate', { + const result = server.call('provar_testrun_report_locate', { project_path: tmpDir, results_path: resultsBase, }); @@ -153,7 +153,7 @@ describe('provar.testrun.report.locate', () => { makeIncrementDir(resultsBase, 2, JUNIT_XML); makeIncrementDir(resultsBase, 5, JUNIT_XML); - const result = server.call('provar.testrun.report.locate', { + const result = server.call('provar_testrun_report_locate', { project_path: tmpDir, results_path: resultsBase, run_index: 2, @@ -180,7 +180,7 @@ 
describe('provar.testrun.report.locate', () => { // To guarantee the properties file path wins, supply it explicitly via results_path // so that step 1 (explicit) takes precedence — this tests the locate result structure // independently of resolution ordering. - const result = server.call('provar.testrun.report.locate', { + const result = server.call('provar_testrun_report_locate', { project_path: tmpDir, results_path: resultsDir, }); @@ -203,7 +203,7 @@ describe('provar.testrun.report.locate', () => { 'utf-8' ); - const result = server.call('provar.testrun.report.locate', { + const result = server.call('provar_testrun_report_locate', { project_path: projectPath, }); @@ -224,7 +224,7 @@ describe('provar.testrun.report.locate', () => { const emptyProject = path.join(tmpDir, 'empty-project'); fs.mkdirSync(emptyProject, { recursive: true }); - const result = server.call('provar.testrun.report.locate', { + const result = server.call('provar_testrun_report_locate', { project_path: emptyProject, }); @@ -249,7 +249,7 @@ describe('provar.testrun.report.locate', () => { makeResultsDir(path.join(parent, 'Results(1)')); makeResultsDir(path.join(parent, 'Results(18)'), JUNIT_XML); - const result = server.call('provar.testrun.report.locate', { + const result = server.call('provar_testrun_report_locate', { project_path: parent, results_path: resultsBase, }); @@ -268,7 +268,7 @@ describe('provar.testrun.report.locate', () => { makeResultsDir(path.join(parent, 'Results(3)')); makeResultsDir(path.join(parent, 'Results(5)'), JUNIT_XML); - const result = server.call('provar.testrun.report.locate', { + const result = server.call('provar_testrun_report_locate', { project_path: parent, results_path: resultsBase, run_index: 3, @@ -286,7 +286,7 @@ describe('provar.testrun.report.locate', () => { fs.writeFileSync(path.join(resultsDir, 'LoginTest.testcase.html'), '', 'utf-8'); fs.writeFileSync(path.join(resultsDir, 'SearchTest.testcase.html'), '', 'utf-8'); - const result = 
server.call('provar.testrun.report.locate', { + const result = server.call('provar_testrun_report_locate', { project_path: tmpDir, results_path: resultsDir, }); @@ -298,14 +298,14 @@ describe('provar.testrun.report.locate', () => { }); }); -// ── provar.testrun.rca ───────────────────────────────────────────────────────── +// ── provar_testrun_rca ───────────────────────────────────────────────────────── -describe('provar.testrun.rca', () => { +describe('provar_testrun_rca', () => { // Test 5: locate_only → returns locate result, skips parsing it('with locate_only=true returns locate result and skips parsing', () => { const resultsDir = makeResultsDir(path.join(tmpDir, 'results'), JUNIT_XML); - const result = server.call('provar.testrun.rca', { + const result = server.call('provar_testrun_rca', { project_path: tmpDir, results_path: resultsDir, locate_only: true, @@ -321,7 +321,7 @@ describe('provar.testrun.rca', () => { it('when JUnit.xml missing returns run_in_progress: true', () => { const resultsDir = makeResultsDir(path.join(tmpDir, 'results')); // no JUnit.xml - const result = server.call('provar.testrun.rca', { + const result = server.call('provar_testrun_rca', { project_path: tmpDir, results_path: resultsDir, }); @@ -335,7 +335,7 @@ describe('provar.testrun.rca', () => { it('parses valid JUnit.xml and returns correct run_summary counts', () => { const resultsDir = makeResultsDir(path.join(tmpDir, 'results'), JUNIT_XML); - const result = server.call('provar.testrun.rca', { + const result = server.call('provar_testrun_rca', { project_path: tmpDir, results_path: resultsDir, }); @@ -355,7 +355,7 @@ describe('provar.testrun.rca', () => { it('classifies LOCATOR_STALE correctly from NoSuchElementException', () => { const resultsDir = makeResultsDir(path.join(tmpDir, 'results'), JUNIT_XML); - const result = server.call('provar.testrun.rca', { + const result = server.call('provar_testrun_rca', { project_path: tmpDir, results_path: resultsDir, }); @@ -372,7 +372,7 @@ 
describe('provar.testrun.rca', () => { it('classifies DRIVER_VERSION_MISMATCH before LOCATOR_STALE when both patterns could match', () => { const resultsDir = makeResultsDir(path.join(tmpDir, 'results'), DRIVER_VERSION_JUNIT_XML); - const result = server.call('provar.testrun.rca', { + const result = server.call('provar_testrun_rca', { project_path: tmpDir, results_path: resultsDir, }); @@ -388,7 +388,7 @@ describe('provar.testrun.rca', () => { it('extracts page_object from failure message', () => { const resultsDir = makeResultsDir(path.join(tmpDir, 'results'), JUNIT_XML); - const result = server.call('provar.testrun.rca', { + const result = server.call('provar_testrun_rca', { project_path: tmpDir, results_path: resultsDir, }); @@ -419,7 +419,7 @@ describe('provar.testrun.rca', () => { // Current run (index 2) with same failure makeIncrementDir(resultsBase, 2, JUNIT_XML); - const result = server.call('provar.testrun.rca', { + const result = server.call('provar_testrun_rca', { project_path: tmpDir, results_path: resultsBase, run_index: 2, @@ -450,7 +450,7 @@ describe('provar.testrun.rca', () => { // Current run (index 2) with SearchTest failing for the first time makeIncrementDir(resultsBase, 2, JUNIT_XML); - const result = server.call('provar.testrun.rca', { + const result = server.call('provar_testrun_rca', { project_path: tmpDir, results_path: resultsBase, run_index: 2, @@ -482,7 +482,7 @@ describe('provar.testrun.rca', () => { const resultsDir = makeResultsDir(path.join(tmpDir, 'results'), multiFailureJunit); - const result = server.call('provar.testrun.rca', { + const result = server.call('provar_testrun_rca', { project_path: tmpDir, results_path: resultsDir, }); @@ -498,7 +498,7 @@ describe('provar.testrun.rca', () => { it('classifies unrecognised failure text as UNKNOWN', () => { const resultsDir = makeResultsDir(path.join(tmpDir, 'results'), UNKNOWN_JUNIT_XML); - const result = server.call('provar.testrun.rca', { + const result = 
server.call('provar_testrun_rca', { project_path: tmpDir, results_path: resultsDir, }); @@ -514,7 +514,7 @@ describe('provar.testrun.rca', () => { it('populates infrastructure_issues for DRIVER_VERSION_MISMATCH failures', () => { const resultsDir = makeResultsDir(path.join(tmpDir, 'results'), DRIVER_VERSION_JUNIT_XML); - const result = server.call('provar.testrun.rca', { + const result = server.call('provar_testrun_rca', { project_path: tmpDir, results_path: resultsDir, }); @@ -539,7 +539,7 @@ describe('provar.testrun.rca', () => { const resultsDir = makeResultsDir(path.join(tmpDir, 'results'), longJunit); - const result = server.call('provar.testrun.rca', { + const result = server.call('provar_testrun_rca', { project_path: tmpDir, results_path: resultsDir, }); @@ -560,7 +560,7 @@ describe('provar.testrun.rca', () => { `; const resultsDir = makeResultsDir(path.join(tmpDir, 'sf-validation'), junit); - const body = parseText(server.call('provar.testrun.rca', { project_path: tmpDir, results_path: resultsDir })); + const body = parseText(server.call('provar_testrun_rca', { project_path: tmpDir, results_path: resultsDir })); const failures = body['failures'] as Array>; assert.equal(failures[0]['root_cause_category'], 'SALESFORCE_VALIDATION'); }); @@ -575,7 +575,7 @@ describe('provar.testrun.rca', () => { `; const resultsDir = makeResultsDir(path.join(tmpDir, 'sf-picklist'), junit); - const body = parseText(server.call('provar.testrun.rca', { project_path: tmpDir, results_path: resultsDir })); + const body = parseText(server.call('provar_testrun_rca', { project_path: tmpDir, results_path: resultsDir })); const failures = body['failures'] as Array>; assert.equal(failures[0]['root_cause_category'], 'SALESFORCE_PICKLIST'); }); @@ -590,7 +590,7 @@ describe('provar.testrun.rca', () => { `; const resultsDir = makeResultsDir(path.join(tmpDir, 'sf-trigger'), junit); - const body = parseText(server.call('provar.testrun.rca', { project_path: tmpDir, results_path: resultsDir })); + 
const body = parseText(server.call('provar_testrun_rca', { project_path: tmpDir, results_path: resultsDir })); const failures = body['failures'] as Array>; assert.equal(failures[0]['root_cause_category'], 'SALESFORCE_TRIGGER'); }); @@ -605,7 +605,7 @@ describe('provar.testrun.rca', () => { `; const resultsDir = makeResultsDir(path.join(tmpDir, 'sf-infra-check'), junit); - const body = parseText(server.call('provar.testrun.rca', { project_path: tmpDir, results_path: resultsDir })); + const body = parseText(server.call('provar_testrun_rca', { project_path: tmpDir, results_path: resultsDir })); const infra = body['infrastructure_issues'] as string[]; assert.ok( !infra.some((s) => s.includes('SALESFORCE_')), @@ -615,7 +615,7 @@ describe('provar.testrun.rca', () => { it('mode=rca is the default and produces existing RCA output shape', () => { const resultsDir = makeResultsDir(path.join(tmpDir, 'rca-default'), JUNIT_XML); - const body = parseText(server.call('provar.testrun.rca', { project_path: tmpDir, results_path: resultsDir })); + const body = parseText(server.call('provar_testrun_rca', { project_path: tmpDir, results_path: resultsDir })); // Full RCA shape must be present assert.ok('run_summary' in body, 'run_summary should be present for mode=rca'); assert.ok('failures' in body, 'failures should be present'); @@ -624,13 +624,13 @@ describe('provar.testrun.rca', () => { }); }); -// ── provar.testrun.rca mode=failures ────────────────────────────────────────── +// ── provar_testrun_rca mode=failures ────────────────────────────────────────── -describe('provar.testrun.rca mode=failures', () => { +describe('provar_testrun_rca mode=failures', () => { it('returns lightweight failure array when JUnit.xml is present', () => { const resultsDir = makeResultsDir(path.join(tmpDir, 'failures-mode'), JUNIT_XML); const body = parseText( - server.call('provar.testrun.rca', { project_path: tmpDir, results_path: resultsDir, mode: 'failures' }) + server.call('provar_testrun_rca', { 
project_path: tmpDir, results_path: resultsDir, mode: 'failures' }) ); assert.ok('failures' in body, 'failures should be present'); @@ -653,7 +653,7 @@ describe('provar.testrun.rca mode=failures', () => { it('returns empty array with warning when results dir has no JUnit.xml', () => { const resultsDir = makeResultsDir(path.join(tmpDir, 'failures-empty')); const body = parseText( - server.call('provar.testrun.rca', { project_path: tmpDir, results_path: resultsDir, mode: 'failures' }) + server.call('provar_testrun_rca', { project_path: tmpDir, results_path: resultsDir, mode: 'failures' }) ); const failures = body['failures'] as unknown[]; @@ -666,7 +666,7 @@ describe('provar.testrun.rca mode=failures', () => { it('skipped tests are not included in failures list', () => { const resultsDir = makeResultsDir(path.join(tmpDir, 'failures-skip'), JUNIT_XML); const body = parseText( - server.call('provar.testrun.rca', { project_path: tmpDir, results_path: resultsDir, mode: 'failures' }) + server.call('provar_testrun_rca', { project_path: tmpDir, results_path: resultsDir, mode: 'failures' }) ); const failures = body['failures'] as Array>; const testItemIds = failures.map((f) => f['testItemId']); @@ -680,7 +680,7 @@ describe('provar.testrun.rca mode=failures', () => { registerTestRunRca(restrictedServer as never, { allowedPaths: [allowedDir] }); const outsideDir = makeResultsDir(path.join(tmpDir, 'outside'), JUNIT_XML); - const result = restrictedServer.call('provar.testrun.rca', { + const result = restrictedServer.call('provar_testrun_rca', { project_path: tmpDir, results_path: outsideDir, mode: 'failures', @@ -700,7 +700,7 @@ describe('provar.testrun.rca mode=failures', () => { fs.mkdirSync(allowedDir, { recursive: true }); registerTestRunRca(restrictedServer as never, { allowedPaths: [allowedDir] }); - const result = restrictedServer.call('provar.testrun.rca', { + const result = restrictedServer.call('provar_testrun_rca', { project_path: tmpDir, // tmpDir root is outside 
allowed subdir mode: 'failures', }); diff --git a/test/unit/mcp/testCaseGenerate.test.ts b/test/unit/mcp/testCaseGenerate.test.ts index e3729beb..3cac5233 100644 --- a/test/unit/mcp/testCaseGenerate.test.ts +++ b/test/unit/mcp/testCaseGenerate.test.ts @@ -65,12 +65,12 @@ afterEach(() => { fs.rmSync(tmpDir, { recursive: true, force: true }); }); -// ── provar.testcase.generate ─────────────────────────────────────────────────── +// ── provar_testcase_generate ─────────────────────────────────────────────────── -describe('provar.testcase.generate', () => { +describe('provar_testcase_generate', () => { describe('dry_run', () => { it('returns xml_content without writing to disk', () => { - const result = server.call('provar.testcase.generate', { + const result = server.call('provar_testcase_generate', { test_case_name: 'Login Test', steps: [], dry_run: true, @@ -86,7 +86,7 @@ describe('provar.testcase.generate', () => { it('does NOT write a file even when output_path is provided', () => { const outPath = path.join(tmpDir, 'LoginTest.testcase'); - server.call('provar.testcase.generate', { + server.call('provar_testcase_generate', { test_case_name: 'Login Test', steps: [], output_path: outPath, @@ -100,7 +100,7 @@ describe('provar.testcase.generate', () => { describe('generated XML content', () => { it('contains root element with name attribute', () => { - const result = server.call('provar.testcase.generate', { + const result = server.call('provar_testcase_generate', { test_case_name: 'Create Account', steps: [], dry_run: true, @@ -113,7 +113,7 @@ describe('provar.testcase.generate', () => { }); it('contains element', () => { - const result = server.call('provar.testcase.generate', { + const result = server.call('provar_testcase_generate', { test_case_name: 'My Test', steps: [], dry_run: true, @@ -125,7 +125,7 @@ describe('provar.testcase.generate', () => { }); it('generates UUID v4 guids for testCase guid attribute', () => { - const result = 
server.call('provar.testcase.generate', { + const result = server.call('provar_testcase_generate', { test_case_name: 'UUID Test', steps: [], dry_run: true, @@ -140,7 +140,7 @@ describe('provar.testcase.generate', () => { it('uses explicit test_case_id when provided', () => { const myId = 'my-explicit-id-123'; - const result = server.call('provar.testcase.generate', { + const result = server.call('provar_testcase_generate', { test_case_name: 'Explicit ID Test', test_case_id: myId, steps: [], @@ -153,7 +153,7 @@ describe('provar.testcase.generate', () => { }); it('includes steps with correct apiId and sequential testItemId', () => { - const result = server.call('provar.testcase.generate', { + const result = server.call('provar_testcase_generate', { test_case_name: 'Multi Step', steps: [ { api_id: 'UiConnect', name: 'Connect', attributes: {} }, @@ -174,7 +174,7 @@ describe('provar.testcase.generate', () => { }); it('reports step_count matching the number of steps', () => { - const result = server.call('provar.testcase.generate', { + const result = server.call('provar_testcase_generate', { test_case_name: 'Count Test', steps: [ { api_id: 'UiConnect', name: 'Step 1', attributes: {} }, @@ -188,7 +188,7 @@ describe('provar.testcase.generate', () => { }); it('includes validation field with is_valid and scores', () => { - const result = server.call('provar.testcase.generate', { + const result = server.call('provar_testcase_generate', { test_case_name: 'Validated Test', steps: [{ api_id: 'UiConnect', name: 'Connect', attributes: {} }], dry_run: true, @@ -209,7 +209,7 @@ describe('provar.testcase.generate', () => { }); it('emits a TODO comment when no steps are provided', () => { - const result = server.call('provar.testcase.generate', { + const result = server.call('provar_testcase_generate', { test_case_name: 'No Steps', steps: [], dry_run: true, @@ -221,7 +221,7 @@ describe('provar.testcase.generate', () => { }); it('escapes XML special characters in test_case_name', () => 
{ - const result = server.call('provar.testcase.generate', { + const result = server.call('provar_testcase_generate', { test_case_name: 'Test & "Escape" ', steps: [], dry_run: true, @@ -236,7 +236,7 @@ describe('provar.testcase.generate', () => { }); it('escapes XML special characters in step api_id and name', () => { - const result = server.call('provar.testcase.generate', { + const result = server.call('provar_testcase_generate', { test_case_name: 'Escape Step Test', steps: [{ api_id: 'Api', name: 'Step & "Name"', attributes: {} }], dry_run: true, @@ -252,7 +252,7 @@ describe('provar.testcase.generate', () => { describe('writing to disk', () => { it('writes file when dry_run=false and output_path provided', () => { const outPath = path.join(tmpDir, 'Login.testcase'); - const result = server.call('provar.testcase.generate', { + const result = server.call('provar_testcase_generate', { test_case_name: 'Login', steps: [], output_path: outPath, @@ -266,7 +266,7 @@ describe('provar.testcase.generate', () => { }); it('does NOT write when dry_run=false but no output_path', () => { - const result = server.call('provar.testcase.generate', { + const result = server.call('provar_testcase_generate', { test_case_name: 'No Path Test', steps: [], dry_run: false, @@ -281,7 +281,7 @@ describe('provar.testcase.generate', () => { const outPath = path.join(tmpDir, 'Existing.testcase'); fs.writeFileSync(outPath, '', 'utf-8'); - const result = server.call('provar.testcase.generate', { + const result = server.call('provar_testcase_generate', { test_case_name: 'Existing', steps: [], output_path: outPath, @@ -297,7 +297,7 @@ describe('provar.testcase.generate', () => { const outPath = path.join(tmpDir, 'Existing.testcase'); fs.writeFileSync(outPath, '', 'utf-8'); - const result = server.call('provar.testcase.generate', { + const result = server.call('provar_testcase_generate', { test_case_name: 'Existing', steps: [], output_path: outPath, @@ -312,7 +312,7 @@ 
describe('provar.testcase.generate', () => { it('creates parent directories as needed', () => { const outPath = path.join(tmpDir, 'tests', 'suite', 'Login.testcase'); - server.call('provar.testcase.generate', { + server.call('provar_testcase_generate', { test_case_name: 'Login', steps: [], output_path: outPath, @@ -329,7 +329,7 @@ describe('provar.testcase.generate', () => { const strictServer = new MockMcpServer(); registerTestCaseGenerate(strictServer as never, { allowedPaths: [tmpDir] }); - const result = strictServer.call('provar.testcase.generate', { + const result = strictServer.call('provar_testcase_generate', { test_case_name: 'Evil', steps: [], output_path: path.join(os.tmpdir(), 'evil.testcase'), @@ -346,7 +346,7 @@ describe('provar.testcase.generate', () => { const strictServer = new MockMcpServer(); registerTestCaseGenerate(strictServer as never, { allowedPaths: [tmpDir] }); - const result = strictServer.call('provar.testcase.generate', { + const result = strictServer.call('provar_testcase_generate', { test_case_name: 'Safe', steps: [], output_path: '/etc/evil.testcase', @@ -360,7 +360,7 @@ describe('provar.testcase.generate', () => { describe('idempotency_key', () => { it('echoes back the provided idempotency_key', () => { - const result = server.call('provar.testcase.generate', { + const result = server.call('provar_testcase_generate', { test_case_name: 'Idempotent', steps: [], idempotency_key: 'dedup-key-abc', @@ -374,7 +374,7 @@ describe('provar.testcase.generate', () => { describe('XML argument valueClass casing', () => { it('emits lowercase valueClass="string" not uppercase "String"', () => { - const result = server.call('provar.testcase.generate', { + const result = server.call('provar_testcase_generate', { test_case_name: 'ValueClass Test', steps: [ { @@ -395,7 +395,7 @@ describe('provar.testcase.generate', () => { describe('target_uri — non-SF page object (ui:) nesting', () => { it('wraps steps in UiWithScreen when target_uri uses ?pageId= 
format', () => { - const result = server.call('provar.testcase.generate', { + const result = server.call('provar_testcase_generate', { test_case_name: 'Non-SF Login', steps: [{ api_id: 'UiDoAction', name: 'Enter username', attributes: { field: 'username' } }], target_uri: 'ui:pageobject:target?pageId=pageobjects.LoginPage', @@ -413,7 +413,7 @@ describe('provar.testcase.generate', () => { }); it('substeps clause uses testItemId=2, inner steps start at testItemId=3', () => { - const result = server.call('provar.testcase.generate', { + const result = server.call('provar_testcase_generate', { test_case_name: 'Non-SF Multi', steps: [ { api_id: 'UiDoAction', name: 'Step A', attributes: {} }, @@ -432,7 +432,7 @@ describe('provar.testcase.generate', () => { }); it('uses flat structure when target_uri starts with sf:', () => { - const result = server.call('provar.testcase.generate', { + const result = server.call('provar_testcase_generate', { test_case_name: 'SF Target', steps: [{ api_id: 'UiConnect', name: 'Connect', attributes: {} }], target_uri: 'sf:ui:target:Salesforce__Standard__Account', @@ -447,7 +447,7 @@ describe('provar.testcase.generate', () => { }); it('uses flat structure when target_uri is omitted', () => { - const result = server.call('provar.testcase.generate', { + const result = server.call('provar_testcase_generate', { test_case_name: 'No URI', steps: [{ api_id: 'UiConnect', name: 'Connect', attributes: {} }], dry_run: true, @@ -462,7 +462,7 @@ describe('provar.testcase.generate', () => { describe('D2 — uiTarget / uiLocator argument types', () => { it('emits class="uiTarget" uri="..." 
for the target argument', () => { - const result = server.call('provar.testcase.generate', { + const result = server.call('provar_testcase_generate', { test_case_name: 'UI Target Test', steps: [ { @@ -479,11 +479,14 @@ describe('provar.testcase.generate', () => { const xml = parseText(result)['xml_content'] as string; assert.ok(xml.includes('class="uiTarget"'), 'Expected class="uiTarget"'); assert.ok(xml.includes('uri="sf:ui:target?'), 'Expected uri attribute with sf:ui:target value'); - assert.ok(!xml.includes('valueClass="string">sf:ui:target'), 'Must NOT emit uiTarget URI as a plain string value'); + assert.ok( + !xml.includes('valueClass="string">sf:ui:target'), + 'Must NOT emit uiTarget URI as a plain string value' + ); }); it('emits class="uiLocator" uri="..." for the locator argument', () => { - const result = server.call('provar.testcase.generate', { + const result = server.call('provar_testcase_generate', { test_case_name: 'UI Locator Test', steps: [ { @@ -500,11 +503,14 @@ describe('provar.testcase.generate', () => { const xml = parseText(result)['xml_content'] as string; assert.ok(xml.includes('class="uiLocator"'), 'Expected class="uiLocator"'); assert.ok(xml.includes('uri="sf:ui:locator:'), 'Expected uri attribute with locator value'); - assert.ok(!xml.includes('valueClass="string">sf:ui:locator'), 'Must NOT emit locator URI as a plain string value'); + assert.ok( + !xml.includes('valueClass="string">sf:ui:locator'), + 'Must NOT emit locator URI as a plain string value' + ); }); it('uiTarget also applies inside UiWithScreen wrapper when target_uri is non-SF', () => { - const result = server.call('provar.testcase.generate', { + const result = server.call('provar_testcase_generate', { test_case_name: 'Non-SF With Target', steps: [], target_uri: 'ui:pageobject:target?pageId=pageobjects.LoginPage', @@ -515,13 +521,16 @@ describe('provar.testcase.generate', () => { const xml = parseText(result)['xml_content'] as string; 
assert.ok(xml.includes('class="uiTarget"'), 'Wrapper UiWithScreen target should use uiTarget class'); - assert.ok(xml.includes('uri="ui:pageobject:target?pageId=pageobjects.LoginPage"'), 'URI should appear as attribute'); + assert.ok( + xml.includes('uri="ui:pageobject:target?pageId=pageobjects.LoginPage"'), + 'URI should appear as attribute' + ); }); }); describe('D3 — SetValues / AssertValues use valueList/namedValues structure', () => { it('SetValues emits with ', () => { - const result = server.call('provar.testcase.generate', { + const result = server.call('provar_testcase_generate', { test_case_name: 'SetValues Test', steps: [ { @@ -542,14 +551,11 @@ describe('provar.testcase.generate', () => { assert.ok(xml.includes(''), 'Expected namedValue for testCaseName'); assert.ok(xml.includes(''), 'Expected namedValue for testType'); assert.ok(xml.includes(''), 'Expected argument id="values"'); - assert.ok( - !xml.includes('testCaseName|TC_New'), - 'Must NOT emit pipe-delimited string for SetValues' - ); + assert.ok(!xml.includes('testCaseName|TC_New'), 'Must NOT emit pipe-delimited string for SetValues'); }); it('AssertValues uses flat argument structure (not valueList)', () => { - const result = server.call('provar.testcase.generate', { + const result = server.call('provar_testcase_generate', { test_case_name: 'AssertValues Test', steps: [ { @@ -570,11 +576,9 @@ describe('provar.testcase.generate', () => { }); it('non-SetValues steps still use flat argument structure', () => { - const result = server.call('provar.testcase.generate', { + const result = server.call('provar_testcase_generate', { test_case_name: 'Flat Args Test', - steps: [ - { api_id: 'ApexCreateObject', name: 'Create record', attributes: { objectApiName: 'Opportunity' } }, - ], + steps: [{ api_id: 'ApexCreateObject', name: 'Create record', attributes: { objectApiName: 'Opportunity' } }], dry_run: true, overwrite: false, validate_after_edit: false, @@ -588,7 +592,7 @@ 
describe('provar.testcase.generate', () => { describe('D4 — Variable references use class="variable" with elements', () => { it('{VarName} emits class="variable" ', () => { - const result = server.call('provar.testcase.generate', { + const result = server.call('provar_testcase_generate', { test_case_name: 'Variable Ref Test', steps: [ { @@ -609,7 +613,7 @@ describe('provar.testcase.generate', () => { }); it('{Obj.Field} dotted path emits two elements', () => { - const result = server.call('provar.testcase.generate', { + const result = server.call('provar_testcase_generate', { test_case_name: 'Dotted Variable Test', steps: [ { @@ -629,7 +633,7 @@ describe('provar.testcase.generate', () => { }); it('variable reference also works inside SetValues namedValues', () => { - const result = server.call('provar.testcase.generate', { + const result = server.call('provar_testcase_generate', { test_case_name: 'SetValues With Var', steps: [ { @@ -650,7 +654,7 @@ describe('provar.testcase.generate', () => { }); it('plain string values without braces are not treated as variable references', () => { - const result = server.call('provar.testcase.generate', { + const result = server.call('provar_testcase_generate', { test_case_name: 'No Var Test', steps: [{ api_id: 'ApexCreateObject', name: 'Create', attributes: { Name: 'Literal Name' } }], dry_run: true, @@ -666,7 +670,7 @@ describe('provar.testcase.generate', () => { describe('D7 — Cleanup warning for ApexDeleteObject', () => { it('includes cleanup warning when ApexDeleteObject is in the step list', () => { - const result = server.call('provar.testcase.generate', { + const result = server.call('provar_testcase_generate', { test_case_name: 'Create and Delete', steps: [ { api_id: 'ApexCreateObject', name: 'Create record', attributes: { objectApiName: 'Account' } }, @@ -688,7 +692,7 @@ describe('provar.testcase.generate', () => { }); it('does NOT warn when no ApexDeleteObject steps are present', () => { - const result = 
server.call('provar.testcase.generate', { + const result = server.call('provar_testcase_generate', { test_case_name: 'No Cleanup', steps: [{ api_id: 'ApexCreateObject', name: 'Create', attributes: {} }], dry_run: true, @@ -706,7 +710,7 @@ describe('provar.testcase.generate', () => { describe('validate_after_edit', () => { it('includes validation field when validate_after_edit=true (default)', () => { - const result = server.call('provar.testcase.generate', { + const result = server.call('provar_testcase_generate', { test_case_name: 'Validated', steps: [{ api_id: 'UiConnect', name: 'Connect', attributes: {} }], dry_run: true, @@ -720,7 +724,7 @@ describe('provar.testcase.generate', () => { }); it('omits validation field when validate_after_edit=false', () => { - const result = server.call('provar.testcase.generate', { + const result = server.call('provar_testcase_generate', { test_case_name: 'Skip Validation', steps: [], dry_run: true, diff --git a/test/unit/mcp/testCaseStepTools.test.ts b/test/unit/mcp/testCaseStepTools.test.ts index af46cfa7..fe85d803 100644 --- a/test/unit/mcp/testCaseStepTools.test.ts +++ b/test/unit/mcp/testCaseStepTools.test.ts @@ -74,12 +74,12 @@ afterEach(() => { fs.rmSync(tmpDir, { recursive: true, force: true }); }); -// ── provar.testcase.step.edit ────────────────────────────────────────────────── +// ── provar_testcase_step_edit ────────────────────────────────────────────────── -describe('provar.testcase.step.edit', () => { +describe('provar_testcase_step_edit', () => { // remove happy path it('mode=remove removes the target step and leaves file valid', () => { - const result = server.call('provar.testcase.step.edit', { + const result = server.call('provar_testcase_step_edit', { test_case_path: tcPath, mode: 'remove', test_item_id: '2', @@ -101,7 +101,7 @@ describe('provar.testcase.step.edit', () => { // add happy path — after anchor it('mode=add inserts new step after anchor by default', () => { - const result = 
server.call('provar.testcase.step.edit', { + const result = server.call('provar_testcase_step_edit', { test_case_path: tcPath, mode: 'add', test_item_id: '1', @@ -127,7 +127,7 @@ describe('provar.testcase.step.edit', () => { // add before anchor it('mode=add with position=before inserts before anchor', () => { - const result = server.call('provar.testcase.step.edit', { + const result = server.call('provar_testcase_step_edit', { test_case_path: tcPath, mode: 'add', test_item_id: '2', @@ -145,7 +145,7 @@ describe('provar.testcase.step.edit', () => { // STEP_NOT_FOUND — remove it('mode=remove returns STEP_NOT_FOUND with all IDs when testItemId missing', () => { - const result = server.call('provar.testcase.step.edit', { + const result = server.call('provar_testcase_step_edit', { test_case_path: tcPath, mode: 'remove', test_item_id: '999', @@ -167,7 +167,7 @@ describe('provar.testcase.step.edit', () => { // STEP_NOT_FOUND — add it('mode=add returns STEP_NOT_FOUND with all IDs when anchor missing', () => { - const result = server.call('provar.testcase.step.edit', { + const result = server.call('provar_testcase_step_edit', { test_case_path: tcPath, mode: 'add', test_item_id: '999', @@ -181,7 +181,7 @@ describe('provar.testcase.step.edit', () => { // INVALID_STEP_XML — step_xml contains no element it('mode=add returns INVALID_STEP_XML when step_xml contains no element', () => { - const result = server.call('provar.testcase.step.edit', { + const result = server.call('provar_testcase_step_edit', { test_case_path: tcPath, mode: 'add', test_item_id: '1', @@ -203,7 +203,7 @@ describe('provar.testcase.step.edit', () => { '' + ''; - const result = server.call('provar.testcase.step.edit', { + const result = server.call('provar_testcase_step_edit', { test_case_path: tcPath, mode: 'add', test_item_id: '1', @@ -218,7 +218,7 @@ describe('provar.testcase.step.edit', () => { // mode=add with missing step_xml it('mode=add returns MISSING_INPUT when step_xml is absent', () => { - const 
result = server.call('provar.testcase.step.edit', { + const result = server.call('provar_testcase_step_edit', { test_case_path: tcPath, mode: 'add', test_item_id: '1', @@ -241,7 +241,7 @@ describe('provar.testcase.step.edit', () => { const brokenStepXml = ''; - const result = server.call('provar.testcase.step.edit', { + const result = server.call('provar_testcase_step_edit', { test_case_path: tcPath, mode: 'add', test_item_id: '1', @@ -265,7 +265,7 @@ describe('provar.testcase.step.edit', () => { registerAllTestCaseStepTools(restrictedServer as never, { allowedPaths: [path.join(tmpDir, 'allowed')] }); fs.mkdirSync(path.join(tmpDir, 'allowed'), { recursive: true }); - const result = restrictedServer.call('provar.testcase.step.edit', { + const result = restrictedServer.call('provar_testcase_step_edit', { test_case_path: tcPath, // tcPath is in tmpDir root, not in allowed subdir mode: 'remove', test_item_id: '1', @@ -281,7 +281,7 @@ describe('provar.testcase.step.edit', () => { // validate_after_edit=true with valid result includes validation in response it('returns validation result in response when validate_after_edit=true and edit is valid', () => { - const result = server.call('provar.testcase.step.edit', { + const result = server.call('provar_testcase_step_edit', { test_case_path: tcPath, mode: 'remove', test_item_id: '2', @@ -299,7 +299,7 @@ describe('provar.testcase.step.edit', () => { // FILE_NOT_FOUND it('returns FILE_NOT_FOUND when test case does not exist', () => { const missing = path.join(tmpDir, 'nonexistent.testcase'); - const result = server.call('provar.testcase.step.edit', { + const result = server.call('provar_testcase_step_edit', { test_case_path: missing, mode: 'remove', test_item_id: '1', diff --git a/test/unit/mcp/testPlanTools.test.ts b/test/unit/mcp/testPlanTools.test.ts index 50021882..e72eb7cd 100644 --- a/test/unit/mcp/testPlanTools.test.ts +++ b/test/unit/mcp/testPlanTools.test.ts @@ -96,14 +96,14 @@ afterEach(() => { fs.rmSync(tmpDir, { 
recursive: true, force: true }); }); -// ── provar.testplan.create ──────────────────────────────────────────────────── +// ── provar_testplan_create ──────────────────────────────────────────────────── -describe('provar.testplan.create', () => { +describe('provar_testplan_create', () => { describe('happy path', () => { it('creates plan directory and .planitem, returns expected fields', () => { makeProject(projectDir); - const result = server.call('provar.testplan.create', { + const result = server.call('provar_testplan_create', { project_path: projectDir, plan_name: 'MyNewPlan', overwrite: false, @@ -128,7 +128,7 @@ describe('provar.testplan.create', () => { it('response includes next_steps guidance', () => { makeProject(projectDir); - const result = server.call('provar.testplan.create', { + const result = server.call('provar_testplan_create', { project_path: projectDir, plan_name: 'GuidedPlan', overwrite: false, @@ -144,7 +144,7 @@ describe('provar.testplan.create', () => { it('returns created=false and does not write to disk', () => { makeProject(projectDir); - const result = server.call('provar.testplan.create', { + const result = server.call('provar_testplan_create', { project_path: projectDir, plan_name: 'DryPlan', overwrite: false, @@ -166,7 +166,7 @@ describe('provar.testplan.create', () => { it('returns NOT_A_PROJECT when .testproject is missing', () => { fs.mkdirSync(projectDir, { recursive: true }); - const result = server.call('provar.testplan.create', { + const result = server.call('provar_testplan_create', { project_path: projectDir, plan_name: 'MyPlan', overwrite: false, @@ -181,7 +181,7 @@ describe('provar.testplan.create', () => { makeProject(projectDir); makePlan(projectDir, 'ExistingPlan'); - const result = server.call('provar.testplan.create', { + const result = server.call('provar_testplan_create', { project_path: projectDir, plan_name: 'ExistingPlan', overwrite: false, @@ -196,7 +196,7 @@ describe('provar.testplan.create', () => { 
makeProject(projectDir); makePlan(projectDir, 'ExistingPlan'); - const result = server.call('provar.testplan.create', { + const result = server.call('provar_testplan_create', { project_path: projectDir, plan_name: 'ExistingPlan', overwrite: true, @@ -210,7 +210,7 @@ describe('provar.testplan.create', () => { it('returns INVALID_PLAN_NAME for dot-segment plan_name (..)', () => { makeProject(projectDir); - const result = server.call('provar.testplan.create', { + const result = server.call('provar_testplan_create', { project_path: projectDir, plan_name: '..', overwrite: false, @@ -224,7 +224,7 @@ describe('provar.testplan.create', () => { it('returns INVALID_PLAN_NAME for plan_name with path separators', () => { makeProject(projectDir); - const result = server.call('provar.testplan.create', { + const result = server.call('provar_testplan_create', { project_path: projectDir, plan_name: 'sub/plan', overwrite: false, @@ -239,7 +239,7 @@ describe('provar.testplan.create', () => { const strictServer = new MockMcpServer(); registerAllTestPlanTools(strictServer as never, { allowedPaths: [tmpDir] }); - const result = strictServer.call('provar.testplan.create', { + const result = strictServer.call('provar_testplan_create', { project_path: path.join(os.tmpdir(), 'outside-project'), plan_name: 'MyPlan', overwrite: false, @@ -253,9 +253,9 @@ describe('provar.testplan.create', () => { }); }); -// ── provar.testplan.add-instance ─────────────────────────────────────────────── +// ── provar_testplan_add-instance ─────────────────────────────────────────────── -describe('provar.testplan.add-instance', () => { +describe('provar_testplan_add-instance', () => { describe('happy path', () => { it('writes a .testinstance file and returns expected fields', () => { makeProject(projectDir); @@ -267,7 +267,7 @@ describe('provar.testplan.add-instance', () => { // Create .planitem in suite dir too (not strictly required by the tool, but realistic) fs.writeFileSync(path.join(suiteDir, 
'.planitem'), '', 'utf-8'); - const result = server.call('provar.testplan.add-instance', { + const result = server.call('provar_testplan_add-instance', { project_path: projectDir, test_case_path: 'tests/MyTest.testcase', plan_name: 'MyPlan', @@ -293,7 +293,7 @@ describe('provar.testplan.add-instance', () => { makeTestCase(path.join(projectDir, 'tests', 'Root.testcase')); makePlan(projectDir, 'MyPlan'); - const result = server.call('provar.testplan.add-instance', { + const result = server.call('provar_testplan_add-instance', { project_path: projectDir, test_case_path: 'tests/Root.testcase', plan_name: 'MyPlan', @@ -313,7 +313,7 @@ describe('provar.testplan.add-instance', () => { makeTestCase(path.join(projectDir, 'tests', 'MyTest.testcase')); makePlan(projectDir, 'MyPlan'); - const result = server.call('provar.testplan.add-instance', { + const result = server.call('provar_testplan_add-instance', { project_path: projectDir, test_case_path: 'tests/MyTest.testcase', plan_name: 'MyPlan', @@ -336,7 +336,7 @@ describe('provar.testplan.add-instance', () => { fs.mkdirSync(projectDir, { recursive: true }); // No .testproject written - const result = server.call('provar.testplan.add-instance', { + const result = server.call('provar_testplan_add-instance', { project_path: projectDir, test_case_path: 'tests/MyTest.testcase', plan_name: 'MyPlan', @@ -353,7 +353,7 @@ describe('provar.testplan.add-instance', () => { makePlan(projectDir, 'MyPlan'); // No testcase file created - const result = server.call('provar.testplan.add-instance', { + const result = server.call('provar_testplan_add-instance', { project_path: projectDir, test_case_path: 'tests/Missing.testcase', plan_name: 'MyPlan', @@ -371,7 +371,7 @@ describe('provar.testplan.add-instance', () => { fs.mkdirSync(path.join(projectDir, 'tests'), { recursive: true }); fs.writeFileSync(path.join(projectDir, 'tests', 'MyTest.txt'), 'content', 'utf-8'); - const result = server.call('provar.testplan.add-instance', { + const result = 
server.call('provar_testplan_add-instance', { project_path: projectDir, test_case_path: 'tests/MyTest.txt', plan_name: 'MyPlan', @@ -389,7 +389,7 @@ describe('provar.testplan.add-instance', () => { makePlan(projectDir, 'MyPlan'); // Do NOT create the suite dir - const result = server.call('provar.testplan.add-instance', { + const result = server.call('provar_testplan_add-instance', { project_path: projectDir, test_case_path: 'tests/MyTest.testcase', plan_name: 'MyPlan', @@ -409,7 +409,7 @@ describe('provar.testplan.add-instance', () => { // Pre-create the instance file fs.writeFileSync(path.join(projectDir, 'plans', 'MyPlan', 'MyTest.testinstance'), 'old', 'utf-8'); - const result = server.call('provar.testplan.add-instance', { + const result = server.call('provar_testplan_add-instance', { project_path: projectDir, test_case_path: 'tests/MyTest.testcase', plan_name: 'MyPlan', @@ -427,7 +427,7 @@ describe('provar.testplan.add-instance', () => { makePlan(projectDir, 'MyPlan'); fs.writeFileSync(path.join(projectDir, 'plans', 'MyPlan', 'MyTest.testinstance'), 'old', 'utf-8'); - const result = server.call('provar.testplan.add-instance', { + const result = server.call('provar_testplan_add-instance', { project_path: projectDir, test_case_path: 'tests/MyTest.testcase', plan_name: 'MyPlan', @@ -446,7 +446,7 @@ describe('provar.testplan.add-instance', () => { makeTestCase(path.join(projectDir, 'tests', 'TC.testcase'), 'my-registry-id'); makePlan(projectDir, 'P'); - const result = server.call('provar.testplan.add-instance', { + const result = server.call('provar_testplan_add-instance', { project_path: projectDir, test_case_path: 'tests/TC.testcase', plan_name: 'P', @@ -468,7 +468,7 @@ describe('provar.testplan.add-instance', () => { ); makePlan(projectDir, 'P'); - const result = server.call('provar.testplan.add-instance', { + const result = server.call('provar_testplan_add-instance', { project_path: projectDir, test_case_path: 'tests/TC.testcase', plan_name: 'P', @@ -490,7 
+490,7 @@ describe('provar.testplan.add-instance', () => { ); makePlan(projectDir, 'P'); - const result = server.call('provar.testplan.add-instance', { + const result = server.call('provar_testplan_add-instance', { project_path: projectDir, test_case_path: 'tests/TC.testcase', plan_name: 'P', @@ -516,7 +516,7 @@ describe('provar.testplan.add-instance', () => { makePlan(projectDir, 'P'); // Pass path with backslashes (Windows-style) - const result = server.call('provar.testplan.add-instance', { + const result = server.call('provar_testplan_add-instance', { project_path: projectDir, test_case_path: 'tests\\SubFolder\\TC.testcase', plan_name: 'P', @@ -531,15 +531,15 @@ describe('provar.testplan.add-instance', () => { }); }); -// ── provar.testplan.create-suite ─────────────────────────────────────────────── +// ── provar_testplan_create-suite ─────────────────────────────────────────────── -describe('provar.testplan.create-suite', () => { +describe('provar_testplan_create-suite', () => { describe('happy path', () => { it('creates suite directory and .planitem, returns expected fields', () => { makeProject(projectDir); makePlan(projectDir, 'MyPlan'); - const result = server.call('provar.testplan.create-suite', { + const result = server.call('provar_testplan_create-suite', { project_path: projectDir, plan_name: 'MyPlan', suite_name: 'MySuite', @@ -564,7 +564,7 @@ describe('provar.testplan.create-suite', () => { const parentDir = path.join(projectDir, 'plans', 'MyPlan', 'Parent'); fs.mkdirSync(parentDir, { recursive: true }); - const result = server.call('provar.testplan.create-suite', { + const result = server.call('provar_testplan_create-suite', { project_path: projectDir, plan_name: 'MyPlan', suite_name: 'Child', @@ -583,7 +583,7 @@ describe('provar.testplan.create-suite', () => { makeProject(projectDir); makePlan(projectDir, 'MyPlan'); - const result = server.call('provar.testplan.create-suite', { + const result = server.call('provar_testplan_create-suite', { 
project_path: projectDir, plan_name: 'MyPlan', suite_name: 'DryRunSuite', @@ -604,7 +604,7 @@ describe('provar.testplan.create-suite', () => { it('returns NOT_A_PROJECT when .testproject is missing', () => { fs.mkdirSync(projectDir, { recursive: true }); - const result = server.call('provar.testplan.create-suite', { + const result = server.call('provar_testplan_create-suite', { project_path: projectDir, plan_name: 'MyPlan', suite_name: 'MySuite', @@ -619,7 +619,7 @@ describe('provar.testplan.create-suite', () => { makeProject(projectDir); // No plan created - const result = server.call('provar.testplan.create-suite', { + const result = server.call('provar_testplan_create-suite', { project_path: projectDir, plan_name: 'NonExistentPlan', suite_name: 'MySuite', @@ -635,7 +635,7 @@ describe('provar.testplan.create-suite', () => { // Create plan dir but no .planitem fs.mkdirSync(path.join(projectDir, 'plans', 'MyPlan'), { recursive: true }); - const result = server.call('provar.testplan.create-suite', { + const result = server.call('provar_testplan_create-suite', { project_path: projectDir, plan_name: 'MyPlan', suite_name: 'MySuite', @@ -652,7 +652,7 @@ describe('provar.testplan.create-suite', () => { // Pre-create the suite dir fs.mkdirSync(path.join(projectDir, 'plans', 'MyPlan', 'AlreadyExists'), { recursive: true }); - const result = server.call('provar.testplan.create-suite', { + const result = server.call('provar_testplan_create-suite', { project_path: projectDir, plan_name: 'MyPlan', suite_name: 'AlreadyExists', @@ -665,9 +665,9 @@ describe('provar.testplan.create-suite', () => { }); }); -// ── provar.testplan.remove-instance ─────────────────────────────────────────── +// ── provar_testplan_remove-instance ─────────────────────────────────────────── -describe('provar.testplan.remove-instance', () => { +describe('provar_testplan_remove-instance', () => { describe('happy path', () => { it('removes the .testinstance file and returns expected fields', () => { 
makeProject(projectDir); @@ -675,7 +675,7 @@ describe('provar.testplan.remove-instance', () => { const instancePath = path.join(projectDir, 'plans', 'MyPlan', 'MyTest.testinstance'); fs.writeFileSync(instancePath, '', 'utf-8'); - const result = server.call('provar.testplan.remove-instance', { + const result = server.call('provar_testplan_remove-instance', { project_path: projectDir, instance_path: 'plans/MyPlan/MyTest.testinstance', dry_run: false, @@ -696,7 +696,7 @@ describe('provar.testplan.remove-instance', () => { const instancePath = path.join(projectDir, 'plans', 'MyPlan', 'MyTest.testinstance'); fs.writeFileSync(instancePath, '', 'utf-8'); - const result = server.call('provar.testplan.remove-instance', { + const result = server.call('provar_testplan_remove-instance', { project_path: projectDir, instance_path: 'plans/MyPlan/MyTest.testinstance', dry_run: true, @@ -714,7 +714,7 @@ describe('provar.testplan.remove-instance', () => { it('returns INVALID_PATH when instance_path does not end with .testinstance', () => { makeProject(projectDir); - const result = server.call('provar.testplan.remove-instance', { + const result = server.call('provar_testplan_remove-instance', { project_path: projectDir, instance_path: 'plans/MyPlan/MyTest.testcase', dry_run: false, @@ -727,7 +727,7 @@ describe('provar.testplan.remove-instance', () => { it('returns FILE_NOT_FOUND when instance file does not exist', () => { makeProject(projectDir); - const result = server.call('provar.testplan.remove-instance', { + const result = server.call('provar_testplan_remove-instance', { project_path: projectDir, instance_path: 'plans/MyPlan/Missing.testinstance', dry_run: false, @@ -749,7 +749,7 @@ describe('registerAllTestPlanTools', () => { makeProject(projectDir); assert.doesNotThrow(() => { - freshServer.call('provar.testplan.create', { + freshServer.call('provar_testplan_create', { project_path: projectDir, plan_name: 'P', overwrite: false, @@ -757,7 +757,7 @@ 
describe('registerAllTestPlanTools', () => { }); }); assert.doesNotThrow(() => { - freshServer.call('provar.testplan.add-instance', { + freshServer.call('provar_testplan_add-instance', { project_path: projectDir, test_case_path: 'tests/X.testcase', plan_name: 'P', @@ -766,7 +766,7 @@ describe('registerAllTestPlanTools', () => { }); }); assert.doesNotThrow(() => { - freshServer.call('provar.testplan.create-suite', { + freshServer.call('provar_testplan_create-suite', { project_path: projectDir, plan_name: 'P', suite_name: 'S', @@ -774,7 +774,7 @@ describe('registerAllTestPlanTools', () => { }); }); assert.doesNotThrow(() => { - freshServer.call('provar.testplan.remove-instance', { + freshServer.call('provar_testplan_remove-instance', { project_path: projectDir, instance_path: 'plans/P/X.testinstance', dry_run: true, diff --git a/test/unit/mcp/testPlanValidate.test.ts b/test/unit/mcp/testPlanValidate.test.ts index e255f3e4..06db102a 100644 --- a/test/unit/mcp/testPlanValidate.test.ts +++ b/test/unit/mcp/testPlanValidate.test.ts @@ -40,7 +40,7 @@ function isError(result: unknown): boolean { } function hasViolation(result: unknown, ruleId: string): boolean { - const violations = (parseText(result)['violations'] as Array<{ rule_id: string }>); + const violations = parseText(result)['violations'] as Array<{ rule_id: string }>; return violations.some((v) => v.rule_id === ruleId); } @@ -49,8 +49,8 @@ function hasViolation(result: unknown, ruleId: string): boolean { const G = { tc1: '550e8400-e29b-41d4-a716-446655440001', tc2: '550e8400-e29b-41d4-a716-446655440002', - s1: '550e8400-e29b-41d4-a716-446655440011', - s2: '550e8400-e29b-41d4-a716-446655440012', + s1: '550e8400-e29b-41d4-a716-446655440011', + s2: '550e8400-e29b-41d4-a716-446655440012', }; function makeXml(tcGuid: string, stepGuid: string, id: string): string { @@ -64,7 +64,7 @@ function makeXml(tcGuid: string, stepGuid: string, id: string): string { ].join('\n'); } -const TC_LOGIN = { name: 'LoginTest.testcase', 
xml_content: makeXml(G.tc1, G.s1, 'tc-001') }; +const TC_LOGIN = { name: 'LoginTest.testcase', xml_content: makeXml(G.tc1, G.s1, 'tc-001') }; const TC_LOGOUT = { name: 'LogoutTest.testcase', xml_content: makeXml(G.tc2, G.s2, 'tc-002') }; // A suite with one test case (avoids SUITE-EMPTY-001 inside plan tests) @@ -94,12 +94,12 @@ beforeEach(() => { registerTestPlanValidate(server as never); }); -// ── provar.testplan.validate ────────────────────────────────────────────────── +// ── provar_testplan_validate ────────────────────────────────────────────────── -describe('provar.testplan.validate', () => { +describe('provar_testplan_validate', () => { describe('happy path', () => { it('returns a result (not an error) for a valid non-empty plan', () => { - const result = server.call('provar.testplan.validate', { + const result = server.call('provar_testplan_validate', { plan_name: 'MyPlan', test_suites: [SUITE_A, SUITE_B], metadata: fullMeta(), @@ -112,7 +112,7 @@ describe('provar.testplan.validate', () => { }); it('quality_score is between 0 and 100', () => { - const result = server.call('provar.testplan.validate', { + const result = server.call('provar_testplan_validate', { plan_name: 'MyPlan', test_suites: [SUITE_A], metadata: fullMeta(), @@ -123,7 +123,7 @@ describe('provar.testplan.validate', () => { }); it('returns requestId in the response', () => { - const result = server.call('provar.testplan.validate', { + const result = server.call('provar_testplan_validate', { plan_name: 'MyPlan', test_suites: [SUITE_A], }); @@ -133,7 +133,7 @@ describe('provar.testplan.validate', () => { }); it('includes a summary with total_test_cases and total_violations', () => { - const result = server.call('provar.testplan.validate', { + const result = server.call('provar_testplan_validate', { plan_name: 'MyPlan', test_suites: [SUITE_A, SUITE_B], metadata: fullMeta(), @@ -147,14 +147,14 @@ describe('provar.testplan.validate', () => { describe('PLAN-EMPTY-001 — empty plan', () => { 
it('triggers PLAN-EMPTY-001 when plan has no suites and no test_cases', () => { - const result = server.call('provar.testplan.validate', { plan_name: 'EmptyPlan' }); + const result = server.call('provar_testplan_validate', { plan_name: 'EmptyPlan' }); assert.equal(isError(result), false); assert.ok(hasViolation(result, 'PLAN-EMPTY-001'), 'Expected PLAN-EMPTY-001'); }); it('triggers PLAN-EMPTY-001 when test_suites is an empty array', () => { - const result = server.call('provar.testplan.validate', { + const result = server.call('provar_testplan_validate', { plan_name: 'EmptyPlan', test_suites: [], }); @@ -163,7 +163,7 @@ describe('provar.testplan.validate', () => { }); it('does NOT trigger PLAN-EMPTY-001 when plan has suites', () => { - const result = server.call('provar.testplan.validate', { + const result = server.call('provar_testplan_validate', { plan_name: 'NonEmptyPlan', test_suites: [SUITE_A], }); @@ -174,7 +174,7 @@ describe('provar.testplan.validate', () => { describe('PLAN-DUP-001 — duplicate suite names', () => { it('triggers PLAN-DUP-001 when two suites share the same name', () => { - const result = server.call('provar.testplan.validate', { + const result = server.call('provar_testplan_validate', { plan_name: 'DupPlan', test_suites: [ { name: 'AccountSuite', test_cases: [TC_LOGIN] }, @@ -186,7 +186,7 @@ describe('provar.testplan.validate', () => { }); it('does NOT trigger PLAN-DUP-001 for distinct suite names', () => { - const result = server.call('provar.testplan.validate', { + const result = server.call('provar_testplan_validate', { plan_name: 'GoodPlan', test_suites: [SUITE_A, SUITE_B], }); @@ -197,7 +197,7 @@ describe('provar.testplan.validate', () => { describe('PLAN-SIZE-001 — oversized plan (>20 suites)', () => { it('triggers PLAN-SIZE-001 when test_suite_count exceeds 20', () => { - const result = server.call('provar.testplan.validate', { + const result = server.call('provar_testplan_validate', { plan_name: 'HugePlan', test_suites: [SUITE_A], 
test_suite_count: 21, @@ -207,7 +207,7 @@ describe('provar.testplan.validate', () => { }); it('does NOT trigger PLAN-SIZE-001 when test_suite_count is exactly 20', () => { - const result = server.call('provar.testplan.validate', { + const result = server.call('provar_testplan_validate', { plan_name: 'BoundaryPlan', test_suites: [SUITE_A], test_suite_count: 20, @@ -222,7 +222,7 @@ describe('provar.testplan.validate', () => { test_cases: [TC_LOGIN], })); - const result = server.call('provar.testplan.validate', { + const result = server.call('provar_testplan_validate', { plan_name: 'HugePlan', test_suites: suites, }); @@ -236,7 +236,7 @@ describe('provar.testplan.validate', () => { const meta = fullMeta(); delete meta['objectives']; - const result = server.call('provar.testplan.validate', { + const result = server.call('provar_testplan_validate', { plan_name: 'MetaPlan', test_suites: [SUITE_A], metadata: meta, @@ -249,7 +249,7 @@ describe('provar.testplan.validate', () => { const meta = fullMeta(); delete meta['in_scope']; - const result = server.call('provar.testplan.validate', { + const result = server.call('provar_testplan_validate', { plan_name: 'MetaPlan', test_suites: [SUITE_A], metadata: meta, @@ -262,7 +262,7 @@ describe('provar.testplan.validate', () => { const meta = fullMeta(); delete meta['environments']; - const result = server.call('provar.testplan.validate', { + const result = server.call('provar_testplan_validate', { plan_name: 'MetaPlan', test_suites: [SUITE_A], metadata: meta, @@ -272,7 +272,7 @@ describe('provar.testplan.validate', () => { }); it('does NOT trigger any PLAN-META-* when all metadata fields are provided', () => { - const result = server.call('provar.testplan.validate', { + const result = server.call('provar_testplan_validate', { plan_name: 'FullPlan', test_suites: [SUITE_A], metadata: fullMeta(), @@ -280,11 +280,15 @@ describe('provar.testplan.validate', () => { const violations = parseText(result)['violations'] as Array<{ rule_id: 
string }>; const metaViolations = violations.filter((v) => v.rule_id.startsWith('PLAN-META-')); - assert.equal(metaViolations.length, 0, `Unexpected PLAN-META violations: ${metaViolations.map((v) => v.rule_id).join(', ')}`); + assert.equal( + metaViolations.length, + 0, + `Unexpected PLAN-META violations: ${metaViolations.map((v) => v.rule_id).join(', ')}` + ); }); it('triggers all PLAN-META-* violations when no metadata is provided', () => { - const result = server.call('provar.testplan.validate', { + const result = server.call('provar_testplan_validate', { plan_name: 'NoMetaPlan', test_suites: [SUITE_A], }); @@ -293,20 +297,23 @@ describe('provar.testplan.validate', () => { const metaRuleIds = violations.filter((v) => v.rule_id.startsWith('PLAN-META-')).map((v) => v.rule_id); // At least objectives, in_scope, testing_methodology, acceptance_criteria, environments, // test_data_strategy, risks — 7 rules - assert.ok(metaRuleIds.length >= 7, `Expected >=7 PLAN-META violations, got ${metaRuleIds.length}: ${metaRuleIds.join(', ')}`); + assert.ok( + metaRuleIds.length >= 7, + `Expected >=7 PLAN-META violations, got ${metaRuleIds.length}: ${metaRuleIds.join(', ')}` + ); }); }); describe('xml_content alias — xml field accepted', () => { it('accepts xml field as alias for xml_content in test cases', () => { - const result = server.call('provar.testplan.validate', { + const result = server.call('provar_testplan_validate', { plan_name: 'AliasPlan', - test_suites: [{ - name: 'AccountSuite', - test_cases: [ - { name: 'LoginTest.testcase', xml: makeXml(G.tc1, G.s1, 'tc-001') }, - ], - }], + test_suites: [ + { + name: 'AccountSuite', + test_cases: [{ name: 'LoginTest.testcase', xml: makeXml(G.tc1, G.s1, 'tc-001') }], + }, + ], }); assert.equal(isError(result), false); @@ -316,7 +323,7 @@ describe('provar.testplan.validate', () => { describe('quality_threshold', () => { it('uses default threshold of 80 when not provided', () => { - const result = 
server.call('provar.testplan.validate', { + const result = server.call('provar_testplan_validate', { plan_name: 'ThresholdPlan', test_suites: [SUITE_A], }); @@ -325,7 +332,7 @@ describe('provar.testplan.validate', () => { }); it('accepts a custom quality_threshold', () => { - const result = server.call('provar.testplan.validate', { + const result = server.call('provar_testplan_validate', { plan_name: 'ThresholdPlan', test_suites: [SUITE_A], quality_threshold: 95, diff --git a/test/unit/mcp/testSuiteValidate.test.ts b/test/unit/mcp/testSuiteValidate.test.ts index df4c4913..335086c6 100644 --- a/test/unit/mcp/testSuiteValidate.test.ts +++ b/test/unit/mcp/testSuiteValidate.test.ts @@ -44,8 +44,8 @@ function isError(result: unknown): boolean { const G = { tc1: '550e8400-e29b-41d4-a716-446655440001', tc2: '550e8400-e29b-41d4-a716-446655440002', - s1: '550e8400-e29b-41d4-a716-446655440011', - s2: '550e8400-e29b-41d4-a716-446655440012', + s1: '550e8400-e29b-41d4-a716-446655440011', + s2: '550e8400-e29b-41d4-a716-446655440012', }; function makeXml(tcGuid: string, stepGuid: string, id: string): string { @@ -59,11 +59,11 @@ function makeXml(tcGuid: string, stepGuid: string, id: string): string { ].join('\n'); } -const TC_LOGIN = { name: 'LoginTest.testcase', xml_content: makeXml(G.tc1, G.s1, 'tc-001') }; +const TC_LOGIN = { name: 'LoginTest.testcase', xml_content: makeXml(G.tc1, G.s1, 'tc-001') }; const TC_LOGOUT = { name: 'LogoutTest.testcase', xml_content: makeXml(G.tc2, G.s2, 'tc-002') }; // Same test cases using the `xml` alias -const TC_LOGIN_ALIAS = { name: 'LoginTest.testcase', xml: makeXml(G.tc1, G.s1, 'tc-001') }; +const TC_LOGIN_ALIAS = { name: 'LoginTest.testcase', xml: makeXml(G.tc1, G.s1, 'tc-001') }; const TC_LOGOUT_ALIAS = { name: 'LogoutTest.testcase', xml: makeXml(G.tc2, G.s2, 'tc-002') }; // ── Test setup ───────────────────────────────────────────────────────────────── @@ -75,12 +75,12 @@ beforeEach(() => { registerTestSuiteValidate(server as never); }); 
-// ── provar.testsuite.validate ───────────────────────────────────────────────── +// ── provar_testsuite_validate ───────────────────────────────────────────────── -describe('provar.testsuite.validate', () => { +describe('provar_testsuite_validate', () => { describe('happy path', () => { it('returns a result (not an error) for a valid non-empty suite', () => { - const result = server.call('provar.testsuite.validate', { + const result = server.call('provar_testsuite_validate', { suite_name: 'AccountSuite', test_cases: [TC_LOGIN, TC_LOGOUT], }); @@ -92,7 +92,7 @@ describe('provar.testsuite.validate', () => { }); it('quality_score is between 0 and 100', () => { - const result = server.call('provar.testsuite.validate', { + const result = server.call('provar_testsuite_validate', { suite_name: 'AccountSuite', test_cases: [TC_LOGIN, TC_LOGOUT], }); @@ -103,7 +103,7 @@ describe('provar.testsuite.validate', () => { }); it('returns requestId in the response', () => { - const result = server.call('provar.testsuite.validate', { + const result = server.call('provar_testsuite_validate', { suite_name: 'MySuite', test_cases: [TC_LOGIN], }); @@ -113,7 +113,7 @@ describe('provar.testsuite.validate', () => { }); it('includes a summary object with totals', () => { - const result = server.call('provar.testsuite.validate', { + const result = server.call('provar_testsuite_validate', { suite_name: 'AccountSuite', test_cases: [TC_LOGIN, TC_LOGOUT], }); @@ -127,72 +127,81 @@ describe('provar.testsuite.validate', () => { describe('SUITE-EMPTY-001 — empty suite', () => { it('triggers SUITE-EMPTY-001 when suite has no test_cases and no child_suites', () => { - const result = server.call('provar.testsuite.validate', { + const result = server.call('provar_testsuite_validate', { suite_name: 'EmptySuite', }); assert.equal(isError(result), false); const body = parseText(result); const violations = body['violations'] as Array<{ rule_id: string }>; - assert.ok(violations.some((v) => v.rule_id === 
'SUITE-EMPTY-001'), 'Expected SUITE-EMPTY-001'); + assert.ok( + violations.some((v) => v.rule_id === 'SUITE-EMPTY-001'), + 'Expected SUITE-EMPTY-001' + ); }); it('triggers SUITE-EMPTY-001 when test_cases is an empty array', () => { - const result = server.call('provar.testsuite.validate', { + const result = server.call('provar_testsuite_validate', { suite_name: 'EmptySuite', test_cases: [], }); - const violations = (parseText(result)['violations'] as Array<{ rule_id: string }>); - assert.ok(violations.some((v) => v.rule_id === 'SUITE-EMPTY-001'), 'Expected SUITE-EMPTY-001'); + const violations = parseText(result)['violations'] as Array<{ rule_id: string }>; + assert.ok( + violations.some((v) => v.rule_id === 'SUITE-EMPTY-001'), + 'Expected SUITE-EMPTY-001' + ); }); it('does NOT trigger SUITE-EMPTY-001 when suite has test_cases', () => { - const result = server.call('provar.testsuite.validate', { + const result = server.call('provar_testsuite_validate', { suite_name: 'NonEmptySuite', test_cases: [TC_LOGIN], }); - const violations = (parseText(result)['violations'] as Array<{ rule_id: string }>); + const violations = parseText(result)['violations'] as Array<{ rule_id: string }>; assert.ok(!violations.some((v) => v.rule_id === 'SUITE-EMPTY-001'), 'Did not expect SUITE-EMPTY-001'); }); it('does NOT trigger SUITE-EMPTY-001 when suite has child_suites', () => { - const result = server.call('provar.testsuite.validate', { + const result = server.call('provar_testsuite_validate', { suite_name: 'ParentSuite', child_suites: [{ name: 'ChildSuite', test_cases: [TC_LOGIN] }], }); - const violations = (parseText(result)['violations'] as Array<{ rule_id: string }>); + const violations = parseText(result)['violations'] as Array<{ rule_id: string }>; assert.ok(!violations.some((v) => v.rule_id === 'SUITE-EMPTY-001'), 'Did not expect SUITE-EMPTY-001'); }); }); describe('SUITE-DUP-001 — duplicate test case names', () => { it('triggers SUITE-DUP-001 when two test cases share the same 
name', () => { - const result = server.call('provar.testsuite.validate', { + const result = server.call('provar_testsuite_validate', { suite_name: 'DupSuite', test_cases: [TC_LOGIN, { name: 'LoginTest.testcase', xml_content: makeXml(G.tc2, G.s2, 'tc-dup') }], }); - const violations = (parseText(result)['violations'] as Array<{ rule_id: string }>); - assert.ok(violations.some((v) => v.rule_id === 'SUITE-DUP-001'), 'Expected SUITE-DUP-001'); + const violations = parseText(result)['violations'] as Array<{ rule_id: string }>; + assert.ok( + violations.some((v) => v.rule_id === 'SUITE-DUP-001'), + 'Expected SUITE-DUP-001' + ); }); it('does NOT trigger SUITE-DUP-001 for distinct test case names', () => { - const result = server.call('provar.testsuite.validate', { + const result = server.call('provar_testsuite_validate', { suite_name: 'UniqSuite', test_cases: [TC_LOGIN, TC_LOGOUT], }); - const violations = (parseText(result)['violations'] as Array<{ rule_id: string }>); + const violations = parseText(result)['violations'] as Array<{ rule_id: string }>; assert.ok(!violations.some((v) => v.rule_id === 'SUITE-DUP-001'), 'Did not expect SUITE-DUP-001'); }); }); describe('SUITE-DUP-002 — duplicate child suite names', () => { it('triggers SUITE-DUP-002 when two child suites share the same name', () => { - const result = server.call('provar.testsuite.validate', { + const result = server.call('provar_testsuite_validate', { suite_name: 'ParentSuite', child_suites: [ { name: 'ChildA', test_cases: [TC_LOGIN] }, @@ -200,31 +209,37 @@ describe('provar.testsuite.validate', () => { ], }); - const violations = (parseText(result)['violations'] as Array<{ rule_id: string }>); - assert.ok(violations.some((v) => v.rule_id === 'SUITE-DUP-002'), 'Expected SUITE-DUP-002'); + const violations = parseText(result)['violations'] as Array<{ rule_id: string }>; + assert.ok( + violations.some((v) => v.rule_id === 'SUITE-DUP-002'), + 'Expected SUITE-DUP-002' + ); }); }); describe('SUITE-SIZE-001 — 
oversized suite (>75 test cases)', () => { it('triggers SUITE-SIZE-001 when test_case_count exceeds 75', () => { - const result = server.call('provar.testsuite.validate', { + const result = server.call('provar_testsuite_validate', { suite_name: 'HugeSuite', test_cases: [TC_LOGIN], test_case_count: 76, }); - const violations = (parseText(result)['violations'] as Array<{ rule_id: string }>); - assert.ok(violations.some((v) => v.rule_id === 'SUITE-SIZE-001'), 'Expected SUITE-SIZE-001'); + const violations = parseText(result)['violations'] as Array<{ rule_id: string }>; + assert.ok( + violations.some((v) => v.rule_id === 'SUITE-SIZE-001'), + 'Expected SUITE-SIZE-001' + ); }); it('does NOT trigger SUITE-SIZE-001 when test_case_count is exactly 75', () => { - const result = server.call('provar.testsuite.validate', { + const result = server.call('provar_testsuite_validate', { suite_name: 'BoundarySuite', test_cases: [TC_LOGIN], test_case_count: 75, }); - const violations = (parseText(result)['violations'] as Array<{ rule_id: string }>); + const violations = parseText(result)['violations'] as Array<{ rule_id: string }>; assert.ok(!violations.some((v) => v.rule_id === 'SUITE-SIZE-001'), 'Did not expect SUITE-SIZE-001 at exactly 75'); }); @@ -239,19 +254,22 @@ describe('provar.testsuite.validate', () => { ), })); - const result = server.call('provar.testsuite.validate', { + const result = server.call('provar_testsuite_validate', { suite_name: 'HugeSuite', test_cases: cases, }); - const violations = (parseText(result)['violations'] as Array<{ rule_id: string }>); - assert.ok(violations.some((v) => v.rule_id === 'SUITE-SIZE-001'), 'Expected SUITE-SIZE-001 from counted cases'); + const violations = parseText(result)['violations'] as Array<{ rule_id: string }>; + assert.ok( + violations.some((v) => v.rule_id === 'SUITE-SIZE-001'), + 'Expected SUITE-SIZE-001 from counted cases' + ); }); }); describe('xml_content alias — xml field accepted', () => { it('accepts xml field as alias 
for xml_content and validates correctly', () => { - const result = server.call('provar.testsuite.validate', { + const result = server.call('provar_testsuite_validate', { suite_name: 'AliasSuite', test_cases: [TC_LOGIN_ALIAS, TC_LOGOUT_ALIAS], }); @@ -261,13 +279,16 @@ describe('provar.testsuite.validate', () => { assert.ok((body['quality_score'] as number) >= 0); // Should have no empty-suite violation since TCs are present const violations = body['violations'] as Array<{ rule_id: string }>; - assert.ok(!violations.some((v) => v.rule_id === 'SUITE-EMPTY-001'), 'xml alias should be accepted as valid content'); + assert.ok( + !violations.some((v) => v.rule_id === 'SUITE-EMPTY-001'), + 'xml alias should be accepted as valid content' + ); }); }); describe('child suites', () => { it('validates nested child suites and includes them in test_suites', () => { - const result = server.call('provar.testsuite.validate', { + const result = server.call('provar_testsuite_validate', { suite_name: 'ParentSuite', child_suites: [ { name: 'ChildA', test_cases: [TC_LOGIN] }, @@ -283,25 +304,28 @@ describe('provar.testsuite.validate', () => { it('quality_score reflects violations in child suites', () => { // Parent with a child that is empty → SUITE-EMPTY-001 in child - const resultWithEmpty = server.call('provar.testsuite.validate', { + const resultWithEmpty = server.call('provar_testsuite_validate', { suite_name: 'ParentSuite', child_suites: [{ name: 'EmptyChild', test_cases: [] }], }); - const resultHealthy = server.call('provar.testsuite.validate', { + const resultHealthy = server.call('provar_testsuite_validate', { suite_name: 'ParentSuite', child_suites: [{ name: 'HealthyChild', test_cases: [TC_LOGIN] }], }); - const emptyScore = (parseText(resultWithEmpty) ['quality_score'] as number); - const healthyScore = (parseText(resultHealthy)['quality_score'] as number); - assert.ok(emptyScore <= healthyScore, `Empty-child score (${emptyScore}) should be <= healthy score 
(${healthyScore})`); + const emptyScore = parseText(resultWithEmpty)['quality_score'] as number; + const healthyScore = parseText(resultHealthy)['quality_score'] as number; + assert.ok( + emptyScore <= healthyScore, + `Empty-child score (${emptyScore}) should be <= healthy score (${healthyScore})` + ); }); }); describe('quality_threshold', () => { it('uses default threshold of 80 when not specified', () => { // Just verify no error and score is present - const result = server.call('provar.testsuite.validate', { + const result = server.call('provar_testsuite_validate', { suite_name: 'ThresholdDefault', test_cases: [TC_LOGIN], }); @@ -310,7 +334,7 @@ describe('provar.testsuite.validate', () => { }); it('accepts a custom quality_threshold', () => { - const result = server.call('provar.testsuite.validate', { + const result = server.call('provar_testsuite_validate', { suite_name: 'ThresholdCustom', test_cases: [TC_LOGIN], quality_threshold: 90, From d16ea2e255e1bfa05d3330ac181b8ec9ffc7ff4d Mon Sep 17 00:00:00 2001 From: Michael Dailey Date: Tue, 5 May 2026 15:01:13 -0500 Subject: [PATCH 2/2] PDX-0: feat(mcp): migrate all 41 tools from server.tool() to server.registerTool() with human-readable titles RCA: server.tool() is deprecated in MCP SDK v1.8+; tools lacked human-readable labels in the Claude Code /mcp picker, displaying raw underscore names instead of descriptive titles Fix: migrated all 41 tool registrations to server.registerTool(name, {title, description, inputSchema}, handler) with approved title strings; added registerTool() method to all 17 MockMcpServer test classes and the CapturingServer in testCaseValidate tests Co-Authored-By: Claude Sonnet 4.6 --- src/mcp/server.ts | 10 +- src/mcp/tools/antTools.ts | 319 +++++++++--------- src/mcp/tools/automationTools.ts | 201 ++++++----- src/mcp/tools/connectionTools.ts | 25 +- src/mcp/tools/defectTools.ts | 35 +- src/mcp/tools/nitroXTools.ts | 174 ++++++---- src/mcp/tools/pageObjectGenerate.ts | 84 ++--- 
src/mcp/tools/pageObjectValidate.ts | 20 +- src/mcp/tools/projectInspect.ts | 31 +- src/mcp/tools/projectValidateFromPath.ts | 125 +++---- src/mcp/tools/propertiesTools.ts | 89 ++--- src/mcp/tools/qualityHubApiTools.ts | 85 ++--- src/mcp/tools/qualityHubTools.ts | 103 +++--- src/mcp/tools/rcaTools.ts | 108 +++--- src/mcp/tools/testCaseGenerate.ts | 55 +-- src/mcp/tools/testCaseStepTools.ts | 73 ++-- src/mcp/tools/testCaseValidate.ts | 14 +- src/mcp/tools/testPlanTools.ts | 168 ++++----- src/mcp/tools/testPlanValidate.ts | 40 ++- src/mcp/tools/testSuiteValidate.ts | 44 +-- test/unit/mcp/antTools.test.ts | 4 + test/unit/mcp/automationTools.test.ts | 4 + test/unit/mcp/connectionTools.test.ts | 4 + test/unit/mcp/defectTools.test.ts | 4 + test/unit/mcp/nitroXTools.test.ts | 4 + test/unit/mcp/pageObjectGenerate.test.ts | 4 + test/unit/mcp/projectValidateFromPath.test.ts | 4 + test/unit/mcp/propertiesTools.test.ts | 4 + test/unit/mcp/qualityHubApiTools.test.ts | 4 + test/unit/mcp/qualityHubTools.test.ts | 4 + test/unit/mcp/rcaTools.test.ts | 4 + test/unit/mcp/testCaseGenerate.test.ts | 4 + test/unit/mcp/testCaseStepTools.test.ts | 4 + test/unit/mcp/testCaseValidate.test.ts | 5 + test/unit/mcp/testPlanTools.test.ts | 4 + test/unit/mcp/testPlanValidate.test.ts | 4 + test/unit/mcp/testSuiteValidate.test.ts | 4 + 37 files changed, 1048 insertions(+), 824 deletions(-) diff --git a/src/mcp/server.ts b/src/mcp/server.ts index 08c7cb5f..cb9452f5 100644 --- a/src/mcp/server.ts +++ b/src/mcp/server.ts @@ -49,11 +49,15 @@ export function createProvarMcpServer(config: ServerConfig): McpServer { }); // ── Sanity-check tool ──────────────────────────────────────────────────────── - server.tool( + server.registerTool( 'provardx_ping', - 'Sanity-check tool. Echoes back a message with a timestamp. 
Use this to verify the MCP server is reachable before calling other tools.', { - message: z.string().optional().default('ping').describe('Optional message to echo back'), + title: 'Ping MCP Server', + description: + 'Sanity-check tool. Echoes back a message with a timestamp. Use this to verify the MCP server is reachable before calling other tools.', + inputSchema: { + message: z.string().optional().default('ping').describe('Optional message to echo back'), + }, }, ({ message }) => { const result = { pong: message, ts: new Date().toISOString(), server: `provar-mcp@${SERVER_VERSION}` }; diff --git a/src/mcp/tools/antTools.ts b/src/mcp/tools/antTools.ts index 0d5eb506..e7c64c32 100644 --- a/src/mcp/tools/antTools.ts +++ b/src/mcp/tools/antTools.ts @@ -66,155 +66,165 @@ const AttachmentPropertiesSchema = z.object({ // ── Generate tool ───────────────────────────────────────────────────────────── export function registerAntGenerate(server: McpServer, config: ServerConfig): void { - server.tool( + server.registerTool( 'provar_ant_generate', - [ - 'Generate a Provar ANT build.xml file.', - 'Produces the standard skeleton with Provar-Compile and Run-Test-Case tasks.', - 'Supports targeting tests by project folder, plan folder, or specific .testcase files via filesets.', - 'Returns XML content. Writes to disk only when dry_run=false.', - ].join(' '), { - // ── Core paths ────────────────────────────────────────────────────────── - provar_home: z - .string() - .describe( - 'Absolute path to the Provar installation directory (e.g. "C:/Program Files/Provar/"). Used for provar.home property and ant taskdef classpaths.' + title: 'Generate ANT Build File', + description: [ + 'Generate a Provar ANT build.xml file.', + 'Produces the standard skeleton with Provar-Compile and Run-Test-Case tasks.', + 'Supports targeting tests by project folder, plan folder, or specific .testcase files via filesets.', + 'Returns XML content. 
Writes to disk only when dry_run=false.', + ].join(' '), + inputSchema: { + // ── Core paths ────────────────────────────────────────────────────────── + provar_home: z + .string() + .describe( + 'Absolute path to the Provar installation directory (e.g. "C:/Program Files/Provar/"). Used for provar.home property and ant taskdef classpaths.' + ), + project_path: z + .string() + .default('..') + .describe('Path to the Provar test project root. Defaults to ".." (parent of the ANT folder).'), + results_path: z + .string() + .default('../ANT/Results') + .describe('Path where test results are written. Defaults to "../ANT/Results".'), + project_cache_path: z + .string() + .optional() + .describe( + 'Path to the .provarCaches directory. Defaults to "../../.provarCaches" relative to the ANT folder.' + ), + license_path: z + .string() + .optional() + .describe('Path to the Provar .licenses directory (e.g. "${env.PROVAR_HOME}/.licenses").'), + smtp_path: z + .string() + .optional() + .describe('Path to the Provar .smtp directory (e.g. "${env.PROVAR_HOME}/.smtp").'), + + // ── Test selection ────────────────────────────────────────────────────── + filesets: z + .array(FilesetSchema) + .min(1) + .describe( + 'One or more filesets defining which tests to run. ' + + 'To run all tests under a folder: { dir: "../tests" }. ' + + 'To run a plan: { id: "testplan", dir: "../plans/MyPlan" }. ' + + 'To run specific test cases: { dir: "../tests/Suite", includes: ["MyTest.testcase"] }.' + ), + + // ── Browser / environment ─────────────────────────────────────────────── + web_browser: z + .enum(['Chrome', 'Chrome_Headless', 'Firefox', 'Edge', 'Edge_Legacy', 'Safari', 'IE']) + .default('Chrome') + .describe('Web browser to use for test execution.'), + web_browser_configuration: z + .string() + .default('Full Screen') + .describe('Browser window configuration (e.g. "Full Screen").'), + web_browser_provider_name: z.string().default('Desktop').describe('Browser provider name (e.g. 
"Desktop").'), + web_browser_device_name: z + .string() + .default('Full Screen') + .describe('Browser device name (e.g. "Full Screen").'), + test_environment: z + .string() + .default('') + .describe( + 'Named test environment to use (must match a connection in the project). Empty string uses default.' + ), + + // ── Cache / metadata ──────────────────────────────────────────────────── + salesforce_metadata_cache: z + .enum(['Reuse', 'Refresh', 'Reload']) + .default('Reuse') + .describe( + 'Salesforce metadata cache strategy: Reuse (fastest, uses cached), Refresh (re-downloads), Reload (clears and re-downloads).' + ), + + // ── Output / logging ──────────────────────────────────────────────────── + results_path_disposition: z + .enum(['Increment', 'Replace', 'Reuse']) + .default('Increment') + .describe( + 'How to handle the results folder when it already exists: Increment (new subfolder), Replace (overwrite), Reuse (append).' + ), + test_output_level: z + .enum(['BASIC', 'WARNING', 'DEBUG']) + .default('BASIC') + .describe('Verbosity level for test output logs.'), + plugin_output_level: z + .enum(['BASIC', 'WARNING', 'DEBUG']) + .default('WARNING') + .describe('Verbosity level for plugin output logs.'), + + // ── Execution behaviour ───────────────────────────────────────────────── + stop_test_run_on_error: z + .boolean() + .default(false) + .describe('Abort the entire test run when any test case fails.'), + exclude_callable_test_cases: z + .boolean() + .default(true) + .describe('Skip test cases marked as callable (library/helper) when true.'), + dont_fail_build: z + .boolean() + .optional() + .describe( + 'When true, the ANT build does not fail even if tests fail. Useful for CI pipelines that collect results separately.' 
+ ), + invoke_test_run_monitor: z.boolean().default(true).describe('Enable the Provar test run monitor.'), + + // ── Secrets / security ────────────────────────────────────────────────── + secrets_password: z + .string() + .default('${env.ProvarSecretsPassword}') + .describe( + 'Encryption key used to decrypt the Provar .secrets file (the password string itself, not a file path). Defaults to reading from the ProvarSecretsPassword environment variable.' + ), + test_environment_secrets_password: z + .string() + .optional() + .describe( + 'Per-environment secrets password. Defaults to reading from the ProvarSecretsPassword_EnvName environment variable.' + ), + + // ── Test Cycle ────────────────────────────────────────────────────────── + test_cycle_path: z.string().optional().describe('Path to a TestCycle folder (used with test cycle reporting).'), + test_cycle_run_type: z + .enum(['ALL', 'FAILED', 'NEW']) + .optional() + .describe('Which tests in the cycle to run (ALL, FAILED, NEW).'), + + // ── Plan features ─────────────────────────────────────────────────────── + plan_features: z + .array(PlanFeatureSchema) + .optional() + .describe( + 'Output and notification features to enable/disable (e.g. PDF, PIECHART, EMAIL). ' + + 'Only meaningful when running by test plan.' + ), + + // ── Email / attachment reporting ──────────────────────────────────────── + email_properties: EmailPropertiesSchema.optional().describe( + 'Email notification settings. Omit to exclude from the XML.' ), - project_path: z - .string() - .default('..') - .describe('Path to the Provar test project root. Defaults to ".." (parent of the ANT folder).'), - results_path: z - .string() - .default('../ANT/Results') - .describe('Path where test results are written. Defaults to "../ANT/Results".'), - project_cache_path: z - .string() - .optional() - .describe('Path to the .provarCaches directory. 
Defaults to "../../.provarCaches" relative to the ANT folder.'), - license_path: z - .string() - .optional() - .describe('Path to the Provar .licenses directory (e.g. "${env.PROVAR_HOME}/.licenses").'), - smtp_path: z - .string() - .optional() - .describe('Path to the Provar .smtp directory (e.g. "${env.PROVAR_HOME}/.smtp").'), - - // ── Test selection ────────────────────────────────────────────────────── - filesets: z - .array(FilesetSchema) - .min(1) - .describe( - 'One or more filesets defining which tests to run. ' + - 'To run all tests under a folder: { dir: "../tests" }. ' + - 'To run a plan: { id: "testplan", dir: "../plans/MyPlan" }. ' + - 'To run specific test cases: { dir: "../tests/Suite", includes: ["MyTest.testcase"] }.' + attachment_properties: AttachmentPropertiesSchema.optional().describe( + 'Attachment/report content settings. Omit to exclude from the XML.' ), - // ── Browser / environment ─────────────────────────────────────────────── - web_browser: z - .enum(['Chrome', 'Chrome_Headless', 'Firefox', 'Edge', 'Edge_Legacy', 'Safari', 'IE']) - .default('Chrome') - .describe('Web browser to use for test execution.'), - web_browser_configuration: z - .string() - .default('Full Screen') - .describe('Browser window configuration (e.g. "Full Screen").'), - web_browser_provider_name: z.string().default('Desktop').describe('Browser provider name (e.g. "Desktop").'), - web_browser_device_name: z.string().default('Full Screen').describe('Browser device name (e.g. "Full Screen").'), - test_environment: z - .string() - .default('') - .describe('Named test environment to use (must match a connection in the project). Empty string uses default.'), - - // ── Cache / metadata ──────────────────────────────────────────────────── - salesforce_metadata_cache: z - .enum(['Reuse', 'Refresh', 'Reload']) - .default('Reuse') - .describe( - 'Salesforce metadata cache strategy: Reuse (fastest, uses cached), Refresh (re-downloads), Reload (clears and re-downloads).' 
- ), - - // ── Output / logging ──────────────────────────────────────────────────── - results_path_disposition: z - .enum(['Increment', 'Replace', 'Reuse']) - .default('Increment') - .describe( - 'How to handle the results folder when it already exists: Increment (new subfolder), Replace (overwrite), Reuse (append).' - ), - test_output_level: z - .enum(['BASIC', 'WARNING', 'DEBUG']) - .default('BASIC') - .describe('Verbosity level for test output logs.'), - plugin_output_level: z - .enum(['BASIC', 'WARNING', 'DEBUG']) - .default('WARNING') - .describe('Verbosity level for plugin output logs.'), - - // ── Execution behaviour ───────────────────────────────────────────────── - stop_test_run_on_error: z - .boolean() - .default(false) - .describe('Abort the entire test run when any test case fails.'), - exclude_callable_test_cases: z - .boolean() - .default(true) - .describe('Skip test cases marked as callable (library/helper) when true.'), - dont_fail_build: z - .boolean() - .optional() - .describe( - 'When true, the ANT build does not fail even if tests fail. Useful for CI pipelines that collect results separately.' - ), - invoke_test_run_monitor: z.boolean().default(true).describe('Enable the Provar test run monitor.'), - - // ── Secrets / security ────────────────────────────────────────────────── - secrets_password: z - .string() - .default('${env.ProvarSecretsPassword}') - .describe( - 'Encryption key used to decrypt the Provar .secrets file (the password string itself, not a file path). Defaults to reading from the ProvarSecretsPassword environment variable.' - ), - test_environment_secrets_password: z - .string() - .optional() - .describe( - 'Per-environment secrets password. Defaults to reading from the ProvarSecretsPassword_EnvName environment variable.' 
- ), - - // ── Test Cycle ────────────────────────────────────────────────────────── - test_cycle_path: z.string().optional().describe('Path to a TestCycle folder (used with test cycle reporting).'), - test_cycle_run_type: z - .enum(['ALL', 'FAILED', 'NEW']) - .optional() - .describe('Which tests in the cycle to run (ALL, FAILED, NEW).'), - - // ── Plan features ─────────────────────────────────────────────────────── - plan_features: z - .array(PlanFeatureSchema) - .optional() - .describe( - 'Output and notification features to enable/disable (e.g. PDF, PIECHART, EMAIL). ' + - 'Only meaningful when running by test plan.' - ), - - // ── Email / attachment reporting ──────────────────────────────────────── - email_properties: EmailPropertiesSchema.optional().describe( - 'Email notification settings. Omit to exclude from the XML.' - ), - attachment_properties: AttachmentPropertiesSchema.optional().describe( - 'Attachment/report content settings. Omit to exclude from the XML.' - ), - - // ── File output ───────────────────────────────────────────────────────── - output_path: z - .string() - .optional() - .describe('Where to write the build.xml file (returned in response). Required when dry_run=false.'), - overwrite: z.boolean().default(false).describe('Overwrite output_path if the file already exists.'), - dry_run: z.boolean().default(true).describe('true = return XML only (default); false = write to output_path.'), + // ── File output ───────────────────────────────────────────────────────── + output_path: z + .string() + .optional() + .describe('Where to write the build.xml file (returned in response). 
Required when dry_run=false.'), + overwrite: z.boolean().default(false).describe('Overwrite output_path if the file already exists.'), + dry_run: z.boolean().default(true).describe('true = return XML only (default); false = write to output_path.'), + }, }, (input) => { const requestId = makeRequestId(); @@ -285,17 +295,20 @@ export function registerAntGenerate(server: McpServer, config: ServerConfig): vo // ── Validate tool ───────────────────────────────────────────────────────────── export function registerAntValidate(server: McpServer, config: ServerConfig): void { - server.tool( + server.registerTool( 'provar_ant_validate', - [ - 'Validate a Provar ANT build.xml for structural correctness.', - 'Checks XML well-formedness, required declarations, step,', - ' with required attributes (provarHome, projectPath, resultsPath),', - 'and at least one child. Returns is_valid, issues list, and a validity_score.', - ].join(' '), { - content: z.string().optional().describe('XML content to validate directly'), - file_path: z.string().optional().describe('Path to the build.xml file to validate'), + title: 'Validate ANT Build File', + description: [ + 'Validate a Provar ANT build.xml for structural correctness.', + 'Checks XML well-formedness, required declarations, step,', + ' with required attributes (provarHome, projectPath, resultsPath),', + 'and at least one child. 
Returns is_valid, issues list, and a validity_score.', + ].join(' '), + inputSchema: { + content: z.string().optional().describe('XML content to validate directly'), + file_path: z.string().optional().describe('Path to the build.xml file to validate'), + }, }, ({ content, file_path }) => { const requestId = makeRequestId(); diff --git a/src/mcp/tools/automationTools.ts b/src/mcp/tools/automationTools.ts index e3406bd5..c7e6c4ce 100644 --- a/src/mcp/tools/automationTools.ts +++ b/src/mcp/tools/automationTools.ts @@ -219,22 +219,25 @@ function handleSpawnError( // ── Tool: provar_automation_config_load ────────────────────────────────────── export function registerAutomationConfigLoad(server: McpServer, config: ServerConfig): void { - server.tool( + server.registerTool( 'provar_automation_config_load', - [ - 'Register a provardx-properties.json file as the active Provar configuration.', - 'Invokes `sf provar automation config load --properties-file `, writing the path to ~/.sf/config.json.', - 'REQUIRED before provar_automation_compile or provar_automation_testrun — without this step those commands fail with MISSING_FILE.', - 'Typical workflow: provar_automation_config_load → provar_automation_compile → provar_automation_testrun.', - ].join(' '), { - properties_path: z - .string() - .describe('Absolute path to the provardx-properties.json file to register as active configuration'), - sf_path: z - .string() - .optional() - .describe('Path to the sf CLI executable when not in PATH (e.g. 
"~/.nvm/versions/node/v22.0.0/bin/sf")'), + title: 'Load Automation Config', + description: [ + 'Register a provardx-properties.json file as the active Provar configuration.', + 'Invokes `sf provar automation config load --properties-file `, writing the path to ~/.sf/config.json.', + 'REQUIRED before provar_automation_compile or provar_automation_testrun — without this step those commands fail with MISSING_FILE.', + 'Typical workflow: provar_automation_config_load → provar_automation_compile → provar_automation_testrun.', + ].join(' '), + inputSchema: { + properties_path: z + .string() + .describe('Absolute path to the provardx-properties.json file to register as active configuration'), + sf_path: z + .string() + .optional() + .describe('Path to the sf CLI executable when not in PATH (e.g. "~/.nvm/versions/node/v22.0.0/bin/sf")'), + }, }, ({ properties_path, sf_path }) => { const requestId = makeRequestId(); @@ -387,26 +390,29 @@ function readResultsPathFromSfConfig(config: ServerConfig): string | null { // ── Tool: provar_automation_testrun ─────────────────────────────────────────── export function registerAutomationTestRun(server: McpServer, config: ServerConfig): void { - server.tool( + server.registerTool( 'provar_automation_testrun', - [ - 'Trigger a LOCAL Provar automation test run using installed Provar binaries. 
Invokes `sf provar automation test run`.', - 'PREREQUISITE: Run provar_automation_config_load first to register a provardx-properties.json — without this the command fails with MISSING_FILE.', - 'Requires Provar to be installed locally and provarHome set correctly in the properties file.', - 'Use provar_automation_setup first if Provar is not yet installed.', - 'For grid/CI execution via Provar Quality Hub instead of running locally, use provar_qualityhub_testrun.', - 'Typical local AI loop: config.load → compile → testrun → inspect results.', - ].join(' '), { - flags: z - .array(z.string()) - .optional() - .default([]) - .describe('Raw CLI flags to forward (e.g. ["--project-path", "/path/to/project"])'), - sf_path: z - .string() - .optional() - .describe('Path to the sf CLI executable when not in PATH (e.g. "~/.nvm/versions/node/v22.0.0/bin/sf")'), + title: 'Run Tests', + description: [ + 'Trigger a LOCAL Provar automation test run using installed Provar binaries. Invokes `sf provar automation test run`.', + 'PREREQUISITE: Run provar_automation_config_load first to register a provardx-properties.json — without this the command fails with MISSING_FILE.', + 'Requires Provar to be installed locally and provarHome set correctly in the properties file.', + 'Use provar_automation_setup first if Provar is not yet installed.', + 'For grid/CI execution via Provar Quality Hub instead of running locally, use provar_qualityhub_testrun.', + 'Typical local AI loop: config.load → compile → testrun → inspect results.', + ].join(' '), + inputSchema: { + flags: z + .array(z.string()) + .optional() + .default([]) + .describe('Raw CLI flags to forward (e.g. ["--project-path", "/path/to/project"])'), + sf_path: z + .string() + .optional() + .describe('Path to the sf CLI executable when not in PATH (e.g. 
"~/.nvm/versions/node/v22.0.0/bin/sf")'), + }, }, ({ flags, sf_path }) => { const requestId = makeRequestId(); @@ -461,23 +467,26 @@ export function registerAutomationTestRun(server: McpServer, config: ServerConfi // ── Tool: provar_automation_compile ─────────────────────────────────────────── export function registerAutomationCompile(server: McpServer): void { - server.tool( + server.registerTool( 'provar_automation_compile', - [ - 'Compile a Provar automation project. Invokes `sf provar automation project compile`.', - 'PREREQUISITE: Run provar_automation_config_load first to register a provardx-properties.json — without this the command fails with MISSING_FILE.', - 'Run this before triggering a test run after modifying test cases.', - ].join(' '), { - flags: z - .array(z.string()) - .optional() - .default([]) - .describe('Raw CLI flags to forward (e.g. ["--project-path", "/path/to/project"])'), - sf_path: z - .string() - .optional() - .describe('Path to the sf CLI executable when not in PATH (e.g. "~/.nvm/versions/node/v22.0.0/bin/sf")'), + title: 'Compile Test Assets', + description: [ + 'Compile a Provar automation project. Invokes `sf provar automation project compile`.', + 'PREREQUISITE: Run provar_automation_config_load first to register a provardx-properties.json — without this the command fails with MISSING_FILE.', + 'Run this before triggering a test run after modifying test cases.', + ].join(' '), + inputSchema: { + flags: z + .array(z.string()) + .optional() + .default([]) + .describe('Raw CLI flags to forward (e.g. ["--project-path", "/path/to/project"])'), + sf_path: z + .string() + .optional() + .describe('Path to the sf CLI executable when not in PATH (e.g. 
"~/.nvm/versions/node/v22.0.0/bin/sf")'), + }, }, ({ flags, sf_path }) => { const requestId = makeRequestId(); @@ -517,29 +526,32 @@ const DOWNLOAD_ERROR_SUGGESTION = '(4) if testprojectSecrets is set in provardx-properties.json, it must be the encryption key string used to decrypt .secrets — not a file path.'; export function registerAutomationMetadataDownload(server: McpServer): void { - server.tool( + server.registerTool( 'provar_automation_metadata_download', - [ - 'Download Salesforce metadata for one or more connections into a Provar project.', - 'Invokes `sf provar automation metadata download`.', - 'PREREQUISITE: Call provar_automation_config_load first — without it the command fails with MISSING_FILE.', - 'Use the -c flag to specify connections: flags: ["-c", "ConnectionName1,ConnectionName2"].', - 'Connection names are case-sensitive and must match the names defined in the Provar project.', - 'If the download fails with [DOWNLOAD_ERROR], this is almost always a Salesforce authentication issue —', - 'check that the credentials in the project .secrets file are current and that any referenced scratch orgs have not expired.', - ].join(' '), { - flags: z - .array(z.string()) - .optional() - .default([]) - .describe( - 'Raw CLI flags to forward. Use ["-c", "Name1,Name2"] (or the equivalent --connections form) to target specific connections. Example: ["-c", "MyOrg,SandboxOrg"]' - ), - sf_path: z - .string() - .optional() - .describe('Path to the sf CLI executable when not in PATH (e.g. 
"~/.nvm/versions/node/v22.0.0/bin/sf")'), + title: 'Download Salesforce Metadata', + description: [ + 'Download Salesforce metadata for one or more connections into a Provar project.', + 'Invokes `sf provar automation metadata download`.', + 'PREREQUISITE: Call provar_automation_config_load first — without it the command fails with MISSING_FILE.', + 'Use the -c flag to specify connections: flags: ["-c", "ConnectionName1,ConnectionName2"].', + 'Connection names are case-sensitive and must match the names defined in the Provar project.', + 'If the download fails with [DOWNLOAD_ERROR], this is almost always a Salesforce authentication issue —', + 'check that the credentials in the project .secrets file are current and that any referenced scratch orgs have not expired.', + ].join(' '), + inputSchema: { + flags: z + .array(z.string()) + .optional() + .default([]) + .describe( + 'Raw CLI flags to forward. Use ["-c", "Name1,Name2"] (or the equivalent --connections form) to target specific connections. Example: ["-c", "MyOrg,SandboxOrg"]' + ), + sf_path: z + .string() + .optional() + .describe('Path to the sf CLI executable when not in PATH (e.g. "~/.nvm/versions/node/v22.0.0/bin/sf")'), + }, }, ({ flags, sf_path }) => { const requestId = makeRequestId(); @@ -662,32 +674,37 @@ function findExistingInstallations(): ProvarInstall[] { } export function registerAutomationSetup(server: McpServer): void { - server.tool( + server.registerTool( 'provar_automation_setup', - [ - 'Download and install Provar Automation binaries locally. 
Invokes `sf provar automation setup`.', - 'Before downloading, checks for existing Provar installations in:', - ' • PROVAR_HOME environment variable', - ' • ./ProvarHome (default CLI install location)', - ' • C:\\Program Files\\Provar* (Windows system installs)', - ' • /Applications/Provar* (macOS app installs)', - 'If an existing installation is found, returns its path so you can set provarHome in the properties file — skipping the download unless force is true.', - 'After a successful install, update the provarHome property in provardx-properties.json to the returned install_path using provar_properties_set.', - ].join(' '), { - version: z - .string() - .optional() - .describe('Specific Provar Automation version to install, e.g. "2.12.0". Omit to install the latest release.'), - force: z - .boolean() - .optional() - .default(false) - .describe('Force a fresh download even if an existing installation is already detected (default: false).'), - sf_path: z - .string() - .optional() - .describe('Path to the sf CLI executable when not in PATH (e.g. "~/.nvm/versions/node/v22.0.0/bin/sf")'), + title: 'Install Provar Automation', + description: [ + 'Download and install Provar Automation binaries locally. Invokes `sf provar automation setup`.', + 'Before downloading, checks for existing Provar installations in:', + ' • PROVAR_HOME environment variable', + ' • ./ProvarHome (default CLI install location)', + ' • C:\\Program Files\\Provar* (Windows system installs)', + ' • /Applications/Provar* (macOS app installs)', + 'If an existing installation is found, returns its path so you can set provarHome in the properties file — skipping the download unless force is true.', + 'After a successful install, update the provarHome property in provardx-properties.json to the returned install_path using provar_properties_set.', + ].join(' '), + inputSchema: { + version: z + .string() + .optional() + .describe( + 'Specific Provar Automation version to install, e.g. "2.12.0". 
Omit to install the latest release.' + ), + force: z + .boolean() + .optional() + .default(false) + .describe('Force a fresh download even if an existing installation is already detected (default: false).'), + sf_path: z + .string() + .optional() + .describe('Path to the sf CLI executable when not in PATH (e.g. "~/.nvm/versions/node/v22.0.0/bin/sf")'), + }, }, ({ version, force, sf_path }) => { const requestId = makeRequestId(); diff --git a/src/mcp/tools/connectionTools.ts b/src/mcp/tools/connectionTools.ts index 43d5b600..30f954e6 100644 --- a/src/mcp/tools/connectionTools.ts +++ b/src/mcp/tools/connectionTools.ts @@ -131,19 +131,22 @@ function parseEnvironmentList(content: string): EnvironmentEntry[] { // ── Tool registration ───────────────────────────────────────────────────────── export function registerConnectionList(server: McpServer, config: ServerConfig): void { - server.tool( + server.registerTool( 'provar_connection_list', - [ - 'List all connections and named environments defined in the .testproject file.', - 'Use this before generating test cases or page objects to get the correct connection names.', - 'Returns connections (name, type, url, sso_configured) and environments (name, connection, url).', - 'Prerequisite: the project must have a .testproject file — run provar_project_validate first if unsure.', - 'Security: only connection names, types, and URLs are returned — credential values from .secrets are never included.', - ].join(' '), { - project_path: z - .string() - .describe('Absolute or relative path to the Provar project root directory (must be within --allowed-paths)'), + title: 'List Connections', + description: [ + 'List all connections and named environments defined in the .testproject file.', + 'Use this before generating test cases or page objects to get the correct connection names.', + 'Returns connections (name, type, url, sso_configured) and environments (name, connection, url).', + 'Prerequisite: the project must have a 
.testproject file — run provar_project_validate first if unsure.', + 'Security: only connection names, types, and URLs are returned — credential values from .secrets are never included.', + ].join(' '), + inputSchema: { + project_path: z + .string() + .describe('Absolute or relative path to the Provar project root directory (must be within --allowed-paths)'), + }, }, ({ project_path }) => { const requestId = makeRequestId(); diff --git a/src/mcp/tools/defectTools.ts b/src/mcp/tools/defectTools.ts index a8fd76b7..c408f01f 100644 --- a/src/mcp/tools/defectTools.ts +++ b/src/mcp/tools/defectTools.ts @@ -261,24 +261,27 @@ export function createDefectsForRun( // ── Tool registration ────────────────────────────────────────────────────────── export function registerQualityHubDefectCreate(server: McpServer): void { - server.tool( + server.registerTool( 'provar_qualityhub_defect_create', - [ - 'Create Defect__c records in Quality Hub for failed test executions in a given run.', - 'Queries the run by Tracking_Id__c, finds failed Test_Execution__c records, creates a', - 'Defect__c per failure (with description, step, browser, environment, tester), and links', - 'it via Test_Case_Defect__c and Test_Execution_Defect__c junction records.', - 'If Jira or ADO sync is configured in Quality Hub, defects sync to those systems automatically.', - ].join(' '), { - run_id: z.string().describe('Test run Tracking_Id__c value returned by provar_qualityhub_testrun'), - target_org: z.string().describe('SF org alias or username for the Quality Hub org'), - failed_tests: z - .array(z.string()) - .optional() - .describe( - 'Optional filter — list of Test_Case__c record ID substrings to restrict defect creation to specific failures' - ), + title: 'Create Defects', + description: [ + 'Create Defect__c records in Quality Hub for failed test executions in a given run.', + 'Queries the run by Tracking_Id__c, finds failed Test_Execution__c records, creates a', + 'Defect__c per failure (with 
description, step, browser, environment, tester), and links', + 'it via Test_Case_Defect__c and Test_Execution_Defect__c junction records.', + 'If Jira or ADO sync is configured in Quality Hub, defects sync to those systems automatically.', + ].join(' '), + inputSchema: { + run_id: z.string().describe('Test run Tracking_Id__c value returned by provar_qualityhub_testrun'), + target_org: z.string().describe('SF org alias or username for the Quality Hub org'), + failed_tests: z + .array(z.string()) + .optional() + .describe( + 'Optional filter — list of Test_Case__c record ID substrings to restrict defect creation to specific failures' + ), + }, }, ({ run_id, target_org, failed_tests }) => { const requestId = makeRequestId(); diff --git a/src/mcp/tools/nitroXTools.ts b/src/mcp/tools/nitroXTools.ts index 7166ac9f..7b94b1a8 100644 --- a/src/mcp/tools/nitroXTools.ts +++ b/src/mcp/tools/nitroXTools.ts @@ -426,22 +426,34 @@ function applyMergePatch(target: JsonObj, patch: JsonObj): JsonObj { // ── Tool Registrations ──────────────────────────────────────────────────────── export function registerNitroXDiscover(server: McpServer): void { - server.tool( + server.registerTool( 'provar_nitrox_discover', - [ - 'Discover Provar projects containing NitroX (Hybrid Model) page objects.', - 'Scans directories for .testproject marker files, then inventories nitroX/ and nitroXPackages/ directories.', - "NitroX is Provar's Hybrid Model for locators — component-based page objects for LWC,", - 'Screen Flow, Industry Components, Experience Cloud, and HTML5 components.', - 'Results provide file paths and package info for use with provar_nitrox_read, validate, and generate.', - ].join(' '), { - search_roots: z - .array(z.string()) - .optional() - .describe('Directories to scan (default: cwd; if empty, falls back to ~/git and ~/Provar)'), - max_depth: z.number().int().min(1).max(20).default(6).describe('Maximum directory depth for .testproject search'), - include_packages: 
z.boolean().default(true).describe('Include nitroXPackages/ package.json metadata in results'), + title: 'Discover NitroX Components', + description: [ + 'Discover Provar projects containing NitroX (Hybrid Model) page objects.', + 'Scans directories for .testproject marker files, then inventories nitroX/ and nitroXPackages/ directories.', + "NitroX is Provar's Hybrid Model for locators — component-based page objects for LWC,", + 'Screen Flow, Industry Components, Experience Cloud, and HTML5 components.', + 'Results provide file paths and package info for use with provar_nitrox_read, validate, and generate.', + ].join(' '), + inputSchema: { + search_roots: z + .array(z.string()) + .optional() + .describe('Directories to scan (default: cwd; if empty, falls back to ~/git and ~/Provar)'), + max_depth: z + .number() + .int() + .min(1) + .max(20) + .default(6) + .describe('Maximum directory depth for .testproject search'), + include_packages: z + .boolean() + .default(true) + .describe('Include nitroXPackages/ package.json metadata in results'), + }, }, ({ search_roots, max_depth, include_packages }) => { const requestId = makeRequestId(); @@ -516,26 +528,29 @@ export function registerNitroXDiscover(server: McpServer): void { } export function registerNitroXRead(server: McpServer, config: ServerConfig): void { - server.tool( + server.registerTool( 'provar_nitrox_read', - [ - 'Read one or more NitroX .po.json (Hybrid Model page object) files and return their parsed content.', - 'Use this to load examples before generating or validating.', - "Provide file_paths for specific files, or project_path to read all .po.json files from a project's nitroX/ directory.", - ].join(' '), { - file_paths: z.array(z.string()).optional().describe('Specific .po.json file paths to read'), - project_path: z - .string() - .optional() - .describe('Provar project path — reads all .po.json files from nitroX/ directory'), - max_files: z - .number() - .int() - .min(1) - .max(100) - .default(20) - 
.describe('Maximum number of files to return (prevents context overflow)'), + title: 'Read NitroX Files', + description: [ + 'Read one or more NitroX .po.json (Hybrid Model page object) files and return their parsed content.', + 'Use this to load examples before generating or validating.', + "Provide file_paths for specific files, or project_path to read all .po.json files from a project's nitroX/ directory.", + ].join(' '), + inputSchema: { + file_paths: z.array(z.string()).optional().describe('Specific .po.json file paths to read'), + project_path: z + .string() + .optional() + .describe('Provar project path — reads all .po.json files from nitroX/ directory'), + max_files: z + .number() + .int() + .min(1) + .max(100) + .default(20) + .describe('Maximum number of files to return (prevents context overflow)'), + }, }, ({ file_paths, project_path, max_files }) => { const requestId = makeRequestId(); @@ -614,17 +629,20 @@ export function registerNitroXRead(server: McpServer, config: ServerConfig): voi } export function registerNitroXValidate(server: McpServer, config: ServerConfig): void { - server.tool( + server.registerTool( 'provar_nitrox_validate', - [ - 'Validate a NitroX .po.json (Hybrid Model component page object) against schema rules.', - 'Works for any NitroX-mapped component type: LWC, Screen Flow, Industry Components, Experience Cloud, HTML5.', - 'Returns a quality score (0–100) and a list of issues with rule IDs (NX001–NX010), severity, and suggestions.', - 'Score formula: 100 − (20 × errors) − (5 × warnings) − (1 × infos).', - ].join(' '), { - content: z.string().optional().describe('JSON string of the .po.json content to validate'), - file_path: z.string().optional().describe('Path to a .po.json file to validate'), + title: 'Validate NitroX Component', + description: [ + 'Validate a NitroX .po.json (Hybrid Model component page object) against schema rules.', + 'Works for any NitroX-mapped component type: LWC, Screen Flow, Industry Components, 
Experience Cloud, HTML5.', + 'Returns a quality score (0–100) and a list of issues with rule IDs (NX001–NX010), severity, and suggestions.', + 'Score formula: 100 − (20 × errors) − (5 × warnings) − (1 × infos).', + ].join(' '), + inputSchema: { + content: z.string().optional().describe('JSON string of the .po.json content to validate'), + file_path: z.string().optional().describe('Path to a .po.json file to validate'), + }, }, ({ content, file_path }) => { const requestId = makeRequestId(); @@ -698,26 +716,29 @@ const ElementInputSchema = z.object({ }); export function registerNitroXGenerate(server: McpServer, config: ServerConfig): void { - server.tool( + server.registerTool( 'provar_nitrox_generate', - [ - 'Generate a new NitroX .po.json (Hybrid Model page object) from a component description.', - "Applicable to any component type supported by Provar's Hybrid Model:", - 'LWC, Screen Flow, Industry Components, Experience Cloud, HTML5.', - 'All componentId fields are assigned fresh UUIDs. Returns JSON content;', - 'writes to disk only when dry_run=false.', - ].join(' '), { - name: z.string().describe('Path-like component name, e.g. /com/force/myapp/ButtonComponent'), - tag_name: z.string().describe('LWC or HTML tag name, e.g. 
lightning-button or c-my-component'), - type: z.enum(['Block', 'Page']).default('Block').describe('Component type'), - page_structure_element: z.boolean().default(true).describe('Whether this is a page structure element'), - field_details_element: z.boolean().default(false).describe('Whether this is a field details element'), - parameters: z.array(ParameterInputSchema).optional().describe('Component parameters/qualifiers'), - elements: z.array(ElementInputSchema).optional().describe('Child elements'), - output_path: z.string().optional().describe('File path to write (requires dry_run=false)'), - overwrite: z.boolean().default(false).describe('Overwrite if output_path already exists'), - dry_run: z.boolean().default(true).describe('Return JSON without writing to disk (default)'), + title: 'Generate NitroX Components', + description: [ + 'Generate a new NitroX .po.json (Hybrid Model page object) from a component description.', + "Applicable to any component type supported by Provar's Hybrid Model:", + 'LWC, Screen Flow, Industry Components, Experience Cloud, HTML5.', + 'All componentId fields are assigned fresh UUIDs. Returns JSON content;', + 'writes to disk only when dry_run=false.', + ].join(' '), + inputSchema: { + name: z.string().describe('Path-like component name, e.g. /com/force/myapp/ButtonComponent'), + tag_name: z.string().describe('LWC or HTML tag name, e.g. 
lightning-button or c-my-component'), + type: z.enum(['Block', 'Page']).default('Block').describe('Component type'), + page_structure_element: z.boolean().default(true).describe('Whether this is a page structure element'), + field_details_element: z.boolean().default(false).describe('Whether this is a field details element'), + parameters: z.array(ParameterInputSchema).optional().describe('Component parameters/qualifiers'), + elements: z.array(ElementInputSchema).optional().describe('Child elements'), + output_path: z.string().optional().describe('File path to write (requires dry_run=false)'), + overwrite: z.boolean().default(false).describe('Overwrite if output_path already exists'), + dry_run: z.boolean().default(true).describe('Return JSON without writing to disk (default)'), + }, }, (input) => { const requestId = makeRequestId(); @@ -775,24 +796,27 @@ export function registerNitroXGenerate(server: McpServer, config: ServerConfig): } export function registerNitroXPatch(server: McpServer, config: ServerConfig): void { - server.tool( + server.registerTool( 'provar_nitrox_patch', - [ - 'Apply a JSON merge-patch (RFC 7396) to an existing NitroX .po.json file.', - 'Reads the file, merges the patch (null values remove keys, other values replace or recurse into objects),', - 'optionally validates the merged result, and writes back.', - 'Use dry_run=true (default) to preview the merged output without writing.', - ].join(' '), { - file_path: z.string().describe('Path to the existing .po.json file to patch'), - patch: z - .record(z.unknown()) - .describe('JSON merge-patch to apply (RFC 7396: null removes key, any other value replaces)'), - dry_run: z.boolean().default(true).describe('Return merged result without writing to disk (default)'), - validate_after: z - .boolean() - .default(true) - .describe('Run NX validation on merged result; blocks write if errors found'), + title: 'Patch NitroX Component', + description: [ + 'Apply a JSON merge-patch (RFC 7396) to an 
existing NitroX .po.json file.', + 'Reads the file, merges the patch (null values remove keys, other values replace or recurse into objects),', + 'optionally validates the merged result, and writes back.', + 'Use dry_run=true (default) to preview the merged output without writing.', + ].join(' '), + inputSchema: { + file_path: z.string().describe('Path to the existing .po.json file to patch'), + patch: z + .record(z.unknown()) + .describe('JSON merge-patch to apply (RFC 7396: null removes key, any other value replaces)'), + dry_run: z.boolean().default(true).describe('Return merged result without writing to disk (default)'), + validate_after: z + .boolean() + .default(true) + .describe('Run NX validation on merged result; blocks write if errors found'), + }, }, ({ file_path, patch, dry_run, validate_after }) => { const requestId = makeRequestId(); diff --git a/src/mcp/tools/pageObjectGenerate.ts b/src/mcp/tools/pageObjectGenerate.ts index 4d877374..4e9fd960 100644 --- a/src/mcp/tools/pageObjectGenerate.ts +++ b/src/mcp/tools/pageObjectGenerate.ts @@ -101,47 +101,53 @@ function preflightAndWrite( } export function registerPageObjectGenerate(server: McpServer, config: ServerConfig): void { - server.tool( + server.registerTool( 'provar_pageobject_generate', - [ - 'Generate a Provar Java Page Object skeleton with @Page/@SalesforcePage annotation, standard imports, and @FindBy WebElement fields.', - 'Returns Java source. Writes to disk only when dry_run=false.', - 'SSO support: set sso_class to also generate an ILoginPage implementation stub for non-SF SSO pages.', - 'Example: sso_class="LoginPageSso" generates a LoginPageSso.java that implements ILoginPage with loginAs() and logout() stubs.', - 'The ILoginPage stub is written to the same directory as output_path when dry_run=false.', - ].join(' '), { - class_name: z.string().describe('PascalCase class name, e.g. AccountDetailPage'), - package_name: z - .string() - .default('pageobjects') - .describe('Java package, e.g. 
pageobjects or pageobjects.accounts'), - page_type: z - .enum(['standard', 'salesforce']) - .default('standard') - .describe('@Page (standard) or @SalesforcePage (salesforce)'), - title: z.string().optional().describe('Page title attribute; defaults to class_name if omitted'), - connection_name: z - .string() - .optional() - .describe('Salesforce connection name (required when page_type=salesforce)'), - salesforce_page_attribute: z - .enum(['page', 'auraComponent', 'object', 'lightningWebComponent']) - .optional() - .describe('Page type attribute for @SalesforcePage'), - fields: z.array(FieldSchema).default([]).describe('WebElement fields to generate'), - sso_class: z - .string() - .optional() - .describe( - 'PascalCase class name for an ILoginPage implementation stub (non-SF SSO pages). ' + - 'When provided, an additional Java class implementing ILoginPage is generated alongside the page object. ' + - 'Example: "LoginPageSso" → LoginPageSso.java with loginAs() and logout() method stubs.' - ), - output_path: z.string().optional().describe('Suggested file path for the .java file (returned in response)'), - overwrite: z.boolean().default(false).describe('Overwrite existing file when dry_run=false'), - dry_run: z.boolean().default(true).describe('true = return source only (default); false = write to output_path'), - idempotency_key: z.string().optional().describe('Caller-provided key echoed back for deduplication tracking'), + title: 'Generate Page Object', + description: [ + 'Generate a Provar Java Page Object skeleton with @Page/@SalesforcePage annotation, standard imports, and @FindBy WebElement fields.', + 'Returns Java source. 
Writes to disk only when dry_run=false.', + 'SSO support: set sso_class to also generate an ILoginPage implementation stub for non-SF SSO pages.', + 'Example: sso_class="LoginPageSso" generates a LoginPageSso.java that implements ILoginPage with loginAs() and logout() stubs.', + 'The ILoginPage stub is written to the same directory as output_path when dry_run=false.', + ].join(' '), + inputSchema: { + class_name: z.string().describe('PascalCase class name, e.g. AccountDetailPage'), + package_name: z + .string() + .default('pageobjects') + .describe('Java package, e.g. pageobjects or pageobjects.accounts'), + page_type: z + .enum(['standard', 'salesforce']) + .default('standard') + .describe('@Page (standard) or @SalesforcePage (salesforce)'), + title: z.string().optional().describe('Page title attribute; defaults to class_name if omitted'), + connection_name: z + .string() + .optional() + .describe('Salesforce connection name (required when page_type=salesforce)'), + salesforce_page_attribute: z + .enum(['page', 'auraComponent', 'object', 'lightningWebComponent']) + .optional() + .describe('Page type attribute for @SalesforcePage'), + fields: z.array(FieldSchema).default([]).describe('WebElement fields to generate'), + sso_class: z + .string() + .optional() + .describe( + 'PascalCase class name for an ILoginPage implementation stub (non-SF SSO pages). ' + + 'When provided, an additional Java class implementing ILoginPage is generated alongside the page object. ' + + 'Example: "LoginPageSso" → LoginPageSso.java with loginAs() and logout() method stubs.' 
+ ), + output_path: z.string().optional().describe('Suggested file path for the .java file (returned in response)'), + overwrite: z.boolean().default(false).describe('Overwrite existing file when dry_run=false'), + dry_run: z + .boolean() + .default(true) + .describe('true = return source only (default); false = write to output_path'), + idempotency_key: z.string().optional().describe('Caller-provided key echoed back for deduplication tracking'), + }, }, (input) => { const requestId = makeRequestId(); diff --git a/src/mcp/tools/pageObjectValidate.ts b/src/mcp/tools/pageObjectValidate.ts index e98c5831..f34891bf 100644 --- a/src/mcp/tools/pageObjectValidate.ts +++ b/src/mcp/tools/pageObjectValidate.ts @@ -17,16 +17,20 @@ import { makeError, makeRequestId, type ValidationIssue } from '../schemas/commo import { log } from '../logging/logger.js'; export function registerPageObjectValidate(server: McpServer, config: ServerConfig): void { - server.tool( + server.registerTool( 'provar_pageobject_validate', - 'Validate a Provar Java Page Object against naming conventions, locator best practices, and structural requirements. Returns quality score (0–100) and list of issues.', { - content: z.string().optional().describe('Java source code to validate directly'), - file_path: z.string().optional().describe('Path to .java Page Object file'), - expected_class_name: z - .string() - .optional() - .describe('Expected class name for PO_006 check; inferred from file_path when omitted'), + title: 'Validate Page Object', + description: + 'Validate a Provar Java Page Object against naming conventions, locator best practices, and structural requirements. 
Returns quality score (0–100) and list of issues.', + inputSchema: { + content: z.string().optional().describe('Java source code to validate directly'), + file_path: z.string().optional().describe('Path to .java Page Object file'), + expected_class_name: z + .string() + .optional() + .describe('Expected class name for PO_006 check; inferred from file_path when omitted'), + }, }, ({ content, file_path, expected_class_name }) => { const requestId = makeRequestId(); diff --git a/src/mcp/tools/projectInspect.ts b/src/mcp/tools/projectInspect.ts index 40ebff79..f0690603 100644 --- a/src/mcp/tools/projectInspect.ts +++ b/src/mcp/tools/projectInspect.ts @@ -16,22 +16,25 @@ import { makeError, makeRequestId } from '../schemas/common.js'; import { log } from '../logging/logger.js'; export function registerProjectInspect(server: McpServer, config: ServerConfig): void { - server.tool( + server.registerTool( 'provar_project_inspect', - [ - 'Inspect a Provar project folder and return a structured inventory.', - 'Returns: provardx-properties.json config files (for ProvarDX CLI runs),', - 'ANT build files (build.xml etc in ANT/ dirs, for CLI/pipeline runs),', - 'source page object directories with Java file counts (src/pageobjects — compiled bin/ dirs excluded),', - '.testcase files found recursively under tests/,', - 'count of custom test step files in src/customapis/,', - 'count of data source files (CSV/XLSX/JSON) in data/ and templates/ dirs,', - 'test plan coverage showing which test cases are covered vs uncovered,', - 'and connection + environment overview parsed from the .testproject file', - '(Salesforce, UI Testing, Web Services, Quality Hub, Database, and other connection types).', - ].join(' '), { - project_path: z.string().describe('Absolute or relative path to the Provar project root directory'), + title: 'Inspect Project', + description: [ + 'Inspect a Provar project folder and return a structured inventory.', + 'Returns: provardx-properties.json config files (for 
ProvarDX CLI runs),', + 'ANT build files (build.xml etc in ANT/ dirs, for CLI/pipeline runs),', + 'source page object directories with Java file counts (src/pageobjects — compiled bin/ dirs excluded),', + '.testcase files found recursively under tests/,', + 'count of custom test step files in src/customapis/,', + 'count of data source files (CSV/XLSX/JSON) in data/ and templates/ dirs,', + 'test plan coverage showing which test cases are covered vs uncovered,', + 'and connection + environment overview parsed from the .testproject file', + '(Salesforce, UI Testing, Web Services, Quality Hub, Database, and other connection types).', + ].join(' '), + inputSchema: { + project_path: z.string().describe('Absolute or relative path to the Provar project root directory'), + }, }, ({ project_path }) => { const requestId = makeRequestId(); diff --git a/src/mcp/tools/projectValidateFromPath.ts b/src/mcp/tools/projectValidateFromPath.ts index c1dc16ab..28e2fc96 100644 --- a/src/mcp/tools/projectValidateFromPath.ts +++ b/src/mcp/tools/projectValidateFromPath.ts @@ -103,69 +103,72 @@ function shapeResponse( // ── Tool registration ───────────────────────────────────────────────────────── export function registerProjectValidateFromPath(server: McpServer, config: ServerConfig): void { - server.tool( + server.registerTool( 'provar_project_validate', - [ - 'Validate a Provar project directly from its directory on disk.', - 'Reads the plan/suite/testinstance hierarchy from the plans/ directory,', - 'resolves test case XML from the tests/ directory, extracts project context', - '(connections, environments, secrets) from the .testproject file, then runs', - 'the full validation rule set.', - 'Returns a compact quality score, violation summary, and per-plan/suite scores.', - 'By default returns a slim summary response to avoid token explosion.', - 'Pass include_plan_details:true to get full per-suite and per-test-case data.', - 'By default saves a QH-compatible JSON report to', - 
'{project_path}/provardx/validation/ (created if absent).', - 'IMPORTANT: Use this tool for whole-project validation —', - 'DO NOT read individual test case files and pass XML content inline.', - 'Pass a project_path and let this tool handle all file reading.', - ].join(' '), { - project_path: z - .string() - .describe('Absolute path to the Provar project root (the directory containing the .testproject file)'), - quality_threshold: z - .number() - .min(0) - .max(100) - .optional() - .default(80) - .describe('Minimum quality score for a test case to be considered valid (default: 80)'), - save_results: z - .boolean() - .optional() - .default(true) - .describe('Write a QH-compatible JSON report to provardx/validation/ (default: true)'), - results_dir: z - .string() - .optional() - .describe('Override the output directory for the saved report (default: {project_path}/provardx/validation)'), - include_plan_details: z - .boolean() - .optional() - .default(false) - .describe( - 'When true, include full per-suite and per-test-case violation data in the response. ' + - 'Default false to keep response small. Use only when you need to inspect specific test case failures.' - ), - max_uncovered: z - .number() - .int() - .min(0) - .optional() - .default(20) - .describe( - 'Maximum number of uncovered test case paths to include in the response (default: 20). Set to 0 for none, or a large number for all.' - ), - max_violations: z - .number() - .int() - .min(0) - .optional() - .default(50) - .describe( - 'When include_plan_details:true, caps project_violations returned (default: 50). Ignored in slim mode where violations are grouped by rule_id instead.' 
- ), + title: 'Validate Project', + description: [ + 'Validate a Provar project directly from its directory on disk.', + 'Reads the plan/suite/testinstance hierarchy from the plans/ directory,', + 'resolves test case XML from the tests/ directory, extracts project context', + '(connections, environments, secrets) from the .testproject file, then runs', + 'the full validation rule set.', + 'Returns a compact quality score, violation summary, and per-plan/suite scores.', + 'By default returns a slim summary response to avoid token explosion.', + 'Pass include_plan_details:true to get full per-suite and per-test-case data.', + 'By default saves a QH-compatible JSON report to', + '{project_path}/provardx/validation/ (created if absent).', + 'IMPORTANT: Use this tool for whole-project validation —', + 'DO NOT read individual test case files and pass XML content inline.', + 'Pass a project_path and let this tool handle all file reading.', + ].join(' '), + inputSchema: { + project_path: z + .string() + .describe('Absolute path to the Provar project root (the directory containing the .testproject file)'), + quality_threshold: z + .number() + .min(0) + .max(100) + .optional() + .default(80) + .describe('Minimum quality score for a test case to be considered valid (default: 80)'), + save_results: z + .boolean() + .optional() + .default(true) + .describe('Write a QH-compatible JSON report to provardx/validation/ (default: true)'), + results_dir: z + .string() + .optional() + .describe('Override the output directory for the saved report (default: {project_path}/provardx/validation)'), + include_plan_details: z + .boolean() + .optional() + .default(false) + .describe( + 'When true, include full per-suite and per-test-case violation data in the response. ' + + 'Default false to keep response small. Use only when you need to inspect specific test case failures.' 
+ ), + max_uncovered: z + .number() + .int() + .min(0) + .optional() + .default(20) + .describe( + 'Maximum number of uncovered test case paths to include in the response (default: 20). Set to 0 for none, or a large number for all.' + ), + max_violations: z + .number() + .int() + .min(0) + .optional() + .default(50) + .describe( + 'When include_plan_details:true, caps project_violations returned (default: 50). Ignored in slim mode where violations are grouped by rule_id instead.' + ), + }, }, ({ project_path, diff --git a/src/mcp/tools/propertiesTools.ts b/src/mcp/tools/propertiesTools.ts index fbc57db0..3395dcd8 100644 --- a/src/mcp/tools/propertiesTools.ts +++ b/src/mcp/tools/propertiesTools.ts @@ -159,25 +159,28 @@ function deepMerge(target: Record, source: Record { const requestId = makeRequestId(); @@ -319,11 +322,15 @@ function buildDivergenceWarning( // ── provar_properties_read ──────────────────────────────────────────────────── export function registerPropertiesRead(server: McpServer, config: ServerConfig): void { - server.tool( + server.registerTool( 'provar_properties_read', - 'Read and parse a provardx-properties.json file. Returns the parsed content so you can inspect current settings before making changes with provar_properties_set.', { - file_path: z.string().describe('Path to the provardx-properties.json file'), + title: 'Read Properties File', + description: + 'Read and parse a provardx-properties.json file. 
Returns the parsed content so you can inspect current settings before making changes with provar_properties_set.', + inputSchema: { + file_path: z.string().describe('Path to the provardx-properties.json file'), + }, }, ({ file_path }) => { const requestId = makeRequestId(); @@ -472,18 +479,21 @@ const updatesSchema = z .describe('Fields to update in the properties file — only provided fields are changed'); export function registerPropertiesSet(server: McpServer, config: ServerConfig): void { - server.tool( + server.registerTool( 'provar_properties_set', - [ - 'Update one or more fields in a provardx-properties.json file.', - 'Only the provided fields are changed — all other fields are preserved.', - 'Object fields (environment, metadata) are deep-merged.', - 'Array fields (testCase, testPlan, connectionOverride) replace the existing value entirely.', - 'Use provar_properties_read first to inspect the current state.', - ].join(' '), { - file_path: z.string().describe('Path to the provardx-properties.json file to update'), - updates: updatesSchema, + title: 'Set Property Value', + description: [ + 'Update one or more fields in a provardx-properties.json file.', + 'Only the provided fields are changed — all other fields are preserved.', + 'Object fields (environment, metadata) are deep-merged.', + 'Array fields (testCase, testPlan, connectionOverride) replace the existing value entirely.', + 'Use provar_properties_read first to inspect the current state.', + ].join(' '), + inputSchema: { + file_path: z.string().describe('Path to the provardx-properties.json file to update'), + updates: updatesSchema, + }, }, ({ file_path, updates }) => { const requestId = makeRequestId(); @@ -562,16 +572,19 @@ export function registerPropertiesSet(server: McpServer, config: ServerConfig): // ── provar_properties_validate ──────────────────────────────────────────────── export function registerPropertiesValidate(server: McpServer, config: ServerConfig): void { - server.tool( + 
server.registerTool( 'provar_properties_validate', - [ - 'Validate a provardx-properties.json file against the ProvarDX schema.', - 'Checks required fields, valid enum values, and warns about unfilled placeholder values.', - 'Accepts either a file path or inline JSON content.', - ].join(' '), { - file_path: z.string().optional().describe('Path to the provardx-properties.json file to validate'), - content: z.string().optional().describe('Inline JSON string to validate (alternative to file_path)'), + title: 'Validate ProvarDX Properties File', + description: [ + 'Validate a provardx-properties.json file against the ProvarDX schema.', + 'Checks required fields, valid enum values, and warns about unfilled placeholder values.', + 'Accepts either a file path or inline JSON content.', + ].join(' '), + inputSchema: { + file_path: z.string().optional().describe('Path to the provardx-properties.json file to validate'), + content: z.string().optional().describe('Inline JSON string to validate (alternative to file_path)'), + }, }, ({ file_path, content }) => { const requestId = makeRequestId(); diff --git a/src/mcp/tools/qualityHubApiTools.ts b/src/mcp/tools/qualityHubApiTools.ts index 3321c205..0a76553d 100644 --- a/src/mcp/tools/qualityHubApiTools.ts +++ b/src/mcp/tools/qualityHubApiTools.ts @@ -45,49 +45,52 @@ const CORPUS_UNREACHABLE_WARNING = // ── Tool: provar_qualityhub_examples_retrieve ───────────────────────────────── export function registerCorpusExamplesRetrieve(server: McpServer): void { - server.tool( + server.registerTool( 'provar_qualityhub_examples_retrieve', - [ - 'Retrieve N similar Provar test case examples from the Quality Hub corpus (1000+ tests in Bedrock KB).', - 'Use this BEFORE writing any Provar .testcase XML — whether via provar_testcase_generate, Write, or Edit.', - 'Pass a user story, requirement, source test file content, or step type keywords as the query.', - 'Returns up to N example Provar XML test cases ordered by similarity score.', - 'If 
retrieval fails (no auth, network error, rate limit), returns empty examples with a warning — the', - 'generation workflow can still continue without grounding. Never hard-errors on API failure.', - '', - 'For org-specific field metadata: first call getObjectSchema from the Salesforce Hosted MCP', - '(platform/sobject-reads — https://api.salesforce.com/platform/mcp/v1/platform/sobject-reads),', - 'then include key field names in your query (e.g. "Opportunity: CloseDate, Amount, StageName").', - '', - 'Requires a Provar API key (sf provar auth login). Without a key, returns empty examples with onboarding instructions.', - ].join('\n'), { - query: z - .string() - .describe( - 'Text to search against the corpus — a user story, requirement description, or source test file content. ' + - 'Longer is better: include Salesforce object names, field names, and action descriptions. ' + - 'Truncated server-side at 2000 characters.' - ), - n: z - .number() - .int() - .min(1) - .max(10) - .optional() - .default(5) - .describe('Number of examples to return. Default 5, max 10.'), - app_filter: z - .string() - .optional() - .describe( - 'Optional Salesforce cloud filter to bias results (e.g. "SalesCloud", "ServiceCloud", "HealthCloud").' - ), - prefer_high_quality: z - .boolean() - .optional() - .default(true) - .describe('When true (default), favours tier4/tier3 corpus examples. 
Set false to include all tiers.'), + title: 'Retrieve Corpus Examples', + description: [ + 'Retrieve N similar Provar test case examples from the Quality Hub corpus (1000+ tests in Bedrock KB).', + 'Use this BEFORE writing any Provar .testcase XML — whether via provar_testcase_generate, Write, or Edit.', + 'Pass a user story, requirement, source test file content, or step type keywords as the query.', + 'Returns up to N example Provar XML test cases ordered by similarity score.', + 'If retrieval fails (no auth, network error, rate limit), returns empty examples with a warning — the', + 'generation workflow can still continue without grounding. Never hard-errors on API failure.', + '', + 'For org-specific field metadata: first call getObjectSchema from the Salesforce Hosted MCP', + '(platform/sobject-reads — https://api.salesforce.com/platform/mcp/v1/platform/sobject-reads),', + 'then include key field names in your query (e.g. "Opportunity: CloseDate, Amount, StageName").', + '', + 'Requires a Provar API key (sf provar auth login). Without a key, returns empty examples with onboarding instructions.', + ].join('\n'), + inputSchema: { + query: z + .string() + .describe( + 'Text to search against the corpus — a user story, requirement description, or source test file content. ' + + 'Longer is better: include Salesforce object names, field names, and action descriptions. ' + + 'Truncated server-side at 2000 characters.' + ), + n: z + .number() + .int() + .min(1) + .max(10) + .optional() + .default(5) + .describe('Number of examples to return. Default 5, max 10.'), + app_filter: z + .string() + .optional() + .describe( + 'Optional Salesforce cloud filter to bias results (e.g. "SalesCloud", "ServiceCloud", "HealthCloud").' + ), + prefer_high_quality: z + .boolean() + .optional() + .default(true) + .describe('When true (default), favours tier4/tier3 corpus examples. 
Set false to include all tiers.'), + }, }, async ({ query, n, app_filter, prefer_high_quality }) => { const requestId = makeRequestId(); diff --git a/src/mcp/tools/qualityHubTools.ts b/src/mcp/tools/qualityHubTools.ts index ad0c40eb..74d037da 100644 --- a/src/mcp/tools/qualityHubTools.ts +++ b/src/mcp/tools/qualityHubTools.ts @@ -33,16 +33,20 @@ function handleSpawnError( // ── Tool: provar_qualityhub_connect ─────────────────────────────────────────── export function registerQualityHubConnect(server: McpServer): void { - server.tool( + server.registerTool( 'provar_qualityhub_connect', - 'Connect to a Provar Quality Hub org. Invokes `sf provar quality-hub connect` with the supplied flags.', { - target_org: z.string().describe('SF org alias or username to connect as'), - flags: z - .array(z.string()) - .optional() - .default([]) - .describe('Additional raw CLI flags to forward (e.g. ["--json"])'), + title: 'Connect to Quality Hub', + description: + 'Connect to a Provar Quality Hub org. Invokes `sf provar quality-hub connect` with the supplied flags.', + inputSchema: { + target_org: z.string().describe('SF org alias or username to connect as'), + flags: z + .array(z.string()) + .optional() + .default([]) + .describe('Additional raw CLI flags to forward (e.g. ["--json"])'), + }, }, ({ target_org, flags }) => { const requestId = makeRequestId(); @@ -75,12 +79,15 @@ export function registerQualityHubConnect(server: McpServer): void { // ── Tool: provar_qualityhub_display ────────────────────────────────────────── export function registerQualityHubDisplay(server: McpServer): void { - server.tool( + server.registerTool( 'provar_qualityhub_display', - 'Display connected Quality Hub org info. 
Invokes `sf provar quality-hub display`.', { - target_org: z.string().optional().describe('SF org alias or username (uses default if omitted)'), - flags: z.array(z.string()).optional().default([]).describe('Additional raw CLI flags to forward'), + title: 'Display Quality Hub Info', + description: 'Display connected Quality Hub org info. Invokes `sf provar quality-hub display`.', + inputSchema: { + target_org: z.string().optional().describe('SF org alias or username (uses default if omitted)'), + flags: z.array(z.string()).optional().default([]).describe('Additional raw CLI flags to forward'), + }, }, ({ target_org, flags }) => { const requestId = makeRequestId(); @@ -132,19 +139,23 @@ function detectWildcardFlags(flags: string[]): string | undefined { } export function registerQualityHubTestRun(server: McpServer): void { - server.tool( + server.registerTool( 'provar_qualityhub_testrun', - 'Trigger a Quality Hub test run. Invokes `sf provar quality-hub test run`. ' + - 'Warning: wildcard characters (* or ?) in flag values will cause QH plan-level reporting to be skipped — use exact plan names.', { - target_org: z.string().describe('SF org alias or username'), - flags: z - .array(z.string()) - .optional() - .default([]) - .describe( - 'Additional raw CLI flags (e.g. ["--plan-name", "SmokeTests"]). Avoid wildcards in --plan-name values — they skip QH plan-level reporting.' - ), + title: 'Trigger Quality Hub Test Run', + description: + 'Trigger a Quality Hub test run. Invokes `sf provar quality-hub test run`. ' + + 'Warning: wildcard characters (* or ?) in flag values will cause QH plan-level reporting to be skipped — use exact plan names.', + inputSchema: { + target_org: z.string().describe('SF org alias or username'), + flags: z + .array(z.string()) + .optional() + .default([]) + .describe( + 'Additional raw CLI flags (e.g. ["--plan-name", "SmokeTests"]). Avoid wildcards in --plan-name values — they skip QH plan-level reporting.' 
+ ), + }, }, ({ target_org, flags }) => { const requestId = makeRequestId(); @@ -187,13 +198,16 @@ export function registerQualityHubTestRun(server: McpServer): void { // ── Tool: provar_qualityhub_testrun_report ──────────────────────────────────── export function registerQualityHubTestRunReport(server: McpServer): void { - server.tool( + server.registerTool( 'provar_qualityhub_testrun_report', - 'Poll the status of a Quality Hub test run. Invokes `sf provar quality-hub test run report`.', { - target_org: z.string().describe('SF org alias or username'), - run_id: z.string().describe('Test run ID returned by provar_qualityhub_testrun'), - flags: z.array(z.string()).optional().default([]).describe('Additional raw CLI flags'), + title: 'Poll Quality Hub Test Run', + description: 'Poll the status of a Quality Hub test run. Invokes `sf provar quality-hub test run report`.', + inputSchema: { + target_org: z.string().describe('SF org alias or username'), + run_id: z.string().describe('Test run ID returned by provar_qualityhub_testrun'), + flags: z.array(z.string()).optional().default([]).describe('Additional raw CLI flags'), + }, }, ({ target_org, run_id, flags }) => { const requestId = makeRequestId(); @@ -256,13 +270,16 @@ export function registerQualityHubTestRunReport(server: McpServer): void { // ── Tool: provar_qualityhub_testrun_abort ───────────────────────────────────── export function registerQualityHubTestRunAbort(server: McpServer): void { - server.tool( + server.registerTool( 'provar_qualityhub_testrun_abort', - 'Abort an in-progress Quality Hub test run. Invokes `sf provar quality-hub test run abort`.', { - target_org: z.string().describe('SF org alias or username'), - run_id: z.string().describe('Test run ID to abort'), - flags: z.array(z.string()).optional().default([]).describe('Additional raw CLI flags'), + title: 'Abort Quality Hub Test Run', + description: 'Abort an in-progress Quality Hub test run. 
Invokes `sf provar quality-hub test run abort`.', + inputSchema: { + target_org: z.string().describe('SF org alias or username'), + run_id: z.string().describe('Test run ID to abort'), + flags: z.array(z.string()).optional().default([]).describe('Additional raw CLI flags'), + }, }, ({ target_org, run_id, flags }) => { const requestId = makeRequestId(); @@ -306,16 +323,20 @@ export function registerQualityHubTestRunAbort(server: McpServer): void { // ── Tool: provar_qualityhub_testcase_retrieve ───────────────────────────────── export function registerQualityHubTestcaseRetrieve(server: McpServer): void { - server.tool( + server.registerTool( 'provar_qualityhub_testcase_retrieve', - 'Retrieve Quality Hub test cases by user story or component. Invokes `sf provar quality-hub testcase retrieve`.', { - target_org: z.string().describe('SF org alias or username'), - flags: z - .array(z.string()) - .optional() - .default([]) - .describe('Additional raw CLI flags (e.g. ["--user-story", "US-123"])'), + title: 'Retrieve Quality Hub Test Cases', + description: + 'Retrieve Quality Hub test cases by user story or component. Invokes `sf provar quality-hub testcase retrieve`.', + inputSchema: { + target_org: z.string().describe('SF org alias or username'), + flags: z + .array(z.string()) + .optional() + .default([]) + .describe('Additional raw CLI flags (e.g. 
["--user-story", "US-123"])'), + }, }, ({ target_org, flags }) => { const requestId = makeRequestId(); diff --git a/src/mcp/tools/rcaTools.ts b/src/mcp/tools/rcaTools.ts index 1b4e4813..5a4df233 100644 --- a/src/mcp/tools/rcaTools.ts +++ b/src/mcp/tools/rcaTools.ts @@ -391,26 +391,29 @@ function resolveResultsLocation( // ── provar_testrun_report_locate tool ───────────────────────────────────────── export function registerTestRunLocate(server: McpServer): void { - server.tool( + server.registerTool( 'provar_testrun_report_locate', - [ - 'Resolve exactly where Provar test run artifacts were written, without parsing them.', - 'Returns the results directory, paths to JUnit.xml and Index.html if they exist,', - 'paths to per-test HTML reports, and any validation JSON files.', - 'Supports explicit results_path override or auto-detection from sf config, provardx properties file, or ANT build.xml.', - ].join(' '), { - project_path: z.string().describe('Absolute path to the Provar project root'), - results_path: z - .string() - .optional() - .describe('Explicit override for the results base directory; if provided, skip auto-detection'), - run_index: z - .number() - .int() - .positive() - .optional() - .describe('Which Increment run to target (default: latest); must be a positive integer'), + title: 'Locate Test Report', + description: [ + 'Resolve exactly where Provar test run artifacts were written, without parsing them.', + 'Returns the results directory, paths to JUnit.xml and Index.html if they exist,', + 'paths to per-test HTML reports, and any validation JSON files.', + 'Supports explicit results_path override or auto-detection from sf config, provardx properties file, or ANT build.xml.', + ].join(' '), + inputSchema: { + project_path: z.string().describe('Absolute path to the Provar project root'), + results_path: z + .string() + .optional() + .describe('Explicit override for the results base directory; if provided, skip auto-detection'), + run_index: z + .number() 
+ .int() + .positive() + .optional() + .describe('Which Increment run to target (default: latest); must be a positive integer'), + }, }, (input) => { const requestId = makeRequestId(); @@ -668,41 +671,44 @@ function buildFailureReports( // ── provar_testrun_rca tool ─────────────────────────────────────────────────── export function registerTestRunRca(server: McpServer, config: ServerConfig): void { - server.tool( + server.registerTool( 'provar_testrun_rca', - [ - 'Parse a completed Provar test run and produce a structured Root Cause Analysis (RCA) report.', - 'Resolves the results directory, parses JUnit.xml, classifies each failure by category,', - 'and produces recommendations. Use locate_only=true to skip parsing and just resolve artifact locations.', - 'Use mode="failures" to get a lightweight array of failed test cases', - '([{ testItemId, title, errorMessage }]) without the full RCA classification — useful when you', - 'need failure names quickly without loading the HTML report.', - ].join(' '), { - project_path: z.string().describe('Absolute path to the Provar project root'), - results_path: z - .string() - .optional() - .describe('Explicit override for the results base directory; must be within --allowed-paths if provided'), - run_index: z - .number() - .int() - .positive() - .optional() - .describe('Which Increment run to target (default: latest); must be a positive integer'), - locate_only: z - .boolean() - .optional() - .default(false) - .describe('If true, skip parsing and return just artifact locations'), - mode: z - .enum(['rca', 'failures']) - .optional() - .default('rca') - .describe( - '"rca" (default): full root-cause analysis with classification and recommendations. ' + - '"failures": lightweight array of failed test cases [{ testItemId, title, errorMessage }].' 
- ), + title: 'Root Cause Analysis', + description: [ + 'Parse a completed Provar test run and produce a structured Root Cause Analysis (RCA) report.', + 'Resolves the results directory, parses JUnit.xml, classifies each failure by category,', + 'and produces recommendations. Use locate_only=true to skip parsing and just resolve artifact locations.', + 'Use mode="failures" to get a lightweight array of failed test cases', + '([{ testItemId, title, errorMessage }]) without the full RCA classification — useful when you', + 'need failure names quickly without loading the HTML report.', + ].join(' '), + inputSchema: { + project_path: z.string().describe('Absolute path to the Provar project root'), + results_path: z + .string() + .optional() + .describe('Explicit override for the results base directory; must be within --allowed-paths if provided'), + run_index: z + .number() + .int() + .positive() + .optional() + .describe('Which Increment run to target (default: latest); must be a positive integer'), + locate_only: z + .boolean() + .optional() + .default(false) + .describe('If true, skip parsing and return just artifact locations'), + mode: z + .enum(['rca', 'failures']) + .optional() + .default('rca') + .describe( + '"rca" (default): full root-cause analysis with classification and recommendations. ' + + '"failures": lightweight array of failed test cases [{ testItemId, title, errorMessage }].' 
+ ), + }, }, (input) => { const requestId = makeRequestId(); diff --git a/src/mcp/tools/testCaseGenerate.ts b/src/mcp/tools/testCaseGenerate.ts index 28df1c87..6c12a571 100644 --- a/src/mcp/tools/testCaseGenerate.ts +++ b/src/mcp/tools/testCaseGenerate.ts @@ -144,34 +144,37 @@ const TOOL_DESCRIPTION = [ ].join(' '); export function registerTestCaseGenerate(server: McpServer, config: ServerConfig): void { - server.tool( + server.registerTool( 'provar_testcase_generate', - TOOL_DESCRIPTION, { - test_case_name: z.string().describe('Test case name (human-readable label)'), - test_case_id: z.string().optional().describe('Explicit test case id; auto-generated UUID v4 if omitted'), - steps: z.array(StepSchema).default([]).describe('Ordered list of test steps'), - target_uri: z - .string() - .optional() - .describe( - 'Page object URI that determines the XML nesting structure. ' + - 'Omit or use "sf:ui:target" for Salesforce targets (flat structure). ' + - 'Use "ui:pageobject:target?pageId=pageobjects.PageClass" for non-SF page objects — ' + - 'steps are wrapped in a UiWithScreen element targeting that class.' - ), - output_path: z.string().optional().describe('Suggested file path for the .xml file (returned in response)'), - overwrite: z.boolean().default(false).describe('Overwrite if output_path file already exists'), - dry_run: z.boolean().default(true).describe('true = return XML only (default); false = write to output_path'), - validate_after_edit: z - .boolean() - .default(true) - .describe( - 'Run structural validation after generation (default: true). ' + - 'Returns TESTCASE_INVALID error if the generated XML fails validation. ' + - 'Set false to skip validation and omit the validation field from the response.' 
- ), - idempotency_key: z.string().optional().describe('Caller-provided key echoed back for deduplication tracking'), + title: 'Generate Test Case', + description: TOOL_DESCRIPTION, + inputSchema: { + test_case_name: z.string().describe('Test case name (human-readable label)'), + test_case_id: z.string().optional().describe('Explicit test case id; auto-generated UUID v4 if omitted'), + steps: z.array(StepSchema).default([]).describe('Ordered list of test steps'), + target_uri: z + .string() + .optional() + .describe( + 'Page object URI that determines the XML nesting structure. ' + + 'Omit or use "sf:ui:target" for Salesforce targets (flat structure). ' + + 'Use "ui:pageobject:target?pageId=pageobjects.PageClass" for non-SF page objects — ' + + 'steps are wrapped in a UiWithScreen element targeting that class.' + ), + output_path: z.string().optional().describe('Suggested file path for the .xml file (returned in response)'), + overwrite: z.boolean().default(false).describe('Overwrite if output_path file already exists'), + dry_run: z.boolean().default(true).describe('true = return XML only (default); false = write to output_path'), + validate_after_edit: z + .boolean() + .default(true) + .describe( + 'Run structural validation after generation (default: true). ' + + 'Returns TESTCASE_INVALID error if the generated XML fails validation. ' + + 'Set false to skip validation and omit the validation field from the response.' 
+ ), + idempotency_key: z.string().optional().describe('Caller-provided key echoed back for deduplication tracking'), + }, }, (input) => { const requestId = makeRequestId(); diff --git a/src/mcp/tools/testCaseStepTools.ts b/src/mcp/tools/testCaseStepTools.ts index 46ca11f1..09eca655 100644 --- a/src/mcp/tools/testCaseStepTools.ts +++ b/src/mcp/tools/testCaseStepTools.ts @@ -82,43 +82,46 @@ function parseNewStep(stepXml: string): { step: ApiCallNode } | { error: string // ── Tool registration ───────────────────────────────────────────────────────── export function registerTestCaseStepEdit(server: McpServer, config: ServerConfig): void { - server.tool( + server.registerTool( 'provar_testcase_step_edit', - [ - 'Add or remove a single step (apiCall) in a Provar XML test case file.', - 'Uses write-to-temp-then-rename to minimise partial-write risk.', - 'Prerequisites: the test case must exist and be valid XML.', - 'For mode=remove: supply test_item_id of the step to remove.', - 'For mode=add: supply test_item_id of the anchor step, position (before|after, default after),', - 'and step_xml (the ... XML fragment for the new step; must contain exactly one ).', - 'A backup is written to .bak before any mutation and restored automatically if', - 'the post-edit validation fails.', - 'Returns STEP_NOT_FOUND (with all_test_item_ids list) when the target step is absent.', - 'Returns INVALID_STEP_XML when step_xml cannot be parsed or contains ≠1 elements.', - 'Returns INVALID_XML_AFTER_EDIT (backup restored) when the mutated file fails validation.', - ].join(' '), { - test_case_path: z.string().describe('Absolute path to the .testcase XML file; must be within --allowed-paths'), - mode: z.enum(['remove', 'add']).describe('"remove" to delete a step; "add" to insert a new step'), - test_item_id: z - .string() - .describe('For mode=remove: testItemId of the step to delete. 
For mode=add: testItemId of the anchor step.'), - position: z - .enum(['before', 'after']) - .optional() - .default('after') - .describe('Where to insert relative to the anchor step (mode=add only; default: after)'), - step_xml: z - .string() - .optional() - .describe( - 'The ... XML fragment for the new step (mode=add only). Must be well-formed XML.' - ), - validate_after_edit: z - .boolean() - .optional() - .default(true) - .describe('Run provar_testcase_validate after the mutation; restores backup on failure (default: true)'), + title: 'Edit Test Case Step', + description: [ + 'Add or remove a single step (apiCall) in a Provar XML test case file.', + 'Uses write-to-temp-then-rename to minimise partial-write risk.', + 'Prerequisites: the test case must exist and be valid XML.', + 'For mode=remove: supply test_item_id of the step to remove.', + 'For mode=add: supply test_item_id of the anchor step, position (before|after, default after),', + 'and step_xml (the ... XML fragment for the new step; must contain exactly one ).', + 'A backup is written to .bak before any mutation and restored automatically if', + 'the post-edit validation fails.', + 'Returns STEP_NOT_FOUND (with all_test_item_ids list) when the target step is absent.', + 'Returns INVALID_STEP_XML when step_xml cannot be parsed or contains ≠1 elements.', + 'Returns INVALID_XML_AFTER_EDIT (backup restored) when the mutated file fails validation.', + ].join(' '), + inputSchema: { + test_case_path: z.string().describe('Absolute path to the .testcase XML file; must be within --allowed-paths'), + mode: z.enum(['remove', 'add']).describe('"remove" to delete a step; "add" to insert a new step'), + test_item_id: z + .string() + .describe('For mode=remove: testItemId of the step to delete. 
For mode=add: testItemId of the anchor step.'), + position: z + .enum(['before', 'after']) + .optional() + .default('after') + .describe('Where to insert relative to the anchor step (mode=add only; default: after)'), + step_xml: z + .string() + .optional() + .describe( + 'The ... XML fragment for the new step (mode=add only). Must be well-formed XML.' + ), + validate_after_edit: z + .boolean() + .optional() + .default(true) + .describe('Run provar_testcase_validate after the mutation; restores backup on failure (default: true)'), + }, }, (input) => { const requestId = makeRequestId(); diff --git a/src/mcp/tools/testCaseValidate.ts b/src/mcp/tools/testCaseValidate.ts index 35035cf4..8f4c19d9 100644 --- a/src/mcp/tools/testCaseValidate.ts +++ b/src/mcp/tools/testCaseValidate.ts @@ -42,13 +42,17 @@ const UNREACHABLE_WARNING = 'For CI/CD: set PROVAR_QUALITY_HUB_URL and PROVAR_API_KEY environment variables.'; export function registerTestCaseValidate(server: McpServer, config: ServerConfig): void { - server.tool( + server.registerTool( 'provar_testcase_validate', - 'Validate a Provar XML test case for structural correctness and quality. Checks XML declaration, root element, required attributes (guid UUID v4, testItemId integer), presence, and applies best-practice rules. When a Provar API key is configured (via sf provar auth login or PROVAR_API_KEY env var), calls the Quality Hub API for full 170-rule scoring. Falls back to local validation if no key is set or the API is unavailable. 
Returns validity_score (schema compliance), quality_score (best practices, 0–100), and validation_source indicating which ruleset was applied.', { - content: z.string().optional().describe('XML content to validate directly (alias: xml)'), - xml: z.string().optional().describe('XML content to validate — API-compatible alias for content'), - file_path: z.string().optional().describe('Path to .xml test case file'), + title: 'Validate Test Case', + description: + 'Validate a Provar XML test case for structural correctness and quality. Checks XML declaration, root element, required attributes (guid UUID v4, testItemId integer), presence, and applies best-practice rules. When a Provar API key is configured (via sf provar auth login or PROVAR_API_KEY env var), calls the Quality Hub API for full 170-rule scoring. Falls back to local validation if no key is set or the API is unavailable. Returns validity_score (schema compliance), quality_score (best practices, 0–100), and validation_source indicating which ruleset was applied.', + inputSchema: { + content: z.string().optional().describe('XML content to validate directly (alias: xml)'), + xml: z.string().optional().describe('XML content to validate — API-compatible alias for content'), + file_path: z.string().optional().describe('Path to .xml test case file'), + }, }, async ({ content, xml, file_path }) => { const requestId = makeRequestId(); diff --git a/src/mcp/tools/testPlanTools.ts b/src/mcp/tools/testPlanTools.ts index 2c567392..8c01d9e3 100644 --- a/src/mcp/tools/testPlanTools.ts +++ b/src/mcp/tools/testPlanTools.ts @@ -60,26 +60,31 @@ function buildPlanItemXml(guid: string): string { // ── provar_testplan_create ──────────────────────────────────────────────────── export function registerTestPlanCreate(server: McpServer, config: ServerConfig): void { - server.tool( + server.registerTool( 'provar_testplan_create', - [ - 'Create a new Provar test plan: makes the plans/{plan_name}/ directory and writes the root 
.planitem file.', - 'Use this before provar_testplan_create-suite or provar_testplan_add-instance, which both require the plan to already exist.', - 'Returns the guid assigned to the new plan, the plan directory path, and the .planitem path written.', - ].join(' '), { - project_path: z.string().describe('Absolute path to the Provar project root (must contain a .testproject file)'), - plan_name: z.string().describe('Name of the new test plan (becomes the directory name under plans/)'), - overwrite: z - .boolean() - .optional() - .default(false) - .describe('Overwrite the .planitem file if the plan directory already exists (default: false)'), - dry_run: z - .boolean() - .optional() - .default(false) - .describe('Return what would be created without writing to disk (default: false)'), + title: 'Create Test Plan', + description: [ + 'Create a new Provar test plan: makes the plans/{plan_name}/ directory and writes the root .planitem file.', + 'Use this before provar_testplan_create-suite or provar_testplan_add-instance, which both require the plan to already exist.', + 'Returns the guid assigned to the new plan, the plan directory path, and the .planitem path written.', + ].join(' '), + inputSchema: { + project_path: z + .string() + .describe('Absolute path to the Provar project root (must contain a .testproject file)'), + plan_name: z.string().describe('Name of the new test plan (becomes the directory name under plans/)'), + overwrite: z + .boolean() + .optional() + .default(false) + .describe('Overwrite the .planitem file if the plan directory already exists (default: false)'), + dry_run: z + .boolean() + .optional() + .default(false) + .describe('Return what would be created without writing to disk (default: false)'), + }, }, ({ project_path, plan_name, overwrite, dry_run }) => { const requestId = makeRequestId(); @@ -194,35 +199,38 @@ export function registerTestPlanCreate(server: McpServer, config: ServerConfig): // ── provar_testplan_add-instance 
────────────────────────────────────────────── export function registerTestPlanAddInstance(server: McpServer, config: ServerConfig): void { - server.tool( + server.registerTool( 'provar_testplan_add-instance', - [ - 'Add a .testinstance file to an existing Provar test plan suite directory.', - 'The plan directory and suite directory must already exist.', - 'test_case_path is relative to the project root (e.g. "tests/MyTest.testcase").', - 'suite_path is the path within the plan (e.g. "MySuite" or "MySuite/SubSuite").', - 'Returns the guid assigned to the new instance and the path where it was written.', - ].join(' '), { - project_path: z.string().describe('Absolute path to the Provar project root'), - test_case_path: z - .string() - .describe('Path to the .testcase file, relative to project root (e.g. "tests/MyTest.testcase")'), - plan_name: z.string().describe('Name of the test plan (directory under plans/)'), - suite_path: z - .string() - .optional() - .describe('Path within the plan to place the instance (e.g. "MySuite" or "MySuite/SubSuite")'), - overwrite: z - .boolean() - .optional() - .default(false) - .describe('Overwrite the .testinstance file if it already exists (default: false)'), - dry_run: z - .boolean() - .optional() - .default(false) - .describe('Return what would be written without writing to disk (default: false)'), + title: 'Add Test Plan Instance', + description: [ + 'Add a .testinstance file to an existing Provar test plan suite directory.', + 'The plan directory and suite directory must already exist.', + 'test_case_path is relative to the project root (e.g. "tests/MyTest.testcase").', + 'suite_path is the path within the plan (e.g. 
"MySuite" or "MySuite/SubSuite").', + 'Returns the guid assigned to the new instance and the path where it was written.', + ].join(' '), + inputSchema: { + project_path: z.string().describe('Absolute path to the Provar project root'), + test_case_path: z + .string() + .describe('Path to the .testcase file, relative to project root (e.g. "tests/MyTest.testcase")'), + plan_name: z.string().describe('Name of the test plan (directory under plans/)'), + suite_path: z + .string() + .optional() + .describe('Path within the plan to place the instance (e.g. "MySuite" or "MySuite/SubSuite")'), + overwrite: z + .boolean() + .optional() + .default(false) + .describe('Overwrite the .testinstance file if it already exists (default: false)'), + dry_run: z + .boolean() + .optional() + .default(false) + .describe('Return what would be written without writing to disk (default: false)'), + }, }, ({ project_path, test_case_path, plan_name, suite_path, overwrite, dry_run }) => { const requestId = makeRequestId(); @@ -391,27 +399,30 @@ export function registerTestPlanAddInstance(server: McpServer, config: ServerCon // ── provar_testplan_create-suite ────────────────────────────────────────────── export function registerTestPlanCreateSuite(server: McpServer, config: ServerConfig): void { - server.tool( + server.registerTool( 'provar_testplan_create-suite', - [ - 'Create a new suite directory inside a Provar test plan.', - 'The plan directory must already exist with a .planitem file at its root.', - 'Writes a new .planitem file into the created suite directory.', - 'Returns the guid assigned to the new suite.', - ].join(' '), { - project_path: z.string().describe('Absolute path to the Provar project root'), - plan_name: z.string().describe('Name of the test plan (directory under plans/)'), - suite_name: z.string().describe('Name of the new suite directory to create'), - parent_suite_path: z - .string() - .optional() - .describe('Path of the parent suite within the plan (e.g. "MySuite"). 
Omit to create at plan root.'), - dry_run: z - .boolean() - .optional() - .default(false) - .describe('Return what would be created without writing to disk (default: false)'), + title: 'Create Test Plan Suite', + description: [ + 'Create a new suite directory inside a Provar test plan.', + 'The plan directory must already exist with a .planitem file at its root.', + 'Writes a new .planitem file into the created suite directory.', + 'Returns the guid assigned to the new suite.', + ].join(' '), + inputSchema: { + project_path: z.string().describe('Absolute path to the Provar project root'), + plan_name: z.string().describe('Name of the test plan (directory under plans/)'), + suite_name: z.string().describe('Name of the new suite directory to create'), + parent_suite_path: z + .string() + .optional() + .describe('Path of the parent suite within the plan (e.g. "MySuite"). Omit to create at plan root.'), + dry_run: z + .boolean() + .optional() + .default(false) + .describe('Return what would be created without writing to disk (default: false)'), + }, }, ({ project_path, plan_name, suite_name, parent_suite_path, dry_run }) => { const requestId = makeRequestId(); @@ -536,21 +547,24 @@ export function registerTestPlanCreateSuite(server: McpServer, config: ServerCon // ── provar_testplan_remove-instance ────────────────────────────────────────── export function registerTestPlanRemoveInstance(server: McpServer, config: ServerConfig): void { - server.tool( + server.registerTool( 'provar_testplan_remove-instance', - [ - 'Remove a .testinstance file from a Provar test plan.', - 'instance_path is relative to the project root.', - 'Returns the path of the removed file.', - ].join(' '), { - project_path: z.string().describe('Absolute path to the Provar project root'), - instance_path: z.string().describe('Path to the .testinstance file, relative to project root'), - dry_run: z - .boolean() - .optional() - .default(false) - .describe('Return what would be removed without deleting 
(default: false)'), + title: 'Remove Test Plan Instance', + description: [ + 'Remove a .testinstance file from a Provar test plan.', + 'instance_path is relative to the project root.', + 'Returns the path of the removed file.', + ].join(' '), + inputSchema: { + project_path: z.string().describe('Absolute path to the Provar project root'), + instance_path: z.string().describe('Path to the .testinstance file, relative to project root'), + dry_run: z + .boolean() + .optional() + .default(false) + .describe('Return what would be removed without deleting (default: false)'), + }, }, ({ project_path, instance_path, dry_run }) => { const requestId = makeRequestId(); diff --git a/src/mcp/tools/testPlanValidate.ts b/src/mcp/tools/testPlanValidate.ts index cdb62d64..a9f30462 100644 --- a/src/mcp/tools/testPlanValidate.ts +++ b/src/mcp/tools/testPlanValidate.ts @@ -71,26 +71,30 @@ const metadataSchema = z ); export function registerTestPlanValidate(server: McpServer): void { - server.tool( + server.registerTool( 'provar_testplan_validate', - 'Validate a Provar test plan: checks for empty plans, duplicate suite names, oversized plans (>20 suites), plan completeness (objectives, scope, methodology, environments, acceptance criteria, test data strategy, risk assessment), and naming consistency. Recursively validates child suites and test cases. 
Returns quality score, plan-level violations, and full hierarchy results.', { - plan_name: z.string().describe('Name of the test plan'), - test_suites: z.array(suiteSchema).optional().describe('Test suites belonging to this plan'), - test_cases: z.array(testCaseSchema).optional().describe('Test cases directly in this plan (not in a suite)'), - test_suite_count: z - .number() - .int() - .min(0) - .optional() - .describe('Explicit suite count for size check (overrides counting test_suites)'), - metadata: metadataSchema, - quality_threshold: z - .number() - .min(0) - .max(100) - .optional() - .describe('Minimum quality score for a test case to be considered valid (default: 80)'), + title: 'Validate Test Plan', + description: + 'Validate a Provar test plan: checks for empty plans, duplicate suite names, oversized plans (>20 suites), plan completeness (objectives, scope, methodology, environments, acceptance criteria, test data strategy, risk assessment), and naming consistency. Recursively validates child suites and test cases. 
Returns quality score, plan-level violations, and full hierarchy results.', + inputSchema: { + plan_name: z.string().describe('Name of the test plan'), + test_suites: z.array(suiteSchema).optional().describe('Test suites belonging to this plan'), + test_cases: z.array(testCaseSchema).optional().describe('Test cases directly in this plan (not in a suite)'), + test_suite_count: z + .number() + .int() + .min(0) + .optional() + .describe('Explicit suite count for size check (overrides counting test_suites)'), + metadata: metadataSchema, + quality_threshold: z + .number() + .min(0) + .max(100) + .optional() + .describe('Minimum quality score for a test case to be considered valid (default: 80)'), + }, }, ({ plan_name, test_suites, test_cases, test_suite_count, metadata, quality_threshold }) => { const requestId = makeRequestId(); diff --git a/src/mcp/tools/testSuiteValidate.ts b/src/mcp/tools/testSuiteValidate.ts index 71efe879..bb23dcc8 100644 --- a/src/mcp/tools/testSuiteValidate.ts +++ b/src/mcp/tools/testSuiteValidate.ts @@ -43,28 +43,32 @@ const childSuiteSchema = z.object({ }); export function registerTestSuiteValidate(server: McpServer): void { - server.tool( + server.registerTool( 'provar_testsuite_validate', - 'Validate a Provar test suite: checks for empty suites, duplicate names, oversized suites (>75 tests), and naming convention consistency. Recursively validates child suites and individual test case XML. 
Returns quality score, suite-level violations, and per-test-case results.', { - suite_name: z.string().describe('Name of the test suite'), - test_cases: z.array(testCaseSchema).optional().describe('Test cases directly in this suite'), - child_suites: z - .array(childSuiteSchema) - .optional() - .describe('Child test suites (supports up to 2 levels of nesting)'), - test_case_count: z - .number() - .int() - .min(0) - .optional() - .describe('Explicit total test case count for size check (overrides counting test_cases)'), - quality_threshold: z - .number() - .min(0) - .max(100) - .optional() - .describe('Minimum quality score for a test case to be considered valid (default: 80)'), + title: 'Validate Test Suite', + description: + 'Validate a Provar test suite: checks for empty suites, duplicate names, oversized suites (>75 tests), and naming convention consistency. Recursively validates child suites and individual test case XML. Returns quality score, suite-level violations, and per-test-case results.', + inputSchema: { + suite_name: z.string().describe('Name of the test suite'), + test_cases: z.array(testCaseSchema).optional().describe('Test cases directly in this suite'), + child_suites: z + .array(childSuiteSchema) + .optional() + .describe('Child test suites (supports up to 2 levels of nesting)'), + test_case_count: z + .number() + .int() + .min(0) + .optional() + .describe('Explicit total test case count for size check (overrides counting test_cases)'), + quality_threshold: z + .number() + .min(0) + .max(100) + .optional() + .describe('Minimum quality score for a test case to be considered valid (default: 80)'), + }, }, ({ suite_name, test_cases, child_suites, test_case_count, quality_threshold }) => { const requestId = makeRequestId(); diff --git a/test/unit/mcp/antTools.test.ts b/test/unit/mcp/antTools.test.ts index 3b76f5f4..74e37bc2 100644 --- a/test/unit/mcp/antTools.test.ts +++ b/test/unit/mcp/antTools.test.ts @@ -26,6 +26,10 @@ class MockMcpServer { 
this.handlers.set(name, handler); } + public registerTool(name: string, _config: unknown, handler: ToolHandler): void { + this.handlers.set(name, handler); + } + public call(name: string, args: Record<string, unknown>): ReturnType<ToolHandler> { const h = this.handlers.get(name); if (!h) throw new Error(`Tool not registered: ${name}`); diff --git a/test/unit/mcp/automationTools.test.ts b/test/unit/mcp/automationTools.test.ts index 48979008..381ec475 100644 --- a/test/unit/mcp/automationTools.test.ts +++ b/test/unit/mcp/automationTools.test.ts @@ -31,6 +31,10 @@ class MockMcpServer { this.handlers.set(name, handler); } + public registerTool(name: string, _config: unknown, handler: ToolHandler): void { + this.handlers.set(name, handler); + } + public call(name: string, args: Record<string, unknown>): ReturnType<ToolHandler> { const h = this.handlers.get(name); if (!h) throw new Error(`Tool not registered: ${name}`); diff --git a/test/unit/mcp/connectionTools.test.ts b/test/unit/mcp/connectionTools.test.ts index 5021563b..cf5ec3a6 100644 --- a/test/unit/mcp/connectionTools.test.ts +++ b/test/unit/mcp/connectionTools.test.ts @@ -25,6 +25,10 @@ class MockMcpServer { this.handlers.set(name, handler); } + public registerTool(name: string, _config: unknown, handler: ToolHandler): void { + this.handlers.set(name, handler); + } + public call(name: string, args: Record<string, unknown>): ReturnType<ToolHandler> { const h = this.handlers.get(name); if (!h) throw new Error(`Tool not registered: ${name}`); diff --git a/test/unit/mcp/defectTools.test.ts b/test/unit/mcp/defectTools.test.ts index d0fb9269..f99ae6d8 100644 --- a/test/unit/mcp/defectTools.test.ts +++ b/test/unit/mcp/defectTools.test.ts @@ -23,6 +23,10 @@ class MockMcpServer { this.handlers.set(name, handler); } + public registerTool(name: string, _config: unknown, handler: ToolHandler): void { + this.handlers.set(name, handler); + } + public call(name: string, args: Record<string, unknown>): ReturnType<ToolHandler> { const h = this.handlers.get(name); if (!h) throw new Error(`Tool not registered: ${name}`); diff --git 
a/test/unit/mcp/nitroXTools.test.ts b/test/unit/mcp/nitroXTools.test.ts index 0d80d948..b0d0c2a8 100644 --- a/test/unit/mcp/nitroXTools.test.ts +++ b/test/unit/mcp/nitroXTools.test.ts @@ -23,6 +23,10 @@ class MockMcpServer { this.handlers.set(name, handler); } + public registerTool(name: string, _config: unknown, handler: ToolHandler): void { + this.handlers.set(name, handler); + } + public call(name: string, args: Record<string, unknown>): ReturnType<ToolHandler> { const h = this.handlers.get(name); if (!h) throw new Error(`Tool not registered: ${name}`); diff --git a/test/unit/mcp/pageObjectGenerate.test.ts b/test/unit/mcp/pageObjectGenerate.test.ts index 094ad4b7..70d68adf 100644 --- a/test/unit/mcp/pageObjectGenerate.test.ts +++ b/test/unit/mcp/pageObjectGenerate.test.ts @@ -27,6 +27,10 @@ class MockMcpServer { this.handlers.set(name, handler); } + public registerTool(name: string, _config: unknown, handler: ToolHandler): void { + this.handlers.set(name, handler); + } + public call(name: string, args: Record<string, unknown>): ReturnType<ToolHandler> { const h = this.handlers.get(name); if (!h) throw new Error(`Tool not registered: ${name}`); diff --git a/test/unit/mcp/projectValidateFromPath.test.ts b/test/unit/mcp/projectValidateFromPath.test.ts index aadd603d..5e463ad9 100644 --- a/test/unit/mcp/projectValidateFromPath.test.ts +++ b/test/unit/mcp/projectValidateFromPath.test.ts @@ -25,6 +25,10 @@ class MockMcpServer { this.handlers.set(name, handler); } + public registerTool(name: string, _config: unknown, handler: ToolHandler): void { + this.handlers.set(name, handler); + } + public call(name: string, args: Record<string, unknown>): ReturnType<ToolHandler> { const h = this.handlers.get(name); if (!h) throw new Error(`Tool not registered: ${name}`); diff --git a/test/unit/mcp/propertiesTools.test.ts b/test/unit/mcp/propertiesTools.test.ts index 428506e5..d405b084 100644 --- a/test/unit/mcp/propertiesTools.test.ts +++ b/test/unit/mcp/propertiesTools.test.ts @@ -32,6 +32,10 @@ class MockMcpServer { this.handlers.set(name, handler); } + public 
registerTool(name: string, _config: unknown, handler: ToolHandler): void { + this.handlers.set(name, handler); + } + public call(name: string, args: Record<string, unknown>): ReturnType<ToolHandler> { const h = this.handlers.get(name); if (!h) throw new Error(`Tool not registered: ${name}`); diff --git a/test/unit/mcp/qualityHubApiTools.test.ts b/test/unit/mcp/qualityHubApiTools.test.ts index e5b72046..0f453c01 100644 --- a/test/unit/mcp/qualityHubApiTools.test.ts +++ b/test/unit/mcp/qualityHubApiTools.test.ts @@ -27,6 +27,10 @@ class MockMcpServer { this.handlers.set(name, handler); } + public registerTool(name: string, _config: unknown, handler: ToolHandler): void { + this.handlers.set(name, handler); + } + public call(name: string, args: Record<string, unknown>): ReturnType<ToolHandler> { const h = this.handlers.get(name); if (!h) throw new Error(`Tool not registered: ${name}`); diff --git a/test/unit/mcp/qualityHubTools.test.ts b/test/unit/mcp/qualityHubTools.test.ts index 6989b8a5..c7e8dc2a 100644 --- a/test/unit/mcp/qualityHubTools.test.ts +++ b/test/unit/mcp/qualityHubTools.test.ts @@ -22,6 +22,10 @@ class MockMcpServer { this.handlers.set(name, handler); } + public registerTool(name: string, _config: unknown, handler: ToolHandler): void { + this.handlers.set(name, handler); + } + public call(name: string, args: Record<string, unknown>): ReturnType<ToolHandler> { const h = this.handlers.get(name); if (!h) throw new Error(`Tool not registered: ${name}`); diff --git a/test/unit/mcp/rcaTools.test.ts b/test/unit/mcp/rcaTools.test.ts index 71a839c4..42ce4ec1 100644 --- a/test/unit/mcp/rcaTools.test.ts +++ b/test/unit/mcp/rcaTools.test.ts @@ -24,6 +24,10 @@ class MockMcpServer { this.handlers.set(name, handler); } + public registerTool(name: string, _config: unknown, handler: ToolHandler): void { + this.handlers.set(name, handler); + } + public call(name: string, args: Record<string, unknown>): ReturnType<ToolHandler> { const h = this.handlers.get(name); if (!h) throw new Error(`Tool not registered: ${name}`); diff --git a/test/unit/mcp/testCaseGenerate.test.ts 
b/test/unit/mcp/testCaseGenerate.test.ts index 3cac5233..c1586904 100644 --- a/test/unit/mcp/testCaseGenerate.test.ts +++ b/test/unit/mcp/testCaseGenerate.test.ts @@ -27,6 +27,10 @@ class MockMcpServer { this.handlers.set(name, handler); } + public registerTool(name: string, _config: unknown, handler: ToolHandler): void { + this.handlers.set(name, handler); + } + public call(name: string, args: Record<string, unknown>): ReturnType<ToolHandler> { const h = this.handlers.get(name); if (!h) throw new Error(`Tool not registered: ${name}`); diff --git a/test/unit/mcp/testCaseStepTools.test.ts b/test/unit/mcp/testCaseStepTools.test.ts index fe85d803..9e879b76 100644 --- a/test/unit/mcp/testCaseStepTools.test.ts +++ b/test/unit/mcp/testCaseStepTools.test.ts @@ -24,6 +24,10 @@ class MockMcpServer { this.handlers.set(name, handler); } + public registerTool(name: string, _config: unknown, handler: ToolHandler): void { + this.handlers.set(name, handler); + } + public call(name: string, args: Record<string, unknown>): ReturnType<ToolHandler> { const h = this.handlers.get(name); if (!h) throw new Error(`Tool not registered: ${name}`); diff --git a/test/unit/mcp/testCaseValidate.test.ts b/test/unit/mcp/testCaseValidate.test.ts index 279209b6..0aec459f 100644 --- a/test/unit/mcp/testCaseValidate.test.ts +++ b/test/unit/mcp/testCaseValidate.test.ts @@ -785,6 +785,11 @@ describe('registerTestCaseValidate handler', () => { public tool(...args: any[]): void { this.capturedHandler = args[args.length - 1] as (args: Record<string, unknown>) => Promise<unknown>; } + + // eslint-disable-next-line @typescript-eslint/no-explicit-any + public registerTool(...args: any[]): void { + this.capturedHandler = args[args.length - 1] as (args: Record<string, unknown>) => Promise<unknown>; + } } let capServer: CapturingServer; diff --git a/test/unit/mcp/testPlanTools.test.ts b/test/unit/mcp/testPlanTools.test.ts index e72eb7cd..cbef770c 100644 --- a/test/unit/mcp/testPlanTools.test.ts +++ b/test/unit/mcp/testPlanTools.test.ts @@ -26,6 +26,10 @@ class MockMcpServer { this.handlers.set(name, handler); } + public 
registerTool(name: string, _config: unknown, handler: ToolHandler): void { + this.handlers.set(name, handler); + } + public call(name: string, args: Record<string, unknown>): ReturnType<ToolHandler> { const h = this.handlers.get(name); if (!h) throw new Error(`Tool not registered: ${name}`); diff --git a/test/unit/mcp/testPlanValidate.test.ts b/test/unit/mcp/testPlanValidate.test.ts index 06db102a..2c257cf6 100644 --- a/test/unit/mcp/testPlanValidate.test.ts +++ b/test/unit/mcp/testPlanValidate.test.ts @@ -21,6 +21,10 @@ class MockMcpServer { this.handlers.set(name, handler); } + public registerTool(name: string, _config: unknown, handler: ToolHandler): void { + this.handlers.set(name, handler); + } + public call(name: string, args: Record<string, unknown>): ReturnType<ToolHandler> { const h = this.handlers.get(name); if (!h) throw new Error(`Tool not registered: ${name}`); diff --git a/test/unit/mcp/testSuiteValidate.test.ts b/test/unit/mcp/testSuiteValidate.test.ts index 335086c6..cbe45023 100644 --- a/test/unit/mcp/testSuiteValidate.test.ts +++ b/test/unit/mcp/testSuiteValidate.test.ts @@ -21,6 +21,10 @@ class MockMcpServer { this.handlers.set(name, handler); } + public registerTool(name: string, _config: unknown, handler: ToolHandler): void { + this.handlers.set(name, handler); + } + public call(name: string, args: Record<string, unknown>): ReturnType<ToolHandler> { const h = this.handlers.get(name); if (!h) throw new Error(`Tool not registered: ${name}`);