diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index ea50f213..660a47ad 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -2,7 +2,7 @@ name: CI on: pull_request: - branches: [develop, main] + branches: [main] paths: - "dashboard/**" - "tests/**" @@ -10,7 +10,7 @@ on: - "uv.lock" - ".github/workflows/ci.yml" push: - branches: [develop, main] + branches: [main] paths: - "dashboard/**" - "tests/**" diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 00000000..50cde338 --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,23 @@ +repos: + - repo: https://github.com/astral-sh/ruff-pre-commit + rev: v0.15.11 + hooks: + - id: ruff-check + args: [--fix, --exit-non-zero-on-fix] + - id: ruff-format + + - repo: https://github.com/pre-commit/mirrors-prettier + rev: v3.1.0 + hooks: + - id: prettier + files: ^dashboard/frontend/.*\.(ts|tsx|js|jsx|json|css|md|html|yaml|yml)$ + + - repo: local + hooks: + - id: frontend-typecheck + name: frontend type-check + entry: npm --prefix dashboard/frontend run build + language: system + files: ^dashboard/frontend/ + pass_filenames: false + always_run: true diff --git a/CHANGELOG.md b/CHANGELOG.md index c1a796f8..d0e8cce8 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,27 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). +## [0.33.0] - 2026-04-25 + +Plugin contract release. Five PRs merged in one day to unblock the EvoNexus Plugin Nutri (and any future plugin needing per-endpoint role enforcement, public token-bound portals, or safe uninstall). Plus a UX fix so `409 CONFLICT` from plugin install actually says *why* it conflicted. + +### Added + +- **`requires_role` on `PluginWritableResource`** (PR #55) — plugins can declare a list of roles allowed on each writable endpoint. The host returns `403` when `current_user.role` is not in the list. `'admin'` always passes (super-user override). Backwards compatible: resources without the field accept any authenticated user. +- **Auto-injected readonly bind params** (PR #55) — every `readonly_data` query receives `:current_user_id` and `:current_user_role` server-side. Plugins reference them directly in SQL for scoping (`WHERE primary_nutritionist_id = :current_user_id`). Both names are reserved — clients that try to spoof them via `?current_user_id=...` get `400`. +- **`public_pages` capability** (PR #53) — token-bound public portals at `/p/{slug}/{route_prefix}/{token}`. Token validated against a plugin-declared `token_source.column`. CSP, rate limit, and security headers applied. Read-only `readonly_data` queries can be exposed to the portal via `public_via` + `bind_token_param`. +- **HTML shell content negotiation** (PR #56) — when a request includes `text/html` in `Accept`, the host renders a minimal HTML shell that loads the plugin bundle as a module and instantiates the declared custom element with `data-token`. Programmatic clients (`Accept: application/javascript`, default `*/*`) keep getting the raw bundle. Plugins ship a single JS bundle and get a working browser experience for free. 
+- **`safe_uninstall` capability** (PR #54) — three-step uninstall wizard with `preserved_tables` (renamed to `_orphan_{slug}_*` instead of dropped), pre-uninstall hook (sandboxed: read-only DB, no `BRAIN_REPO_MASTER_KEY`), and required user confirmation (checkbox + typed phrase + ZIP password). Reinstall verifies SHA256 and restores access to preserved data. +- **Rate limit + security headers** (PR #52) — `flask-limiter` with in-memory storage on the public share endpoint and any future `/p/...` route. Five security headers applied to public responses (`Referrer-Policy`, `Cache-Control: no-store`, HSTS, `X-Content-Type-Options`, `Pragma`). + +### Fixed + +- **Plugin install wizard now shows the actual reason for `409 CONFLICT`.** The frontend was treating any 4xx as an opaque error string. Now `buildError` in `lib/api.ts` falls back to `data.conflicts[0]` when the standard `error`/`message` fields are absent (which is the case for the plugin preview endpoint), so a version mismatch shows up as `"409 CONFLICT: Plugin 'nutri' requires EvoNexus >= 0.33.0, but installed version is 0.32.3."` instead of just `"409 CONFLICT"`. `PluginInstallModal` also fixes the type of `conflicts` (was `Record`, the backend always returned `string[]`) and renders each conflict as a list item. + +### Compat + +- All existing plugins (PM Essentials, etc.) work unchanged. New manifest fields default to absent / `None` and the auto-injected bind params are silently ignored if the SQL doesn't reference them. The `409` body shape for plugin install was already `{conflicts: [...], manifest, ...}` — only the frontend's interpretation changed. + ## [0.32.3] - 2026-04-25 Patch release fixing a long-standing Workspace UI bug where folders refused to open and the dev console flooded with `400 Path is a directory` requests, plus a small UX win on the file share dialog (reuse existing share links instead of generating a new token every time). Also includes the upstream PR #51 (private-repo plugin update flow + ClickUp webhook compat + DetachedInstanceError). diff --git a/Dockerfile.dashboard b/Dockerfile.dashboard index 523f16d0..c23f48b0 100644 --- a/Dockerfile.dashboard +++ b/Dockerfile.dashboard @@ -18,7 +18,7 @@ FROM python:3.12-slim AS runtime # System deps RUN apt-get update && apt-get install -y --no-install-recommends \ - curl \ + curl git \ && rm -rf /var/lib/apt/lists/* # Install uv diff --git a/Dockerfile.swarm.dashboard b/Dockerfile.swarm.dashboard index e19a2f23..8b725806 100644 --- a/Dockerfile.swarm.dashboard +++ b/Dockerfile.swarm.dashboard @@ -43,7 +43,7 @@ FROM python:3.12-slim AS runtime # System deps: curl for healthcheck + Node.js 22 for terminal-server binary. # We use NodeSource so the final image is clean (no build toolchain). RUN apt-get update && apt-get install -y --no-install-recommends \ - curl ca-certificates gnupg openssl \ + curl ca-certificates gnupg openssl git \ && curl -fsSL https://deb.nodesource.com/setup_22.x | bash - \ && apt-get install -y --no-install-recommends nodejs \ && apt-get clean \ diff --git a/ROADMAP.md b/ROADMAP.md index df015adf..381c7550 100644 --- a/ROADMAP.md +++ b/ROADMAP.md @@ -1,135 +1,254 @@ # EvoNexus Roadmap -> Unofficial toolkit for Claude Code — AI-powered business operating system. -> -> This roadmap is updated regularly. Want to vote or suggest? [Open a discussion](https://github.com/EvolutionAPI/evo-nexus/discussions) or [create an issue](https://github.com/EvolutionAPI/evo-nexus/issues). 
+> Execution roadmap derived from the PRD review and current branch state. +> Last updated: 2026-04-24 --- -## Legend +## How To Read This -| Symbol | Meaning | -|--------|---------| -| `[ ]` | Not started | -| `[x]` | Done | -| `⚠️` | Breaking change | -| `🔥` | High priority | -| `💡` | Needs design discussion first | +- `Done` means implemented and validated locally. +- `Partial` means a scaffold or mitigation exists, but the item is not closed. +- `Next` means highest priority pending work. +- `Later` means important work that is gated by dependencies. --- -## v0.4 — Foundation & Stability +## Project Snapshot -> Fix, secure, and improve what already exists before growing. +- Delivered: the security hardening pass, backend maintainability work, frontend scaling work, and the phase 4 platform layer are in place. +- Open: env normalization edge cases in the last offline tooling paths. +- Focus now: close the remaining partial items before adding new feature surface area. -### Skills +--- -- [x] 🔥 **Evolution product skills** — `int-evolution-api` (33 commands), `int-evolution-go` (24 commands), `int-evo-crm` (48 commands) for managing instances, messages, contacts, conversations, pipelines via REST API. -- [x] **Version indicator & update alerts** — show current version in dashboard sidebar, alert when new GitHub releases are available. +## Current Baseline -### Developer Experience +| Area | Status | Notes | +|---|---|---| +| Security | Done | Login hardening, password policy, configurable CORS, env-first secret key handling, CSRF token plumbing, session token rotation, and 2FA/TOTP are in place. | +| Reliability | Done | Session GC, heartbeat timeout handling, health endpoints, backup hooks, restart supervision, and readiness/liveness probe split are in place. | +| Architecture | Partial | Alembic bootstrap, lazy blueprint loading, structured logs, terminal-server modularization, and restart supervision are in place. Remaining work: the last env normalization edge cases. | +| Frontend | Done | Route splitting, section-level error boundaries, theme toggle, PWA support, command palette, and mobile chat improvements are done. | +| Testing / CI | Done | Auth tests, terminal-server tests, CI, pre-commit hooks, and Playwright E2E coverage exist. | +| Platform | Done | Queue/cache abstractions, provider failover, observability, plugins, a shared platform broker, and PostgreSQL rollout wiring are in place. | -- [x] 🔥 **CLI installer** — `npx @evoapi/evo-nexus` — clones repo, installs deps, runs interactive setup wizard. -- [x] **Full Docker install** — `docker compose up dashboard` with multi-stage Dockerfile + GitHub Actions CI pushing to GHCR. -- [x] **Update checker** — dashboard checks GitHub releases and shows upgrade notification. -- [x] **settings.json** — project-level permissions (allow/deny), hooks configuration, thinking mode enabled. -- [x] **CLAUDE.md split** — reduced from 263 to 128 lines. Detailed config moved to `.claude/rules/` (agents, integrations, routines, skills). -- [x] **Inner-loop commands** — `/status` (workspace status) and `/review` (recent changes + next actions). 
+--- -### Dashboard UX +## Shipped So Far + +- Rate limiting on login +- Strong password policy +- Configurable CORS +- Env-first secret key handling +- Runtime config aliases for `DATABASE_URL` and `PORT` +- CSRF token plumbing for mutating requests +- Per-session token rotation and CSRF refresh on auth changes +- TOTP / 2FA enrollment, login, and disable flows for admins +- Readiness / liveness health endpoints +- Pre-commit hooks for backend lint, frontend format, and frontend type-check +- Pre-migration backup hook for SQLite +- Alembic-backed schema bootstrap +- Session GC in the terminal server +- Heartbeat, summary, and goal workers now follow the shared DB resolver +- WebSocket heartbeat timeout handling +- Terminal-server audit log +- Structured JSON logs for backend and terminal server +- Health and deep-health endpoints +- Healthcheck wiring in compose / swarm +- Session save debounce in the terminal server +- Lazy blueprint loading +- Terminal-server restart supervision +- Terminal-server runtime split into a wrapper plus dedicated runtime module +- Shared platform event broker between Flask and terminal-server +- Hot-path cache invalidation on platform events +- Frontend route splitting +- Section-level error boundaries +- Global theme toggle +- Command palette with Ctrl+K +- PWA manifest and service worker +- Mobile chat layout improvements +- AgentChat transcript shell extraction +- AgentChat block extraction into focused components +- Integrations page decomposition into focused modules +- Shared `PageSkeleton` loading states across major pages +- Provider failover routing and preflight selection +- Observability dashboard +- Plugin registry and install / uninstall flow +- PostgreSQL-ready runtime and DB adapters +- Compose and Swarm PostgreSQL rollout wiring +- CI workflow for backend, terminal server, and frontend build +- Playwright E2E coverage for setup, login, and protected-route navigation -- [x] **Sidebar reorganization** — 5 collapsible groups (Main, Operations, Data, System, Admin) with localStorage persistence. -- [x] **Active agent visualization** — Claude Code hooks track agent launches via `PreToolUse` events, writing to `agent-status.json`. Dashboard polls `/api/agents/active` and shows "RUNNING" badges with pulse animation on agent cards and overview. -- [x] **Agents page redesign** — unique icons and accent colors per agent, status dots, slash command badges, memory count pills, hover glow effects. -- [x] **Overview page redesign** — stat cards with icons and trend indicators, active agents bar, quick actions row, improved reports and routines tables. +--- -### Agent Generalization +## Delivery Roadmap -- [x] **Agent prompts generalized** — all 9 agent prompts cleaned of hardcoded personal references. User-specific context preserved in `_improvements.md` per agent memory folder. +### Phase 1 - Close Remaining Risk ---- +Goal: make the current stack safe enough for sensitive data and stable deployments. + +| Item | Priority | Status | Exit Criterion | +|---|---|---|---| +| D6 Backup before migrations | P0 | Done | Automatic backup runs before any schema change. | +| S4 CSRF tokens for mutations | P1 | Done | Mutating requests now require the XHR header plus a per-session CSRF token. | +| S5 Audit log for terminal server | P1 | Done | Sensitive actions are recorded with actor, timestamp, and target. | +| P5 WebSocket heartbeat timeout | P1 | Done | Dead connections are detected and closed automatically. 
| +| T2 Terminal-server integration tests | P1 | Done | WebSocket session lifecycle is covered by automated tests. | +| D1 / D2 Health checks and probes | P1 | Done | Separate readiness and liveness endpoints are exposed and the deploy healthchecks use the live probe. | + +Exit criterion: no remaining P0 security gaps, and unhealthy processes are detected automatically. + +### Phase 2 - Backend Maintainability + +Goal: reduce startup coupling and make migrations and runtime behavior explicit. -## v0.5 — Extensibility & Ecosystem +| Item | Priority | Status | Exit Criterion | +|---|---|---|---| +| A1 Alembic extraction | P1 | Done | Schema changes are versioned and reversible. | +| A2 Split `server.js` | P2 | Done | `server.js` now delegates to a dedicated runtime module and platform event broker. | +| D3 Structured logs | P2 | Done | Logs are JSON and searchable across services. | +| D4 Env-based config | P1 | Partial | Most runtime config is env-driven; `DATABASE_URL` and `PORT` aliases are supported, and the heartbeat, summary, and goal workers now follow the shared DB resolver, but a few offline tools still keep local-path fallbacks. | +| D5 Isolated process restart | P1 | Done | Terminal-server failure no longer brings down the dashboard. | +| P3 Debounce `saveSessionsToDisk` | P2 | Done | Disk writes are batched and bounded. | +| P4 Lazy blueprint loading | P2 | Done | Backend startup avoids eager loading every route. | -> Make EvoNexus composable and self-extending. +Exit criterion: backend startup is faster, migrations are explicit, and service boundaries are clearer. -### Agent System +### Phase 3 - Frontend Scale And UX -- [x] 🔥 **Generalize existing agents** — all 9 agent prompts generalized. User-specific context preserved in `_improvements.md` per agent memory folder. Adapter patterns documented as future work. -- [x] 🔥 **New business agents** — expand functional coverage: - - [x] **Marketing Agent** — orchestrate existing `mkt-*` skills, attribution, budget, full funnel - - [x] **HR / People Agent** — onboarding, 1:1s, performance reviews, hiring pipeline - - [x] **Customer Success Agent** — health score, churn prediction, NPS/CSAT, client onboarding - - [x] **Legal / Compliance Agent** — contracts, renewals, GDPR/LGPD, compliance checklists - - [x] **Product Agent** — discovery, feature prioritization (RICE/ICE), PLG metrics, feedback loop - - [x] **Data / BI Agent** — cross-area consolidated dashboard, unified KPIs, alerts, trend analysis -- [x] **Custom agents** — `custom-` prefix, gitignored, auto-discovered by dashboard (core/custom badges), `create-agent` skill, `create-command` skill -- [x] **Help agent (Oracle)** — `/oracle` answers questions about the workspace by reading the actual docs. No RAG needed — reads `docs/llms-full.txt` and source files directly +Goal: keep the dashboard maintainable as pages and workflows grow. -### Routines & Scheduling +| Item | Priority | Status | Exit Criterion | +|---|---|---|---| +| U1 Decompose `AgentChat.tsx` | P2 | Done | Core chat blocks and the transcript shell are split into focused components. | +| U2 Decompose `Integrations.tsx` | P2 | Done | Provider-specific logic, social accounts, and database flows are split into focused modules. | +| U3 Theme toggle | P3 | Done | Light and dark preference is user-controlled. | +| U4 PWA support | P3 | Done | Dashboard has a manifest and service worker. | +| U5 Skeleton states everywhere | P2 | Done | Every major page now uses the shared `PageSkeleton` loading surface. 
| +| U7 Command palette | P3 | Done | Keyboard-driven navigation is available. | +| 4.4 Mobile responsive chat | P2 | Done | Chat works on narrow screens without layout breakage. | -- [x] 🔥 **Trigger registry** — define and manage named triggers (webhook, cron, event-based) that invoke skills or routines -- [x] **Non-recurrent scheduled actions** — one-off scheduled tasks (e.g., "post this on LinkedIn Friday at 10am") without creating a full routine -- [x] **Systematic routines** — pure Python routines via `run_script()` — no AI, no tokens, no cost. `create-routine` skill generates the code +Exit criterion: the dashboard can absorb new pages without becoming a monolith. -### Integrations +### Phase 4 - Platform Expansion -- [ ] **Complete Obsidian integration** — finish `obs-*` skills: bidirectional sync, canvas, bases, CLI +Goal: unlock multi-provider, observability, and enterprise deployment paths. -### Import / Export +| Item | Priority | Status | Exit Criterion | +|---|---|---|---| +| A3 Message queue | P2 | Done | Queue events now flow through a shared broker between Flask and terminal server. | +| A4 Redis cache | P3 | Done | Provider, observability, and queue hot paths now use cache-backed reads with event-driven invalidation. | +| A5 PostgreSQL option | P2 | Done | Compose and Swarm now wire `DATABASE_URL` to a PostgreSQL service. | +| 4.1 Native provider failover | P2 | Done | Routing is configurable and the terminal server now preflights and falls back through ready providers. | +| 4.2 Observability dashboard | P2 | Done | Tokens, latency, queue, cache, and plugin state are visible in one place. | +| 4.3 Plugin system | P2 | Done | Third-party agent packs can be registered, installed, and removed safely. | +| 4.5 PostgreSQL backend | P2 | Done | DB adapter and migrations work across SQLite and PostgreSQL. | -- [x] **Backup system** — export workspace state as ZIP (agents, skills, routines, memory, config); import to restore. Support local, git, and cloud bucket targets. +Exit criterion: the platform can scale across load, providers, and deployment models. --- -## v0.12 — Engineering Layer +## Priority Backlog + +### Security + +- No open Phase 1 security items remain. + +### Reliability + +- No open Phase 1 reliability items remain. + +### Architecture + +- D4 Config normalization + +### Testing / CI -> Add a complete software development team alongside the business agents. -### Engineering Layer (delivered) +No open Testing / CI items remain. -- [x] 🔥 **19 engineering agents** — complete dev team derived from [oh-my-claudecode](https://github.com/yeachan-heo/oh-my-claudecode) (MIT, Yeachan Heo). Reasoning tier (opus): apex-architect, echo-analyst, compass-planner, raven-critic, lens-reviewer, zen-simplifier, vault-security. Execution tier (sonnet): bolt-executor, hawk-debugger, grid-tester, probe-qa, oath-verifier, trail-tracer, flow-git, scroll-docs, canvas-designer, prism-scientist. Speed tier (haiku): scout-explorer, quill-writer. -- [x] 🔥 **25 `dev-*` skills** — Tier 1 orchestration (15): `dev-autopilot`, `dev-plan`, `dev-ralplan`, `dev-deep-interview`, `dev-deep-dive`, `dev-external-context`, `dev-trace`, `dev-verify`, `dev-ultraqa`, `dev-visual-verdict`, `dev-ai-slop-cleaner`, `dev-sciomc`, `dev-team`, `dev-ccg`, `dev-ralph`. Tier 2 setup (5): `dev-mcp-setup`, `dev-deepinit`, `dev-project-session-manager`, `dev-configure-notifications`, `dev-release`. Tier 3 utilities (5): `dev-cancel`, `dev-remember`, `dev-ask`, `dev-learner`, `dev-skillify`. 
-- [x] **15 dev templates** — `.claude/templates/dev-*.md` for each agent's primary output (architecture-decision, work-plan, code-review, bug-report, verification-report, deep-interview-spec, security-audit, test-strategy, trace-report, explore-report, design-spec, analysis-report, research-brief, critique, simplification-report). -- [x] **`workspace/development/` folder** — 7 subfolders (architecture, plans, specs, reviews, debug, verifications, research) for engineering layer artifacts. Distinct from `workspace/projects/` (active git repos). -- [x] **Two-layer architecture documented** — `.claude/rules/agents.md`, `CLAUDE.md`, `docs/agents/overview.md`, `docs/agents/engineering-layer.md`, `docs/architecture.md`, `docs/introduction.md`, site `Home.tsx`. -- [x] **Open source attribution** — `NOTICE.md` at repo root with full MIT license, version pinned (v4.11.4), modifications listed. Credits in `README.md`. -- [x] **Pattern compliance** — all 19 engineering agents follow the EvoNexus standard: rich frontmatter (Examples + commentary), Workspace Context, Shared Knowledge Base, Working Folder, Identity, Anti-patterns, Domain, How You Work, Skills You Can Use, Handoffs, Output Format, Continuity. Verified by `@lens-reviewer` (3 fixes applied: oath-verifier disallowedTools, raven-critic and trail-tracer Skills section). +### Frontend / UX -### Cross-layer pipelines (now possible) +- No open Phase 3 frontend items remain. -- [x] **End-to-end implementation** — `dev-autopilot` orchestrates spec → plan → code → QA → validation across multiple engineering agents. -- [x] **High-stakes consensus planning** — `dev-ralplan` runs Planner/Architect/Critic consensus loop with RALPLAN-DR structured deliberation. -- [x] **Bug investigation** — `@trail-tracer` (multi-hypothesis) → `@hawk-debugger` (root cause + minimal fix) → `@oath-verifier` (regression check). -- [x] **Pre-merge gate** — `@lens-reviewer` (code quality) → `@vault-security` (OWASP audit) → `dev-ultraqa` (build/test/fix loop) → `@oath-verifier` (acceptance criteria). +### Platform Expansion + +- No open Platform Expansion items remain. --- -## v1.0 — Community & Growth +## Sprint Execution Plan + +Priority inside each sprint runs top to bottom. The `Depends On` column lists hard blockers only; the sprint order itself also reflects risk reduction and release sequencing. + +Implementation status: Sprint 1 is complete, Sprint 2 is partially complete, Sprint 3 is complete, and Sprint 4 is complete. + +### Sprint 1 - Security And Deploy Gates + +Goal: close the items that reduce blast radius and make production checks reliable. + +| Item | Priority | Depends On | Why It Is Here | +|---|---|---|---| +| S4 CSRF tokens for mutations | P1 | None | Must land before any new write-heavy surface area expands. | +| D1 / D2 health checks and probes | P1 | None | Separate readiness and liveness before more rollout work. | +| S6 session key rotation | P2 | None | Completes the auth hardening pass while the auth path is already being touched. | + +### Sprint 2 - Config And Auth Hardening -> Community adoption, discoverability, and self-sustaining ecosystem. +Goal: normalize runtime configuration and finish the remaining auth hardening. -### Community & Docs +| Item | Priority | Depends On | Why It Is Here | +|---|---|---|---| +| D4 config normalization | P1 | None | Remove the last hardcoded runtime assumptions before deeper refactors. | +| S7 2FA / TOTP for admins | P2 | S4 + S6 | Builds on the hardened login and session flow. 
|
+| A2 `server.js` modularization | P2 | D4 | Refactor after the config surface is stable. |
+| T5 pre-commit hooks | P2 | None (done) | Lint, format, and frontend type-check gates run before commits. |
 
-- [x] 🔥 **Public roadmap** — this file. Community input welcome via [discussions](https://github.com/EvolutionAPI/evo-nexus/discussions).
-- [x] **Telegram & Discord channels** — activate community channels, document in README and docs site.
-- [ ] **In-app tutorials** — contextual tutorials surfaced inside the dashboard, not just external docs.
-- [x] **Resume Claude sessions in chat** — list active/resumable Claude sessions in dashboard chat with `--resume` support.
+### Sprint 3 - Verification And Scale Prep
 
-### Development
+Goal: prove the stable flows end to end and prepare the platform layer for scale.
 
-- [ ] **Testing framework** — define and implement test strategy for skills, routines, and agent behaviors; prevent regressions.
+| Item | Priority | Depends On | Why It Is Here |
+|---|---|---|---|
+| T3 Playwright E2E | P2 | None (done) | Validates the core user journeys after the config and auth work settle. |
+| A3 Message queue | P2 | None (done) | Async coordination is now handled by the shared platform broker. |
+| A4 Redis cache | P3 | None (done) | Hot-path caching is now active with invalidation on platform events. |
+
+### Sprint 4 - Production Rollout
+
+Goal: finish the remaining deployment wiring for PostgreSQL-backed production use.
+
+| Item | Priority | Depends On | Why It Is Here |
+|---|---|---|---|
+| A5 PostgreSQL option | P2 | None (done) | PostgreSQL rollout wiring is now present in both Compose and the Swarm stack. |
 
 ---
 
-## Contributing
+## Dependency Rules
 
-Want to help? Pick any `[ ]` item and:
+- S4 should land before any new write-heavy surfaces.
+- P5 should land before more WebSocket features are built.
+- 4.1 should reuse the current Smart Router instead of duplicating provider loading logic.
+- D4 remains the only partially open configuration cleanup item.
 
-1. Check [open issues](https://github.com/EvolutionAPI/evo-nexus/issues)
-2. Read [CONTRIBUTING.md](CONTRIBUTING.md)
-3. For `💡` items, open a [discussion](https://github.com/EvolutionAPI/evo-nexus/discussions) first — design is still open
+---
+
+## Success Metrics
+
+| Metric | Target |
+|---|---|
+| First Contentful Paint | < 1.5s |
+| Bundle size gzipped | < 400KB |
+| Auth coverage | >= 80% |
+| Terminal server WebSocket coverage | >= 70% |
+| Login brute-force attempts | Bounded by lockout policy |
+| Session GC lag | <= 24h idle |
+| Backend startup time | < 2s |
 
 ---
 
-*Last updated: 2026-04-10 — [Evolution Foundation](https://evolutionfoundation.com.br)*
+## Notes
+
+- This file replaces the old v0.4-v1.0 milestone list.
+- The next concrete delivery gate is D4 cleanup, which should be treated as the current execution queue.
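A note on the S4 row above, since CSRF plumbing is easy to wire subtly wrong: the pattern described is a double check, a custom XHR header plus a per-session token. Below is a minimal sketch of that check, assuming hypothetical names; the real implementation lives in `request_security.py` and `session_security.py`, and the `/api/auth/csrf` route and `X-CSRF-Token` header that appear later in this diff are the shipped equivalents.

```python
# Hedged sketch of the S4 double-check: custom XHR header + per-session
# CSRF token on every mutating request. Names here are illustrative.
import secrets

from flask import Flask, abort, request, session

app = Flask(__name__)
app.secret_key = "dev-only-secret"

MUTATING = {"POST", "PUT", "PATCH", "DELETE"}

@app.before_request
def require_csrf():
    if request.method not in MUTATING:
        return
    # First factor: a custom header that browsers will not attach cross-site.
    if request.headers.get("X-Requested-With") != "XMLHttpRequest":
        abort(403)
    # Second factor: the token minted for this session.
    expected = session.get("csrf_token")
    if not expected or request.headers.get("X-CSRF-Token") != expected:
        abort(403)

@app.get("/api/auth/csrf")
def issue_csrf():
    # setdefault keeps one stable token per session.
    token = session.setdefault("csrf_token", secrets.token_urlsafe(32))
    return {"csrf_token": token}
```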
diff --git a/SPEC.json b/SPEC.json
new file mode 100644
index 00000000..e8ee71f1
--- /dev/null
+++ b/SPEC.json
@@ -0,0 +1,84 @@
+{
+  "projeto": "EvoNexus Multiagentes - Agro Roraima Contábil",
+  "descricao": "Local-first Autonomous Multi-Agent System (SMA) based on the OpenClaw framework, focused on the accounting, tax, and administrative management of Agribusiness.",
+  "sprints": [
+    {
+      "id": 1,
+      "nome": "AI Routing and Scale",
+      "descricao": "Implementation of the Smart Router with failure support and a control panel.",
+      "entregavel": "Backend, Frontend, and Terminal Server integrated with dynamic provider switching.",
+      "risco": "high",
+      "pre_requisito": null,
+      "features": [
+        {
+          "categoria": "ia_agent",
+          "descricao": "The system supports dynamic provider switching in real time.",
+          "steps": [
+            "User selects a provider (OpenRouter, Anthropic, Google, etc.) in the admin panel.",
+            "Backend (Flask) receives the HTTP POST request.",
+            "Backend fires an internal notification webhook to the Terminal Server.",
+            "Terminal Server invokes `ClaudeBridge.invalidateAllSessions()` and reloads `providers.json`.",
+            "New sub-calls start using the newly selected provider."
+          ],
+          "edge_cases": [
+            "What if Provider 1 and Provider 2 both go down: start Waterfall Fallback routing, automatically jumping to the next API on the list (Google, OpenAI) without dropping the client's transaction.",
+            "What if the API rate limit is hit: the backend performs automatic retry with exponential backoff.",
+            "What if the API times out: abort the call after 60 seconds (graceful shutdown) and advance to the fallback provider.",
+            "What if permissions fail: only users with the 'Admin' role may perform this switch (Status Code 403 Forbidden for non-admins)."
+          ]
+        }
+      ]
+    },
+    {
+      "id": 2,
+      "nome": "Auth Hardening and Security",
+      "descricao": "Hardening the API against brute-force attacks and enforcing access control.",
+      "entregavel": "Robust authentication backed by the database, with managed lockouts.",
+      "risco": "medium",
+      "pre_requisito": null,
+      "features": [
+        {
+          "categoria": "auth",
+          "descricao": "Limit failed login attempts per user or IP.",
+          "steps": [
+            "System receives a POST /api/login request with strict input validation.",
+            "On a wrong password, a record is inserted into the `login_throttles` table in the database (SQLite/PostgreSQL).",
+            "Block the endpoint for 5 minutes after 5 consecutive failures (Status Code 429 Too Many Requests).",
+            "Block the endpoint for 30 minutes after 10 consecutive failures (Status Code 429 Too Many Requests)."
+          ],
+          "edge_cases": [
+            "What if login concurrency is massive: the ORM manages the connection pool to avoid DB locks.",
+            "What if the user presents an expired CSRF token: return Status Code 403 (Invalid Token).",
+            "What if an attack sends a gigantic payload: the server rejects payloads > 5MB on the public login routes (Status Code 413 Payload Too Large)."
+          ]
+        }
+      ]
+    },
+    {
+      "id": 3,
+      "nome": "SPED EFD-Contribuições",
+      "descricao": "Large-scale automation via the fiscal agent, based on IN RFB nº 2.305/2024.",
+      "entregavel": "Automation skill `sped_cli.py` integrated with the AI agents, with batch reading.",
+      "risco": "high",
+      "pre_requisito": 1,
+      "features": [
+        {
+          "categoria": "api_endpoint",
+          "descricao": "Intelligent, large-scale file extraction with compilation into the standard SPED layout.",
+          "steps": [
+            "Agent receives a file or folder containing invoices and receipts.",
+            "Invocation of the local skill `sped_cli.py batch-process`.",
+            "Automatic validation of the structural blocks (e.g. 0000, 0001, 0100) via a deterministic script.",
+            "Generation of a formatted file with the pipe delimiter (|)."
+          ],
+          "edge_cases": [
+            "What if the billing files add up to 80MB: the limit configured for this bulk processing is 100MB+, guaranteeing the full monthly batch parses.",
+            "What if a block is missing metadata: apply auto-fill rules or use empty separators (||) as the legislative syntax permits.",
+            "What if the AI model hallucinates in the arithmetic blocks: the binary execution intercepts it and the verification script (`validate`) emits an internal Status 500 requesting human correction.",
+            "What if the server runs out of memory (OOM) because of the 100MB: implement streamed parsing (chunks) in the Python script so the VPS RAM is not overloaded."
+          ]
+        }
+      ]
+    }
+  ]
+}
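The sprint 2 thresholds above are precise enough to pin down in code. A minimal sketch of just the lockout-window math, under the assumption that attempt counting and storage (the `login_throttles` table) happen elsewhere; helper and variable names are illustrative:

```python
# Lockout windows from the spec: 5 consecutive failures -> 5 minutes,
# 10 or more -> 30 minutes. The route should answer 429 Too Many Requests
# until the deadline passes. Storage and counting are assumed to live elsewhere.
from datetime import datetime, timedelta, timezone

LOCKOUTS = [  # checked strictest-first
    (10, timedelta(minutes=30)),
    (5, timedelta(minutes=5)),
]

def lockout_until(failed_attempts: int, last_failure: datetime) -> datetime | None:
    """Return when the login endpoint unblocks, or None if not locked."""
    for threshold, window in LOCKOUTS:
        if failed_attempts >= threshold:
            return last_failure + window
    return None

now = datetime(2026, 4, 25, 12, 0, tzinfo=timezone.utc)
assert lockout_until(4, now) is None
assert lockout_until(6, now) == now + timedelta(minutes=5)
assert lockout_until(12, now) == now + timedelta(minutes=30)
```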
+ ] + } + ] + }, + { + "id": 3, + "nome": "SPED EFD-Contribuições", + "descricao": "Automação massiva via agente fiscal com base na IN RFB nº 2.305/2024.", + "entregavel": "Skill de automação `sped_cli.py` integrada aos agentes de IA com leitura em lote.", + "risco": "alto", + "pre_requisito": 1, + "features": [ + { + "categoria": "api_endpoint", + "descricao": "Extração inteligente e massiva de arquivos com compilação de layout padrão SPED.", + "steps": [ + "Agente recebe arquivo ou pasta contendo faturas e recibos.", + "Invocação da skill local `sped_cli.py batch-process`.", + "Validação automática dos blocos estruturais (ex: 0000, 0001, 0100) via script determinístico.", + "Geração de arquivo formatado com delimitador pipe (|)." + ], + "edge_cases": [ + "E se os arquivos de faturamento somarem 80MB: O limite configurado para este processamento massivo é de 100MB+, garantindo o parse do lote mensal completo.", + "E se faltar metadados num bloco: Aplicar regras de auto-fill ou usar separadores vazios (||) conforme a sintaxe legislativa.", + "E se o modelo de IA alucinar nos blocos matemáticos: A execução binária intercepta e o script de conferência (`validate`) emite Status 500 Interno pedindo correção humana.", + "E se o servidor ficar sem memória (OOM) devido aos 100MB: Implementar stream de parsing (chunks) no script Python para não sobrecarregar a RAM da VPS." + ] + } + ] + } + ] +} diff --git a/SPEC.md b/SPEC.md new file mode 100644 index 00000000..7d63c7c2 --- /dev/null +++ b/SPEC.md @@ -0,0 +1,57 @@ +# SPEC: EvoNexus & Agro Roraima Contábil + +## Visão Geral +Sistema Multiagentes Autônomos (SMA) local-first baseado no framework OpenClaw, voltado para automação de processos contábeis, tributários e burocráticos do Agronegócio (Agro Roraima Contábil). O painel administrativo (Dashboard) integra gestão de usuários, observabilidade de integrações (WhatsApp, Telegram) e controle total do pipeline de agentes. + +## Sprint Atual: Configuração e Escala +**Entregável:** Sistema estável com normalização de variáveis de ambiente, roteamento de provedores (OpenRouter) e painéis de observabilidade operacionais. +**Risco:** Alto (Lida com dados sensíveis de folha de pagamento e impostos federais). + +--- + +### Feature 1: Roteamento de Provedores de IA (Smart Router) +**Categoria:** ia_agent +**Descrição:** O sistema deve suportar falha e troca dinâmica de provedores (ex: OpenRouter para Anthropic), derrubando sessões antigas para evitar loops e travamentos. + +**Steps:** +1. Painel frontend permite alteração do provedor principal. +2. Backend (Flask) notifica o Terminal Server via rota HTTP. +3. Terminal Server invalida sessões PTY ativas (`ClaudeBridge.invalidateAllSessions()`). +4. Agentes reconectam usando as novas chaves. + +**Edge cases (Tratamento Automático):** +- E se o novo provedor estiver fora do ar: Fallback configurado no `provider-config.js`. +- Limite de tokens (Rate Limit do provedor): Backoff exponencial e retry. +- Modelo indisponível: Reverter para o modelo secundário definido na configuração. + +--- + +### Feature 2: Proteção Contra Força Bruta (Auth Hardening) +**Categoria:** auth +**Descrição:** Limitar tentativas de login falhas por usuário/IP para proteger contra ataques de força bruta. + +**Steps:** +1. Usuário tenta login com credenciais inválidas. +2. Registro de `login_throttles` é incrementado no banco de dados. +3. Se tentativas falhas == 5, aplicar bloqueio de 5 minutos. +4. Se tentativas falhas >= 10, aplicar bloqueio de 30 minutos. 
diff --git a/config/plugin-registry.json b/config/plugin-registry.json
new file mode 100644
index 00000000..9a9390b4
--- /dev/null
+++ b/config/plugin-registry.json
@@ -0,0 +1,40 @@
+{
+  "plugins": [
+    {
+      "id": "observability-scout",
+      "name": "Observability Scout",
+      "version": "1.0.0",
+      "category": "observability",
+      "description": "Installs a read-only agent pack for platform telemetry and incident triage.",
+      "agents": [
+        {
+          "name": "observability-scout",
+          "description": "Read-only observability analyst for EvoNexus. Uses metrics, health, costs, and provider routing data to identify regressions.",
+          "model": "haiku",
+          "color": "cyan",
+          "memory": "project",
+          "disallowedTools": ["Write", "Edit", "Bash", "NotebookEdit"],
+          "prompt": "You are Observability Scout.\nAnalyze EvoNexus platform health, provider routing, queue/cache status, and cost trends.\nRecommend concrete fixes with clear severity. Do not edit files."
+        }
+      ]
+    },
+    {
+      "id": "provider-router",
+      "name": "Provider Router",
+      "version": "1.0.0",
+      "category": "platform",
+      "description": "Installs a read-only agent pack for provider failover and routing audits.",
+      "agents": [
+        {
+          "name": "provider-router",
+          "description": "Read-only provider routing specialist for EvoNexus. Reviews failover order, provider health, and selected model mode.",
+          "model": "haiku",
+          "color": "violet",
+          "memory": "project",
+          "disallowedTools": ["Write", "Edit", "Bash", "NotebookEdit"],
+          "prompt": "You are Provider Router.\nAudit provider health, failover order, and model compatibility.\nRecommend a safe routing chain and explain why a fallback should or should not be activated."
+        }
+      ]
+    }
+  ]
+}
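Both packs in this registry are meant to be read-only, which holds only if every agent keeps the mutating tools in `disallowedTools`. A small check along these lines could guard that invariant in CI; the helper is hypothetical, not shipped code:

```python
# Hypothetical CI guard: every agent in config/plugin-registry.json must
# disallow all mutating tools, since these packs are advertised as read-only.
import json
from pathlib import Path

MUTATING_TOOLS = {"Write", "Edit", "Bash", "NotebookEdit"}

def assert_read_only(registry_path: str = "config/plugin-registry.json") -> None:
    registry = json.loads(Path(registry_path).read_text(encoding="utf-8"))
    for plugin in registry["plugins"]:
        for agent in plugin.get("agents", []):
            missing = MUTATING_TOOLS - set(agent.get("disallowedTools", []))
            if missing:
                raise ValueError(
                    f"{plugin['id']}/{agent['name']} allows {sorted(missing)}"
                )

if __name__ == "__main__":
    assert_read_only()
```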
+ } + ] + } + ] +} diff --git a/config/providers.example.json b/config/providers.example.json index 17a4d61f..25a747ae 100644 --- a/config/providers.example.json +++ b/config/providers.example.json @@ -1,5 +1,17 @@ { - "active_provider": "anthropic", + "active_provider": "openrouter", + "routing": { + "enabled": true, + "failover_order": [ + "openrouter", + "anthropic", + "openai", + "codex_auth", + "gemini", + "bedrock", + "vertex" + ] + }, "providers": { "anthropic": { "name": "Anthropic (Claude nativo)", diff --git a/dashboard/backend/app.py b/dashboard/backend/app.py index 2dccb597..e070c93c 100644 --- a/dashboard/backend/app.py +++ b/dashboard/backend/app.py @@ -8,10 +8,29 @@ from datetime import timedelta from dotenv import load_dotenv -from flask import Flask, send_from_directory, request, jsonify +from flask import Flask, send_from_directory, request, jsonify, abort from flask_cors import CORS from flask_login import LoginManager, current_user, login_user +# TKR Security Hardening imports +try: + from runtime_config import load_dashboard_runtime_config + _runtime_config = load_dashboard_runtime_config +except ImportError: + _runtime_config = None +try: + from structured_logging import install_request_logging +except ImportError: + install_request_logging = None +try: + from request_security import require_xhr +except ImportError: + require_xhr = None +try: + from session_security import attach_session_token +except ImportError: + attach_session_token = None + # Workspace root: two levels up from backend/ WORKSPACE = Path(__file__).resolve().parent.parent.parent @@ -42,6 +61,28 @@ def _cors_allowed_origins(): return "*" if not _is_production() else [] app = Flask(__name__, static_folder=None) + +class ProxyFixWSGI: + def __init__(self, wsgi_app): + self.wsgi_app = wsgi_app + + def __call__(self, environ, start_response): + if environ.get('HTTP_UPGRADE', '').lower() == 'websocket': + environ['wsgi.websocket'] = True + return self.wsgi_app(environ, start_response) + +app.wsgi_app = ProxyFixWSGI(app.wsgi_app) + +class ProxyFixWSGI: + def __init__(self, wsgi_app): + self.wsgi_app = wsgi_app + def __call__(self, environ, start_response): + print('PROXY_FIX:', environ.get('HTTP_UPGRADE', ''), flush=True) + if environ.get('HTTP_UPGRADE', '').lower() == 'websocket': + environ['wsgi.websocket'] = True + return self.wsgi_app(environ, start_response) + +app.wsgi_app = ProxyFixWSGI(app.wsgi_app) # Persist secret key so sessions survive restarts _secret_key = os.environ.get("EVONEXUS_SECRET_KEY") if not _secret_key: @@ -91,7 +132,19 @@ def _cors_allowed_origins(): # Flask <2.2 exposed this through app.config; keep compatibility. app.config["JSON_AS_ASCII"] = False -CORS(app, origins=_cors_allowed_origins(), supports_credentials=True) +CORS( + app, + origins=_cors_allowed_origins(), + supports_credentials=True, + allow_headers=["Content-Type", "X-Requested-With", "X-CSRF-Token", "Authorization"], + expose_headers=["X-CSRF-Token"], +) + +# --------------- Rate limiting (in-memory, single-process Flask) --------------- +# Vault audit §2.S1 CRITICAL: all public endpoints require rate limiting. +# The limiter singleton lives in rate_limit.py to avoid circular imports with blueprints. 
+from rate_limit import limiter
+limiter.init_app(app)
 
 # --------------- Database ---------------
 from models import db, User, BrainRepoConfig, needs_setup, seed_roles, seed_systems
@@ -603,6 +656,27 @@ def _cors_allowed_origins():
             _conn.commit()
         # --- End Wave 2.2r migration ---
 
+        # --- B3 safe_uninstall migration: plugin_orphans table ---
+        _existing_tables_b3 = {row[0] for row in _cur.execute("SELECT name FROM sqlite_master WHERE type='table'").fetchall()}
+        if "plugin_orphans" not in _existing_tables_b3:
+            _cur.executescript("""
+                CREATE TABLE IF NOT EXISTS plugin_orphans (
+                    id TEXT PRIMARY KEY,
+                    slug TEXT NOT NULL,
+                    tablename TEXT NOT NULL,
+                    orphaned_at TEXT NOT NULL,
+                    orphaned_by_user_id INTEGER,
+                    original_plugin_version TEXT,
+                    original_sha256 TEXT,
+                    original_publisher_url TEXT,
+                    recovered_at TEXT,
+                    UNIQUE(slug, tablename)
+                );
+                CREATE INDEX IF NOT EXISTS idx_plugin_orphans_slug ON plugin_orphans(slug);
+            """)
+            _conn.commit()
+        # --- End B3 safe_uninstall migration ---
+
         # Fix corrupted datetime columns (NULL or non-string values crash SQLAlchemy)
         for _tbl, _col in [("roles", "created_at"), ("users", "created_at"), ("users", "last_login")]:
             try:
@@ -743,6 +817,7 @@ def unauthorized():
         "/api/auth/login",
         "/api/auth/needs-setup",
         "/api/auth/setup",
+        "/api/auth/csrf",
         "/api/health",
         "/api/auth/needs-onboarding",
         "/api/config/workspace-status",
@@ -815,6 +890,21 @@ def auth_middleware():
     if not current_user.is_authenticated:
         return jsonify({"error": "Authentication required"}), 401
 
+# --------------- TKR Security Headers ---------------
+@app.after_request
+def attach_security_headers(response):
+    """Attach security-related headers to every response."""
+    response.headers.setdefault("X-Content-Type-Options", "nosniff")
+    response.headers.setdefault("X-Frame-Options", "DENY")
+    response.headers.setdefault("Referrer-Policy", "strict-origin-when-cross-origin")
+    # Attach session token if module available
+    if attach_session_token is not None:
+        try:
+            attach_session_token(response)
+        except Exception:
+            pass
+    return response
+
 # --------------- Register blueprints ---------------
 from routes.overview import bp as overview_bp
 from routes.workspace import bp as workspace_bp
@@ -847,9 +937,11 @@ def auth_middleware():
 from routes.knowledge_public import bp as knowledge_public_bp
 from routes.knowledge_proxy import bp as knowledge_proxy_bp
 from routes.knowledge_v1 import bp as knowledge_v1_bp
+from routes.agent_knowledge import bp as agent_knowledge_bp
 from routes.databases import bp as databases_bp
 from routes.plugins import bp as plugins_bp
 from routes.mcp_servers import bp as mcp_servers_bp
+from routes.plugin_public_pages import bp as plugin_public_pages_bp
 
 # Brain Repo + Onboarding blueprints (loaded after routes are created)
 try:
@@ -890,6 +982,6 @@ def auth_middleware():
 app.register_blueprint(triggers_bp)
 app.register_blueprint(terminal_proxy_bp)
 
 # Mount the terminal-server WebSocket proxy on the same Sock instance the
 # rest of the app uses. Done after the blueprint is registered so route
 # names are unique.
Without this, browsers connecting from a host other @@ -919,9 +1016,19 @@ def auth_middleware(): app.register_blueprint(knowledge_public_bp) app.register_blueprint(knowledge_proxy_bp) app.register_blueprint(knowledge_v1_bp) +app.register_blueprint(agent_knowledge_bp) app.register_blueprint(databases_bp) app.register_blueprint(plugins_bp) app.register_blueprint(mcp_servers_bp) +# B2.0: plugin public pages (unauthenticated, token-bound portals) +app.register_blueprint(plugin_public_pages_bp) + +# TKR Platform routes (observability, cache, queue) +try: + from routes.platform import bp as platform_bp + app.register_blueprint(platform_bp) +except ImportError: + pass # Platform module not available # --------------- Social Auth blueprints --------------- from auth.youtube import bp as youtube_auth_bp diff --git a/dashboard/backend/blueprint_registry.py b/dashboard/backend/blueprint_registry.py new file mode 100644 index 00000000..a6cf21d2 --- /dev/null +++ b/dashboard/backend/blueprint_registry.py @@ -0,0 +1,63 @@ +from __future__ import annotations + +from importlib import import_module + + +BLUEPRINT_MODULES = [ + "routes.overview", + "routes.workspace", + "routes.agents", + "routes.routines", + "routes.skills", + "routes.templates_routes", + "routes.memory", + "routes.costs", + "routes.config", + "routes.integrations", + "routes.scheduler", + "routes.services", + "routes.auth_routes", + "routes.systems", + "routes.docs", + "routes.mempalace", + "routes.tasks", + "routes.triggers", + "routes.backups", + "routes.providers", + "routes.settings", + "routes.shares", + "routes.heartbeats", + "routes.goals", + "routes.tickets", + "routes.platform", + "routes.health", + "routes.knowledge", + "routes.knowledge_public", + "routes.knowledge_proxy", + "routes.knowledge_v1", + "routes.agent_knowledge", + "routes.databases", +] + +SOCIAL_BLUEPRINT_MODULES = [ + "auth.youtube", + "auth.instagram", + "auth.linkedin", + "auth.twitter", + "auth.tiktok", + "auth.twitch", +] + + +def _register_modules(app, module_names): + for module_name in module_names: + module = import_module(module_name) + blueprint = getattr(module, "bp", None) + if blueprint is None: + raise RuntimeError(f"Blueprint module {module_name} does not expose bp") + app.register_blueprint(blueprint) + + +def register_blueprints(app) -> None: + _register_modules(app, BLUEPRINT_MODULES) + _register_modules(app, SOCIAL_BLUEPRINT_MODULES) diff --git a/dashboard/backend/brain_repo/job_runner.py b/dashboard/backend/brain_repo/job_runner.py index 3099b558..3fb81e50 100644 --- a/dashboard/backend/brain_repo/job_runner.py +++ b/dashboard/backend/brain_repo/job_runner.py @@ -37,6 +37,7 @@ import logging import shutil +import subprocess import threading from datetime import datetime, timezone from pathlib import Path @@ -338,6 +339,87 @@ def _decrypt_snapshot_token(encrypted: bytes) -> str: # Top-level pipelines # ──────────────────────────────────────────────────────────────────────── +def _authenticated_url(repo_url: str, token: str) -> str: + """Return repo_url with a one-shot token for git network commands.""" + if token and "://" in repo_url: + scheme, rest = repo_url.split("://", 1) + if "@" in rest: + rest = rest.split("@", 1)[1] + return f"{scheme}://{token}@{rest}" + return repo_url + + +def _run_git_checked( + args: list[str], + *, + cwd: Path | None = None, + timeout: int = 60, + token: str = "", +) -> subprocess.CompletedProcess: + """Run git and raise a compact, token-masked error on failure.""" + result = subprocess.run( + ["git", *args], + 
cwd=cwd, + check=False, + capture_output=True, + text=True, + timeout=timeout, + ) + if result.returncode != 0: + stderr = (result.stderr or result.stdout or "")[:500] + command = " ".join(args) + if token: + stderr = stderr.replace(token, "***") + command = command.replace(token, "***") + raise RuntimeError(f"git {command} failed: {stderr}") + return result + + +def _set_clean_origin(repo_dir: Path, repo_url: str) -> None: + """Ensure origin is configured without credentials persisted in .git/config.""" + remotes = _run_git_checked(["remote"], cwd=repo_dir).stdout.splitlines() + if "origin" in remotes: + _run_git_checked(["remote", "set-url", "origin", repo_url], cwd=repo_dir) + else: + _run_git_checked(["remote", "add", "origin", repo_url], cwd=repo_dir) + + +def _ensure_checkout_branch(repo_dir: Path) -> None: + """Make an empty clone usable without disturbing a normal cloned branch.""" + head = subprocess.run( + ["git", "rev-parse", "--verify", "HEAD"], + cwd=repo_dir, + check=False, + capture_output=True, + text=True, + timeout=10, + ) + if head.returncode == 0: + branch = _run_git_checked(["branch", "--show-current"], cwd=repo_dir).stdout.strip() + if branch: + return + + # Empty repositories and detached HEAD checkouts need a branch before the + # skeleton commit can be created. GitHub-created repos default to main. + _run_git_checked(["checkout", "-B", "main"], cwd=repo_dir) + + +def _clone_brain_repo(repo_url: str, token: str, local_path: Path) -> None: + """Create a fresh local working tree for an existing or empty remote repo.""" + if local_path.exists(): + shutil.rmtree(local_path, ignore_errors=True) + local_path.parent.mkdir(parents=True, exist_ok=True) + + auth_url = _authenticated_url(repo_url, token) + _run_git_checked( + ["clone", auth_url, str(local_path)], + timeout=180, + token=token, + ) + _set_clean_origin(local_path, repo_url) + _ensure_checkout_branch(local_path) + + def run_sync_pipeline( flask_app, user_id: int, @@ -428,58 +510,42 @@ def run_bootstrap_pipeline( repo_name: str, owner_username: str, github_username: str, + sync_after: bool = False, + workspace: Path | None = None, + tag_name: str | None = None, + commit_message: str | None = None, ) -> None: - """Bootstrap a freshly-created empty GitHub repo with the skeleton. + """Clone/bootstrap a GitHub brain repo and persist the local working tree. - Runs the same logic as routes.brain_repo._initialize_remote_brain_repo - but inside the job_runner lock so it serializes with sync operations, - and persists the final local_path into BrainRepoConfig so the UI stops - showing "initializing…". + Existing valid brain repos are cloned as-is. Empty or incomplete repos get + the standard skeleton before local_path is persisted to BrainRepoConfig. 
""" - import subprocess from models import BrainRepoConfig, db # type: ignore[import] with _job_lock: error: str | None = None local_path_str: str | None = None try: - workspace = Path(__file__).resolve().parent.parent.parent.parent - base_dir = workspace / "dashboard" / "data" / "brain-repos" + repo_root = Path(__file__).resolve().parent.parent.parent.parent + workspace_root = workspace or repo_root + base_dir = repo_root / "dashboard" / "data" / "brain-repos" base_dir.mkdir(parents=True, exist_ok=True) local_path = base_dir / repo_name - if local_path.exists(): - shutil.rmtree(local_path, ignore_errors=True) - from brain_repo import git_ops, manifest # type: ignore[import] - local_path.mkdir(parents=True, exist_ok=True) - - subprocess.run( - ["git", "init", "-b", "main"], - cwd=local_path, check=True, capture_output=True, timeout=30, - ) + _clone_brain_repo(repo_url, token, local_path) _check_cancel(flask_app, user_id) - # Token-embedded remote for the bootstrap push. A later follow-up - # should move this to `git credential helper` so the PAT never - # hits .git/config, but that's a separate change. - if "://" in repo_url: - scheme, rest = repo_url.split("://", 1) - auth_url = f"{scheme}://{token}@{rest}" - else: - auth_url = repo_url - subprocess.run( - ["git", "remote", "add", "origin", auth_url], - cwd=local_path, check=True, capture_output=True, timeout=30, - ) - _check_cancel(flask_app, user_id) - - manifest.initialize_brain_repo(local_path, { - "workspace_name": owner_username or "", - "owner_username": owner_username or "", - "github_username": github_username or "", - }) + manifest_data = manifest.read_manifest(local_path) + schema_ok, _migration_needed = manifest.validate_schema(manifest_data) + marker_exists = (local_path / ".evo-brain").exists() + if not marker_exists or not schema_ok: + manifest.initialize_brain_repo(local_path, { + "workspace_name": owner_username or "", + "owner_username": owner_username or "", + "github_username": github_username or "", + }) author_name = github_username or owner_username or "EvoNexus" author_email = ( @@ -496,10 +562,38 @@ def run_bootstrap_pipeline( ) _check_cancel(flask_app, user_id) - committed = git_ops.commit_all(local_path, "feat(brain-repo): initial structure") - if committed: + if sync_after: + copied, dropped = _mirror_workspace( + flask_app, + user_id, + workspace_root, + local_path, + ) + log.info( + "bootstrap+sync: mirrored %d files, removed %d with secrets", + copied, + dropped, + ) + + msg = commit_message or ( + f"manual sync {datetime.now(timezone.utc).isoformat()}" + if sync_after + else "feat(brain-repo): initial structure" + ) + committed = git_ops.commit_all(local_path, msg) + + if tag_name: _check_cancel(flask_app, user_id) - pushed, push_err = git_ops.push(local_path, token, with_tags=False) + git_ops.create_tag( + local_path, + tag_name, + f"Bootstrap sync: {tag_name} ({datetime.now(timezone.utc).isoformat()})", + force=True, + ) + + if committed or tag_name: + _check_cancel(flask_app, user_id) + pushed, push_err = git_ops.push(local_path, token, with_tags=bool(tag_name)) if not pushed: log.warning("bootstrap push failed for %s: %s", repo_name, push_err) error = f"bootstrap push failed: {push_err}" @@ -564,6 +658,10 @@ def enqueue_bootstrap( repo_name: str, owner_username: str, github_username: str, + sync_after: bool = False, + workspace: Path | None = None, + tag_name: str | None = None, + commit_message: str | None = None, ) -> bool: """Spawn daemon thread running run_bootstrap_pipeline. 
Returns False if busy.""" if not _acquire_db_lock(flask_app, user_id, JOB_KIND_BOOTSTRAP): @@ -578,6 +676,10 @@ def enqueue_bootstrap( "repo_name": repo_name, "owner_username": owner_username, "github_username": github_username, + "sync_after": sync_after, + "workspace": workspace, + "tag_name": tag_name, + "commit_message": commit_message, }, name=f"brain-repo-bootstrap-{user_id}", daemon=True, diff --git a/dashboard/backend/db_compat.py b/dashboard/backend/db_compat.py new file mode 100644 index 00000000..414edd23 --- /dev/null +++ b/dashboard/backend/db_compat.py @@ -0,0 +1,195 @@ +from __future__ import annotations + +import sqlite3 +from dataclasses import dataclass +from functools import lru_cache +from pathlib import Path +from typing import Any, Iterable + +from sqlalchemy import create_engine, text + +from runtime_config import database_backend, database_uri, sqlite_path_from_uri + + +@dataclass +class CompatRow: + keys_list: list[str] + values_list: list[Any] + + def __getitem__(self, item: int | str) -> Any: + if isinstance(item, int): + return self.values_list[item] + idx = self.keys_list.index(item) + return self.values_list[idx] + + def keys(self) -> list[str]: + return list(self.keys_list) + + def get(self, key: str, default: Any = None) -> Any: + try: + return self[key] + except Exception: + return default + + def items(self): + return list(zip(self.keys_list, self.values_list)) + + def __iter__(self): + return iter(self.items()) + + def __len__(self) -> int: + return len(self.values_list) + + def as_dict(self) -> dict[str, Any]: + return dict(self.items()) + + +def _translate_qmark_sql(sql: str, params: Iterable[Any] | dict[str, Any] | None) -> tuple[str, dict[str, Any] | tuple[Any, ...]]: + if params is None: + return sql, {} + if isinstance(params, dict): + return sql, params + + values = list(params) + if "?" 
not in sql: + return sql, tuple(values) + + chunks = sql.split("?") + translated: list[str] = [chunks[0]] + bind: dict[str, Any] = {} + for idx, chunk in enumerate(chunks[1:]): + key = f"p{idx}" + translated.append(f":{key}") + translated.append(chunk) + if idx < len(values): + bind[key] = values[idx] + return "".join(translated), bind + + +@lru_cache(maxsize=8) +def _engine_for(database_url: str): + return create_engine(database_url, pool_pre_ping=True, future=True) + + +class CompatCursor: + def __init__(self, connection: "CompatConnection"): + self._connection = connection + self._rows: list[CompatRow] = [] + self._index = 0 + self.rowcount = -1 + self._last_result = None + + def execute(self, sql: str, params: Iterable[Any] | dict[str, Any] | None = None): + if self._connection.backend == "sqlite": + bind = params if isinstance(params, dict) else tuple(params or ()) # type: ignore[arg-type] + cursor = self._connection._raw.execute(sql, bind) + keys = [desc[0] for desc in cursor.description] if cursor.description else [] + self._rows = [CompatRow(keys, list(row)) for row in cursor.fetchall()] if keys else [] + self.rowcount = cursor.rowcount + self._index = 0 + self._last_result = cursor + return self + + translated_sql, bind = _translate_qmark_sql(sql, params) + result = self._connection._conn.execute(text(translated_sql), bind) # type: ignore[arg-type] + keys = list(result.keys()) if result.returns_rows else [] + self._rows = [CompatRow(keys, list(row)) for row in result.fetchall()] if keys else [] + self.rowcount = result.rowcount + self._index = 0 + self._last_result = result + return self + + def fetchone(self) -> CompatRow | None: + if self._index >= len(self._rows): + return None + row = self._rows[self._index] + self._index += 1 + return row + + def fetchall(self) -> list[CompatRow]: + if self._index >= len(self._rows): + return [] + remaining = self._rows[self._index :] + self._index = len(self._rows) + return remaining + + +class CompatConnection: + def __init__(self, database_url: str | None = None, timeout: int = 30): + self.database_url = database_url or database_uri() + self.backend = database_backend(self.database_url) + self._raw = None + self._engine = None + self._conn = None + self._transaction = None + + if self.backend == "sqlite": + sqlite_path = sqlite_path_from_uri(self.database_url) + if sqlite_path is None: + sqlite_path = Path(self.database_url.removeprefix("sqlite:///")) + self._raw = sqlite3.connect(str(sqlite_path), timeout=timeout) + else: + self._engine = _engine_for(self.database_url) + self._conn = self._engine.connect() + self._transaction = self._conn.begin() + + def cursor(self) -> CompatCursor: + return CompatCursor(self) + + def execute(self, sql: str, params: Iterable[Any] | dict[str, Any] | None = None): + return self.cursor().execute(sql, params) + + def executescript(self, script: str): + statements = [statement.strip() for statement in script.split(";") if statement.strip()] + for statement in statements: + self.execute(statement) + return self + + def commit(self) -> None: + if self.backend == "sqlite": + assert self._raw is not None + self._raw.commit() + return + if self._conn is not None and self._transaction is not None: + self._transaction.commit() + self._transaction = self._conn.begin() + + def rollback(self) -> None: + if self.backend == "sqlite": + assert self._raw is not None + self._raw.rollback() + return + if self._conn is not None and self._transaction is not None: + self._transaction.rollback() + self._transaction = 
self._conn.begin() + + def close(self) -> None: + if self.backend == "sqlite": + if self._raw is not None: + self._raw.close() + return + if self._transaction is not None: + try: + self._transaction.commit() + except Exception: + try: + self._transaction.rollback() + except Exception: + pass + if self._conn is not None: + self._conn.close() + + def __enter__(self) -> "CompatConnection": + return self + + def __exit__(self, exc_type, exc, tb) -> bool: + if exc_type is None: + self.commit() + else: + self.rollback() + self.close() + return False + + +def connect_dashboard_db(database_url: str | None = None, timeout: int = 30) -> CompatConnection: + return CompatConnection(database_url=database_url, timeout=timeout) diff --git a/dashboard/backend/db_migrations.py b/dashboard/backend/db_migrations.py new file mode 100644 index 00000000..5684184d --- /dev/null +++ b/dashboard/backend/db_migrations.py @@ -0,0 +1,48 @@ +from __future__ import annotations + +from pathlib import Path +from urllib.parse import urlparse + +from alembic import command +from alembic.config import Config + +from structured_logging import emit_json_log + + +MIGRATIONS_DIR = Path(__file__).resolve().parent / "migrations" + + +def build_alembic_config(database_uri: str) -> Config: + config = Config(str(MIGRATIONS_DIR / "alembic.ini")) + config.set_main_option("script_location", str(MIGRATIONS_DIR)) + config.set_main_option("sqlalchemy.url", database_uri) + return config + + +def run_database_migrations(database_uri: str) -> None: + scheme = urlparse(database_uri).scheme or "unknown" + emit_json_log( + "info", + "database_migration_start", + service="dashboard", + database_scheme=scheme, + script_location=str(MIGRATIONS_DIR), + ) + try: + command.upgrade(build_alembic_config(database_uri), "head") + except Exception as exc: + emit_json_log( + "error", + "database_migration_failed", + service="dashboard", + database_scheme=scheme, + error=str(exc), + ) + raise + emit_json_log( + "info", + "database_migration_complete", + service="dashboard", + database_scheme=scheme, + revision="head", + ) diff --git a/dashboard/backend/knowledge/auto_migrator.py b/dashboard/backend/knowledge/auto_migrator.py index 29ba9298..03f8c04a 100644 --- a/dashboard/backend/knowledge/auto_migrator.py +++ b/dashboard/backend/knowledge/auto_migrator.py @@ -331,7 +331,7 @@ def configure_connection( "text-embedding-3-large": 3072, "text-embedding-ada-002": 1536, } -_PROVIDER_DEFAULT_DIMS = {"local": 768, "openai": 1536} +_PROVIDER_DEFAULT_DIMS = {"local": 768, "openai": 1536, "gemini": 768} def _clean(name: str) -> str: @@ -345,6 +345,16 @@ def _get_evonexus_dim(conn) -> int: if provider == "openai": model = _clean("KNOWLEDGE_OPENAI_MODEL") or "text-embedding-3-small" return _OPENAI_MODEL_DIMS.get(model, 1536) + if provider == "gemini": + raw_dim = _clean("KNOWLEDGE_GEMINI_DIM") + if raw_dim: + try: + dim = int(raw_dim) + if dim in {768, 1536, 3072}: + return dim + except ValueError: + pass + return 768 return _PROVIDER_DEFAULT_DIMS.get(provider, 768) diff --git a/dashboard/backend/knowledge/documents.py b/dashboard/backend/knowledge/documents.py index 2a69f79f..8cd20c84 100644 --- a/dashboard/backend/knowledge/documents.py +++ b/dashboard/backend/knowledge/documents.py @@ -17,7 +17,7 @@ import subprocess import sys import uuid -from datetime import datetime, timezone +from datetime import date, datetime, timezone from pathlib import Path from typing import Any, Dict, List, Optional @@ -34,6 +34,21 @@ def _sql(stmt: str): return text(stmt) +def 
_json_safe(value: Any) -> Any: + """Return a JSON-serializable copy of values commonly returned by psycopg.""" + if isinstance(value, uuid.UUID): + return str(value) + if isinstance(value, (datetime, date)): + return value.isoformat() + if isinstance(value, Path): + return str(value) + if isinstance(value, dict): + return {str(k): _json_safe(v) for k, v in value.items()} + if isinstance(value, (list, tuple, set)): + return [_json_safe(v) for v in value] + return value + + def _row_to_dict(row) -> Dict[str, Any]: d = dict(row._mapping) for col in ("tags", "metadata"): @@ -42,7 +57,7 @@ def _row_to_dict(row) -> Dict[str, Any]: d[col] = json.loads(d[col]) except (ValueError, TypeError): pass - return d + return _json_safe(d) def _get_engine(connection_id: str): @@ -81,6 +96,9 @@ def upload_document( file_path = Path(file_path) if metadata is None: metadata = {} + metadata = _json_safe(metadata) + space_id = str(space_id) + unit_id = str(unit_id) if unit_id is not None else None document_id = str(uuid.uuid4()) doc_title = metadata.get("title") or file_path.stem diff --git a/dashboard/backend/knowledge/embedders/base.py b/dashboard/backend/knowledge/embedders/base.py index ae0eb358..6850f402 100644 --- a/dashboard/backend/knowledge/embedders/base.py +++ b/dashboard/backend/knowledge/embedders/base.py @@ -3,7 +3,7 @@ Registry logic: get_embedder("local") → LocalEmbedder (sentence-transformers, 768 dim) get_embedder("openai") → OpenAIEmbedder (text-embedding-3-small, 1536 dim) - get_embedder("gemini") → GeminiEmbedder (gemini-embedding-001, MRL 768/1536/3072) + get_embedder("gemini") → GeminiEmbedder (gemini-embedding-2, MRL 768/1536/3072) get_embedder() → reads KNOWLEDGE_EMBEDDER_PROVIDER env var (default "local") """ diff --git a/dashboard/backend/knowledge/embedders/gemini_embedder.py b/dashboard/backend/knowledge/embedders/gemini_embedder.py index 17b4fec0..da964f58 100644 --- a/dashboard/backend/knowledge/embedders/gemini_embedder.py +++ b/dashboard/backend/knowledge/embedders/gemini_embedder.py @@ -41,7 +41,7 @@ genai_types = None # type: ignore[assignment,misc] -_DEFAULT_MODEL = "gemini-embedding-001" +_DEFAULT_MODEL = "gemini-embedding-2" _DEFAULT_DIM = 768 # Allowed dimensions per Gemini MRL guidance. @@ -53,13 +53,14 @@ # output_dimensionality is omitted). Kept separate from _DEFAULT_DIM so the # evo-nexus default stays at 768 across both models for storage alignment. _MODEL_NATIVE_DIMS = { - "gemini-embedding-001": 768, - "gemini-embedding-2-preview": 768, + "gemini-embedding-2": 3072, + "gemini-embedding-001": 3072, } -# Models that accept the ``task_type`` parameter. The 2-preview model does -# not — task optimisation is expressed inline in the prompt instead. +# Models that accept the ``task_type`` parameter. Gemini Embedding 2 does +# not — task optimisation is expressed inline in the prompt instead. _MODELS_WITH_TASK_TYPE = {"gemini-embedding-001"} +_MODELS_WITH_AGGREGATED_INPUTS = {"gemini-embedding-2"} # Conservative client-side batch size. gemini-embedding-001 has a 2048-token # input limit; at ~50 tokens/chunk that's ~40 chunks/call.
We stay at 20 for @@ -106,9 +107,9 @@ def __init__(self) -> None: try: dim = int(raw_dim) except ValueError: - dim = _MODEL_NATIVE_DIMS.get(self._model, _DEFAULT_DIM) + dim = _DEFAULT_DIM else: - dim = _MODEL_NATIVE_DIMS.get(self._model, _DEFAULT_DIM) + dim = _DEFAULT_DIM # Silently coerce invalid dims to the default — we must not raise # in __init__ because the settings UI instantiates embedders just to @@ -164,6 +165,9 @@ def embed( client = genai.Client(api_key=api_key) config = self._build_config(task_type) + if self._model in _MODELS_WITH_AGGREGATED_INPUTS: + return self._embed_aggregating_model(client, texts, task_type, config) + all_vectors: List[List[float]] = [] for i in range(0, len(texts), _BATCH_SIZE): batch = texts[i : i + _BATCH_SIZE] @@ -184,6 +188,31 @@ def embed( # Internals # ------------------------------------------------------------------ + def _embed_aggregating_model( + self, + client: Any, + texts: List[str], + task_type: Optional[str], + config: Optional[Any], + ) -> List[List[float]]: + """Embed one text per request for Gemini Embedding 2. + + Gemini Embedding 2 aggregates multiple inputs in one request into a + single vector. RAG indexing needs one vector per chunk, so this method + intentionally performs a separate embed_content call per text. + """ + vectors: List[List[float]] = [] + for text in texts: + response = client.models.embed_content( + model=self._model, + contents=_format_embedding_2_text(text, task_type), + config=config, + ) + if not response.embeddings: + raise RuntimeError("Gemini returned no embeddings.") + vectors.append(list(response.embeddings[0].values)) + return vectors + def _build_config(self, task_type: Optional[str]) -> Optional[Any]: """Assemble ``EmbedContentConfig`` only when we need to override defaults.""" kwargs: dict = {} @@ -202,6 +231,27 @@ def _build_config(self, task_type: Optional[str]) -> Optional[Any]: return genai_types.EmbedContentConfig(**kwargs) +def _format_embedding_2_text(text: str, task_type: Optional[str]) -> str: + """Apply Google's text-only task prefixes for Gemini Embedding 2.""" + if task_type == "RETRIEVAL_QUERY": + return f"task: search result | query: {text}" + if task_type == "RETRIEVAL_DOCUMENT": + return f"title: none | text: {text}" + if task_type == "QUESTION_ANSWERING": + return f"task: question answering | query: {text}" + if task_type == "FACT_VERIFICATION": + return f"task: fact checking | query: {text}" + if task_type == "CODE_RETRIEVAL_QUERY": + return f"task: code retrieval | query: {text}" + if task_type == "SEMANTIC_SIMILARITY": + return f"task: sentence similarity | query: {text}" + if task_type == "CLASSIFICATION": + return f"task: classification | query: {text}" + if task_type == "CLUSTERING": + return f"task: clustering | query: {text}" + return text + + def _l2_normalize(vec: List[float]) -> List[float]: """Unit-length-normalize *vec* in L2 norm. diff --git a/dashboard/backend/knowledge/ingestion.py b/dashboard/backend/knowledge/ingestion.py index 000e6e16..fe3915c4 100644 --- a/dashboard/backend/knowledge/ingestion.py +++ b/dashboard/backend/knowledge/ingestion.py @@ -144,8 +144,8 @@ def _emit(phase: str) -> None: try: embedder = get_embedder() texts = [c["content"] for c in chunks] - # task_type is honoured by providers that support it (e.g. Gemini's - # gemini-embedding-001). Others (local MPNet, OpenAI) ignore it. + # task_type is honoured by providers that support it. Gemini + # Embedding 2 converts it into Google's text prefix format. 
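+ # For instance (chunk text illustrative), _format_embedding_2_text maps
+ # ("Vitamin D intake guidelines", "RETRIEVAL_DOCUMENT") to
+ # "title: none | text: Vitamin D intake guidelines".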
vectors = embedder.embed(texts, task_type="RETRIEVAL_DOCUMENT") except Exception as exc: _mark_error(engine, doc_id, f"Embed error: {exc}") diff --git a/dashboard/backend/knowledge/migrations/versions/001_initial_schema.py b/dashboard/backend/knowledge/migrations/versions/001_initial_schema.py index 823a4940..6da8a024 100644 --- a/dashboard/backend/knowledge/migrations/versions/001_initial_schema.py +++ b/dashboard/backend/knowledge/migrations/versions/001_initial_schema.py @@ -28,7 +28,7 @@ _PROVIDER_DEFAULTS = { "local": ("sentence-transformers/paraphrase-multilingual-mpnet-base-v2", 768), "openai": ("text-embedding-3-small", 1536), - "gemini": ("gemini-embedding-001", 768), + "gemini": ("gemini-embedding-2", 768), } _OPENAI_MODEL_DIMS = { @@ -41,8 +41,8 @@ # 768, 1536, or 3072-dim vectors. Dim is controlled by KNOWLEDGE_GEMINI_DIM # (default 768 to align storage/index cost with the local provider). _GEMINI_MODEL_NATIVE_DIMS = { - "gemini-embedding-001": 768, - "gemini-embedding-2-preview": 768, + "gemini-embedding-2": 3072, + "gemini-embedding-001": 3072, } _GEMINI_ALLOWED_DIMS = {768, 1536, 3072} @@ -67,16 +67,15 @@ def _resolve_embedder_config() -> tuple[str, str, int]: elif provider == "gemini": model = _clean_env("KNOWLEDGE_GEMINI_MODEL") or default_model raw_dim = _clean_env("KNOWLEDGE_GEMINI_DIM") - native = _GEMINI_MODEL_NATIVE_DIMS.get(model, default_dim) if raw_dim: try: dim = int(raw_dim) if dim not in _GEMINI_ALLOWED_DIMS: - dim = native + dim = default_dim except ValueError: - dim = native + dim = default_dim else: - dim = native + dim = default_dim else: model = default_model dim = default_dim diff --git a/dashboard/backend/knowledge/parsers/marker_parser.py b/dashboard/backend/knowledge/parsers/marker_parser.py index b8c91e5f..267fd3ec 100644 --- a/dashboard/backend/knowledge/parsers/marker_parser.py +++ b/dashboard/backend/knowledge/parsers/marker_parser.py @@ -77,6 +77,16 @@ def parse(self, file_path: Path) -> ParseResult: """ file_path = Path(file_path) + if not file_path.exists(): + raise FileNotFoundError(f"File not found: {file_path}") + + if file_path.suffix.lower() == ".pdf": + try: + return _parse_pdf_text_fallback(file_path) + except Exception: + # No searchable text was available; fall through to Marker OCR. + pass + try: from marker.converters.pdf import PdfConverter # noqa: F401 except ImportError as exc: @@ -85,9 +95,6 @@ def parse(self, file_path: Path) -> ParseResult: "Run: pip install marker-pdf (or: uv add marker-pdf)" ) from exc - if not file_path.exists(): - raise FileNotFoundError(f"File not found: {file_path}") - # Run conversion in a thread so we can enforce a timeout result_holder: Dict[str, Any] = {} error_holder: Dict[str, Exception] = {} @@ -105,12 +112,13 @@ def _run() -> None: t.join(timeout=_MARKER_TIMEOUT_SECONDS) if t.is_alive(): - raise TimeoutError( + timeout_error = TimeoutError( f"Marker timed out after {_MARKER_TIMEOUT_SECONDS}s parsing {file_path.name}. " f"Increase MARKER_TIMEOUT_SECONDS to allow more time." 
) + return _maybe_parse_pdf_fallback(file_path, timeout_error) if "exc" in error_holder: - raise error_holder["exc"] + return _maybe_parse_pdf_fallback(file_path, error_holder["exc"]) rendered = result_holder["rendered"] return _to_parse_result(rendered, file_path) @@ -148,6 +156,72 @@ def _to_parse_result(rendered: Any, file_path: Path) -> ParseResult: return ParseResult(markdown=md, pages=pages, metadata=metadata) +def _maybe_parse_pdf_fallback(file_path: Path, marker_error: Exception) -> ParseResult: + """Use a lightweight PDF text extractor when Marker fails on a PDF.""" + if file_path.suffix.lower() != ".pdf": + raise marker_error + + try: + return _parse_pdf_text_fallback(file_path, marker_error) + except Exception as fallback_error: + raise RuntimeError( + f"{marker_error}. PDF text fallback also failed: {fallback_error}" + ) from marker_error + + +def _parse_pdf_text_fallback(file_path: Path, marker_error: Optional[Exception] = None) -> ParseResult: + """Extract text from searchable PDFs without invoking Marker/Surya models.""" + try: + import pypdfium2 as pdfium + except ImportError as exc: + raise RuntimeError("pypdfium2 is not installed") from exc + + pdf = pdfium.PdfDocument(str(file_path)) + pages: List[PageInfo] = [] + markdown_parts: List[str] = [] + + try: + page_count = len(pdf) + for page_index in range(page_count): + page = pdf[page_index] + textpage = None + try: + textpage = page.get_textpage() + text = (textpage.get_text_range() or "").strip() + finally: + if textpage is not None and hasattr(textpage, "close"): + textpage.close() + if hasattr(page, "close"): + page.close() + + if not text: + continue + + page_number = page_index + 1 + page_markdown = f"## Page {page_number}\n\n{text}" + pages.append({"page_number": page_number, "markdown": page_markdown}) + markdown_parts.append(page_markdown) + finally: + if hasattr(pdf, "close"): + pdf.close() + + markdown = "\n\n".join(markdown_parts).strip() + if not markdown: + raise ValueError("No extractable PDF text was found") + + metadata: Dict[str, Any] = { + "title": file_path.stem, + "author": None, + "page_count": len(pages), + "source_mime": "application/pdf", + "parser_fallback": "pypdfium2", + } + if marker_error is not None: + metadata["marker_error"] = str(marker_error)[:500] + + return ParseResult(markdown=markdown, pages=pages, metadata=metadata) + + def _guess_mime(path: Path) -> Optional[str]: _map = { ".pdf": "application/pdf", diff --git a/dashboard/backend/knowledge/search.py b/dashboard/backend/knowledge/search.py index 9a72450e..753348ca 100644 --- a/dashboard/backend/knowledge/search.py +++ b/dashboard/backend/knowledge/search.py @@ -70,9 +70,8 @@ def _get_query_vector(connection_id: str, query: str) -> List[float]: embedder = get_embedder() # task_type=RETRIEVAL_QUERY pairs with RETRIEVAL_DOCUMENT used during - # ingestion for providers that support it (e.g. Gemini's - # gemini-embedding-001). Providers that don't support task hints (local - # MPNet, OpenAI) ignore this parameter silently. + # ingestion. Gemini Embedding 2 converts it into Google's text prefix + # format; providers that do not support hints ignore it. 
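+ # For instance (query text illustrative), _format_embedding_2_text maps
+ # ("daily protein target", "RETRIEVAL_QUERY") to
+ # "task: search result | query: daily protein target".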
vectors = embedder.embed([query], task_type="RETRIEVAL_QUERY") vector = vectors[0] diff --git a/dashboard/backend/knowledge/spaces.py b/dashboard/backend/knowledge/spaces.py index 19a11ad5..e9ff8898 100644 --- a/dashboard/backend/knowledge/spaces.py +++ b/dashboard/backend/knowledge/spaces.py @@ -12,6 +12,8 @@ import json import uuid +from datetime import date, datetime +from pathlib import Path from typing import Any, Dict, List, Optional from sqlalchemy import text @@ -27,6 +29,21 @@ def _sql(stmt: str): return text(stmt) +def _json_safe(value: Any) -> Any: + """Return a JSON-serializable copy of values commonly returned by psycopg.""" + if isinstance(value, uuid.UUID): + return str(value) + if isinstance(value, (datetime, date)): + return value.isoformat() + if isinstance(value, Path): + return str(value) + if isinstance(value, dict): + return {str(k): _json_safe(v) for k, v in value.items()} + if isinstance(value, (list, tuple, set)): + return [_json_safe(v) for v in value] + return value + + def _row_to_dict(row) -> Dict[str, Any]: """Convert a SQLAlchemy Row to a plain dict.""" d = dict(row._mapping) @@ -37,7 +54,7 @@ def _row_to_dict(row) -> Dict[str, Any]: d[col] = json.loads(d[col]) except (ValueError, TypeError): pass - return d + return _json_safe(d) def _get_engine(connection_id: str): diff --git a/dashboard/backend/knowledge/tests/test_gemini_embedder.py b/dashboard/backend/knowledge/tests/test_gemini_embedder.py index 1aac5554..f49a3070 100644 --- a/dashboard/backend/knowledge/tests/test_gemini_embedder.py +++ b/dashboard/backend/knowledge/tests/test_gemini_embedder.py @@ -4,7 +4,7 @@ * Env-driven configuration (model, dim, API key resolution) * Input validation (empty list, missing key, missing SDK) * L2 normalization for dim < 3072 - * Model-specific task_type handling (001 honours it, 2-preview skips) + * Model-specific task_type handling (001 honours it, Embedding 2 prefixes text) * Client-side batching """ @@ -58,7 +58,7 @@ def test_default_dim_is_768(self, monkeypatch): mod = _fresh_module() e = mod.GeminiEmbedder() assert e.dim == 768 - assert e._model == "gemini-embedding-001" + assert e._model == "gemini-embedding-2" def test_custom_dim_1536(self, monkeypatch): monkeypatch.setenv("KNOWLEDGE_GEMINI_DIM", "1536") @@ -88,10 +88,10 @@ def test_dim_with_quotes_is_cleaned(self, monkeypatch): assert mod.GeminiEmbedder().dim == 1536 def test_custom_model(self, monkeypatch): - monkeypatch.setenv("KNOWLEDGE_GEMINI_MODEL", "gemini-embedding-2-preview") + monkeypatch.setenv("KNOWLEDGE_GEMINI_MODEL", "gemini-embedding-001") mod = _fresh_module() e = mod.GeminiEmbedder() - assert e._model == "gemini-embedding-2-preview" + assert e._model == "gemini-embedding-001" assert e.dim == 768 @@ -249,11 +249,10 @@ def test_001_passes_task_type_when_set(self, monkeypatch): kwargs = mock_types.EmbedContentConfig.call_args.kwargs assert kwargs.get("task_type") == "RETRIEVAL_QUERY" - def test_2_preview_skips_task_type(self, monkeypatch): - """gemini-embedding-2-preview does not support task_type per the - Google docs; our code must not pass it.""" + def test_embedding_2_prefixes_task_type_and_uses_one_call_per_text(self, monkeypatch): + """gemini-embedding-2 prefixes task hints and avoids aggregate calls.""" monkeypatch.setenv("GEMINI_API_KEY", "AIzaSy" + "x" * 33) - monkeypatch.setenv("KNOWLEDGE_GEMINI_MODEL", "gemini-embedding-2-preview") + monkeypatch.setenv("KNOWLEDGE_GEMINI_MODEL", "gemini-embedding-2") monkeypatch.setenv("KNOWLEDGE_GEMINI_DIM", "768") mod = _fresh_module() with 
patch.object(mod, "genai") as mock_genai, \ @@ -261,15 +260,22 @@ def test_2_preview_skips_task_type(self, monkeypatch): client = MagicMock() client.models.embed_content.return_value = _mock_embeddings_response(1) mock_genai.Client.return_value = client - mod.GeminiEmbedder().embed(["q"], task_type="RETRIEVAL_QUERY") + result = mod.GeminiEmbedder().embed(["q1", "q2"], task_type="RETRIEVAL_QUERY") + assert len(result) == 2 + assert client.models.embed_content.call_count == 2 + assert client.models.embed_content.call_args_list[0].kwargs["contents"] == "task: search result | query: q1" + assert client.models.embed_content.call_args_list[1].kwargs["contents"] == "task: search result | query: q2" - # Since dim=768 is the native dim AND task_type is skipped, - # there is no config to build at all → config=None. + # With Embedding 2 the native dim is 3072, so requesting 768 always + # builds a config carrying output_dimensionality (and no task_type). config = self._captured_config( client.models.embed_content.call_args ) - assert config is None + assert config is not None + kwargs = mock_types.EmbedContentConfig.call_args.kwargs + assert kwargs.get("output_dimensionality") == 768 + assert "task_type" not in kwargs - # And EmbedContentConfig was never constructed - mock_types.EmbedContentConfig.assert_not_called() + # And EmbedContentConfig was constructed exactly once + mock_types.EmbedContentConfig.assert_called_once() def test_001_without_task_type_omits_kwarg(self, monkeypatch): monkeypatch.setenv("GEMINI_API_KEY", "AIzaSy" + "x" * 33) @@ -295,6 +301,7 @@ def test_001_without_task_type_omits_kwarg(self, monkeypatch): class TestBatching: def test_large_input_is_batched(self, monkeypatch): monkeypatch.setenv("GEMINI_API_KEY", "AIzaSy" + "x" * 33) + monkeypatch.setenv("KNOWLEDGE_GEMINI_MODEL", "gemini-embedding-001") monkeypatch.setenv("KNOWLEDGE_GEMINI_DIM", "768") mod = _fresh_module() # 50 texts with _BATCH_SIZE=20 → 3 calls (20, 20, 10) diff --git a/dashboard/backend/knowledge/tests/test_migration_dim_from_env.py b/dashboard/backend/knowledge/tests/test_migration_dim_from_env.py index f0dee97d..b54fa310 100644 --- a/dashboard/backend/knowledge/tests/test_migration_dim_from_env.py +++ b/dashboard/backend/knowledge/tests/test_migration_dim_from_env.py @@ -101,7 +101,7 @@ def test_gemini_default_is_768(self, monkeypatch): fn = self._fn() provider, model, dim = fn() assert provider == "gemini" - assert model == "gemini-embedding-001" + assert model == "gemini-embedding-2" assert dim == 768 def test_gemini_custom_dim_1536(self, monkeypatch): @@ -132,11 +132,11 @@ def test_gemini_non_numeric_dim_falls_back(self, monkeypatch): _, _, dim = fn() assert dim == 768 - def test_gemini_2_preview_model(self, monkeypatch): + def test_gemini_001_model(self, monkeypatch): monkeypatch.setenv("KNOWLEDGE_EMBEDDER_PROVIDER", "gemini") - monkeypatch.setenv("KNOWLEDGE_GEMINI_MODEL", "gemini-embedding-2-preview") + monkeypatch.setenv("KNOWLEDGE_GEMINI_MODEL", "gemini-embedding-001") monkeypatch.delenv("KNOWLEDGE_GEMINI_DIM", raising=False) fn = self._fn() _, model, dim = fn() - assert model == "gemini-embedding-2-preview" - assert dim == 768 # native default for 2-preview in our config + assert model == "gemini-embedding-001" + assert dim == 768 diff --git a/dashboard/backend/knowledge/tests/test_parsers.py b/dashboard/backend/knowledge/tests/test_parsers.py index 76216206..afd3d6c4 100644 --- a/dashboard/backend/knowledge/tests/test_parsers.py +++ b/dashboard/backend/knowledge/tests/test_parsers.py @@ -158,7 +158,7 @@ def test_parse_returns_pages_list(self, sample_pdf): class TestMarkerNotInstalled: - def test_parse_raises_actionable_error(self, monkeypatch): + def test_parse_raises_actionable_error(self,
monkeypatch, tmp_path): _add_backend() import builtins real_import = builtins.__import__ @@ -175,14 +175,48 @@ def mock_import(name, *args, **kwargs): import knowledge.parsers.marker_parser as mod original_converter = mod._converter mod._converter = None + docx_path = tmp_path / "test.docx" + docx_path.write_bytes(b"fake docx") try: parser = MarkerParser() with pytest.raises(MarkerNotInstalledError, match="pip install marker-pdf"): - parser.parse(Path("/tmp/test.pdf")) + parser.parse(docx_path) finally: mod._converter = original_converter +class TestPdfTextFallback: + def test_pdf_text_fallback_runs_before_marker_import(self, monkeypatch, sample_pdf): + _add_backend() + pytest.importorskip("pypdfium2") + import builtins + real_import = builtins.__import__ + + def mock_import(name, *args, **kwargs): + if name == "marker" or name.startswith("marker."): + raise AssertionError("marker should not be imported for searchable PDFs") + return real_import(name, *args, **kwargs) + + monkeypatch.setattr(builtins, "__import__", mock_import) + + from knowledge.parsers.marker_parser import MarkerParser + result = MarkerParser().parse(sample_pdf) + + assert "Hello World" in result["markdown"] + assert result["metadata"]["parser_fallback"] == "pypdfium2" + + def test_pypdfium_fallback_records_marker_error(self, sample_pdf): + _add_backend() + pytest.importorskip("pypdfium2") + + import knowledge.parsers.marker_parser as mod + result = mod._maybe_parse_pdf_fallback(sample_pdf, RuntimeError("marker crashed")) + + assert "Hello World" in result["markdown"] + assert result["metadata"]["parser_fallback"] == "pypdfium2" + assert "marker crashed" in result["metadata"]["marker_error"] + + # --------------------------------------------------------------------------- # LlamaParseParser stub # --------------------------------------------------------------------------- diff --git a/dashboard/backend/knowledge/tests/test_settings_routes.py b/dashboard/backend/knowledge/tests/test_settings_routes.py index 4ba6b26e..d5a4b72f 100644 --- a/dashboard/backend/knowledge/tests/test_settings_routes.py +++ b/dashboard/backend/knowledge/tests/test_settings_routes.py @@ -181,8 +181,8 @@ def test_gemini_provider_lists_both_models(self, client): data = resp.get_json() assert data["provider"] == "gemini" model_ids = [m["id"] for m in data["models"]] + assert "gemini-embedding-2" in model_ids assert "gemini-embedding-001" in model_ids - assert "gemini-embedding-2-preview" in model_ids def test_filter_by_openai_provider(self, client): with patch("routes.knowledge._assert_key", return_value=None): @@ -353,7 +353,7 @@ def test_gemini_dim_invalid_value_rejected(self, client, monkeypatch): ) assert resp.status_code == 400 - def test_gemini_model_2_preview_accepted(self, client, monkeypatch, tmp_path): + def test_gemini_model_2_accepted(self, client, monkeypatch, tmp_path): monkeypatch.setenv("KNOWLEDGE_EMBEDDER_PROVIDER", "gemini") env_file = tmp_path / ".env" env_file.write_text("") @@ -368,7 +368,7 @@ def test_gemini_model_2_preview_accepted(self, client, monkeypatch, tmp_path): "/api/knowledge/settings", json={ "embedder_provider": "gemini", - "embedder_model": "gemini-embedding-2-preview", + "embedder_model": "gemini-embedding-2", }, headers=_WRITE_HEADERS, ) diff --git a/dashboard/backend/migrations/alembic.ini b/dashboard/backend/migrations/alembic.ini new file mode 100644 index 00000000..9e959190 --- /dev/null +++ b/dashboard/backend/migrations/alembic.ini @@ -0,0 +1,3 @@ +[alembic] +script_location = migrations +sqlalchemy.url = 
sqlite:///evonexus.db diff --git a/dashboard/backend/migrations/env.py b/dashboard/backend/migrations/env.py new file mode 100644 index 00000000..d0f37575 --- /dev/null +++ b/dashboard/backend/migrations/env.py @@ -0,0 +1,50 @@ +from __future__ import annotations + +import os +import sys +from pathlib import Path + +from alembic import context +from sqlalchemy import engine_from_config, pool + +BASE_DIR = Path(__file__).resolve().parents[1] +if str(BASE_DIR) not in sys.path: + sys.path.insert(0, str(BASE_DIR)) + +config = context.config + +db_url = os.environ.get("EVONEXUS_DATABASE_URL", "").strip() +if db_url: + config.set_main_option("sqlalchemy.url", db_url) + +target_metadata = None + + +def run_migrations_offline() -> None: + url = config.get_main_option("sqlalchemy.url") + context.configure( + url=url, + target_metadata=target_metadata, + literal_binds=True, + dialect_opts={"paramstyle": "named"}, + ) + with context.begin_transaction(): + context.run_migrations() + + +def run_migrations_online() -> None: + connectable = engine_from_config( + config.get_section(config.config_ini_section, {}), + prefix="sqlalchemy.", + poolclass=pool.NullPool, + ) + with connectable.connect() as connection: + context.configure(connection=connection, target_metadata=target_metadata) + with context.begin_transaction(): + context.run_migrations() + + +if context.is_offline_mode(): + run_migrations_offline() +else: + run_migrations_online() diff --git a/dashboard/backend/migrations/versions/0001_bootstrap_schema.py b/dashboard/backend/migrations/versions/0001_bootstrap_schema.py new file mode 100644 index 00000000..42de0d66 --- /dev/null +++ b/dashboard/backend/migrations/versions/0001_bootstrap_schema.py @@ -0,0 +1,24 @@ +from __future__ import annotations + +from alembic import op + +from models import db +from schema_migrations import downgrade_app_schema, upgrade_app_schema + + +revision = "0001_bootstrap_schema" +down_revision = None +branch_labels = None +depends_on = None + + +def upgrade() -> None: + bind = op.get_bind() + db.metadata.create_all(bind=bind) + upgrade_app_schema(bind) + + +def downgrade() -> None: + bind = op.get_bind() + downgrade_app_schema(bind) + db.metadata.drop_all(bind=bind) diff --git a/dashboard/backend/platform_cache.py b/dashboard/backend/platform_cache.py new file mode 100644 index 00000000..8033e0d0 --- /dev/null +++ b/dashboard/backend/platform_cache.py @@ -0,0 +1,135 @@ +from __future__ import annotations + +import json +import os +import threading +import time +from dataclasses import dataclass +from functools import lru_cache +from typing import Any, Callable + + +_lock = threading.Lock() +_memory_cache: dict[str, tuple[float | None, Any]] = {} + + +@dataclass(frozen=True) +class CacheStatus: + backend: str + available: bool + detail: str | None = None + + +class _MemoryCacheBackend: + backend = "memory" + + def get(self, key: str) -> Any: + now = time.time() + with _lock: + item = _memory_cache.get(key) + if not item: + return None + expires_at, value = item + if expires_at is not None and expires_at < now: + _memory_cache.pop(key, None) + return None + return value + + def set(self, key: str, value: Any, ttl: int | None = None) -> None: + expires_at = (time.time() + ttl) if ttl else None + with _lock: + _memory_cache[key] = (expires_at, value) + + def delete(self, key: str) -> None: + with _lock: + _memory_cache.pop(key, None) + + def clear(self) -> None: + with _lock: + _memory_cache.clear() + + def status(self) -> CacheStatus: + with _lock: + size = 
len(_memory_cache) + return CacheStatus(backend="memory", available=True, detail=f"{size} entries") + + +class _RedisCacheBackend: + backend = "redis" + + def __init__(self, url: str): + import redis # type: ignore + + self._redis = redis.Redis.from_url(url, decode_responses=True) + self._prefix = "evonexus:platform:cache:" + self._redis.ping() + + def _key(self, key: str) -> str: + return f"{self._prefix}{key}" + + def get(self, key: str) -> Any: + raw = self._redis.get(self._key(key)) + return None if raw is None else json.loads(raw) + + def set(self, key: str, value: Any, ttl: int | None = None) -> None: + payload = json.dumps(value, ensure_ascii=False) + if ttl: + self._redis.setex(self._key(key), ttl, payload) + else: + self._redis.set(self._key(key), payload) + + def delete(self, key: str) -> None: + self._redis.delete(self._key(key)) + + def clear(self) -> None: + for key in self._redis.scan_iter(match=f"{self._prefix}*"): + self._redis.delete(key) + + def status(self) -> CacheStatus: + try: + pong = self._redis.ping() + return CacheStatus(backend="redis", available=bool(pong), detail=os.environ.get("REDIS_URL")) + except Exception as exc: + return CacheStatus(backend="redis", available=False, detail=str(exc)[:200]) + + +@lru_cache(maxsize=1) +def _get_backend() -> Any: + redis_url = os.environ.get("REDIS_URL", "").strip() + if redis_url: + try: + return _RedisCacheBackend(redis_url) + except Exception: + pass + return _MemoryCacheBackend() + + +def cache_get(key: str) -> Any: + return _get_backend().get(key) + + +def cache_set(key: str, value: Any, ttl: int | None = None) -> None: + _get_backend().set(key, value, ttl) + + +def cache_delete(key: str) -> None: + _get_backend().delete(key) + + +def cache_get_or_set(key: str, loader: Callable[[], Any], ttl: int | None = None) -> Any: + cached = cache_get(key) + if cached is not None: + return cached + value = loader() + cache_set(key, value, ttl=ttl) + return value + + +def cache_status() -> dict[str, Any]: + status = _get_backend().status() + return { + "backend": status.backend, + "available": status.available, + "detail": status.detail, + } + diff --git a/dashboard/backend/platform_metrics.py b/dashboard/backend/platform_metrics.py new file mode 100644 index 00000000..5e64e0a8 --- /dev/null +++ b/dashboard/backend/platform_metrics.py @@ -0,0 +1,87 @@ +from __future__ import annotations + +from collections import defaultdict +from statistics import mean +from typing import Any + +from platform_support import PROVIDER_METRICS_PATH, append_jsonl, read_jsonl + + +def record_provider_event( + *, + provider_id: str, + event: str, + model: str | None = None, + latency_ms: float | None = None, + success: bool | None = None, + detail: str | None = None, + mode: str | None = None, + metadata: dict[str, Any] | None = None, +) -> dict[str, Any]: + payload: dict[str, Any] = { + "provider_id": provider_id, + "event": event, + "model": model, + "latency_ms": latency_ms, + "success": success, + "detail": detail, + "mode": mode, + "metadata": metadata or {}, + } + return append_jsonl(PROVIDER_METRICS_PATH, payload) + + +def load_provider_events(limit: int = 500) -> list[dict[str, Any]]: + return read_jsonl(PROVIDER_METRICS_PATH, limit=limit) + + +def summarize_provider_events(limit: int = 500) -> dict[str, Any]: + events = load_provider_events(limit=limit) + by_provider: dict[str, dict[str, Any]] = defaultdict(lambda: { + "events": 0, + "successes": 0, + "failures": 0, + "latencies": [], + "models": defaultdict(lambda: {"events": 0, "successes": 0, 
"failures": 0, "latencies": []}), + "last_event": None, + }) + + for event in events: + provider_id = event.get("provider_id") or "unknown" + provider_bucket = by_provider[provider_id] + provider_bucket["events"] += 1 + provider_bucket["last_event"] = event + model = event.get("model") or "unknown" + model_bucket = provider_bucket["models"][model] + model_bucket["events"] += 1 + if event.get("success") is True: + provider_bucket["successes"] += 1 + model_bucket["successes"] += 1 + elif event.get("success") is False: + provider_bucket["failures"] += 1 + model_bucket["failures"] += 1 + latency = event.get("latency_ms") + if isinstance(latency, (int, float)): + provider_bucket["latencies"].append(float(latency)) + model_bucket["latencies"].append(float(latency)) + + summary = [] + for provider_id, bucket in by_provider.items(): + latencies = bucket.pop("latencies") + models = [] + for model_id, model_bucket in bucket.pop("models").items(): + model_latencies = model_bucket.pop("latencies") + model_bucket["avg_latency_ms"] = round(mean(model_latencies), 2) if model_latencies else None + models.append({"model": model_id, **model_bucket}) + bucket["avg_latency_ms"] = round(mean(latencies), 2) if latencies else None + bucket["success_rate"] = round((bucket["successes"] / bucket["events"]) * 100, 1) if bucket["events"] else None + bucket["models"] = sorted(models, key=lambda row: (-row["events"], row["model"])) + summary.append({"provider_id": provider_id, **bucket}) + + summary.sort(key=lambda row: (-row["events"], row["provider_id"])) + return { + "events": events[-100:], + "providers": summary, + "total_events": len(events), + } + diff --git a/dashboard/backend/platform_observability.py b/dashboard/backend/platform_observability.py new file mode 100644 index 00000000..1be6dcd5 --- /dev/null +++ b/dashboard/backend/platform_observability.py @@ -0,0 +1,90 @@ +from __future__ import annotations + +from datetime import datetime, timezone +from typing import Any + +import requests +from flask import current_app + +from platform_cache import cache_get_or_set, cache_status +from platform_metrics import summarize_provider_events +from platform_plugins import list_plugins, load_installed_plugins, load_plugin_registry +from platform_queue import list_events, queue_status +from platform_support import WORKSPACE, database_scheme, read_json + + +def _load_provider_config() -> dict[str, Any]: + config_path = WORKSPACE / "config" / "providers.json" + fallback_path = WORKSPACE / "config" / "providers.example.json" + config = read_json(config_path, read_json(fallback_path, {})) + return config if isinstance(config, dict) else {} + + +def _terminal_server_snapshot() -> dict[str, Any]: + import os + + url = os.environ.get("TERMINAL_SERVER_URL", "http://127.0.0.1:32352").rstrip("/") + try: + resp = requests.get(f"{url}/api/health/deep", timeout=3) + data = resp.json() + return { + "status": data.get("status", "warning") if resp.ok else "error", + "reachable": resp.ok, + "http_status": resp.status_code, + "url": url, + "snapshot": data, + } + except Exception as exc: + return { + "status": "warning", + "reachable": False, + "http_status": None, + "url": url, + "error": str(exc)[:200], + } + + +def _build_observability_summary() -> dict[str, Any]: + from routes.health import _build_report + from routes.costs import costs_summary + + backend_health = _build_report(deep=True) + terminal = _terminal_server_snapshot() + costs_resp = costs_summary() + costs = costs_resp.get_json(silent=True) if hasattr(costs_resp, 
"get_json") else None + provider_config = _load_provider_config() + registry = load_plugin_registry() + installed = load_installed_plugins() + + provider_metrics = summarize_provider_events(limit=500) + plugin_list = list_plugins() + events = list_events(limit=25) + + try: + database_uri = current_app.config.get("SQLALCHEMY_DATABASE_URI") + except RuntimeError: + from runtime_config import database_uri as resolve_database_uri + + database_uri = resolve_database_uri() + + return { + "generated_at": datetime.now(timezone.utc).isoformat(), + "backend": backend_health, + "terminal_server": terminal, + "costs": costs or {}, + "provider_config": provider_config, + "provider_metrics": provider_metrics, + "cache": cache_status(), + "queue": queue_status(), + "plugins": { + "registry_count": len(registry.get("plugins", [])), + "installed_count": len(installed.get("plugins", {})), + "items": plugin_list, + }, + "recent_events": events, + "database_backend": database_scheme(database_uri), + } + + +def build_observability_summary() -> dict[str, Any]: + return cache_get_or_set("observability:summary", _build_observability_summary, ttl=30) diff --git a/dashboard/backend/platform_plugins.py b/dashboard/backend/platform_plugins.py new file mode 100644 index 00000000..85efc482 --- /dev/null +++ b/dashboard/backend/platform_plugins.py @@ -0,0 +1,193 @@ +from __future__ import annotations + +import re +from datetime import datetime, timezone +from pathlib import Path +from typing import Any + +import yaml + +from platform_queue import publish_event +from platform_support import INSTALLED_PLUGINS_PATH, PLUGIN_REGISTRY_PATH, WORKSPACE, ensure_platform_data_dir, read_json, write_json + + +_SAFE_PLUGIN_ID = re.compile(r"^[a-z0-9][a-z0-9_-]*$") +_AGENTS_DIR = WORKSPACE / ".claude" / "agents" + + +DEFAULT_PLUGIN_REGISTRY = { + "plugins": [ + { + "id": "observability-scout", + "name": "Observability Scout", + "version": "1.0.0", + "category": "observability", + "description": "Installs a read-only agent pack for platform telemetry and incident triage.", + "agents": [ + { + "name": "observability-scout", + "description": "Read-only observability analyst for EvoNexus. Uses metrics, health, costs, and provider routing data to identify regressions.", + "model": "haiku", + "color": "cyan", + "memory": "project", + "disallowedTools": ["Write", "Edit", "Bash", "NotebookEdit"], + "prompt": ( + "You are Observability Scout.\n" + "Analyze EvoNexus platform health, provider routing, queue/cache status, and cost trends.\n" + "Recommend concrete fixes with clear severity. Do not edit files." + ), + } + ], + }, + { + "id": "provider-router", + "name": "Provider Router", + "version": "1.0.0", + "category": "platform", + "description": "Installs a read-only agent pack for provider failover and routing audits.", + "agents": [ + { + "name": "provider-router", + "description": "Read-only provider routing specialist for EvoNexus. Reviews failover order, provider health, and selected model mode.", + "model": "haiku", + "color": "violet", + "memory": "project", + "disallowedTools": ["Write", "Edit", "Bash", "NotebookEdit"], + "prompt": ( + "You are Provider Router.\n" + "Audit provider health, failover order, and model compatibility.\n" + "Recommend a safe routing chain and explain why a fallback should or should not be activated." 
+ ), + } + ], + }, + ] } + + +def _safe_plugin_id(plugin_id: str) -> str: + plugin_id = (plugin_id or "").strip().lower() + if not _SAFE_PLUGIN_ID.match(plugin_id): + raise ValueError("Invalid plugin id") + return plugin_id + + +def load_plugin_registry() -> dict[str, Any]: + registry = read_json(PLUGIN_REGISTRY_PATH, DEFAULT_PLUGIN_REGISTRY) + if not isinstance(registry, dict) or "plugins" not in registry: + return DEFAULT_PLUGIN_REGISTRY + plugins = registry.get("plugins") + if not isinstance(plugins, list): + return DEFAULT_PLUGIN_REGISTRY + return registry + + +def load_installed_plugins() -> dict[str, Any]: + payload = read_json(INSTALLED_PLUGINS_PATH, {"plugins": {}}) + if not isinstance(payload, dict): + return {"plugins": {}} + if not isinstance(payload.get("plugins"), dict): + payload["plugins"] = {} + return payload + + +def _save_installed_plugins(payload: dict[str, Any]) -> None: + ensure_platform_data_dir() + write_json(INSTALLED_PLUGINS_PATH, payload) + + +def _render_agent(agent: dict[str, Any], plugin_id: str) -> str: + frontmatter: dict[str, Any] = { + "name": agent["name"], + "description": agent["description"], + "model": agent.get("model", "haiku"), + "color": agent.get("color", "cyan"), + "memory": agent.get("memory", "project"), + "plugin": plugin_id, + "disallowedTools": agent.get("disallowedTools", ["Write", "Edit", "Bash", "NotebookEdit"]), + } + prompt = agent.get("prompt", "").strip() + return "---\n" + yaml.safe_dump(frontmatter, sort_keys=False, allow_unicode=True).strip() + "\n---\n\n" + prompt + "\n" + + +def _installed_files_for(plugin: dict[str, Any]) -> list[dict[str, str]]: + files: list[dict[str, str]] = [] + for agent in plugin.get("agents", []) or []: + files.append({ + "type": "agent", + "name": agent["name"], + "path": f".claude/agents/{agent['name']}.md", + }) + return files + + +def list_plugins() -> list[dict[str, Any]]: + registry = load_plugin_registry() + installed = load_installed_plugins().get("plugins", {}) + result = [] + for plugin in registry.get("plugins", []): + plugin_id = plugin.get("id") + if not plugin_id: + continue + state = installed.get(plugin_id, {}) + result.append({ + **plugin, + "installed": bool(state), + "installed_at": state.get("installed_at"), + "installed_files": state.get("files", []), + }) + return result + + +def install_plugin(plugin_id: str, workspace: Path = WORKSPACE) -> dict[str, Any]: + plugin_key = _safe_plugin_id(plugin_id) + registry = load_plugin_registry() + plugin = next((item for item in registry.get("plugins", []) if item.get("id") == plugin_key), None) + if not plugin: + raise KeyError(f"Unknown plugin: {plugin_key}") + + installed = load_installed_plugins() + if plugin_key in installed.get("plugins", {}): + return installed["plugins"][plugin_key] + + (workspace / ".claude" / "agents").mkdir(parents=True, exist_ok=True) + + written_files: list[str] = [] + for agent in plugin.get("agents", []) or []: + target = workspace / ".claude" / "agents" / f"{agent['name']}.md" + if target.exists(): + raise FileExistsError(f"Agent file already exists: {target.name}") + target.write_text(_render_agent(agent, plugin_key), encoding="utf-8") + written_files.append(str(target.relative_to(workspace)).replace("\\", "/")) + + state = { + "id": plugin_key, + "name": plugin.get("name", plugin_key), + "version": plugin.get("version", "1.0.0"), + "installed_at": datetime.now(timezone.utc).isoformat(), + "files": written_files, + } + installed.setdefault("plugins", {})[plugin_key] = state + _save_installed_plugins(installed) + 
publish_event("plugin-installed", {"plugin_id": plugin_key, "files": written_files}) + return state + + +def uninstall_plugin(plugin_id: str, workspace: Path = WORKSPACE) -> dict[str, Any]: + plugin_key = _safe_plugin_id(plugin_id) + installed = load_installed_plugins() + state = installed.get("plugins", {}).get(plugin_key) + if not state: + raise KeyError(f"Plugin not installed: {plugin_key}") + + removed: list[str] = [] + for rel_path in state.get("files", []): + target = workspace / rel_path + if target.exists(): + target.unlink() + removed.append(rel_path) + + installed["plugins"].pop(plugin_key, None) + _save_installed_plugins(installed) + publish_event("plugin-uninstalled", {"plugin_id": plugin_key, "files": removed}) + return {"id": plugin_key, "removed_files": removed} diff --git a/dashboard/backend/platform_queue.py b/dashboard/backend/platform_queue.py new file mode 100644 index 00000000..a495e4d1 --- /dev/null +++ b/dashboard/backend/platform_queue.py @@ -0,0 +1,62 @@ +from __future__ import annotations + +import os +from typing import Any + +import json + +from platform_cache import cache_delete +from platform_support import PLATFORM_EVENTS_PATH, append_jsonl, ensure_platform_data_dir, read_jsonl + + +def publish_event(topic: str, payload: dict[str, Any], source: str = "dashboard") -> dict[str, Any]: + ensure_platform_data_dir() + event = append_jsonl(PLATFORM_EVENTS_PATH, { + "topic": topic, + "source": source, + "payload": payload, + }) + + redis_url = os.environ.get("REDIS_URL", "").strip() + if redis_url: + try: + import redis # type: ignore + + redis.Redis.from_url(redis_url, decode_responses=True).publish( + "evonexus:platform", + json.dumps(event, ensure_ascii=False), + ) + except Exception: + pass + + for cache_key in ( + "providers:list", + "observability:summary", + "observability:providers", + "platform:queue:50", + ): + try: + cache_delete(cache_key) + except Exception: + pass + + return event + + +def list_events(limit: int = 100, topics: list[str] | None = None) -> list[dict[str, Any]]: + events = read_jsonl(PLATFORM_EVENTS_PATH, limit=limit) + if topics: + allowed = set(topics) + events = [event for event in events if event.get("topic") in allowed] + return events + + +def queue_status() -> dict[str, Any]: + events = read_jsonl(PLATFORM_EVENTS_PATH, limit=200) + latest = events[-1] if events else None + return { + "backend": "file", + "path": str(PLATFORM_EVENTS_PATH), + "event_count": len(events), + "latest_event": latest, + } diff --git a/dashboard/backend/platform_support.py b/dashboard/backend/platform_support.py new file mode 100644 index 00000000..29ee13d4 --- /dev/null +++ b/dashboard/backend/platform_support.py @@ -0,0 +1,75 @@ +from __future__ import annotations + +import json +from datetime import datetime, timezone +from pathlib import Path +from typing import Any + + +WORKSPACE = Path(__file__).resolve().parent.parent.parent +PLATFORM_DATA_DIR = WORKSPACE / "dashboard" / "data" / "platform" +PLATFORM_EVENTS_PATH = PLATFORM_DATA_DIR / "events.jsonl" +PROVIDER_METRICS_PATH = PLATFORM_DATA_DIR / "provider-metrics.jsonl" +INSTALLED_PLUGINS_PATH = PLATFORM_DATA_DIR / "installed-plugins.json" +PLUGIN_REGISTRY_PATH = WORKSPACE / "config" / "plugin-registry.json" + + +def ensure_platform_data_dir() -> Path: + PLATFORM_DATA_DIR.mkdir(parents=True, exist_ok=True) + return PLATFORM_DATA_DIR + + +def _now_iso() -> str: + return datetime.now(timezone.utc).isoformat() + + +def read_json(path: Path, default: Any) -> Any: + try: + if not path.exists(): + return 
default + return json.loads(path.read_text(encoding="utf-8")) + except Exception: + return default + + +def write_json(path: Path, payload: Any) -> None: + path.parent.mkdir(parents=True, exist_ok=True) + path.write_text(json.dumps(payload, indent=2, ensure_ascii=False) + "\n", encoding="utf-8") + + +def append_jsonl(path: Path, payload: dict[str, Any]) -> dict[str, Any]: + ensure_platform_data_dir() + event = {"ts": _now_iso(), **payload} + with path.open("a", encoding="utf-8") as fh: + fh.write(json.dumps(event, ensure_ascii=False) + "\n") + return event + + +def read_jsonl(path: Path, limit: int | None = None) -> list[dict[str, Any]]: + if not path.exists(): + return [] + rows: list[dict[str, Any]] = [] + try: + for raw_line in path.read_text(encoding="utf-8", errors="replace").splitlines(): + line = raw_line.strip() + if not line: + continue + try: + row = json.loads(line) + except Exception: + continue + if isinstance(row, dict): + rows.append(row) + except Exception: + return [] + if limit is not None and limit >= 0: + return rows[-limit:] + return rows + + +def database_scheme(database_uri: str | None) -> str: + if not database_uri: + return "sqlite" + if "://" not in database_uri: + return "sqlite" + return database_uri.split(":", 1)[0].lower() diff --git a/dashboard/backend/plugin_schema.py b/dashboard/backend/plugin_schema.py index 1c30bf31..7f49f1e3 100644 --- a/dashboard/backend/plugin_schema.py +++ b/dashboard/backend/plugin_schema.py @@ -62,6 +62,10 @@ class Capability(str, Enum): # Wave 2.1 — full-screen plugin UI pages + writable data ui_pages = "ui_pages" writable_data = "writable_data" + # B2.0 — unauthenticated public pages served by the host (token-bound) + public_pages = "public_pages" + # B3 — safe uninstall with data preservation and 3-step wizard + safe_uninstall = "safe_uninstall" class PluginMcpServer(BaseModel): @@ -303,6 +307,13 @@ class ReadonlyQuery(BaseModel): id: Annotated[str, Field(min_length=1, max_length=100)] description: Annotated[str, Field(min_length=1, max_length=500)] sql: Annotated[str, Field(min_length=1)] + # B2.0: expose this query on the public portal without host auth. + # Value is the PluginPublicPage.id that gates access. + public_via: Optional[str] = None + # B2.0: named SQL parameter in ``sql`` that receives the URL token value. + # Required when public_via is set. The parameter must appear in ``sql`` + # as :token_param (e.g. ``WHERE magic_link_token = :token``). + bind_token_param: Optional[str] = None @field_validator("id") @classmethod @@ -566,6 +577,14 @@ class PluginWritableResource(BaseModel): ) # Optional JSON Schema for payload validation (jsonschema library) json_schema: Optional[WritableResourceJsonSchema] = None + # Wave 2.1.x: optional endpoint-level RBAC. When set, only authenticated + # users whose ``current_user.role`` is in this list may POST/PUT/DELETE this + # resource. Empty/None means any authenticated user passes (legacy default). + # Role 'admin' always passes regardless of the list (super-user override). + # Plugins use this to gate writable resources by role without needing a host + # PR or app-layer wrapper. See evonexus-plugin-nutri for split-endpoint + # patterns (patients_admin vs patients_clinical). 
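+ # Hypothetical plugin.yaml excerpt (the writable_data key and role names
+ # are illustrative; resource ids follow the nutri split-endpoint example):
+ #   writable_data:
+ #     - id: patients_admin
+ #       requires_role: [nutritionist]
+ #     - id: patients_clinical
+ #       requires_role: [nutritionist, assistant-nutritionist]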
+ requires_role: Optional[List[Annotated[str, Field(min_length=1, max_length=64)]]] = None @field_validator("id") @classmethod @@ -585,6 +604,232 @@ def table_pattern(cls, v: str) -> str: ) return v + @field_validator("requires_role") + @classmethod + def requires_role_pattern(cls, v: Optional[List[str]]) -> Optional[List[str]]: + if v is None: + return v + for role in v: + if not re.match(r"^[a-z][a-z0-9-]*$", role): + raise ValueError( + f"requires_role entry '{role}' must match ^[a-z][a-z0-9-]*$ (kebab-case)" + ) + return v + + +class PluginPublicPageTokenSource(BaseModel): + """Token source declaration for a public page (B2.0). + + The host validates the incoming token against ``column`` in ``table`` + using a parametric query. Table must be slug-prefixed (enforced by the + PluginManifest validator ``public_pages_tables_slug_prefixed``). + + B2.0 v1 deliberately does NOT support a ``revoked_when`` SQL fragment to + prevent SQL injection. Revocation is the plugin's responsibility: nulling + or rotating the token column value causes the next request to 404. + """ + + # Plugin-owned table containing the token column (validated slug-prefixed) + table: Annotated[str, Field(min_length=1, max_length=200)] + # Column in ``table`` that holds the token value + column: Annotated[str, Field(min_length=1, max_length=100)] + + @field_validator("table") + @classmethod + def table_identifier(cls, v: str) -> str: + if not re.match(r"^[a-z][a-z0-9_]*$", v): + raise ValueError( + f"token_source.table '{v}' must match ^[a-z][a-z0-9_]*$" + ) + return v + + @field_validator("column") + @classmethod + def column_identifier(cls, v: str) -> str: + if not re.match(r"^[a-z][a-z0-9_]*$", v): + raise ValueError( + f"token_source.column '{v}' must match ^[a-z][a-z0-9_]*$" + ) + return v + + +class PluginPublicPage(BaseModel): + """A public (unauthenticated) page declared in plugin.yaml under public_pages (B2.0). + + The host registers ``/p/{slug}/{route_prefix}/{token}`` as a public route + and validates the token against ``token_source.column`` in ``token_source.table`` + on every request. Only B2.0 (read-only, no PIN) is supported in v1. + B2.1 (PIN + writable + auto_set_columns) is deferred. + """ + + # Unique identifier within this plugin's public_pages list + id: Annotated[str, Field(min_length=1, max_length=100)] + # Human-readable label for audit logs and admin UI + description: Annotated[str, Field(min_length=1, max_length=500)] + # URL prefix segment, without leading/trailing slashes (e.g. 
"portal") + route_prefix: Annotated[str, Field(min_length=1, max_length=100)] + # Token source — which plugin table/column the URL token is validated against + token_source: PluginPublicPageTokenSource + # Plugin JS bundle path (must be under ui/public/) + bundle: Annotated[str, Field(min_length=1, max_length=500)] + # Web component tag name registered by the bundle + custom_element_name: Annotated[str, Field(min_length=1, max_length=200)] + # auth_mode: only "token" is supported in B2.0 (B2.1 will add "pin") + auth_mode: Literal["token"] = "token" + # Rate limit override per page (requests/minute/IP); defaults to global limiter + rate_limit_per_ip: Optional[int] = None + # Optional action name to write to the audit log on each page view + audit_action: Optional[str] = None + + @field_validator("id") + @classmethod + def id_pattern(cls, v: str) -> str: + if not re.match(r"^[a-z0-9_]+$", v): + raise ValueError(f"PluginPublicPage id '{v}' must match ^[a-z0-9_]+$") + return v + + @field_validator("route_prefix") + @classmethod + def route_prefix_clean(cls, v: str) -> str: + """No leading/trailing slashes; only lowercase alphanum + hyphens.""" + v = v.strip("/") + if not re.match(r"^[a-z0-9][a-z0-9-]*[a-z0-9]$|^[a-z0-9]$", v): + raise ValueError( + f"route_prefix '{v}' must be lowercase alphanum+hyphens, no slashes" + ) + return v + + @field_validator("bundle") + @classmethod + def bundle_in_public_subtree(cls, v: str) -> str: + """Bundle must live under ui/public/ to prevent leaking authenticated bundles.""" + if not v.startswith("ui/public/"): + raise ValueError( + f"PluginPublicPage bundle '{v}' must start with 'ui/public/' " + "(authenticated ui_pages bundles are not accessible from public routes)." + ) + ext = Path(v).suffix.lower() + if ext not in {".js", ".mjs"}: + raise ValueError( + f"PluginPublicPage bundle '{v}' must have a .js or .mjs extension." + ) + return v + + @field_validator("custom_element_name") + @classmethod + def custom_element_name_has_hyphen(cls, v: str) -> str: + if "-" not in v: + raise ValueError( + f"custom_element_name '{v}' must contain at least one hyphen " + "(Web Components specification requirement)." + ) + return v + + @field_validator("rate_limit_per_ip") + @classmethod + def rate_limit_positive(cls, v: Optional[int]) -> Optional[int]: + if v is not None and v < 1: + raise ValueError("rate_limit_per_ip must be a positive integer") + return v + + +class PluginPreUninstallHook(BaseModel): + """Pre-uninstall hook declaration (B3 safe_uninstall). + + Executed as a sandboxed subprocess before the uninstall wizard proceeds. + The hook must produce a file in ``output_dir`` when ``must_produce_file`` + is true — if it does not, the uninstall is blocked. + """ + + # Relative path to the hook script inside the plugin directory + script: Annotated[str, Field(min_length=1, max_length=500)] + # Output directory pattern (supports {slug} and {timestamp} interpolation) + output_dir: Annotated[str, Field(min_length=1, max_length=500)] + # Seconds before the subprocess is killed (max 600) + timeout_seconds: int = 600 + # If true, uninstall is blocked when the hook exits cleanly but produces no file + must_produce_file: bool = True + + @field_validator("script") + @classmethod + def script_relative(cls, v: str) -> str: + if v.startswith("/") or ".." 
in v: + raise ValueError( + f"pre_uninstall_hook.script '{v}' must be relative and must not traverse upward" + ) + return v + + @field_validator("timeout_seconds") + @classmethod + def timeout_in_range(cls, v: int) -> int: + if not 1 <= v <= 600: + raise ValueError("timeout_seconds must be between 1 and 600") + return v + + +class PluginUserConfirmation(BaseModel): + """User confirmation gate for safe_uninstall (B3). + + Defines the checkbox label and the exact phrase the user must type + to enable the Uninstall button. Phrase matching is case-sensitive. + """ + + checkbox_label: Annotated[str, Field(min_length=1, max_length=1000)] + typed_phrase: Annotated[str, Field(min_length=1, max_length=200)] + + +class PluginSafeUninstall(BaseModel): + """Safe uninstall declaration for plugins holding regulated data (B3). + + When ``enabled`` is true the host enforces: + 1. A 3-step wizard (pre-hook → checkbox → typed phrase + ZIP password). + 2. Preserved tables are NOT dropped and are renamed ``_orphan_{slug}_{table}``. + 3. Host-entity cascades respect ``preserved_host_entities`` filters. + 4. Reinstall detects orphaned tables and restores access after SHA256 verify. + + Plugins not declaring this block continue to use the default cascade-DELETE. + """ + + enabled: bool = False + # Human-readable regulatory reason shown to the admin before they confirm + reason: Optional[str] = None + # Pre-uninstall hook run before the wizard + pre_uninstall_hook: Optional[PluginPreUninstallHook] = None + # Checkbox + typed phrase gate + user_confirmation: Optional[PluginUserConfirmation] = None + # Tables that must NOT be dropped on uninstall (renamed to _orphan_{slug}_{table}) + preserved_tables: List[str] = Field(default_factory=list) + # Host-managed entity classes to partially preserve (table → WHERE clause EXCLUDING rows to delete) + # Dict mapping host table name to a SQL WHERE expression for rows that SHOULD be preserved. + # e.g. {"tickets": "source_plugin = 'nutri' AND linked_resource LIKE 'nutri_patients/%'"} + preserved_host_entities: Dict[str, str] = Field(default_factory=dict) + # If true, Uninstall button is completely disabled in the UI (for active audit windows, etc.) + block_uninstall: bool = False + + @field_validator("preserved_tables") + @classmethod + def table_names_identifier(cls, v: List[str]) -> List[str]: + for name in v: + if not re.match(r"^[a-z][a-z0-9_]*$", name): + raise ValueError( + f"preserved_tables entry '{name}' must match ^[a-z][a-z0-9_]*$" + ) + return v + + @field_validator("preserved_host_entities") + @classmethod + def host_entity_tables_known(cls, v: Dict[str, str]) -> Dict[str, str]: + _ALLOWED_HOST_TABLES = frozenset({ + "triggers", "tickets", "goal_tasks", "goals", "projects", "missions" + }) + for table in v: + if table not in _ALLOWED_HOST_TABLES: + raise ValueError( + f"preserved_host_entities key '{table}' is not a known host entity table. " + f"Allowed: {sorted(_ALLOWED_HOST_TABLES)}" + ) + return v + class PluginUIEntryPoints(BaseModel): """Typed container for ui_entry_points in plugin.yaml (Wave 2.1). @@ -655,6 +900,16 @@ class PluginManifest(BaseModel): # env_vars_needed is kept as deprecated warning-only for backwards compatibility. integrations: Optional[List["PluginIntegration"]] = None + # --- B2.0: Public pages (unauthenticated, token-bound) --- + # Declared under public_pages: in plugin.yaml. + # Requires Capability.public_pages in capabilities list. 
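+    # (Enforced below by the public_pages_require_capability model validator.)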
+ public_pages: Optional[List[PluginPublicPage]] = None + + # --- B3: Safe uninstall with data preservation --- + # Declared under safe_uninstall: in plugin.yaml. + # Requires Capability.safe_uninstall in capabilities list. + safe_uninstall: Optional[PluginSafeUninstall] = None + @field_validator("id") @classmethod def slug_pattern(cls, v: str) -> str: @@ -802,6 +1057,133 @@ def pages_bundle_paths_unique(self) -> "PluginManifest": return self + @model_validator(mode="after") + def safe_uninstall_requires_capability(self) -> "PluginManifest": + """B3: safe_uninstall block requires Capability.safe_uninstall in capabilities.""" + if self.safe_uninstall and Capability.safe_uninstall not in self.capabilities: + raise ValueError( + "safe_uninstall is declared but Capability.safe_uninstall is missing " + "from capabilities list." + ) + return self + + @model_validator(mode="after") + def safe_uninstall_preserved_tables_slug_prefixed(self) -> "PluginManifest": + """B3: preserved_tables must start with {slug_under}.""" + if not self.safe_uninstall or not self.safe_uninstall.preserved_tables: + return self + slug_under = self.id.replace("-", "_") + "_" + for table in self.safe_uninstall.preserved_tables: + if not table.lower().startswith(slug_under): + raise ValueError( + f"safe_uninstall.preserved_tables entry '{table}' does not start " + f"with required prefix '{slug_under}'. " + "Preserved tables must be plugin-owned." + ) + return self + + @model_validator(mode="after") + def safe_uninstall_enabled_requires_confirmation(self) -> "PluginManifest": + """B3: if safe_uninstall.enabled is true, user_confirmation is required.""" + su = self.safe_uninstall + if su and su.enabled and not su.block_uninstall and not su.user_confirmation: + raise ValueError( + "safe_uninstall.enabled is true but user_confirmation is not declared. " + "Admin must confirm with a checkbox + typed phrase." + ) + return self + + @model_validator(mode="after") + def readonly_data_no_orphan_table_references(self) -> "PluginManifest": + """Vault B3.S4: readonly_data SQL must not reference _orphan_* tables. + + Orphan tables are renamed on uninstall to prevent hostile reinstall from + accessing them via readonly_data declarations. + """ + if not self.readonly_data: + return self + _TABLE_RE = re.compile( + r"\b(?:FROM|JOIN)\s+([a-zA-Z_][a-zA-Z0-9_]*)", + re.IGNORECASE, + ) + for query in self.readonly_data: + tables = _TABLE_RE.findall(query.sql) + for table in tables: + if table.lower().startswith("_orphan_"): + raise ValueError( + f"ReadonlyQuery '{query.id}' references orphan table '{table}'. " + "Queries must not reference _orphan_* tables — these are preserved " + "from a previous uninstall and are inaccessible under the plugin namespace." + ) + return self + + @model_validator(mode="after") + def public_pages_require_capability(self) -> "PluginManifest": + """B2.0: public_pages block requires Capability.public_pages in capabilities.""" + if self.public_pages and Capability.public_pages not in self.capabilities: + raise ValueError( + "public_pages is declared but Capability.public_pages is missing " + "from capabilities list." 
+            )
+        return self
+
+    @model_validator(mode="after")
+    def public_pages_tables_slug_prefixed(self) -> "PluginManifest":
+        """B2.0: token_source.table must start with {slug_under} (same guard as readonly/writable)."""
+        if not self.public_pages:
+            return self
+        slug_under = self.id.replace("-", "_") + "_"
+        for page in self.public_pages:
+            table = page.token_source.table
+            if not table.lower().startswith(slug_under):
+                raise ValueError(
+                    f"PluginPublicPage '{page.id}' token_source.table '{table}' "
+                    f"does not start with required prefix '{slug_under}'. "
+                    "Public page token sources must only reference the plugin's own tables."
+                )
+        return self
+
+    @model_validator(mode="after")
+    def public_pages_ids_unique(self) -> "PluginManifest":
+        """B2.0: public page ids and route_prefixes must be unique within a plugin."""
+        if not self.public_pages:
+            return self
+        seen_ids: set[str] = set()
+        seen_prefixes: set[str] = set()
+        for page in self.public_pages:
+            if page.id in seen_ids:
+                raise ValueError(
+                    f"Duplicate PluginPublicPage id '{page.id}' in public_pages."
+                )
+            if page.route_prefix in seen_prefixes:
+                raise ValueError(
+                    f"Duplicate PluginPublicPage route_prefix '{page.route_prefix}' in public_pages."
+                )
+            seen_ids.add(page.id)
+            seen_prefixes.add(page.route_prefix)
+        return self
+
+    @model_validator(mode="after")
+    def readonly_public_via_references_valid_page(self) -> "PluginManifest":
+        """B2.0: readonly_data[].public_via must reference a declared public_pages[].id."""
+        has_public_via = [q for q in (self.readonly_data or []) if q.public_via]
+        if not has_public_via:
+            return self
+        page_ids = {p.id for p in (self.public_pages or [])}
+        for query in has_public_via:
+            if query.public_via not in page_ids:
+                raise ValueError(
+                    f"ReadonlyQuery '{query.id}' references public_via='{query.public_via}' "
+                    "which is not declared in public_pages."
+                )
+            if not query.bind_token_param:
+                raise ValueError(
+                    f"ReadonlyQuery '{query.id}' has public_via set but bind_token_param "
+                    "is missing. The query must declare which SQL parameter receives the token."
+                )
+        return self
+
+
 def load_plugin_manifest(plugin_dir: Path) -> PluginManifest:
     """Load and validate plugin.yaml from a plugin directory.
diff --git a/dashboard/backend/rate_limit.py b/dashboard/backend/rate_limit.py
new file mode 100644
index 00000000..c1b49939
--- /dev/null
+++ b/dashboard/backend/rate_limit.py
@@ -0,0 +1,26 @@
+"""Shared Flask-Limiter instance for EvoNexus.
+
+Placing the limiter here (rather than in app.py directly) breaks the
+circular-import chain: app.py initialises it, route blueprints import it.
+
+Usage in a blueprint::
+
+    from rate_limit import limiter
+
+    @bp.route("/api/shares/<token>/view")
+    @limiter.limit("60 per minute")
+    def view_share(token: str):
+        ...
+"""
+
+from flask_limiter import Limiter
+from flask_limiter.util import get_remote_address
+
+# Uninitialised instance — app.py calls limiter.init_app(app) at startup.
+limiter = Limiter(
+    get_remote_address,
+    # Default: generous to avoid false positives on authenticated API routes.
+    # Individual endpoints override with @limiter.limit() decorators.
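+    # "600 per minute" uses Flask-Limiter's plain-string limit notation; with
+    # storage_uri="memory://" below, counters are per-process (no shared backend).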
+ default_limits=["600 per minute"], + storage_uri="memory://", +) diff --git a/dashboard/backend/request_security.py b/dashboard/backend/request_security.py new file mode 100644 index 00000000..c1e356d8 --- /dev/null +++ b/dashboard/backend/request_security.py @@ -0,0 +1,41 @@ +"""HTTP request security helpers shared by Flask blueprints.""" + +from __future__ import annotations + +from flask import abort, request + +from session_security import CSRF_HEADER_NAME, XHR_HEADER_NAME, XHR_HEADER_VALUE, issue_session_token + +MUTATING_METHODS = {"POST", "PUT", "PATCH", "DELETE"} + +XHR_EXEMPT_PREFIXES = ( + "/api/knowledge/v1/", + "/api/triggers/webhook/", +) + + +def should_require_xhr(path: str, method: str, authorization_header: str = "") -> bool: + """Return True when a mutating API request must carry the XHR header.""" + + if method.upper() not in MUTATING_METHODS: + return False + if not path.startswith("/api/"): + return False + if (authorization_header or "").strip().startswith("Bearer "): + return False + for prefix in XHR_EXEMPT_PREFIXES: + if path.startswith(prefix): + return False + return True + + +def require_xhr(req=request) -> None: + """Abort with 403 if a mutating request lacks the XHR and CSRF headers.""" + + if not should_require_xhr(req.path, req.method, req.headers.get("Authorization", "")): + return + if req.headers.get(XHR_HEADER_NAME) != XHR_HEADER_VALUE: + abort(403, description="CSRF check failed: X-Requested-With header missing.") + expected = issue_session_token(force=False) + if req.headers.get(CSRF_HEADER_NAME, "").strip() != expected: + abort(403, description="CSRF check failed: invalid or missing CSRF token.") diff --git a/dashboard/backend/routes/agent_knowledge.py b/dashboard/backend/routes/agent_knowledge.py new file mode 100644 index 00000000..5c7d2787 --- /dev/null +++ b/dashboard/backend/routes/agent_knowledge.py @@ -0,0 +1,426 @@ +"""Agent Knowledge routes. + +This module provides an accounting-friendly facade over the pgvector Knowledge +engine. It keeps the canonical RAG divisions aligned with the custom accounting +agents while reusing the existing Knowledge ingestion/search pipeline. 
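+
+Each division below is backed by a Knowledge "space" whose slug matches the
+division slug (see AGENT_KNOWLEDGE_DIVISIONS and _ensure_space).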
+""" + +from __future__ import annotations + +from pathlib import Path +from typing import Any +from uuid import uuid4 + +from flask import Blueprint, jsonify, request +from flask_login import current_user +from sqlalchemy import text +from werkzeug.utils import secure_filename + +from routes.auth_routes import require_permission +from routes.knowledge import _assert_key, _get_sqlite, _require_xhr + +from knowledge import documents as documents_mod +from knowledge import search as search_mod +from knowledge import spaces as spaces_mod +from knowledge.connection_pool import get_dsn, get_engine + +bp = Blueprint("agent_knowledge", __name__) + +_UPLOAD_DIR = Path(__file__).resolve().parents[2] / "data" / "knowledge" / "uploads" + + +AGENT_KNOWLEDGE_DIVISIONS: list[dict[str, Any]] = [ + { + "slug": "geral", + "label": "Geral", + "agent": None, + "description": "Base compartilhada entre todos os agentes contabeis.", + "color": "#00FFA7", + }, + { + "slug": "custom-atendimento", + "label": "Atendimento", + "agent": "custom-atendimento", + "description": "Triagem, comunicacao com clientes, pendencias e protocolos.", + "color": "#38BDF8", + }, + { + "slug": "custom-controladoria", + "label": "Controladoria", + "agent": "custom-controladoria", + "description": "Indicadores, fechamentos gerenciais, controles e analises.", + "color": "#A78BFA", + }, + { + "slug": "custom-fiscal", + "label": "Fiscal", + "agent": "custom-fiscal", + "description": "Tributos, notas, apuracoes, obrigacoes e legislacao fiscal.", + "color": "#F59E0B", + }, + { + "slug": "custom-orquestrador", + "label": "Orquestrador", + "agent": "custom-orquestrador", + "description": "Procedimentos gerais, roteamento de demandas e regras de operacao.", + "color": "#00FFA7", + }, + { + "slug": "custom-rh", + "label": "RH", + "agent": "custom-rh", + "description": "Folha, admissao, desligamento, beneficios e rotinas trabalhistas.", + "color": "#F472B6", + }, + { + "slug": "custom-societario", + "label": "Societario", + "agent": "custom-societario", + "description": "Contratos sociais, alteracoes, CNPJ, licencas e Junta Comercial.", + "color": "#34D399", + }, +] + +_DIVISION_BY_SLUG = {d["slug"]: d for d in AGENT_KNOWLEDGE_DIVISIONS} + + +def _error(code: str, message: str, status: int = 400): + return jsonify({"error": code, "message": message}), status + + +def _connections() -> list[dict[str, Any]]: + _assert_key() + from knowledge.connections import list_connections + + conn = _get_sqlite() + try: + return list_connections(conn) + finally: + conn.close() + + +def _ready_connections() -> list[dict[str, Any]]: + return [c for c in _connections() if c.get("status") == "ready"] + + +def _resolve_connection_id(preferred: str | None = None) -> tuple[str | None, list[dict[str, Any]]]: + ready = _ready_connections() + if preferred: + for conn in ready: + if preferred in {conn.get("id"), conn.get("slug")}: + return conn["id"], ready + raise ValueError(f"Knowledge connection '{preferred}' is not ready or does not exist.") + if not ready: + return None, ready + return ready[0]["id"], ready + + +def _space_stats(connection_id: str, space_id: str) -> dict[str, int]: + engine = get_engine(connection_id, get_dsn(connection_id)) + with engine.connect() as pg: + row = pg.execute( + text( + """ + SELECT + COUNT(DISTINCT d.id) AS documents_count, + COUNT(c.id) AS chunks_count + FROM knowledge_spaces s + LEFT JOIN knowledge_documents d ON d.space_id = s.id + LEFT JOIN knowledge_chunks c ON c.space_id = s.id + WHERE s.id = :space_id + """ + ), + {"space_id": 
space_id}, + ).fetchone() + if row is None: + return {"documents_count": 0, "chunks_count": 0} + data = dict(row._mapping) + return { + "documents_count": int(data.get("documents_count") or 0), + "chunks_count": int(data.get("chunks_count") or 0), + } + + +def _space_payload(connection_id: str, division: dict[str, Any]) -> dict[str, Any]: + space = spaces_mod.get_space_by_slug(connection_id, division["slug"]) + payload = { + **division, + "ready": bool(space), + "space": space, + "documents_count": 0, + "chunks_count": 0, + } + if space: + payload.update(_space_stats(connection_id, space["id"])) + return payload + + +def _ensure_space(connection_id: str, division: dict[str, Any]) -> dict[str, Any]: + existing = spaces_mod.get_space_by_slug(connection_id, division["slug"]) + if existing: + return existing + + try: + return spaces_mod.create_space( + connection_id, + { + "slug": division["slug"], + "name": division["label"], + "description": division["description"], + "visibility": "shared", + "access_rules": { + "agent_knowledge": True, + "agent": division["agent"], + }, + "content_type_boosts": { + "faq": 1.25, + "reference": 1.20, + "decision": 1.15, + "tutorial": 1.10, + "article": 1.00, + "note": 0.95, + "transcript": 0.85, + }, + }, + ) + except Exception: + # Concurrent first-run bootstrap may create the same slug between the + # read and insert. Re-read before surfacing the original error. + existing = spaces_mod.get_space_by_slug(connection_id, division["slug"]) + if existing: + return existing + raise + + +def _ensure_division(connection_id: str, slug: str) -> tuple[dict[str, Any], dict[str, Any]]: + division = _DIVISION_BY_SLUG.get(slug) + if not division: + raise KeyError(f"Unknown division '{slug}'") + return division, _ensure_space(connection_id, division) + + +def _get_division_space(connection_id: str, slug: str) -> tuple[dict[str, Any], dict[str, Any] | None]: + division = _DIVISION_BY_SLUG.get(slug) + if not division: + raise KeyError(f"Unknown division '{slug}'") + return division, spaces_mod.get_space_by_slug(connection_id, division["slug"]) + + +def _document_status(document_id: str) -> dict[str, Any]: + return documents_mod.get_ingestion_status(document_id) or {"document_id": document_id, "phase": "pending"} + + +@bp.route("/api/agent-knowledge/divisions", methods=["GET"]) +@require_permission("knowledge", "view") +def divisions(): + try: + connection_id, ready = _resolve_connection_id(request.args.get("connection_id")) + except Exception as exc: + return _error("connection_error", str(exc), 400) + + if not connection_id: + return jsonify( + { + "connections": ready, + "active_connection_id": None, + "divisions": AGENT_KNOWLEDGE_DIVISIONS, + "ready": False, + "message": "No ready pgvector Knowledge connection configured.", + } + ) + + try: + return jsonify( + { + "connections": ready, + "active_connection_id": connection_id, + "divisions": [_space_payload(connection_id, d) for d in AGENT_KNOWLEDGE_DIVISIONS], + "ready": True, + } + ) + except Exception as exc: + return _error("divisions_failed", str(exc), 500) + + +@bp.route("/api/agent-knowledge/bootstrap", methods=["POST"]) +@require_permission("knowledge", "manage") +def bootstrap(): + _require_xhr() + data = request.get_json(silent=True) or {} + try: + connection_id, _ready = _resolve_connection_id(data.get("connection_id")) + if not connection_id: + return _error("no_connection", "No ready pgvector Knowledge connection configured.", 400) + created = [] + for division in AGENT_KNOWLEDGE_DIVISIONS: + before = 
spaces_mod.get_space_by_slug(connection_id, division["slug"]) + space = _ensure_space(connection_id, division) + if before is None: + created.append(space["slug"]) + return jsonify( + { + "status": "ready", + "created": created, + "divisions": [_space_payload(connection_id, d) for d in AGENT_KNOWLEDGE_DIVISIONS], + } + ) + except Exception as exc: + return _error("bootstrap_failed", str(exc), 500) + + +@bp.route("/api/agent-knowledge/documents", methods=["GET"]) +@require_permission("knowledge", "view") +def list_documents(): + try: + connection_id, _ready = _resolve_connection_id(request.args.get("connection_id")) + if not connection_id: + return _error("no_connection", "No ready pgvector Knowledge connection configured.", 400) + division_slug = request.args.get("division") or "all" + limit = min(int(request.args.get("limit", 25)), 100) + + if division_slug == "all": + documents = [] + for division in AGENT_KNOWLEDGE_DIVISIONS: + space = spaces_mod.get_space_by_slug(connection_id, division["slug"]) + if not space: + continue + for doc in documents_mod.list_documents(connection_id, space_id=space["id"], limit=limit): + documents.append({**doc, "division": division["slug"], "division_label": division["label"]}) + documents.sort(key=lambda d: str(d.get("created_at") or ""), reverse=True) + return jsonify({"documents": documents[:limit]}) + + division, space = _get_division_space(connection_id, division_slug) + if not space: + return jsonify({"documents": []}) + documents = [ + {**doc, "division": division["slug"], "division_label": division["label"]} + for doc in documents_mod.list_documents(connection_id, space_id=space["id"], limit=limit) + ] + return jsonify({"documents": documents}) + except KeyError as exc: + return _error("bad_division", str(exc), 404) + except Exception as exc: + return _error("documents_failed", str(exc), 500) + + +@bp.route("/api/agent-knowledge/upload", methods=["POST"]) +@require_permission("knowledge", "manage") +def upload(): + _require_xhr() + try: + connection_id, _ready = _resolve_connection_id(request.form.get("connection_id")) + if not connection_id: + return _error("no_connection", "No ready pgvector Knowledge connection configured.", 400) + division_slug = request.form.get("division") or "" + division, space = _ensure_division(connection_id, division_slug) + except KeyError as exc: + return _error("bad_division", str(exc), 404) + except Exception as exc: + return _error("upload_failed", str(exc), 500) + + files = request.files.getlist("files") or request.files.getlist("file") + if not files: + return _error("bad_request", "Missing uploaded files.", 400) + + raw_tags = request.form.get("tags") or "" + extra_tags = [t.strip() for t in raw_tags.split(",") if t.strip()] + uploads = [] + tmp_dir = _UPLOAD_DIR + tmp_dir.mkdir(parents=True, exist_ok=True) + + for f in files: + original_name = f.filename or "document" + safe_name = secure_filename(original_name) or f"document-{uuid4().hex}" + tmp_path = tmp_dir / f"{uuid4().hex}-{safe_name}" + f.save(str(tmp_path)) + + metadata = { + "title": request.form.get("title") or Path(original_name).stem, + "tags": ["agent-knowledge", division["slug"], *(extra_tags or [])], + "owner_id": str(getattr(current_user, "id", "") or ""), + "agent": division["agent"], + "division": division["slug"], + "mime_type": f.mimetype or None, + } + doc = documents_mod.upload_document(connection_id, space["id"], str(tmp_path), metadata) + uploads.append( + { + "document": doc, + "document_id": doc.get("id"), + "filename": original_name, + 
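+                # Ingestion is async: "status" carries the current phase and the
+                # route returns 202 so clients can poll /documents/<id>/status.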
"division": division["slug"], + "division_label": division["label"], + "status": _document_status(doc.get("id")), + } + ) + + return jsonify({"uploads": uploads}), 202 + + +@bp.route("/api/agent-knowledge/documents//status", methods=["GET"]) +@require_permission("knowledge", "view") +def status(document_id: str): + return jsonify(_document_status(document_id)) + + +@bp.route("/api/agent-knowledge/documents/", methods=["DELETE"]) +@require_permission("knowledge", "manage") +def delete_document(document_id: str): + _require_xhr() + try: + connection_id, _ready = _resolve_connection_id(request.args.get("connection_id")) + if not connection_id: + return _error("no_connection", "No ready pgvector Knowledge connection configured.", 400) + ok = documents_mod.delete_document(connection_id, document_id) + if not ok: + return _error("not_found", f"Document {document_id} not found.", 404) + return jsonify({"status": "deleted", "document_id": document_id}) + except Exception as exc: + return _error("delete_failed", str(exc), 500) + + +@bp.route("/api/agent-knowledge/search", methods=["GET"]) +@require_permission("knowledge", "view") +def search(): + query = (request.args.get("q") or request.args.get("query") or "").strip() + if not query: + return _error("bad_request", "query is required", 400) + + try: + connection_id, _ready = _resolve_connection_id(request.args.get("connection_id")) + if not connection_id: + return _error("no_connection", "No ready pgvector Knowledge connection configured.", 400) + division_slug = request.args.get("division") or "all" + top_k = min(int(request.args.get("top_k", 10)), 50) + + targets = [] + if division_slug == "all": + for division in AGENT_KNOWLEDGE_DIVISIONS: + space = spaces_mod.get_space_by_slug(connection_id, division["slug"]) + if space: + targets.append((division, space)) + else: + division, space = _get_division_space(connection_id, division_slug) + if not space: + return jsonify({"query": query, "results": [], "total": 0}) + targets.append((division, space)) + + results = [] + per_target_k = top_k if len(targets) <= 1 else max(3, top_k) + for division, space in targets: + hits = search_mod.hybrid_search( + connection_id=connection_id, + space_id=space["id"], + query=query, + top_k=per_target_k, + filters={}, + ) + for hit in hits: + results.append({**hit, "division": division["slug"], "division_label": division["label"]}) + + results.sort(key=lambda r: float(r.get("final_score") or 0), reverse=True) + return jsonify({"query": query, "results": results[:top_k], "total": len(results[:top_k])}) + except KeyError as exc: + return _error("bad_division", str(exc), 404) + except Exception as exc: + return _error("search_failed", str(exc), 500) diff --git a/dashboard/backend/routes/auth_routes.py b/dashboard/backend/routes/auth_routes.py index 42160cdb..e8f941ef 100644 --- a/dashboard/backend/routes/auth_routes.py +++ b/dashboard/backend/routes/auth_routes.py @@ -289,7 +289,31 @@ def create_user(): if not username or not password: abort(400, description="Username and password required") - if User.query.filter_by(username=username).first(): + existing = User.query.filter_by(username=username).first() + if existing: + if not existing.is_active: + # Reactivate a previously deactivated user instead of blocking + existing.is_active = True + existing.display_name = _as_text(data.get("display_name")).strip() or username + existing.email = _as_text(data.get("email")).strip() or existing.email + existing.role = role + existing.set_password(password) + # Auto-skip onboarding 
for reactivated users in configured workspaces + try: + from routes.providers import _read_config, PROVIDERS_CONFIG + from routes._helpers import WORKSPACE + if PROVIDERS_CONFIG.is_file(): + config = _read_config() + active_provider = config.get("active_provider") + has_provider = bool(active_provider and active_provider != "none") + has_workspace = (WORKSPACE / "config" / "workspace.yaml").exists() + if has_provider and has_workspace: + existing.onboarding_state = "skipped" + except Exception: + pass + db.session.commit() + audit(current_user, "user_reactivated", f"user:{existing.id}", f"role={role}") + return jsonify(existing.to_dict()), 200 abort(400, description="Username already exists") valid_roles = [r.name for r in Role.query.all()] if role not in valid_roles: @@ -303,6 +327,24 @@ def create_user(): role=role, created_by=current_user.id, ) + # Auto-skip onboarding when the workspace is already configured. + # This prevents new team members from being forced through the full + # setup wizard when the system is already operational. + # NOTE: _read_config() returns a fallback default with + # active_provider="anthropic" even when providers.json doesn't exist. + # We must check the FILE itself to avoid false positives. + try: + from routes.providers import _read_config, PROVIDERS_CONFIG + from routes._helpers import WORKSPACE + if PROVIDERS_CONFIG.is_file(): + config = _read_config() + active_provider = config.get("active_provider") + has_provider = bool(active_provider and active_provider != "none") + has_workspace = (WORKSPACE / "config" / "workspace.yaml").exists() + if has_provider and has_workspace: + user.onboarding_state = "skipped" + except Exception: + pass # Never block user creation if check fails user.set_password(password) db.session.add(user) db.session.commit() @@ -350,7 +392,34 @@ def deactivate_user(user_id): user.is_active = False db.session.commit() audit(current_user, "user_deactivated", f"user:{user.id}") - return jsonify({"message": "User deactivated"}) + return jsonify({"status": "ok", "message": "User deactivated", "user": user.to_dict()}) + + +@bp.route("/api/users//reactivate", methods=["POST"]) +@login_required +@require_permission("users", "manage") +def reactivate_user(user_id): + """Reactivate a previously deactivated user.""" + user = User.query.get_or_404(user_id) + if user.is_active: + return jsonify({"status": "ok", "message": "User is already active", "user": user.to_dict()}) + user.is_active = True + # Auto-skip onboarding for reactivated users in configured workspaces + try: + from routes.providers import _read_config, PROVIDERS_CONFIG + from routes._helpers import WORKSPACE + if PROVIDERS_CONFIG.is_file(): + config = _read_config() + active_provider = config.get("active_provider") + has_provider = bool(active_provider and active_provider != "none") + has_workspace = (WORKSPACE / "config" / "workspace.yaml").exists() + if has_provider and has_workspace: + user.onboarding_state = "skipped" + except Exception: + pass + db.session.commit() + audit(current_user, "user_reactivated", f"user:{user.id}") + return jsonify({"status": "ok", "message": "User reactivated", "user": user.to_dict()}) # ── Audit (admin only) ─────────────────────────────── diff --git a/dashboard/backend/routes/brain_repo.py b/dashboard/backend/routes/brain_repo.py index 5bd8dd03..b47703ba 100644 --- a/dashboard/backend/routes/brain_repo.py +++ b/dashboard/backend/routes/brain_repo.py @@ -240,6 +240,76 @@ def _decrypt_token(config: BrainRepoConfig) -> str: # ── Status 
──────────────────────────────────────────── +def _repo_name_from_url(repo_url: str) -> str: + cleaned = repo_url.strip().rstrip("/") + if cleaned.endswith(".git"): + cleaned = cleaned[:-4] + return cleaned.rsplit("/", 1)[-1] if cleaned else "" + + +def _repo_owner_from_url(repo_url: str) -> str: + cleaned = repo_url.strip().rstrip("/") + if cleaned.endswith(".git"): + cleaned = cleaned[:-4] + if "/" not in cleaned: + return "" + owner = cleaned.rsplit("/", 2)[-2] + if ":" in owner: + owner = owner.rsplit(":", 1)[-1] + return owner + + +def _enqueue_bootstrap_for_config( + config: BrainRepoConfig, + token: str, + *, + sync_after: bool = False, + workspace: Path | None = None, + tag_name: str | None = None, + commit_message: str | None = None, +) -> bool: + """Repair/init the local clone for a connected Brain Repo config.""" + if not config.repo_url: + abort(400, description="Brain repo URL not configured") + + repo_name = config.repo_name or _repo_name_from_url(config.repo_url) + if not repo_name: + abort(400, description="Brain repo name not configured") + + repo_owner = config.repo_owner or _repo_owner_from_url(config.repo_url) + try: + from brain_repo.github_api import get_github_username + github_username = get_github_username(token) + except Exception: + github_username = repo_owner + + config.repo_name = repo_name + config.repo_owner = repo_owner + config.local_path = None + config.last_error = None + db.session.commit() + + try: + from brain_repo import job_runner + from flask import current_app + except ImportError: + abort(500, description="job_runner module unavailable") + + return job_runner.enqueue_bootstrap( + current_app._get_current_object(), # type: ignore[attr-defined] + current_user.id, + token=token, + repo_url=config.repo_url, + repo_name=repo_name, + owner_username=current_user.username or repo_owner, + github_username=github_username, + sync_after=sync_after, + workspace=workspace, + tag_name=tag_name, + commit_message=commit_message, + ) + + @bp.route("/api/brain-repo/status") @login_required def status(): @@ -392,15 +462,27 @@ def connect(): else: # Validate existing repo is private try: - from brain_repo.github_api import get_repo_info + from brain_repo.github_api import get_repo_info, get_github_username ok_private, repo_info = get_repo_info(token, repo_url) except ImportError: ok_private, repo_info = True, {} # graceful fallback if not ok_private: abort(400, description="Repository must be private") - repo_owner = repo_info.get("owner", {}).get("login", "") - repo_name = repo_info.get("name", "") + repo_owner = repo_info.get("owner", {}).get("login", "") or _repo_owner_from_url(repo_url) + repo_name = repo_info.get("name", "") or _repo_name_from_url(repo_url) + try: + github_username = get_github_username(token) + except Exception: + github_username = repo_owner + bootstrap_pending = True + bootstrap_params = { + "token": token, + "repo_url": repo_url, + "repo_name": repo_name, + "owner_username": current_user.username or repo_owner, + "github_username": github_username, + } # Encrypt and store token. # @@ -451,6 +533,8 @@ def connect(): config.repo_url = repo_url config.repo_owner = repo_owner config.repo_name = repo_name + if bootstrap_pending: + config.local_path = None # local_path stays NULL when bootstrap is deferred — it gets filled in by # job_runner.run_bootstrap_pipeline after the push succeeds. UI reads # sync_in_progress + local_path==null as "initializing". 
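Quick sanity check of the two URL helpers above (illustrative, not part of the patch) — both HTTPS and SSH remotes resolve to the same owner/name:

```python
# Behaviour of _repo_name_from_url / _repo_owner_from_url as defined above,
# assuming both helpers are imported from routes/brain_repo.py.
assert _repo_name_from_url("https://github.com/acme/brain.git") == "brain"
assert _repo_owner_from_url("https://github.com/acme/brain") == "acme"
# SSH form: the "git@github.com:acme" segment is split on ":" to recover "acme".
assert _repo_owner_from_url("git@github.com:acme/brain.git") == "acme"
```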
@@ -628,31 +712,67 @@ def sync_force(): if not config or not config.github_token_encrypted: abort(400, description="Brain repo not connected") + token = _decrypt_token(config) + if not token: + abort(500, description="Could not decrypt stored token - re-connect the brain repo") + + workspace = Path(__file__).resolve().parent.parent.parent.parent + now = datetime.now(timezone.utc) + # Seconds in the tag name avoids "tag already exists" on rapid re-clicks + tag_name = f"milestone/manual-{now.strftime('%Y-%m-%d-%H-%M-%S')}" + sync_commit_message = f"manual sync {now.isoformat()}" + local_path = config.local_path if not local_path: - abort(400, description="local_path not configured — repo not yet cloned") + enqueued = _enqueue_bootstrap_for_config( + config, + token, + sync_after=True, + workspace=workspace, + tag_name=tag_name, + commit_message=sync_commit_message, + ) + if not enqueued: + return jsonify({ + "ok": False, + "error": "Another sync is already running for this user.", + "code": "SYNC_IN_PROGRESS", + }), 409 + return jsonify({ + "ok": True, + "status": "queued", + "job": "bootstrap", + "message": "Local brain repo clone queued. Sync will run after initialization.", + }), 202 repo_dir = Path(local_path) if not repo_dir.is_dir() or not (repo_dir / ".git").is_dir(): - abort(500, description=f"Local brain repo at {local_path} is missing or corrupt — re-connect") - - # Quick token-decryption probe so bad keys fail fast (before enqueueing). - # The pipeline re-decrypts inside the thread — we just surface the error - # synchronously here so the UI can react inline. - token = _decrypt_token(config) - if not token: - abort(500, description="Could not decrypt stored token — re-connect the brain repo") + enqueued = _enqueue_bootstrap_for_config( + config, + token, + sync_after=True, + workspace=workspace, + tag_name=tag_name, + commit_message=sync_commit_message, + ) + if not enqueued: + return jsonify({ + "ok": False, + "error": "Another sync is already running for this user.", + "code": "SYNC_IN_PROGRESS", + }), 409 + return jsonify({ + "ok": True, + "status": "queued", + "job": "bootstrap", + "message": "Local brain repo clone repaired. 
Sync will run after initialization.", + }), 202 try: from brain_repo import job_runner except ImportError: abort(500, description="job_runner module unavailable") - workspace = Path(__file__).resolve().parent.parent.parent.parent - now = datetime.now(timezone.utc) - # Seconds in the tag name avoids "tag already exists" on rapid re-clicks - tag_name = f"milestone/manual-{now.strftime('%Y-%m-%d-%H-%M-%S')}" - from flask import current_app enqueued = job_runner.enqueue_sync( current_app._get_current_object(), # type: ignore[attr-defined] @@ -660,7 +780,7 @@ def sync_force(): workspace, kind=job_runner.JOB_KIND_SYNC, tag_name=tag_name, - commit_message=f"manual sync {now.isoformat()}", + commit_message=sync_commit_message, ) if not enqueued: return jsonify({ @@ -699,27 +819,68 @@ def tag_milestone(): if not config or not config.github_token_encrypted: abort(400, description="Brain repo not connected") + token = _decrypt_token(config) + if not token: + abort(500, description="Could not decrypt stored token - re-connect the brain repo") + + workspace = Path(__file__).resolve().parent.parent.parent.parent + tag = f"milestone/{name}" + now = datetime.now(timezone.utc) + milestone_commit_message = f"milestone: {name} ({now.isoformat()})" + local_path = config.local_path if not local_path: - abort(400, description="local_path not configured — repo not yet cloned") + enqueued = _enqueue_bootstrap_for_config( + config, + token, + sync_after=True, + workspace=workspace, + tag_name=tag, + commit_message=milestone_commit_message, + ) + if not enqueued: + return jsonify({ + "ok": False, + "error": "Another sync is already running for this user.", + "code": "SYNC_IN_PROGRESS", + }), 409 + return jsonify({ + "ok": True, + "status": "queued", + "job": "bootstrap", + "tag": tag, + "message": "Local brain repo clone queued. Milestone sync will run after initialization.", + }), 202 repo_dir = Path(local_path) if not repo_dir.is_dir() or not (repo_dir / ".git").is_dir(): - abort(500, description=f"Local brain repo at {local_path} is missing or corrupt — re-connect") - - token = _decrypt_token(config) - if not token: - abort(500, description="Could not decrypt stored token — re-connect the brain repo") + enqueued = _enqueue_bootstrap_for_config( + config, + token, + sync_after=True, + workspace=workspace, + tag_name=tag, + commit_message=milestone_commit_message, + ) + if not enqueued: + return jsonify({ + "ok": False, + "error": "Another sync is already running for this user.", + "code": "SYNC_IN_PROGRESS", + }), 409 + return jsonify({ + "ok": True, + "status": "queued", + "job": "bootstrap", + "tag": tag, + "message": "Local brain repo clone repaired. 
Milestone sync will run after initialization.",
+        }), 202

     try:
         from brain_repo import job_runner
     except ImportError:
         abort(500, description="job_runner module unavailable")

-    workspace = Path(__file__).resolve().parent.parent.parent.parent
-    tag = f"milestone/{name}"
-    now = datetime.now(timezone.utc)
-
     from flask import current_app
     enqueued = job_runner.enqueue_sync(
         current_app._get_current_object(),  # type: ignore[attr-defined]
@@ -727,7 +888,7 @@ def tag_milestone():
         workspace,
         kind=job_runner.JOB_KIND_MILESTONE,
         tag_name=tag,
-        commit_message=f"milestone: {name} ({now.isoformat()})",
+        commit_message=milestone_commit_message,
     )
     if not enqueued:
         return jsonify({
diff --git a/dashboard/backend/routes/knowledge.py b/dashboard/backend/routes/knowledge.py
index 1d5ecfd1..f6c2e807 100644
--- a/dashboard/backend/routes/knowledge.py
+++ b/dashboard/backend/routes/knowledge.py
@@ -68,7 +68,7 @@ def _require_xhr() -> None:
         "vector_dim": 1536,
     },
     "gemini": {
-        "model": "gemini-embedding-001",
+        "model": "gemini-embedding-2",
         "vector_dim": 768,
     },
 }
@@ -90,13 +90,12 @@ def _require_xhr() -> None:
         {"id": "text-embedding-ada-002", "dim": 1536, "legacy": True},
     ],
     "gemini": [
-        {"id": "gemini-embedding-001", "dim": 768, "recommended": True,
-         "supports_task_type": True,
-         "note": "Text-only. Supports task_type for retrieval optimization."},
-        {"id": "gemini-embedding-2-preview", "dim": 768,
+        {"id": "gemini-embedding-2", "dim": 768,
+         "recommended": True,
          "supports_task_type": False,
-         "preview": True,
-         "note": "Multimodal preview. Task hint must be inline in the prompt."},
+         "note": "Stable multimodal Embedding 2. Task hints are encoded as text prefixes for RAG."},
+        {"id": "gemini-embedding-001", "dim": 768,
+         "supports_task_type": True,
+         "note": "Legacy text-only model. Supports task_type for retrieval optimization."},
     ],
 }
diff --git a/dashboard/backend/routes/onboarding.py b/dashboard/backend/routes/onboarding.py
index bdc688c6..87fb69f0 100644
--- a/dashboard/backend/routes/onboarding.py
+++ b/dashboard/backend/routes/onboarding.py
@@ -71,6 +71,60 @@ def skip():
     db.session.commit()
     return jsonify({"onboarding_state": "skipped"})

+
+@bp.route("/api/onboarding/workspace-status")
+@login_required
+def workspace_status():
+    """Check if the workspace is already configured by another user.
+
+    Returns a JSON object indicating whether the workspace is ready for use
+    without requiring the full onboarding wizard. This is the key endpoint
+    that enables multi-user onboarding: if the workspace is already set up
+    by an admin, new users can skip straight to the dashboard.
+
+    IMPORTANT: _read_config() returns a default fallback of
+    {"active_provider": "anthropic"} even when no providers.json exists on
+    disk. We must check the FILE exists to avoid false positives.
+    """
+    from routes._helpers import WORKSPACE
+    from routes.providers import PROVIDERS_CONFIG, _read_config
+
+    # Only trust the config if providers.json actually exists on disk.
+    # The fallback default makes active_provider="anthropic" even when
+    # nobody has configured anything yet.
+ providers_file_exists = PROVIDERS_CONFIG.is_file() + config = _read_config() if providers_file_exists else {} + active_provider = config.get("active_provider") + has_provider = bool( + providers_file_exists + and active_provider + and active_provider != "none" + ) + + # Check if any other user has completed onboarding + other_completed = User.query.filter( + User.id != current_user.id, + User.onboarding_state.in_(["completed", "skipped"]) + ).count() > 0 + + # Check if workspace.yaml exists (created during initial setup) + workspace_yaml = WORKSPACE / "config" / "workspace.yaml" + has_workspace = workspace_yaml.exists() + + # Workspace is ready if the providers.json FILE truly exists with a + # non-none active_provider AND either another user completed onboarding + # or the workspace config file exists. + workspace_ready = has_provider and (other_completed or has_workspace) + + return jsonify({ + "workspace_ready": workspace_ready, + "has_provider": has_provider, + "active_provider": active_provider, + "other_users_configured": other_completed, + "has_workspace_config": has_workspace, + }) + @bp.route("/api/onboarding/provider", methods=["POST"]) @login_required def set_provider(): diff --git a/dashboard/backend/routes/platform.py b/dashboard/backend/routes/platform.py new file mode 100644 index 00000000..c13c3e3f --- /dev/null +++ b/dashboard/backend/routes/platform.py @@ -0,0 +1,135 @@ +from __future__ import annotations + +from flask import Blueprint, jsonify, request +from flask_login import login_required + +from models import has_permission +from platform_cache import cache_delete, cache_get, cache_get_or_set +from platform_observability import build_observability_summary +from platform_plugins import install_plugin, list_plugins, uninstall_plugin +from platform_queue import list_events, queue_status + +bp = Blueprint("platform", __name__) + + +def _require(resource: str, action: str): + from flask_login import current_user + + if not has_permission(current_user.role, resource, action): + return jsonify({"error": "Forbidden"}), 403 + return None + + +@bp.route("/api/observability/summary") +@login_required +def observability_summary(): + denied = _require("systems", "view") + if denied: + return denied + return jsonify(build_observability_summary()) + + +@bp.route("/api/observability/providers") +@login_required +def observability_providers(): + denied = _require("systems", "view") + if denied: + return denied + def _load_snapshot(): + summary = build_observability_summary() + return { + "provider_metrics": summary.get("provider_metrics", {}), + "provider_config": summary.get("provider_config", {}), + } + + return jsonify(cache_get_or_set("observability:providers", _load_snapshot, ttl=30)) + + +@bp.route("/api/platform/cache") +@login_required +def platform_cache(): + denied = _require("systems", "view") + if denied: + return denied + return jsonify({ + "cached_provider_list": cache_get("providers:list"), + "cached_observability": cache_get("observability:summary"), + "cached_observability_providers": cache_get("observability:providers"), + "cached_platform_queue": cache_get("platform:queue:50"), + }) + + +@bp.route("/api/platform/cache/clear", methods=["POST"]) +@login_required +def platform_cache_clear(): + denied = _require("config", "manage") + if denied: + return denied + cache_delete("providers:list") + cache_delete("observability:summary") + cache_delete("observability:providers") + cache_delete("platform:queue:50") + return jsonify({"status": "ok"}) + + 
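The platform_cache helpers imported at the top of this file are not part of this diff; the routes above only rely on get/delete/get-or-set-with-TTL semantics. A minimal process-local sketch of that contract (names match the imports, implementation is assumed):

```python
# Hypothetical stand-in for platform_cache — the real module is not in this PR.
import time
from typing import Any, Callable

_store: dict[str, tuple[float, Any]] = {}

def cache_get(key: str) -> Any | None:
    entry = _store.get(key)
    if entry and entry[0] > time.monotonic():
        return entry[1]
    return None

def cache_delete(key: str) -> None:
    _store.pop(key, None)

def cache_get_or_set(key: str, loader: Callable[[], Any], ttl: int = 30) -> Any:
    # Treats None as a cache miss — fine for the JSON payloads cached here.
    value = cache_get(key)
    if value is None:
        value = loader()
        _store[key] = (time.monotonic() + ttl, value)
    return value
```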
+@bp.route("/api/platform/queue") +@login_required +def platform_queue(): + denied = _require("systems", "view") + if denied: + return denied + return jsonify(cache_get_or_set( + "platform:queue:50", + lambda: { + "status": queue_status(), + "events": list_events(limit=50), + }, + ttl=10, + )) + + +@bp.route("/api/platform/plugins") +@login_required +def plugins_list(): + denied = _require("systems", "view") + if denied: + return denied + return jsonify({"plugins": list_plugins()}) + + +@bp.route("/api/platform/plugins/install", methods=["POST"]) +@login_required +def plugins_install(): + denied = _require("config", "manage") + if denied: + return denied + data = request.get_json(silent=True) or {} + plugin_id = (data.get("plugin_id") or "").strip() + if not plugin_id: + return jsonify({"error": "plugin_id is required"}), 400 + try: + result = install_plugin(plugin_id) + except KeyError as exc: + return jsonify({"error": str(exc)}), 404 + except FileExistsError as exc: + return jsonify({"error": str(exc)}), 409 + except ValueError as exc: + return jsonify({"error": str(exc)}), 400 + cache_delete("observability:summary") + return jsonify({"status": "ok", "plugin": result}) + + +@bp.route("/api/platform/plugins//uninstall", methods=["POST"]) +@login_required +def plugins_uninstall(plugin_id: str): + denied = _require("config", "manage") + if denied: + return denied + try: + result = uninstall_plugin(plugin_id) + except KeyError as exc: + return jsonify({"error": str(exc)}), 404 + except ValueError as exc: + return jsonify({"error": str(exc)}), 400 + cache_delete("observability:summary") + return jsonify({"status": "ok", "plugin": result}) diff --git a/dashboard/backend/routes/plugin_public_pages.py b/dashboard/backend/routes/plugin_public_pages.py new file mode 100644 index 00000000..3b88264d --- /dev/null +++ b/dashboard/backend/routes/plugin_public_pages.py @@ -0,0 +1,431 @@ +"""Plugin public pages — unauthenticated token-bound portals (B2.0). + +Routes registered here bypass the ``before_request`` auth gate in ``app.py``. +The host validates the URL token against a plugin-declared column in a +plugin-owned table on every request. + +B2.0 scope (read-only, no PIN): + GET /p/// — serve portal bundle + GET /p////data — serve public readonly query + GET /p//public-assets/ — serve ui/public/ static assets + +B2.1 (PIN + writable + token-bind) is deferred. + +Security controls applied here: + - Rate limit 60 req/min/IP (from rate_limit.py) on portal + data endpoints + - Vault §B2.S2: Referrer-Policy, Cache-Control no-store, HSTS on every response + - Token validated parametrically (no SQL injection risk on token value) + - table/column identifiers validated via PluginPublicPage schema at install time + - Path traversal prevented by realpath + startswith containment check + - MIME whitelist on public asset serving +""" + +from __future__ import annotations + +import os +import sqlite3 +from pathlib import Path +from typing import Any, Dict, Optional + +from flask import Blueprint, abort, jsonify, request, Response, after_this_request + +from models import audit +from rate_limit import limiter + +bp = Blueprint("plugin_public_pages", __name__) + +# Resolved once at module load; identical to plugins.py pattern. +WORKSPACE = Path(__file__).resolve().parent.parent.parent.parent +PLUGINS_DIR = WORKSPACE / "plugins" +DB_PATH = WORKSPACE / "dashboard" / "data" / "evonexus.db" + +# --------------------------------------------------------------------------- +# Module-level public prefix cache. 
+# Updated on install/uninstall via register_public_prefix / unregister_public_prefix.
+# Read by app.py before_request middleware to bypass auth for /p/... paths.
+# ---------------------------------------------------------------------------
+
+# Set of string prefixes, each entry like "/p/nutri/portal"
+_PLUGIN_PUBLIC_PREFIXES: set[str] = set()
+
+
+def register_public_prefix(slug: str, route_prefix: str) -> None:
+    """Add a plugin's public route prefix to the auth bypass cache.
+
+    Called by plugin_loader.py (or routes/plugins.py) after a successful install.
+    """
+    _PLUGIN_PUBLIC_PREFIXES.add(f"/p/{slug}/{route_prefix}")
+
+
+def unregister_public_prefix(slug: str, route_prefix: str) -> None:
+    """Remove a plugin's public route prefix from the auth bypass cache.
+
+    Called by routes/plugins.py during uninstall.
+    """
+    _PLUGIN_PUBLIC_PREFIXES.discard(f"/p/{slug}/{route_prefix}")
+
+
+def get_public_prefixes() -> frozenset[str]:
+    """Read-only snapshot of the current public prefix set.
+
+    Used by app.py before_request middleware.
+    """
+    return frozenset(_PLUGIN_PUBLIC_PREFIXES)
+
+
+# ---------------------------------------------------------------------------
+# Helpers
+# ---------------------------------------------------------------------------
+
+def _security_headers(response: Response) -> Response:
+    """Vault §B2.S2: mandatory security headers on all public-page responses."""
+    response.headers["Referrer-Policy"] = "no-referrer"
+    response.headers["Cache-Control"] = "no-store, private, no-cache, must-revalidate"
+    response.headers["Pragma"] = "no-cache"
+    response.headers["X-Content-Type-Options"] = "nosniff"
+    response.headers["Strict-Transport-Security"] = "max-age=63072000; includeSubDomains"
+    return response
+
+
+def _get_db() -> sqlite3.Connection:
+    conn = sqlite3.connect(str(DB_PATH), timeout=10)
+    conn.row_factory = sqlite3.Row
+    return conn
+
+
+def _load_page_config(slug: str, route_prefix: str) -> Optional[Dict[str, Any]]:
+    """Return the installed public_pages config for the given slug + route_prefix.
+
+    Reads from the manifest stored in plugins_installed (same pattern as plugins.py).
+    Returns None if not found or not installed.
+    """
+    import json as _json
+    conn = _get_db()
+    try:
+        row = conn.execute(
+            "SELECT manifest_json FROM plugins_installed WHERE slug = ? AND status = 'active'",
+            (slug,),
+        ).fetchone()
+        if not row:
+            return None
+        manifest = _json.loads(row["manifest_json"])
+        for page in manifest.get("public_pages") or []:
+            if page.get("route_prefix") == route_prefix:
+                return page
+        return None
+    finally:
+        conn.close()
+
+
+def _validate_token(page_config: Dict[str, Any], token: str) -> bool:
+    """Validate the URL token against the plugin-declared token_source column.
+
+    Uses a parametric query — only the `?` value is user-supplied.
+    Table and column names come from the manifest (validated at install by
+    PluginPublicPage schema; both are slug-prefixed and identifier-safe).
+    """
+    token_source = page_config.get("token_source", {})
+    table = token_source.get("table", "")
+    column = token_source.get("column", "")
+
+    if not table or not column:
+        return False
+
+    # Identifiers are validated at install time (PluginPublicPage schema) to
+    # match ^[a-z][a-z0-9_]*$ — safe to interpolate here.
+    sql = f"SELECT 1 FROM {table} WHERE {column} = ?"  # noqa: S608 — identifiers whitelisted at install
+
+    # All plugin tables live in the shared evonexus.db — there is no per-plugin
+    # database, so the token lookup goes through the same _get_db() connection.
+    conn = _get_db()
+    try:
+        row = conn.execute(sql, (token,)).fetchone()
+        return row is not None
+    except sqlite3.OperationalError:
+        # Table doesn't exist yet (e.g. install in progress) — fail closed.
+        return False
+    finally:
+        conn.close()
+
+
+def _serve_bundle(slug: str, bundle_path: str) -> Response:
+    """Serve a plugin's ui/public/ bundle file (no auth check needed here —
+    caller already verified token; bundle is the entire page shell).
+
+    ``bundle_path`` is relative to the plugin dir (e.g. "ui/public/portal.js").
+    """
+    plugin_dir = PLUGINS_DIR / slug
+    ui_public_root = os.path.realpath(str(plugin_dir / "ui" / "public"))
+    # Strip "ui/public/" prefix to get the sub-path
+    relative = bundle_path[len("ui/public/"):]
+    requested = os.path.realpath(os.path.join(ui_public_root, relative))
+
+    # Containment check — must stay inside plugins/{slug}/ui/public/
+    if not requested.startswith(ui_public_root + os.sep) and requested != ui_public_root:
+        abort(404)
+
+    if not os.path.isfile(requested):
+        abort(404)
+
+    ext = os.path.splitext(requested)[1].lower()
+    mime_map = {
+        ".js": "application/javascript; charset=utf-8",
+        ".mjs": "application/javascript; charset=utf-8",
+        ".css": "text/css; charset=utf-8",
+        ".json": "application/json; charset=utf-8",
+        ".html": "text/html; charset=utf-8",
+    }
+    mime = mime_map.get(ext)
+    if not mime:
+        abort(404)
+
+    with open(requested, "rb") as fh:
+        content = fh.read()
+
+    resp = Response(content, mimetype=mime)
+    resp.headers["X-Content-Type-Options"] = "nosniff"
+    # Content-Security-Policy: restrict resource loading to same origin.
+    # 'unsafe-inline' is included for inline scripts in plugin bundles (Web Component pattern).
+    resp.headers["Content-Security-Policy"] = (
+        "default-src 'self'; script-src 'self' 'unsafe-inline'; "
+        "style-src 'self' 'unsafe-inline'; img-src 'self' data:; "
+        "connect-src 'self'; frame-ancestors 'none'"
+    )
+    return resp
+
+
+# ---------------------------------------------------------------------------
+# Endpoints
+# ---------------------------------------------------------------------------
+
+@bp.route("/p/<slug>/<route_prefix>/<token>", methods=["GET"])
+@limiter.limit("60 per minute")
+def portal_page(slug: str, route_prefix: str, token: str):
+    """Serve the plugin portal page after validating the URL token.
+
+    Flow:
+        1. Load page config from plugins_installed manifest.
+        2. Validate token against token_source.column (parametric SQL).
+        3. Serve the plugin's ui/public/ bundle.
+        4. Apply security headers.
+    """
+    @after_this_request
+    def _headers(response: Response) -> Response:
+        return _security_headers(response)
+
+    page_config = _load_page_config(slug, route_prefix)
+    if not page_config:
+        return jsonify({"error": "Link inválido ou expirado", "code": "not_found"}), 404
+
+    if not _validate_token(page_config, token):
+        ip = request.remote_addr or "-"
+        audit(
+            None,
+            page_config.get("audit_action") or "portal_view_denied",
+            f"plugins/{slug}/public_pages/{route_prefix}",
+            detail=f"token={token[:8]}... ip={ip} reason=token_invalid",
+        )
+        return jsonify({"error": "Link inválido ou expirado", "code": "not_found"}), 404
+
+    # Token valid — log successful view
+    ip = request.remote_addr or "-"
+    ua = (request.headers.get("User-Agent", "-") or "-")[:200]
+    audit(
+        None,
+        page_config.get("audit_action") or "portal_view",
+        f"plugins/{slug}/public_pages/{route_prefix}",
+        detail=f"token={token[:8]}... ip={ip} ua={ua[:80]}",
+    )
+
+    bundle_path = page_config.get("bundle", "")
+
+    # Wave 2.1.x — content negotiation: browsers send Accept: text/html and
+    # expect a rendered page; programmatic clients (or asset prefetchers) can
+    # fetch the raw bundle by NOT sending text/html in Accept (or by hitting
+    # /p/{slug}/public-assets/{file} directly, which does not require a token).
+    #
+    # When the caller wants HTML, generate a minimal shell that loads the bundle
+    # and instantiates the declared custom element. Without this, browsers see
+    # the JS source. With this, the portal renders.
+    accept = (request.headers.get("Accept") or "").lower()
+    wants_html = "text/html" in accept and "application/javascript" not in accept
+    if wants_html:
+        return _serve_html_shell(slug, page_config, token)
+
+    return _serve_bundle(slug, bundle_path)
+
+
+def _serve_html_shell(slug: str, page_config: dict, token: str) -> Response:
+    """Render a minimal HTML page that boots the plugin's custom element.
+
+    The shell is generated server-side so plugin authors only ship a JS bundle
+    (custom element definition). The bundle is fetched as a module from
+    /p/{slug}/public-assets/{file} (no auth required for assets — they contain
+    no patient data; data lives behind the token-gated /data endpoint).
+
+    The custom element receives the URL token via ``data-token`` attribute so
+    the bundle does not need to re-parse window.location.
+    """
+    from html import escape as h
+    bundle_path = page_config.get("bundle", "") or ""
+    if not bundle_path.startswith("ui/public/"):
+        abort(404)
+    bundle_relative = bundle_path[len("ui/public/"):]
+    custom_element = page_config.get("custom_element_name") or ""
+    if not custom_element or not all(c.isalnum() or c in "-" for c in custom_element):
+        # Defense in depth — schema already validates this on install
+        abort(500)
+    label = page_config.get("description") or page_config.get("label") or "Portal"
+
+    # CSP for the shell:
+    #   - script-src 'self' allows the bundle module from same origin (public-assets/)
+    #   - style-src 'self' 'unsafe-inline' covers minimal inline shell styling
+    #   - img-src 'self' data: covers logos embedded as data URIs (brand workaround)
+    #   - connect-src 'self' so the bundle can fetch /p/{slug}/{route}/{token}/data
+    asset_url = f"/p/{h(slug)}/public-assets/{h(bundle_relative)}"
+    body = (
+        "<!doctype html>"
+        "<html lang=\"en\">"
+        "<head>"
+        "<meta charset=\"utf-8\">"
+        "<meta name=\"viewport\" content=\"width=device-width, initial-scale=1\">"
+        f"<title>{h(label)}</title>"
+        "</head>"
+        "<body>"
+        f"<{custom_element} data-token=\"{h(token)}\" data-slug=\"{h(slug)}\">"
+        f"</{custom_element}>"
+        f"<script type=\"module\" src=\"{asset_url}\"></script>"
+        "</body></html>"
+    )
+    resp = Response(body, mimetype="text/html; charset=utf-8")
+    resp.headers["X-Content-Type-Options"] = "nosniff"
+    resp.headers["Content-Security-Policy"] = (
+        "default-src 'self'; "
+        "script-src 'self' 'unsafe-inline'; "
+        "style-src 'self' 'unsafe-inline'; "
+        "img-src 'self' data:; "
+        "connect-src 'self'; "
+        "frame-ancestors 'none'"
+    )
+    return resp
+
+
+@bp.route("/p/<slug>/<route_prefix>/<token>/data", methods=["GET"])
+@limiter.limit("120 per minute")
+def portal_data(slug: str, route_prefix: str, token: str):
+    """Serve public readonly query results bound to the URL token.
+
+    Requires a ``query_id`` query-string param that matches a declared
+    readonly_data entry with ``public_via`` pointing to this page.
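+    Example (illustrative ids): GET /p/nutri/portal/<token>/data?query_id=meals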
+ """ + @after_this_request + def _headers(response: Response) -> Response: + return _security_headers(response) + + query_id = request.args.get("query_id", "").strip() + if not query_id: + return jsonify({"error": "query_id is required", "code": "bad_request"}), 400 + + page_config = _load_page_config(slug, route_prefix) + if not page_config: + return jsonify({"error": "Link inválido ou expirado", "code": "not_found"}), 404 + + if not _validate_token(page_config, token): + return jsonify({"error": "Link inválido ou expirado", "code": "not_found"}), 404 + + # Load readonly_data entries from the manifest to find the matching public query + import json as _json + conn_meta = _get_db() + try: + row = conn_meta.execute( + "SELECT manifest_json FROM plugins_installed WHERE slug = ? AND status = 'active'", + (slug,), + ).fetchone() + if not row: + return jsonify({"error": "Plugin not found", "code": "not_found"}), 404 + manifest = _json.loads(row["manifest_json"]) + finally: + conn_meta.close() + + # Find the query + public_page_id = page_config.get("id") + query_spec = None + for q in manifest.get("readonly_data") or []: + if q.get("id") == query_id and q.get("public_via") == public_page_id: + query_spec = q + break + + if not query_spec: + return jsonify({"error": "Query not found or not public", "code": "not_found"}), 404 + + bind_param = query_spec.get("bind_token_param") + sql = query_spec.get("sql", "") + + # Execute query with token bound to the declared parameter + conn_data = _get_db() + try: + if bind_param: + rows = conn_data.execute(sql, {bind_param: token}).fetchall() + else: + rows = conn_data.execute(sql).fetchall() + results = [dict(r) for r in rows] + except sqlite3.OperationalError as exc: + return jsonify({"error": "Query execution failed", "detail": str(exc)}), 500 + finally: + conn_data.close() + + return jsonify({"query_id": query_id, "rows": results}) + + +@bp.route("/p//public-assets/", methods=["GET"]) +def portal_static(slug: str, subpath: str): + """Serve plugin static assets from ui/public/ (no token required). + + CSS, images, and other non-JS assets referenced by the portal bundle. + Path must stay within plugins/{slug}/ui/public/ (containment check). 
+ """ + @after_this_request + def _headers(response: Response) -> Response: + return _security_headers(response) + + plugin_dir = PLUGINS_DIR / slug + ui_public_root = os.path.realpath(str(plugin_dir / "ui" / "public")) + requested = os.path.realpath(os.path.join(ui_public_root, subpath)) + + # Containment check + if not requested.startswith(ui_public_root + os.sep): + abort(404) + + if not os.path.isfile(requested): + abort(404) + + ext = os.path.splitext(requested)[1].lower() + mime_map = { + ".js": "application/javascript; charset=utf-8", + ".mjs": "application/javascript; charset=utf-8", + ".css": "text/css; charset=utf-8", + ".png": "image/png", + ".jpg": "image/jpeg", + ".jpeg": "image/jpeg", + ".webp": "image/webp", + ".json": "application/json; charset=utf-8", + ".html": "text/html; charset=utf-8", + ".ico": "image/x-icon", + ".woff2": "font/woff2", + ".woff": "font/woff", + ".ttf": "font/ttf", + } + mime = mime_map.get(ext) + if not mime: + abort(404) + + with open(requested, "rb") as fh: + content = fh.read() + + resp = Response(content, mimetype=mime) + resp.headers["X-Content-Type-Options"] = "nosniff" + # Static assets can be cached by the browser (shorter TTL for public portal) + resp.headers["Cache-Control"] = "public, max-age=300" + return resp diff --git a/dashboard/backend/routes/plugins.py b/dashboard/backend/routes/plugins.py index a7f33cdc..5b92d728 100644 --- a/dashboard/backend/routes/plugins.py +++ b/dashboard/backend/routes/plugins.py @@ -13,8 +13,11 @@ import os import shutil import sqlite3 +import subprocess +import tempfile import threading import time +import uuid from datetime import datetime, timezone from pathlib import Path from typing import Any @@ -778,6 +781,40 @@ def install_plugin(): except RuntimeError as exc: return jsonify({"error": str(exc)}), 409 + # B3: Check for orphaned tables from a previous uninstall (safe_uninstall). + # If orphans exist, verify SHA256 to prevent hostile reinstall (Vault B3.S3). + _orphan_check_conn = _get_db() + try: + _orphan_rows = _orphan_check_conn.execute( + "SELECT tablename, original_sha256, original_plugin_version FROM plugin_orphans " + "WHERE slug = ? AND recovered_at IS NULL", + (slug,), + ).fetchall() + except Exception: + _orphan_rows = [] + finally: + _orphan_check_conn.close() + + if _orphan_rows: + # Verify SHA256: the plugin being installed must match what was originally installed. + _install_sha256 = tarball_sha256 or "" + _original_sha256s = {row[1] for row in _orphan_rows if row[1]} + if _original_sha256s and _install_sha256: + if _install_sha256 not in _original_sha256s: + _admin_confirm = data.get("confirmed_sha256_change", False) + if not _admin_confirm: + return jsonify({ + "error": "sha256_mismatch", + "detail": ( + "Source changed since last install — possible hostile reinstall. " + "This plugin has orphaned tables from a previous install. " + "Pass confirmed_sha256_change=true to override (will be audited)." + ), + "orphaned_tables": [row[0] for row in _orphan_rows], + "expected_sha256": list(_original_sha256s), + "provided_sha256": _install_sha256, + }), 409 + plugin_dir = PLUGINS_DIR / slug state: dict[str, Any] = { "slug": slug, @@ -788,6 +825,40 @@ def install_plugin(): conn = _get_db() try: + # B3: Recover orphaned tables BEFORE copying/migrating (Vault B3.S3). + # Rename _orphan_{slug}_{table} back to {table} so install.sql can use them. 
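+        # Example (plugin slug "nutri", preserving a hypothetical "patients"
+        # table): uninstall left "_orphan_nutri_patients" behind, so the loop
+        # below effectively runs
+        #     ALTER TABLE _orphan_nutri_patients RENAME TO patients;
+        # before migrations, and install.sql sees the original table again.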
+ _recovered_tables: list[str] = [] + if _orphan_rows: + _recovery_conn = _get_db() + try: + for _orphan_row in _orphan_rows: + _orig_table = _orphan_row[0] + _orphan_table_name = f"_orphan_{slug}_{_orig_table}" + _existing = { + row[0] for row in _recovery_conn.execute( + "SELECT name FROM sqlite_master WHERE type='table'" + ).fetchall() + } + if _orphan_table_name in _existing: + _recovery_conn.execute( + f"ALTER TABLE {_orphan_table_name} RENAME TO {_orig_table}" + ) + _recovery_conn.commit() + _recovered_tables.append(_orig_table) + logger.info("B3 reinstall: recovered orphaned table '%s'", _orig_table) + + # Mark orphans as recovered in plugin_orphans + if _recovered_tables: + _now = _now_iso() + for _t in _recovered_tables: + _recovery_conn.execute( + "UPDATE plugin_orphans SET recovered_at = ? WHERE slug = ? AND tablename = ?", + (_now, slug, _t), + ) + _recovery_conn.commit() + finally: + _recovery_conn.close() + # --- Step: copy plugin source to plugins/{slug}/ --- plugin_dir.mkdir(parents=True, exist_ok=True) @@ -1164,9 +1235,172 @@ def uninstall_plugin(slug: str): if not plugin_dir.exists(): return jsonify({"error": f"Plugin '{slug}' not found"}), 404 + # --- B3: safe_uninstall enforcement --- + # Load the installed manifest to check if safe_uninstall capability is declared. + _force_uninstall = os.environ.get("EVONEXUS_ALLOW_FORCE_UNINSTALL", "").strip() == "1" + _manifest_for_b3: dict = {} + _safe_uninstall_spec: dict = {} + try: + _manifest_conn = _get_db() + _manifest_row = _manifest_conn.execute( + "SELECT manifest_json FROM plugins_installed WHERE slug = ?", (slug,) + ).fetchone() + _manifest_conn.close() + if _manifest_row: + _manifest_for_b3 = json.loads(_manifest_row["manifest_json"] or "{}") + _safe_uninstall_spec = _manifest_for_b3.get("safe_uninstall") or {} + except Exception as _exc: + logger.warning("B3: could not load manifest for safe_uninstall check: %s", _exc) + + _su_enabled = _safe_uninstall_spec.get("enabled", False) + _block_uninstall = _safe_uninstall_spec.get("block_uninstall", False) + + if _block_uninstall and not _force_uninstall: + return jsonify({ + "error": "uninstall_blocked", + "detail": _safe_uninstall_spec.get("reason", "Plugin has declared block_uninstall: true."), + "code": "blocked", + }), 409 + + if _su_enabled and not _force_uninstall: + # Vault B3.S1: backend enforcement — require admin + confirmation_phrase + exported_at + if not hasattr(current_user, "role") or getattr(current_user, "role", None) != "admin": + return jsonify({ + "error": "admin_required", + "detail": "Only admin users may uninstall plugins with safe_uninstall enabled.", + "code": "forbidden", + }), 403 + + _body = request.get_json(force=True, silent=True) or {} + _phrase_required = (_safe_uninstall_spec.get("user_confirmation") or {}).get("typed_phrase", "") + _phrase_given = _body.get("confirmation_phrase", "") + if _phrase_required and _phrase_given != _phrase_required: + return jsonify({ + "error": "confirmation_phrase_mismatch", + "detail": f"Typed phrase must be exactly: {_phrase_required}", + "code": "bad_request", + }), 400 + + _exported_at = _body.get("exported_at", "") + if _exported_at: + if not os.path.exists(_exported_at): + return jsonify({ + "error": "export_file_not_found", + "detail": f"Export file not found at path: {_exported_at}", + "code": "bad_request", + }), 400 + + # Vault B3.S1: zip_password must be present (the actual encryption happens in the pre-hook) + _zip_password = _body.get("zip_password", "") + if not _zip_password: + return jsonify({ + 
"error": "zip_password_required", + "detail": "A ZIP password is required to encrypt the export archive.", + "code": "bad_request", + }), 400 + + if _force_uninstall: + # Vault B3.S6: force-uninstall MUST produce an audit row with reason + _force_reason = (request.get_json(force=True, silent=True) or {}).get("force_reason", "") + logger.warning( + "FORCE UNINSTALL activated for '%s' (EVONEXUS_ALLOW_FORCE_UNINSTALL=1). reason=%r user=%s", + slug, _force_reason, getattr(current_user, "username", "unknown"), + ) + # --- End B3 enforcement gate --- + conn = _get_db() + _orphan_records: list[str] = [] # B3: populated during orphan table rename phase try: - # Pre-uninstall hook + # B3: Sandboxed pre-uninstall hook (Vault B3.S2) + # Run BEFORE the legacy hook so it has access to DB state. + _su_hook_spec = _safe_uninstall_spec.get("pre_uninstall_hook") or {} + if _su_enabled and not _force_uninstall and _su_hook_spec: + _hook_script = _su_hook_spec.get("script", "") + _hook_output_dir_template = _su_hook_spec.get("output_dir", "") + _hook_timeout = _su_hook_spec.get("timeout_seconds", 600) + _must_produce = _su_hook_spec.get("must_produce_file", True) + _hook_script_path = plugin_dir / _hook_script + + if _hook_script_path.exists(): + _ts = datetime.now(timezone.utc).strftime("%Y%m%d_%H%M%S") + _output_dir_str = _hook_output_dir_template.format(slug=slug, timestamp=_ts) + _output_dir_path = (WORKSPACE / _output_dir_str).resolve() + _output_dir_path.mkdir(parents=True, exist_ok=True) + + # Create a read-only copy of the DB for the hook (Vault B3.S2) + _db_readonly_path = "" + try: + _tmp_db = tempfile.NamedTemporaryFile(suffix=".db", delete=False) + _tmp_db.close() + _tmp_db_path = _tmp_db.name + _src_conn = sqlite3.connect(str(DB_PATH)) + _bk_conn = sqlite3.connect(_tmp_db_path) + _src_conn.backup(_bk_conn) + _src_conn.close() + _bk_conn.close() + _db_readonly_path = _tmp_db_path + except Exception as _dbe: + logger.warning("B3: could not create DB snapshot for hook: %s", _dbe) + + # Vault B3.S2: locked-down env — NO BRAIN_REPO_MASTER_KEY + _hook_env = { + "PATH": os.environ.get("PATH", "/usr/bin:/bin"), + "PLUGIN_SLUG": slug, + "PLUGIN_VERSION": _manifest_for_b3.get("version", ""), + "OUTPUT_DIR": str(_output_dir_path), + "DB_READONLY_PATH": _db_readonly_path, + } + + try: + _proc = subprocess.run( + ["python3", str(_hook_script_path)], + cwd=str(plugin_dir), + env=_hook_env, + capture_output=True, + text=True, + timeout=_hook_timeout, + ) + _hook_stdout = _proc.stdout[:5000] + _hook_stderr = _proc.stderr[:5000] + _hook_exit = _proc.returncode + + _audit(conn, slug, "safe_uninstall_hook", { + "exit_code": _hook_exit, + "stdout": _hook_stdout, + "stderr": _hook_stderr, + "output_dir": str(_output_dir_path), + }) + + if _hook_exit != 0: + return jsonify({ + "error": "pre_hook_failed", + "detail": "Pre-uninstall hook failed — uninstall aborted to prevent data loss.", + "exit_code": _hook_exit, + "stderr": _hook_stderr, + }), 400 + + if _must_produce: + _produced = any(_output_dir_path.iterdir()) if _output_dir_path.exists() else False + if not _produced: + return jsonify({ + "error": "pre_hook_no_output", + "detail": "Pre-uninstall hook produced no files — uninstall aborted to prevent data loss.", + }), 400 + + except subprocess.TimeoutExpired: + return jsonify({ + "error": "pre_hook_timeout", + "detail": f"Pre-uninstall hook exceeded timeout of {_hook_timeout}s.", + }), 400 + finally: + # Clean up DB snapshot + if _db_readonly_path: + try: + os.unlink(_db_readonly_path) + except Exception: + pass 
+ + # Legacy pre-uninstall hook (non-B3 path) pre_hook = plugin_dir / "hooks" / "pre-uninstall.sh" if pre_hook.exists(): try: @@ -1219,14 +1453,75 @@ def uninstall_plugin(slug: str): # Delete host rows this plugin seeded (goals/tasks/triggers capabilities). # DELETE WHERE source_plugin = ? leaves user-created rows untouched. # Order matters because of FKs: children → parents. + # B3: respect preserved_host_entities filters from safe_uninstall spec. + _preserved_host_entities = _safe_uninstall_spec.get("preserved_host_entities") or {} for _tbl in ("triggers", "tickets", "goal_tasks", "goals", "projects", "missions"): try: - conn.execute(f"DELETE FROM {_tbl} WHERE source_plugin = ?", (slug,)) + _where = "source_plugin = ?" + if _tbl in _preserved_host_entities and not _force_uninstall: + # Preserve rows matching the declared WHERE clause. + # Only the base condition (source_plugin = ?) is parameterized; + # the preservation clause comes from the manifest (validated at install). + _preserve_clause = _preserved_host_entities[_tbl] + _where = f"(source_plugin = ?) AND NOT ({_preserve_clause})" + conn.execute(f"DELETE FROM {_tbl} WHERE {_where}", (slug,)) conn.commit() except Exception as exc: logger.warning("Uninstall: failed to clean %s: %s", _tbl, exc) - # SQL uninstall + # B3: Rename preserved tables to _orphan_{slug}_{tablename} BEFORE SQL uninstall. + # This removes them from the plugin namespace (Vault B3.S4) and records them + # in plugin_orphans so reinstall can detect and recover them. + _preserved_tables = _safe_uninstall_spec.get("preserved_tables") or [] + if _preserved_tables and _su_enabled and not _force_uninstall: + _orphan_conn = sqlite3.connect(str(DB_PATH)) + try: + _existing_tables_set = { + row[0] for row in _orphan_conn.execute( + "SELECT name FROM sqlite_master WHERE type='table'" + ).fetchall() + } + _user_id = getattr(current_user, "id", None) + _plugin_version = _manifest_for_b3.get("version", "") + _plugin_sha256 = _manifest_for_b3.get("source_sha256", "") + _plugin_publisher_url = _manifest_for_b3.get("source_url", "") + + for _orig_table in _preserved_tables: + if _orig_table not in _existing_tables_set: + logger.info("B3: preserved table '%s' does not exist, skipping", _orig_table) + continue + _orphan_name = f"_orphan_{slug}_{_orig_table}" + try: + # Rename to orphan name + _orphan_conn.execute(f"ALTER TABLE {_orig_table} RENAME TO {_orphan_name}") + _orphan_conn.commit() + logger.info("B3: renamed '%s' to '%s'", _orig_table, _orphan_name) + + # Record in plugin_orphans + _orphan_conn.execute( + "INSERT OR REPLACE INTO plugin_orphans " + "(id, slug, tablename, orphaned_at, orphaned_by_user_id, " + " original_plugin_version, original_sha256, original_publisher_url) " + "VALUES (?, ?, ?, ?, ?, ?, ?, ?)", + ( + str(uuid.uuid4()), + slug, + _orig_table, + _now_iso(), + _user_id, + _plugin_version, + _plugin_sha256, + _plugin_publisher_url, + ), + ) + _orphan_conn.commit() + _orphan_records.append(_orig_table) + except Exception as _te: + logger.warning("B3: failed to rename table '%s': %s", _orig_table, _te) + finally: + _orphan_conn.close() + + # SQL uninstall (runs after preserved tables are renamed — DROP won't touch them) uninstall_sql = plugin_dir / "migrations" / "uninstall.sql" if uninstall_sql.exists(): try: @@ -1294,10 +1589,15 @@ def uninstall_plugin(slug: str): # Reload scheduler _reload_scheduler() - _audit(conn, slug, "uninstall", { + _audit_action = "plugin_uninstall_safe" if (_su_enabled and not _force_uninstall) else "uninstall" + if _force_uninstall: 
+ _audit_action = "plugin_uninstall_force" + _audit(conn, slug, _audit_action, { "removed_env_keys": _removed_env_keys, "removed_health_cache_count": _health_cache_removed, "mcp_audit": _mcp_audit, + "preserved_tables": _orphan_records, + "force_uninstall": _force_uninstall, }, success=True) invalidate_agent_meta_cache() return jsonify({ @@ -1305,6 +1605,7 @@ def uninstall_plugin(slug: str): "status": "uninstalled", "mcp_audit": _mcp_audit, "removed_env_keys": _removed_env_keys, + "preserved_tables": _orphan_records, }) except Exception as exc: @@ -1820,10 +2121,17 @@ def readonly_data(slug: str, query_name: str): if not sql: return jsonify({"error": "Invalid query declaration"}), 500 - # Build query params from request.args — only declared params allowed + # Build query params from request.args — only declared params allowed. + # Wave 2.1.x reserved params (current_user_id, current_user_role) are + # injected server-side below and MUST NOT come from the client. + _RESERVED_PARAMS = {"current_user_id", "current_user_role"} declared_params = query_decl.get("params", {}) params: dict = {} for key, value in request.args.items(): + if key in _RESERVED_PARAMS: + return jsonify({ + "error": f"Parameter '{key}' is reserved and cannot be supplied by the client" + }), 400 if key not in declared_params: return jsonify({"error": f"Parameter '{key}' not declared in manifest"}), 400 params[key] = value @@ -1845,6 +2153,15 @@ def readonly_data(slug: str, query_name: str): elif ":limit" in sql: params["limit"] = 1000 + # Wave 2.1.x — auto-inject current_user identity bind params (Gap 5 fix + # from evonexus-plugin-nutri Step 3). Plugins reference these as + # :current_user_id and :current_user_role in their SQL to enforce + # server-side scoping (e.g. `WHERE primary_nutritionist_id = :current_user_id`). + # These keys are reserved — manifest params with the same name are + # silently overridden. Always present, regardless of declaration. + params["current_user_id"] = getattr(current_user, "id", None) + params["current_user_role"] = getattr(current_user, "role", "viewer") + try: conn = _get_db() cur = conn.execute(sql, params) @@ -1926,6 +2243,19 @@ def writable_data(slug: str, resource_id: str): ) return jsonify({"error": "Internal manifest error"}), 500 + # Wave 2.1.x — endpoint-level RBAC enforcement (Gap 1 fix from + # evonexus-plugin-nutri Step 3 RBAC decision). When requires_role is set + # in the manifest, only users whose role is in the list may mutate. + # 'admin' always passes (super-user override). + requires_role = resource_decl.get("requires_role") + if requires_role: + actor_role = getattr(current_user, "role", "viewer") + if actor_role != "admin" and actor_role not in requires_role: + return jsonify({ + "error": f"Resource '{resource_id}' requires role in {requires_role}, " + f"current role is '{actor_role}'" + }), 403 + allowed_columns: list[str] = resource_decl.get("allowed_columns") or [] method = request.method diff --git a/dashboard/backend/routes/providers.py b/dashboard/backend/routes/providers.py index a2127ef8..c7ec03ef 100644 --- a/dashboard/backend/routes/providers.py +++ b/dashboard/backend/routes/providers.py @@ -55,6 +55,16 @@ "CLOUD_ML_REGION", }) +DEFAULT_FAILOVER_ORDER = [ + "openrouter", + "anthropic", + "openai", + "codex_auth", + "gemini", + "bedrock", + "vertex", +] + def _read_config() -> dict: """Read providers.json. 
If missing, copy from providers.example.json."""
@@ -68,7 +78,7 @@ def _read_config() -> dict:
         return json.loads(PROVIDERS_CONFIG.read_text(encoding="utf-8"))
     except (json.JSONDecodeError, OSError):
         pass
-    return {"active_provider": "anthropic", "providers": {}}
+    return {"active_provider": "openrouter", "providers": {}}
 
 
 def _write_config(config: dict):
@@ -80,6 +90,45 @@ def _write_config(config: dict):
     )
 
 
+def _normalize_failover_order(order: list[str] | tuple[str, ...] | None, provider_map: dict, active_provider: str) -> list[str]:
+    """Return a stable failover order with active first and unavailable ids removed."""
+    seen: set[str] = set()
+    normalized: list[str] = []
+
+    def append(provider_id: str | None):
+        if not provider_id or provider_id in seen:
+            return
+        provider = provider_map.get(provider_id)
+        if provider is None or provider.get("coming_soon"):
+            return
+        seen.add(provider_id)
+        normalized.append(provider_id)
+
+    append(active_provider)
+    for provider_id in order or []:
+        append(provider_id)
+    for provider_id in DEFAULT_FAILOVER_ORDER:
+        append(provider_id)
+    for provider_id in provider_map:
+        append(provider_id)
+    return normalized
+
+
+def _ensure_routing(config: dict) -> dict:
+    providers = config.get("providers", {}) or {}
+    active = config.get("active_provider", "openrouter")
+    routing = config.get("routing") if isinstance(config.get("routing"), dict) else {}
+    raw_order = routing.get("failover_order")
+    if not isinstance(raw_order, list):
+        raw_order = DEFAULT_FAILOVER_ORDER
+    normalized = {
+        "enabled": routing.get("enabled", True) is not False,
+        "failover_order": _normalize_failover_order(raw_order, providers, active),
+    }
+    config["routing"] = normalized
+    return normalized
+
+
 def _mask_secret(value: str) -> str:
     """Mask an API key for safe display: sk-or-v1-abc...xyz → sk-or-****xyz."""
     if not value or len(value) < 8:
@@ -87,6 +136,10 @@ def _mask_secret(value: str) -> str:
     return value[:6] + "****" + value[-4:]
 
 
+def _is_secret_env_var(name: str) -> bool:
+    return "KEY" in name or "SECRET" in name or "TOKEN" in name
+
+
 def _run_cli_version(command: str, env: dict | None = None) -> dict:
     """Run '<command> --version' safely using hardcoded dispatch.
 
@@ -169,6 +222,32 @@ def _save_codex_auth(tokens: dict):
     CODEX_AUTH_FILE.write_text(json.dumps(auth_data, indent=2), encoding="utf-8")
 
 
+def _notify_terminal_server_provider_change(new_provider: str, old_provider: str):
+    """Notify the terminal-server that the active provider has changed.
+
+    Fire-and-forget: the UI response should not be blocked by this call.
+    The terminal-server will invalidate stale PTY sessions and broadcast
+    a WebSocket event to all connected clients.
+ """ + import threading + + def _do_notify(): + import requests as http_req + try: + http_req.post( + "http://localhost:32352/api/provider-changed", + json={"new_provider": new_provider, "old_provider": old_provider}, + timeout=5, + ) + except Exception as e: + import logging + logging.getLogger(__name__).warning( + "Failed to notify terminal-server of provider change: %s", e + ) + + threading.Thread(target=_do_notify, daemon=True).start() + + # ── Endpoints ────────────────────────────────────────────── @@ -177,8 +256,9 @@ def _save_codex_auth(tokens: dict): def list_providers(): """List all providers with status info.""" config = _read_config() - active = config.get("active_provider", "anthropic") + active = config.get("active_provider", "openrouter") providers = config.get("providers", {}) + routing = _ensure_routing(config) # Check CLI installation status for both binaries claude_status = _check_cli("claude") @@ -228,6 +308,7 @@ def list_providers(): return jsonify({ "providers": result, "active_provider": active, + "routing": routing, "claude_installed": claude_status["installed"], "openclaude_installed": openclaude_status["installed"], }) @@ -238,7 +319,7 @@ def list_providers(): def get_active_provider(): """Get the active provider.""" config = _read_config() - active = config.get("active_provider", "anthropic") + active = config.get("active_provider", "openrouter") provider = config.get("providers", {}).get(active, {}) return jsonify({ "active_provider": active, @@ -261,9 +342,15 @@ def set_active_provider(): if provider_id != "none" and provider_id not in config.get("providers", {}): return jsonify({"error": f"Unknown provider: {provider_id}"}), 400 + old_provider = config.get("active_provider", "openrouter") config["active_provider"] = provider_id + _ensure_routing(config) _write_config(config) + # Notify terminal-server so it can invalidate stale PTY sessions immediately. + if provider_id != old_provider: + _notify_terminal_server_provider_change(provider_id, old_provider) + return jsonify({"status": "ok", "active_provider": provider_id}) @@ -314,6 +401,11 @@ def update_provider_config(provider_id): # Skip if value looks masked (contains ****) if "****" in str(value): continue + # Secret inputs are rendered blank when the UI only has a masked value. + # Treat blank secret fields as "unchanged" so saving another provider + # field does not erase an existing API key/token. 
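+            # e.g. a save payload of {"OPENROUTER_MODEL": "...", "OPENROUTER_API_KEY": ""}
+            # updates the model and leaves the stored key untouched (payload
+            # shape is illustrative).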
+            if _is_secret_env_var(key) and str(value).strip() == "" and existing.get(key):
+                continue
             # Reject values with shell metacharacters
             if not isinstance(value, str) or re.search(r'[;&|`$\n\r]', value):
                 continue
@@ -685,6 +777,6 @@ def openai_logout():
     if CODEX_AUTH_FILE.is_file():
         CODEX_AUTH_FILE.unlink()
     config = _read_config()
-    config["active_provider"] = "anthropic"
+    config["active_provider"] = "openrouter"
     _write_config(config)
     return jsonify({"status": "ok"})
diff --git a/dashboard/backend/routes/shares.py b/dashboard/backend/routes/shares.py
index 458b6550..901ebb07 100644
--- a/dashboard/backend/routes/shares.py
+++ b/dashboard/backend/routes/shares.py
@@ -5,10 +5,11 @@
 from datetime import datetime, timezone, timedelta
 from pathlib import Path
 
-from flask import Blueprint, jsonify, request, Response
+from flask import Blueprint, jsonify, request, Response, after_this_request
 from flask_login import login_required, current_user
 
 from models import db, FileShare, audit, has_workspace_folder_access
+from rate_limit import limiter
 from routes.auth_routes import require_permission
 
 bp = Blueprint("shares", __name__)
@@ -184,6 +185,7 @@ def revoke_share(token: str):
 # ── Public endpoint (no auth required) ──────────────────────────────────────
 
 @bp.route("/api/shares/<token>/view", methods=["GET"])
+@limiter.limit("60 per minute")
 def view_share(token: str):
     """Serve the file content for a valid share token. No authentication required."""
     share = FileShare.query.filter_by(token=token).first()
@@ -214,6 +216,16 @@ def view_share(token: str):
     ua = (request.headers.get("User-Agent", "-") or "-")[:200]
     audit(None, "share_view", "shares", detail=f"token={token} ip={ip} ua={ua[:80]}")
 
+    # Vault §2.S2: security headers on all public share responses.
+    @after_this_request
+    def _add_security_headers(response):
+        response.headers["Referrer-Policy"] = "no-referrer"
+        response.headers["Cache-Control"] = "no-store, private, no-cache, must-revalidate"
+        response.headers["Pragma"] = "no-cache"
+        response.headers["X-Content-Type-Options"] = "nosniff"
+        response.headers["Strict-Transport-Security"] = "max-age=63072000; includeSubDomains"
+        return response
+
     suffix = full.suffix.lower()
 
     # HTML/HTM: serve raw so browser renders it as a full page
diff --git a/dashboard/backend/routes/terminal_proxy.py b/dashboard/backend/routes/terminal_proxy.py
index 17b5a6af..12ef780b 100644
--- a/dashboard/backend/routes/terminal_proxy.py
+++ b/dashboard/backend/routes/terminal_proxy.py
@@ -156,6 +156,10 @@ def proxy_ws(client_ws):
     target = f"{TERMINAL_WS_BASE}/ws"
     try:
         upstream = create_connection(target, timeout=10)
+        # `timeout` above should only bound the initial connection. The
+        # websocket-client library keeps it as the socket read timeout too,
+        # so an otherwise healthy but quiet terminal closes after 10s.
+        upstream.settimeout(None)
    except Exception as exc:
        log.warning("terminal_proxy: upstream WS connect failed: %s", exc)
        try:
@@ -190,7 +194,10 @@ def _pump_upstream_to_client():
 
     try:
         while not stop.is_set():
-            msg = client_ws.receive(timeout=30)
+            # Keep the tunnel open while the browser is idle. A fixed
+            # receive timeout closes valid agent sessions when the user is
+            # reading output or a hidden tab has throttled timers.
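+            # A blocking receive() returns None once the browser goes away,
+            # which exits the loop below; liveness is already covered by the
+            # client's JSON ping every 25s, so no read deadline is needed.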
+ msg = client_ws.receive() if msg is None: break upstream.send(msg) diff --git a/dashboard/backend/runtime_config.py b/dashboard/backend/runtime_config.py new file mode 100644 index 00000000..da1266ec --- /dev/null +++ b/dashboard/backend/runtime_config.py @@ -0,0 +1,122 @@ +from __future__ import annotations + +import os +import re +import secrets +from dataclasses import dataclass +from pathlib import Path + +import yaml + + +WORKSPACE = Path(__file__).resolve().parent.parent.parent + + +def is_production() -> bool: + env = ( + os.environ.get("EVONEXUS_ENV") + or os.environ.get("FLASK_ENV") + or os.environ.get("ENV") + or "" + ).strip().lower() + return env in {"production", "prod"} + + +def cors_allowed_origins() -> list[str] | str: + raw = os.environ.get("CORS_ALLOWED_ORIGINS", "").strip() + if raw: + if raw == "*": + return "*" + origins = [origin.strip() for origin in re.split(r"[,\s]+", raw) if origin.strip()] + return origins or "*" + return "*" if not is_production() else [] + + +def database_uri(workspace: Path = WORKSPACE) -> str: + for key in ("SQLALCHEMY_DATABASE_URI", "EVONEXUS_DATABASE_URL", "DATABASE_URL"): + raw = os.environ.get(key, "").strip() + if raw: + return raw + return f"sqlite:///{workspace / 'dashboard' / 'data' / 'evonexus.db'}" + + +def database_backend(database_uri_value: str) -> str: + if "://" not in database_uri_value: + return "sqlite" + return (database_uri_value.split(":", 1)[0] or "sqlite").lower() + + +def sqlite_path_from_uri(database_uri_value: str) -> Path | None: + prefix = "sqlite:///" + if not database_uri_value.startswith(prefix): + return None + return Path(database_uri_value.removeprefix(prefix)) + + +def dashboard_port(workspace: Path = WORKSPACE) -> int: + for key in ("EVONEXUS_PORT", "DASHBOARD_PORT", "PORT"): + raw = os.environ.get(key, "").strip() + if raw: + try: + port = int(raw) + if 1 <= port <= 65535: + return port + except ValueError: + pass + + config_path = workspace / "config" / "workspace.yaml" + if config_path.is_file(): + try: + cfg = yaml.safe_load(config_path.read_text(encoding="utf-8")) or {} + dashboard_cfg = cfg.get("dashboard", {}) if isinstance(cfg, dict) else {} + for candidate in (dashboard_cfg.get("port"), cfg.get("port") if isinstance(cfg, dict) else None): + if candidate is None: + continue + port = int(candidate) + if 1 <= port <= 65535: + return port + except Exception: + pass + + return 8080 + + +def resolve_secret_key(workspace: Path = WORKSPACE) -> str: + """Resolve the dashboard secret key from env or the local fallback file.""" + + secret_key = os.environ.get("EVONEXUS_SECRET_KEY", "").strip() + if secret_key: + return secret_key + + if is_production(): + raise RuntimeError("EVONEXUS_SECRET_KEY must be set in production") + + key_file = workspace / "dashboard" / "data" / ".secret_key" + key_file.parent.mkdir(parents=True, exist_ok=True) + if key_file.exists(): + return key_file.read_text(encoding="utf-8").strip() + + secret_key = secrets.token_hex(32) + key_file.write_text(secret_key, encoding="utf-8") + key_file.chmod(0o600) + return secret_key + + +@dataclass(frozen=True) +class DashboardRuntimeConfig: + secret_key: str + database_uri: str + database_backend: str + cors_allowed_origins: list[str] | str + dashboard_port: int + + +def load_dashboard_runtime_config(workspace: Path = WORKSPACE) -> DashboardRuntimeConfig: + db_uri = database_uri(workspace) + return DashboardRuntimeConfig( + secret_key=resolve_secret_key(workspace), + database_uri=db_uri, + database_backend=database_backend(db_uri), + 
cors_allowed_origins=cors_allowed_origins(), + dashboard_port=dashboard_port(workspace), + ) diff --git a/dashboard/backend/schema_migrations.py b/dashboard/backend/schema_migrations.py new file mode 100644 index 00000000..5c46a280 --- /dev/null +++ b/dashboard/backend/schema_migrations.py @@ -0,0 +1,291 @@ +from __future__ import annotations + +from sqlalchemy import inspect + + +def _table_names(connection) -> set[str]: + return set(inspect(connection).get_table_names()) + + +def _column_names(connection, table_name: str) -> set[str]: + if table_name not in _table_names(connection): + return set() + return {column["name"] for column in inspect(connection).get_columns(table_name)} + + +def _scalar(connection, sql: str, params: tuple | dict | None = None): + result = connection.exec_driver_sql(sql, params or ()) + row = result.fetchone() + return row[0] if row else None + + +def _execute(connection, sql: str, params: tuple | dict | None = None) -> None: + connection.exec_driver_sql(sql, params or ()) + + +def upgrade_app_schema(connection) -> None: + """Apply the schema drift fixes that used to live inline in app.py.""" + + tables = _table_names(connection) + dialect_name = getattr(getattr(connection, "dialect", None), "name", "sqlite") + is_postgres = dialect_name == "postgresql" + current_ts = "CURRENT_TIMESTAMP" if is_postgres else "datetime('now')" + + if "roles" in tables: + role_cols = _column_names(connection, "roles") + if "agent_access_json" not in role_cols: + _execute(connection, "ALTER TABLE roles ADD COLUMN agent_access_json TEXT DEFAULT '{\"mode\": \"all\"}'") + if "workspace_folders_json" not in role_cols: + _execute(connection, "ALTER TABLE roles ADD COLUMN workspace_folders_json TEXT DEFAULT '{\"mode\": \"all\"}'") + + # Goal cascade helpers: keep the view and trigger available on every boot. 
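+    # e.g. a goal with 4 linked tasks, 3 of them done, surfaces in the view
+    # as total_tasks=4, done_tasks=3, pct_complete=75.0:
+    #     SELECT done_tasks, pct_complete FROM goal_progress_v
+    #     WHERE slug = 'evo-ai-100-customers';  -- any seeded slug works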
+    if "goals" in tables and "goal_tasks" in tables:
+        _view_sql = """
+            CREATE VIEW IF NOT EXISTS goal_progress_v AS
+            SELECT g.id as goal_id, g.slug, g.target_value,
+                   COUNT(t.id) as total_tasks,
+                   COUNT(CASE WHEN t.status='done' THEN 1 END) as done_tasks,
+                   CASE WHEN COUNT(t.id) > 0
+                        THEN CAST(COUNT(CASE WHEN t.status='done' THEN 1 END) AS REAL) / COUNT(t.id) * 100.0
+                        ELSE 0 END as pct_complete
+            FROM goals g LEFT JOIN goal_tasks t ON t.goal_id = g.id
+            GROUP BY g.id;
+            """
+        if is_postgres:
+            # Postgres has no CREATE VIEW IF NOT EXISTS; OR REPLACE is the
+            # idempotent spelling there.
+            _view_sql = _view_sql.replace("CREATE VIEW IF NOT EXISTS", "CREATE OR REPLACE VIEW")
+        _execute(connection, _view_sql)
+        if is_postgres:
+            _execute(
+                connection,
+                """
+                CREATE OR REPLACE FUNCTION trg_task_done_updates_goal_fn()
+                RETURNS trigger AS $$
+                BEGIN
+                    IF NEW.goal_id IS NOT NULL AND NEW.status = 'done' AND OLD.status <> 'done' THEN
+                        UPDATE goals
+                        SET current_value = current_value + 1, updated_at = CURRENT_TIMESTAMP
+                        WHERE id = NEW.goal_id;
+                        UPDATE goals
+                        SET status = 'achieved'
+                        WHERE id = NEW.goal_id AND current_value >= target_value AND status = 'active';
+                    END IF;
+                    RETURN NEW;
+                END;
+                $$ LANGUAGE plpgsql;
+                """,
+            )
+            # CREATE TRIGGER has no IF NOT EXISTS on Postgres; drop first so
+            # the every-boot rerun stays idempotent.
+            _execute(connection, "DROP TRIGGER IF EXISTS trg_task_done_updates_goal ON goal_tasks")
+            _execute(
+                connection,
+                """
+                CREATE TRIGGER trg_task_done_updates_goal
+                AFTER UPDATE OF status ON goal_tasks
+                FOR EACH ROW
+                EXECUTE FUNCTION trg_task_done_updates_goal_fn();
+                """,
+            )
+        else:
+            _execute(
+                connection,
+                """
+                CREATE TRIGGER IF NOT EXISTS trg_task_done_updates_goal
+                AFTER UPDATE OF status ON goal_tasks
+                WHEN NEW.goal_id IS NOT NULL AND NEW.status = 'done' AND OLD.status != 'done'
+                BEGIN
+                    UPDATE goals SET current_value = current_value + 1, updated_at = datetime('now') WHERE id = NEW.goal_id;
+                    UPDATE goals SET status = 'achieved' WHERE id = NEW.goal_id AND current_value >= target_value AND status = 'active';
+                END;
+                """,
+            )
+
+    mission_count = _scalar(connection, "SELECT COUNT(*) FROM missions")
+    if mission_count == 0:
+        _now_seed = "2026-04-14T00:00:00.000000Z"
+        _execute(
+            connection,
+            """
+            INSERT INTO missions (slug, title, description, target_metric, target_value, current_value, due_date, status, created_at, updated_at)
+            VALUES ('evo-revenue-1m-q4-2026', 'Evolution Revenue $1M Q4 2026',
+                    'Atingir $1M de receita anual até o Q4 2026',
+                    'revenue_usd', 1000000, 0, '2026-12-31', 'active', ?, ?)
+            """,
+            (_now_seed, _now_seed),
+        )
+        mission_id = _scalar(connection, "SELECT id FROM missions WHERE slug = 'evo-revenue-1m-q4-2026'")
+        for slug, title, description in [
+            ("evo-ai", "Evo AI", "CRM + AI agents — produto principal"),
+            ("evo-summit", "Evolution Summit", "Evento de lançamento (14-16 Abr 2026)"),
+            ("evo-academy", "Evo Academy", "Plataforma de cursos"),
+        ]:
+            _execute(
+                connection,
+                """
+                INSERT INTO projects (slug, mission_id, title, description, status, created_at, updated_at)
+                VALUES (?, ?, ?, ?, 'active', ?, ?)
+ """, + (slug, mission_id, title, description, _now_seed, _now_seed), + ) + + project_ids = { + "evo-ai": _scalar(connection, "SELECT id FROM projects WHERE slug = 'evo-ai'"), + "evo-summit": _scalar(connection, "SELECT id FROM projects WHERE slug = 'evo-summit'"), + "evo-academy": _scalar(connection, "SELECT id FROM projects WHERE slug = 'evo-academy'"), + } + goals_seed = [ + ("evo-ai-100-customers", project_ids["evo-ai"], "100 paying customers by Jun 30", "customers", "count", 100, "2026-06-30"), + ("evo-ai-billing-v2", project_ids["evo-ai"], "Ship billing v2", "shipped", "boolean", 1, "2026-05-31"), + ("evo-summit-200-tickets", project_ids["evo-summit"], "Sell 200 tickets", "tickets_sold", "count", 200, "2026-04-13"), + ("evo-summit-3-sponsors", project_ids["evo-summit"], "Close 3 sponsors", "sponsors", "count", 3, "2026-04-10"), + ("evo-academy-50-students", project_ids["evo-academy"], "50 beta students", "students", "count", 50, "2026-06-30"), + ] + for slug, project_id, title, target_metric, metric_type, target_value, due_date in goals_seed: + _execute( + connection, + """ + INSERT INTO goals (slug, project_id, title, target_metric, metric_type, target_value, current_value, status, created_at, updated_at, due_date) + VALUES (?, ?, ?, ?, ?, ?, 0, 'active', ?, ?, ?) + """, + (slug, project_id, title, target_metric, metric_type, target_value, _now_seed, _now_seed, due_date), + ) + + if "tickets" in tables: + ticket_cols = _column_names(connection, "tickets") + for column_name, ddl in [ + ("source_agent", "ALTER TABLE tickets ADD COLUMN source_agent TEXT"), + ("source_session_id", "ALTER TABLE tickets ADD COLUMN source_session_id TEXT"), + ("workspace_path", "ALTER TABLE tickets ADD COLUMN workspace_path TEXT"), + ("memory_md_path", "ALTER TABLE tickets ADD COLUMN memory_md_path TEXT"), + ("thread_session_id", "ALTER TABLE tickets ADD COLUMN thread_session_id TEXT"), + ("message_count", "ALTER TABLE tickets ADD COLUMN message_count INTEGER NOT NULL DEFAULT 0"), + ("last_summary_at_message", "ALTER TABLE tickets ADD COLUMN last_summary_at_message INTEGER NOT NULL DEFAULT 0"), + ]: + if column_name not in ticket_cols: + _execute(connection, ddl) + ticket_cols.add(column_name) + + if "users" in tables: + user_cols = _column_names(connection, "users") + if "totp_secret" not in user_cols: + _execute(connection, "ALTER TABLE users ADD COLUMN totp_secret TEXT") + if "totp_enabled" not in user_cols: + _execute( + connection, + "ALTER TABLE users ADD COLUMN totp_enabled BOOLEAN NOT NULL DEFAULT FALSE" if is_postgres else "ALTER TABLE users ADD COLUMN totp_enabled INTEGER NOT NULL DEFAULT 0", + ) + if "totp_last_used_step" not in user_cols: + _execute(connection, "ALTER TABLE users ADD COLUMN totp_last_used_step INTEGER") + if "totp_confirmed_at" not in user_cols: + _execute(connection, "ALTER TABLE users ADD COLUMN totp_confirmed_at TIMESTAMP" if is_postgres else "ALTER TABLE users ADD COLUMN totp_confirmed_at TEXT") + + # Knowledge tables are managed outside SQLAlchemy models, so keep them here. 
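+    # Safe to re-run on every boot: creation is guarded by both the `tables`
+    # membership check and IF NOT EXISTS. A typical health read against this
+    # schema (sketch; filter value is illustrative):
+    #     SELECT slug, status, last_health_check
+    #     FROM knowledge_connections
+    #     WHERE status != 'disconnected';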
+ if "knowledge_connections" not in tables: + _execute( + connection, + """ + CREATE TABLE IF NOT EXISTS knowledge_connections ( + id TEXT PRIMARY KEY, + slug TEXT UNIQUE NOT NULL, + name TEXT NOT NULL, + connection_string_encrypted BLOB, + host TEXT, + port INTEGER, + database_name TEXT, + username TEXT, + ssl_mode TEXT, + status TEXT DEFAULT 'disconnected', + schema_version TEXT, + pgvector_version TEXT, + postgres_version TEXT, + last_health_check TIMESTAMP, + last_error TEXT, + created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP + ); + """, + ) + _execute( + connection, + """ + CREATE TABLE IF NOT EXISTS knowledge_connection_events ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + connection_id TEXT REFERENCES knowledge_connections(id) ON DELETE CASCADE, + event_type TEXT, + details TEXT, + created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP + ); + """, + ) + _execute(connection, "CREATE INDEX IF NOT EXISTS idx_kconn_status ON knowledge_connections(status)") + _execute( + connection, + "CREATE INDEX IF NOT EXISTS idx_kconn_events_conn ON knowledge_connection_events(connection_id, created_at)", + ) + + if "knowledge_api_keys" not in tables: + _execute( + connection, + """ + CREATE TABLE IF NOT EXISTS knowledge_api_keys ( + id TEXT PRIMARY KEY, + name TEXT, + prefix TEXT NOT NULL, + token_hash TEXT NOT NULL, + connection_id TEXT NOT NULL, + space_ids TEXT NOT NULL DEFAULT '[]', + scopes TEXT NOT NULL DEFAULT '["read"]', + rate_limit_per_min INTEGER NOT NULL DEFAULT 60, + rate_limit_per_day INTEGER NOT NULL DEFAULT 10000, + created_at TEXT NOT NULL, + last_used_at TEXT, + expires_at TEXT + ); + """, + ) + _execute(connection, "CREATE INDEX IF NOT EXISTS idx_kak_prefix ON knowledge_api_keys(prefix)") + + # Fix corrupted datetime columns that can crash SQLAlchemy on load. 
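+    # e.g. a users.created_at cell holding the integer 1745539200 (epoch
+    # seconds, illustrative) or a non-ISO string trips SQLAlchemy's DateTime
+    # loader on row fetch; the typeof()/LIKE guards below rewrite only those
+    # cells to a fresh timestamp.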
+    # sqlite-only: typed Postgres timestamp columns cannot hold junk of this
+    # kind, and an unconditional rewrite there would reset every value on
+    # each boot.
+    if not is_postgres:
+        for table_name, column_name in [("roles", "created_at"), ("users", "created_at"), ("users", "last_login")]:
+            try:
+                if column_name in _column_names(connection, table_name):
+                    _execute(
+                        connection,
+                        f"UPDATE {table_name} SET {column_name} = {current_ts} WHERE {column_name} IS NOT NULL AND typeof({column_name}) != 'text'",
+                    )
+                    _execute(
+                        connection,
+                        f"UPDATE {table_name} SET {column_name} = {current_ts} WHERE {column_name} IS NOT NULL AND {column_name} != '' AND {column_name} NOT LIKE '____-__-__%'",
+                    )
+            except Exception:
+                pass
+
+
+def downgrade_app_schema(connection) -> None:
+    """Drop the schema additions managed by upgrade_app_schema."""
+
+    dialect_name = getattr(getattr(connection, "dialect", None), "name", "sqlite")
+    is_postgres = dialect_name == "postgresql"
+
+    if is_postgres:
+        for sql in [
+            "DROP TRIGGER IF EXISTS trg_task_done_updates_goal ON goal_tasks",
+            "DROP FUNCTION IF EXISTS trg_task_done_updates_goal_fn()",
+            "DROP VIEW IF EXISTS goal_progress_v",
+            "DROP TABLE IF EXISTS knowledge_api_keys",
+            "DROP TABLE IF EXISTS knowledge_connection_events",
+            "DROP TABLE IF EXISTS knowledge_connections",
+        ]:
+            try:
+                _execute(connection, sql)
+            except Exception:
+                pass
+        return
+
+    for sql in [
+        "DROP TRIGGER IF EXISTS trg_task_done_updates_goal",
+        "DROP VIEW IF EXISTS goal_progress_v",
+        "DROP TABLE IF EXISTS knowledge_api_keys",
+        "DROP TABLE IF EXISTS knowledge_connection_events",
+        "DROP TABLE IF EXISTS knowledge_connections",
+    ]:
+        try:
+            _execute(connection, sql)
+        except Exception:
+            pass
diff --git a/dashboard/backend/session_security.py b/dashboard/backend/session_security.py
new file mode 100644
index 00000000..3205f133
--- /dev/null
+++ b/dashboard/backend/session_security.py
@@ -0,0 +1,82 @@
+from __future__ import annotations
+
+import os
+import secrets
+from datetime import datetime, timezone
+
+from flask import abort, request, session
+
+
+CSRF_SESSION_KEY = "_evonexus_csrf_token"
+CSRF_ISSUED_AT_KEY = "_evonexus_csrf_issued_at"
+CSRF_HEADER_NAME = "X-CSRF-Token"
+XHR_HEADER_NAME = "X-Requested-With"
+XHR_HEADER_VALUE = "XMLHttpRequest"
+
+
+def _rotation_minutes() -> int:
+    raw = os.environ.get("EVONEXUS_SESSION_KEY_ROTATION_MINUTES", "").strip()
+    try:
+        value = int(raw)
+    except (TypeError, ValueError):
+        value = 24 * 60
+    return value if value > 0 else 24 * 60
+
+
+def _parse_issued_at(raw: object) -> datetime | None:
+    if not raw:
+        return None
+    if isinstance(raw, datetime):
+        return raw.astimezone(timezone.utc) if raw.tzinfo else raw.replace(tzinfo=timezone.utc)
+    text = str(raw).strip()
+    if not text:
+        return None
+    try:
+        parsed = datetime.fromisoformat(text.replace("Z", "+00:00"))
+        return parsed.astimezone(timezone.utc) if parsed.tzinfo else parsed.replace(tzinfo=timezone.utc)
+    except ValueError:
+        return None
+
+
+def issue_session_token(force: bool = False) -> str:
+    """Return the current per-session CSRF token, rotating it when needed."""
+
+    token = session.get(CSRF_SESSION_KEY)
+    issued_at = _parse_issued_at(session.get(CSRF_ISSUED_AT_KEY))
+    age_minutes = None
+    if issued_at is not None:
+        age_minutes = (datetime.now(timezone.utc) - issued_at).total_seconds() / 60.0
+
+    if force or not token or age_minutes is None or age_minutes >= _rotation_minutes():
+        token = secrets.token_urlsafe(32)
+
session[CSRF_SESSION_KEY] = token + session[CSRF_ISSUED_AT_KEY] = datetime.now(timezone.utc).isoformat() + + return str(token) + + +def current_session_token() -> str | None: + token = session.get(CSRF_SESSION_KEY) + return str(token) if token else None + + +def attach_session_token(response): + token = issue_session_token(force=False) + response.headers[CSRF_HEADER_NAME] = token + expose = response.headers.get("Access-Control-Expose-Headers", "") + exposed = [item.strip() for item in expose.split(",") if item.strip()] + if CSRF_HEADER_NAME not in exposed: + exposed.append(CSRF_HEADER_NAME) + response.headers["Access-Control-Expose-Headers"] = ", ".join(exposed) + return response + + +def require_csrf_token(req=request) -> None: + expected = issue_session_token(force=False) + provided = req.headers.get(CSRF_HEADER_NAME, "").strip() + if not provided or provided != expected: + abort(403, description="CSRF check failed: invalid or missing CSRF token.") + + +def force_rotate_session_token() -> str: + return issue_session_token(force=True) diff --git a/dashboard/backend/structured_logging.py b/dashboard/backend/structured_logging.py new file mode 100644 index 00000000..d7bac813 --- /dev/null +++ b/dashboard/backend/structured_logging.py @@ -0,0 +1,60 @@ +from __future__ import annotations + +import json +import sys +import time +from datetime import datetime, timezone +from pathlib import Path +from typing import Any + +from flask import g, request + + +def _json_default(value: Any): + if isinstance(value, Path): + return str(value) + if hasattr(value, "isoformat"): + try: + return value.isoformat() + except Exception: + pass + return str(value) + + +def emit_json_log(level: str, event: str, **fields: Any) -> None: + payload = { + "ts": datetime.now(timezone.utc).isoformat(), + "level": level, + "event": event, + **fields, + } + stream = sys.stderr if level.lower() in {"warning", "error", "critical"} else sys.stdout + print(json.dumps(payload, ensure_ascii=False, default=_json_default), file=stream, flush=True) + + +def install_request_logging(app, service: str = "dashboard") -> None: + """Emit one structured JSON record for every API/WebSocket request.""" + + @app.before_request + def _start_request_timer(): + if request.path.startswith("/api/") or request.path.startswith("/ws/"): + g._structured_started_at = time.perf_counter() + + @app.after_request + def _log_request(response): + if request.path.startswith("/api/") or request.path.startswith("/ws/"): + started_at = getattr(g, "_structured_started_at", None) + duration_ms = None + if started_at is not None: + duration_ms = round((time.perf_counter() - started_at) * 1000, 1) + emit_json_log( + "info", + "http_request", + service=service, + method=request.method, + path=request.path, + status=response.status_code, + duration_ms=duration_ms, + remote_addr=request.headers.get("X-Forwarded-For", request.remote_addr), + ) + return response diff --git a/dashboard/backend/totp_security.py b/dashboard/backend/totp_security.py new file mode 100644 index 00000000..4e61aee1 --- /dev/null +++ b/dashboard/backend/totp_security.py @@ -0,0 +1,78 @@ +from __future__ import annotations + +import base64 +import hashlib +import hmac +import re +import secrets +import struct +import time +import urllib.parse + + +TOTP_ISSUER = "EvoNexus" + + +def generate_totp_secret() -> str: + return base64.b32encode(secrets.token_bytes(20)).decode("ascii").rstrip("=") + + +def provisioning_uri(secret: str, account_name: str, issuer: str = TOTP_ISSUER) -> str: + label = 
urllib.parse.quote(f"{issuer}:{account_name}") + params = urllib.parse.urlencode({ + "secret": secret, + "issuer": issuer, + "algorithm": "SHA1", + "digits": 6, + "period": 30, + }) + return f"otpauth://totp/{label}?{params}" + + +def normalize_totp_code(code: str | None) -> str: + return re.sub(r"\s+", "", str(code or "")).strip() + + +def _decode_secret(secret: str) -> bytes: + normalized = re.sub(r"\s+", "", secret or "").strip().upper() + padding = "=" * ((8 - len(normalized) % 8) % 8) + return base64.b32decode(normalized + padding, casefold=True) + + +def _hotp(secret: str, counter: int, digits: int = 6) -> str: + key = _decode_secret(secret) + msg = struct.pack(">Q", counter) + digest = hmac.new(key, msg, hashlib.sha1).digest() + offset = digest[-1] & 0x0F + binary = struct.unpack(">I", digest[offset:offset + 4])[0] & 0x7FFFFFFF + return str(binary % (10 ** digits)).zfill(digits) + + +def generate_totp_code(secret: str, *, interval: int = 30, digits: int = 6, for_time: float | None = None) -> str: + now = for_time if for_time is not None else time.time() + counter = int(now // interval) + return _hotp(secret, counter, digits=digits) + + +def verify_totp_code( + secret: str, + code: str | None, + *, + interval: int = 30, + digits: int = 6, + window: int = 1, + last_used_step: int | None = None, + for_time: float | None = None, +) -> dict[str, object]: + normalized = normalize_totp_code(code) + if not normalized.isdigit() or len(normalized) != digits: + return {"valid": False, "step": None} + + now = for_time if for_time is not None else time.time() + current_step = int(now // interval) + for step in range(current_step - window, current_step + window + 1): + if last_used_step is not None and step <= last_used_step: + continue + if _hotp(secret, step, digits=digits) == normalized: + return {"valid": True, "step": step} + return {"valid": False, "step": None} diff --git a/dashboard/frontend/index.html b/dashboard/frontend/index.html index 9f57cf68..aca0738d 100644 --- a/dashboard/frontend/index.html +++ b/dashboard/frontend/index.html @@ -9,12 +9,13 @@ + content="connect-src 'self' ws: wss:" /> Evo Workspace diff --git a/dashboard/frontend/playwright.config.ts b/dashboard/frontend/playwright.config.ts new file mode 100644 index 00000000..bcc05795 --- /dev/null +++ b/dashboard/frontend/playwright.config.ts @@ -0,0 +1,26 @@ +import { defineConfig } from '@playwright/test' +import path from 'node:path' +import { fileURLToPath } from 'node:url' + +const frontendRoot = path.dirname(fileURLToPath(import.meta.url)) + +export default defineConfig({ + testDir: './tests/e2e', + timeout: 60_000, + expect: { timeout: 10_000 }, + use: { + baseURL: 'http://127.0.0.1:8080', + trace: 'on-first-retry', + }, + webServer: { + command: 'node scripts/start-e2e-server.mjs', + cwd: frontendRoot, + url: 'http://127.0.0.1:8080', + timeout: 180_000, + reuseExistingServer: false, + env: { + ...process.env, + EVONEXUS_PORT: '8080', + }, + }, +}) diff --git a/dashboard/frontend/public/manifest.webmanifest b/dashboard/frontend/public/manifest.webmanifest new file mode 100644 index 00000000..f4d40ccd --- /dev/null +++ b/dashboard/frontend/public/manifest.webmanifest @@ -0,0 +1,29 @@ +{ + "name": "Evo Workspace", + "short_name": "EvoNexus", + "start_url": "/", + "scope": "/", + "display": "standalone", + "background_color": "#0C111D", + "theme_color": "#0C111D", + "description": "EvoNexus dashboard and agent workspace.", + "icons": [ + { + "src": "/favicon.png", + "sizes": "192x192", + "type": "image/png" + }, + { + 
"src": "/favicon.png", + "sizes": "512x512", + "type": "image/png" + }, + { + "src": "/EVO_NEXUS.webp", + "sizes": "512x512", + "type": "image/webp", + "purpose": "any" + } + ] +} + diff --git a/dashboard/frontend/public/sw.js b/dashboard/frontend/public/sw.js new file mode 100644 index 00000000..0ea13939 --- /dev/null +++ b/dashboard/frontend/public/sw.js @@ -0,0 +1,65 @@ +const CACHE_NAME = 'evonexus-shell-v1' +const PRECACHE_URLS = [ + '/', + '/index.html', + '/favicon.png', + '/EVO_NEXUS.webp', + '/manifest.webmanifest', +] + +self.addEventListener('install', (event) => { + event.waitUntil( + caches.open(CACHE_NAME).then((cache) => cache.addAll(PRECACHE_URLS)).then(() => self.skipWaiting()), + ) +}) + +self.addEventListener('activate', (event) => { + event.waitUntil( + caches.keys().then((keys) => + Promise.all(keys.map((key) => (key === CACHE_NAME ? null : caches.delete(key)))).then(() => self.clients.claim()), + ), + ) +}) + +async function cacheFirst(request) { + const cache = await caches.open(CACHE_NAME) + const cached = await cache.match(request) + if (cached) return cached + + const response = await fetch(request) + if (response && response.ok) { + cache.put(request, response.clone()) + } + return response +} + +async function networkFirstNavigation(request) { + const cache = await caches.open(CACHE_NAME) + try { + const response = await fetch(request) + if (response && response.ok) { + cache.put('/index.html', response.clone()) + } + return response + } catch { + const cachedIndex = await cache.match('/index.html') + return cachedIndex || cache.match('/') + } +} + +self.addEventListener('fetch', (event) => { + const request = event.request + if (request.method !== 'GET') return + + const url = new URL(request.url) + if (url.origin !== self.location.origin) return + if (url.pathname.startsWith('/api/') || url.pathname.startsWith('/terminal/')) return + + if (request.mode === 'navigate') { + event.respondWith(networkFirstNavigation(request)) + return + } + + event.respondWith(cacheFirst(request)) +}) + diff --git a/dashboard/frontend/scripts/start-e2e-server.mjs b/dashboard/frontend/scripts/start-e2e-server.mjs new file mode 100644 index 00000000..2f4a4dfc --- /dev/null +++ b/dashboard/frontend/scripts/start-e2e-server.mjs @@ -0,0 +1,48 @@ +import { execFileSync, spawn } from 'node:child_process' +import fs from 'node:fs' +import os from 'node:os' +import path from 'node:path' +import { fileURLToPath } from 'node:url' + +const scriptDir = path.dirname(fileURLToPath(import.meta.url)) +const frontendRoot = path.resolve(scriptDir, '..') +const repoRoot = path.resolve(frontendRoot, '..', '..') + +function sqliteUrl(filePath) { + return `sqlite:///${path.resolve(filePath).replace(/\\/g, '/')}` +} + +const tempDir = fs.mkdtempSync(path.join(os.tmpdir(), 'evonexus-e2e-')) +const databasePath = path.join(tempDir, 'dashboard.db') + +execFileSync('npm', ['run', 'build'], { + cwd: frontendRoot, + stdio: 'inherit', + env: process.env, + shell: true, +}) + +const child = spawn(process.platform === 'win32' ? 
'python' : 'python3', ['dashboard/backend/app.py'], {
+  cwd: repoRoot,
+  stdio: 'inherit',
+  env: {
+    ...process.env,
+    SQLALCHEMY_DATABASE_URI: sqliteUrl(databasePath),
+    EVONEXUS_SECRET_KEY: process.env.EVONEXUS_SECRET_KEY || 'e2e-secret-key',
+    EVONEXUS_PORT: process.env.EVONEXUS_PORT || '8080',
+    CORS_ALLOWED_ORIGINS: 'http://127.0.0.1:8080',
+    EVONEXUS_ENV: 'development',
+  },
+})
+
+const shutdown = () => {
+  if (!child.killed) {
+    child.kill('SIGTERM')
+  }
+}
+
+process.on('SIGINT', shutdown)
+process.on('SIGTERM', shutdown)
+child.on('exit', (code, signal) => {
+  process.exit(code ?? (signal ? 1 : 0))
+})
diff --git a/dashboard/frontend/src/App.tsx b/dashboard/frontend/src/App.tsx
index d6f411a2..58206445 100644
--- a/dashboard/frontend/src/App.tsx
+++ b/dashboard/frontend/src/App.tsx
@@ -57,6 +57,7 @@ const KnowledgeUpload = lazyDefault(() => import('./pages/Knowledge/Upload'))
 const KnowledgeBrowse = lazyDefault(() => import('./pages/Knowledge/Browse'))
 const KnowledgeSearch = lazyDefault(() => import('./pages/Knowledge/Search'))
 const KnowledgeApiKeys = lazyDefault(() => import('./pages/Knowledge/ApiKeys'))
+const AgentKnowledge = lazyDefault(() => import('./pages/AgentKnowledge'))
 
 function FullPageRoute({
   locationKey,
@@ -256,6 +257,7 @@ function AppContent() {
           {hasPermission('heartbeats', 'view') && } />}
           } />
           } />
+          {hasPermission('knowledge', 'view') && } />}
           } />
           {hasPermission('config', 'view') && } />}
           {hasPermission('config', 'view') && } />}
diff --git a/dashboard/frontend/src/components/AgentChat.tsx b/dashboard/frontend/src/components/AgentChat.tsx
index b60ff664..e4d9442d 100644
--- a/dashboard/frontend/src/components/AgentChat.tsx
+++ b/dashboard/frontend/src/components/AgentChat.tsx
@@ -138,8 +138,17 @@ export default function AgentChat({ agent, sessionId, accentColor = '#00FFA7', e
   setErrorMsg(null)
   let cancelled = false
   let ws: WebSocket | null = null
+  let reconnectTimer: ReturnType<typeof setTimeout> | null = null
+
+  const scheduleReconnect = () => {
+    if (cancelled || reconnectTimer) return
+    reconnectTimer = setTimeout(() => {
+      reconnectTimer = null
+      if (!cancelled) connectSocket()
+    }, 1000)
+  }
 
-  ;(async () => {
+  async function connectSocket() {
     // 1) HTTP preflight — fails fast on ECONNREFUSED so we can show a real error
     //    instead of hanging in 'connecting' forever (same pattern as AgentTerminal).
     try {
@@ -156,14 +165,21 @@ export default function AgentChat({ agent, sessionId, accentColor = '#00FFA7', e
     // 2) Open WS
     ws = new WebSocket(`${TS_WS}/ws`)
     wsRef.current = ws
+    let opened = false
+
+    const isCurrentSocket = () => !cancelled && wsRef.current === ws
 
     ws.onopen = () => {
+      if (!isCurrentSocket()) return
+      opened = true
+      setErrorMsg(null)
       ws!.send(JSON.stringify({ type: 'join_session', sessionId }))
       setStatus('idle')
     }
 
     ws.onmessage = (ev) => {
-      if (cancelled) return
+      if (!isCurrentSocket()) return
+      setErrorMsg(null)
       let msg: any
       try { msg = JSON.parse(ev.data) } catch { return }
@@ -273,13 +289,25 @@
     }
 
     ws.onerror = () => {
-      if (cancelled) return
-      setStatus('error')
-      setErrorMsg('WebSocket error')
+      if (!isCurrentSocket()) return
+      if (!opened) {
+        setStatus('error')
+        setErrorMsg(`Could not open WebSocket at ${TS_WS}/ws`)
+      }
     }
 
     ws.onclose = () => {
      if (pingRef.current) { clearInterval(pingRef.current); pingRef.current = null }
+      if (wsRef.current === ws) {
+        wsRef.current = null
+      } else {
+        return
+      }
+      if (!cancelled) {
+        setStatus('connecting')
+        setErrorMsg(opened ?
'WebSocket disconnected. Reconnecting...' : `Could not open WebSocket at ${TS_WS}/ws`) + scheduleReconnect() + } } pingRef.current = setInterval(() => { @@ -287,10 +315,13 @@ export default function AgentChat({ agent, sessionId, accentColor = '#00FFA7', e ws!.send(JSON.stringify({ type: 'ping' })) } }, 25000) - })() + } + + connectSocket() return () => { cancelled = true + if (reconnectTimer) clearTimeout(reconnectTimer) if (pingRef.current) { clearInterval(pingRef.current); pingRef.current = null } try { ws?.close() } catch {} wsRef.current = null diff --git a/dashboard/frontend/src/components/AgentTerminal.tsx b/dashboard/frontend/src/components/AgentTerminal.tsx index ef438a0a..352b2ec8 100644 --- a/dashboard/frontend/src/components/AgentTerminal.tsx +++ b/dashboard/frontend/src/components/AgentTerminal.tsx @@ -3,6 +3,7 @@ import { Terminal } from '@xterm/xterm' import { FitAddon } from '@xterm/addon-fit' import { WebLinksAddon } from '@xterm/addon-web-links' import '@xterm/xterm/css/xterm.css' +import { TS_HTTP, TS_WS } from '../lib/terminal-url' interface AgentTerminalProps { agent: string @@ -11,65 +12,8 @@ interface AgentTerminalProps { accentColor?: string } -// Terminal connection URL resolution. -// -// We always go through the dashboard's /terminal proxy in production builds. -// Direct cross-port fetches (e.g. localhost:32352 from a page served at -// localhost:8080) are blocked by the dashboard's `connect-src 'self'` CSP -// directive even when the network path would work. The proxy gives us: -// 1. Same-origin requests pass CSP `'self'`. -// 2. No CORS preflight (same origin). -// 3. Works through SSH tunnels, Tailscale Funnel, or any reverse proxy -// that only exposes the dashboard port. -// -// Escape hatch for cases where the proxy can't be used (e.g. a static -// dashboard build hosted somewhere unrelated to the terminal-server): set -// VITE_TERMINAL_URL at build time to force a specific base URL. When set, -// it overrides the proxy. Trailing slash is stripped so both -// `https://x.y/terminal` and `https://x.y/terminal/` work. -// -// In Vite's `npm run dev` mode (port 5173, no proxy mounted) we fall back -// to a direct connection to terminal-server. That path is local-only by -// definition. -const rawOverride = (import.meta.env.VITE_TERMINAL_URL as string | undefined)?.trim() -const terminalOverride = rawOverride ? rawOverride.replace(/\/+$/, '') : null - -const hostname = window.location.hostname -const isViteDev = import.meta.env.DEV - -// Resolve an override URL into the (httpBase, wsBase) pair the rest of the -// component expects. Accepts either http(s):// or ws(s):// — both schemes -// are mapped to their counterpart so users can paste whichever they have -// on hand. Invalid input falls back to the heuristic. -function resolveOverride(raw: string): { http: string; ws: string } | null { - try { - const u = new URL(raw) - const isSecure = u.protocol === 'https:' || u.protocol === 'wss:' - const httpProto = isSecure ? 'https:' : 'http:' - const wsProto = isSecure ? 'wss:' : 'ws:' - const path = u.pathname.replace(/\/+$/, '') + u.search - return { - http: `${httpProto}//${u.host}${path}`, - ws: `${wsProto}//${u.host}${path}`, - } - } catch { - return null - } -} - -const override = terminalOverride ? resolveOverride(terminalOverride) : null - -const CC_WEB_HTTP = override - ? override.http - : isViteDev - ? `http://${hostname}:32352` - : `${window.location.origin}/terminal` - -const CC_WEB_WS = override - ? override.ws - : isViteDev - ? 
`ws://${hostname}:32352` - : `${window.location.protocol === 'https:' ? 'wss:' : 'ws:'}//${window.location.host}/terminal` +const CC_WEB_HTTP = TS_HTTP +const CC_WEB_WS = TS_WS type Status = 'connecting' | 'ready' | 'starting' | 'running' | 'error' | 'exited' @@ -80,6 +24,8 @@ export default function AgentTerminal({ agent, sessionId: externalSessionId, wor const wsRef = useRef(null) const sessionIdRef = useRef(null) const pingRef = useRef | null>(null) + const reconnectTimerRef = useRef | null>(null) + const processEndedRef = useRef(false) const [status, setStatus] = useState('connecting') const [errorMsg, setErrorMsg] = useState(null) @@ -184,10 +130,29 @@ export default function AgentTerminal({ agent, sessionId: externalSessionId, wor const term = termRef.current if (!term) return - async function run() { + function clearReconnectTimer() { + if (reconnectTimerRef.current) { + clearTimeout(reconnectTimerRef.current) + reconnectTimerRef.current = null + } + } + + function scheduleReconnect() { + if (cancelled || processEndedRef.current || reconnectTimerRef.current) return + reconnectTimerRef.current = setTimeout(() => { + reconnectTimerRef.current = null + if (!cancelled && !processEndedRef.current) { + run(false) + } + }, 1000) + } + + async function run(clearTerminal = true) { + clearReconnectTimer() + processEndedRef.current = false setStatus('connecting') setErrorMsg(null) - term!.clear() + if (clearTerminal) term!.clear() // 1) Use provided sessionId or find-or-create for this agent let sessionId: string @@ -226,18 +191,25 @@ export default function AgentTerminal({ agent, sessionId: externalSessionId, wor // 2) Open WS const ws = new WebSocket(`${CC_WEB_WS}/ws`) wsRef.current = ws + let opened = false + + const isCurrentSocket = () => !cancelled && wsRef.current === ws ws.onopen = () => { + if (!isCurrentSocket()) return + opened = true + setErrorMsg(null) ws.send(JSON.stringify({ type: 'join_session', sessionId })) } ws.onmessage = (ev) => { - if (cancelled) return + if (!isCurrentSocket()) return let msg: any try { msg = JSON.parse(ev.data) } catch { return } switch (msg.type) { case 'session_joined': { + setErrorMsg(null) // Replay any buffered output if (Array.isArray(msg.outputBuffer)) { msg.outputBuffer.forEach((chunk: string) => term!.write(chunk)) @@ -276,9 +248,11 @@ export default function AgentTerminal({ agent, sessionId: externalSessionId, wor break } case 'output': + setErrorMsg(null) term!.write(msg.data) break case 'claude_started': + setErrorMsg(null) setStatus('running') // resize after start { @@ -290,6 +264,7 @@ export default function AgentTerminal({ agent, sessionId: externalSessionId, wor } break case 'exit': + processEndedRef.current = true setStatus('exited') term!.write(`\r\n\x1b[33m[Process exited${msg.code != null ? ` with code ${msg.code}` : ''}]\x1b[0m\r\n`) break @@ -304,9 +279,11 @@ export default function AgentTerminal({ agent, sessionId: externalSessionId, wor } ws.onerror = () => { - if (cancelled) return - setStatus('error') - setErrorMsg('WebSocket error') + if (!isCurrentSocket()) return + if (!opened) { + setStatus('error') + setErrorMsg(`Could not open WebSocket at ${CC_WEB_WS}/ws`) + } } ws.onclose = () => { @@ -314,6 +291,16 @@ export default function AgentTerminal({ agent, sessionId: externalSessionId, wor clearInterval(pingRef.current) pingRef.current = null } + if (wsRef.current === ws) { + wsRef.current = null + } else { + return + } + if (!cancelled && !processEndedRef.current) { + setStatus('connecting') + setErrorMsg(opened ? 
'WebSocket disconnected. Reconnecting...' : `Could not open WebSocket at ${CC_WEB_WS}/ws`) + scheduleReconnect() + } } // Keepalive @@ -328,6 +315,7 @@ export default function AgentTerminal({ agent, sessionId: externalSessionId, wor return () => { cancelled = true + clearReconnectTimer() if (pingRef.current) { clearInterval(pingRef.current) pingRef.current = null diff --git a/dashboard/frontend/src/components/CommandPalette.tsx b/dashboard/frontend/src/components/CommandPalette.tsx new file mode 100644 index 00000000..35187155 --- /dev/null +++ b/dashboard/frontend/src/components/CommandPalette.tsx @@ -0,0 +1,341 @@ +import { + createContext, + useCallback, + useContext, + useEffect, + useMemo, + useRef, + useState, + type KeyboardEvent as ReactKeyboardEvent, + type ReactNode, +} from 'react' +import { useLocation, useNavigate, type NavigateFunction } from 'react-router-dom' +import { useTranslation } from 'react-i18next' +import { Moon, RefreshCw, Search, Sun, X, type LucideIcon } from 'lucide-react' +import { useAuth } from '../context/AuthContext' +import { useTheme } from '../context/ThemeContext' +import { DOCS_NAV_ITEM, getVisibleNavGroups, NAV_GROUPS, type NavItem } from '../lib/navigation' + +interface CommandPaletteContextValue { + isOpen: boolean + openCommandPalette: () => void + closeCommandPalette: () => void + toggleCommandPalette: () => void +} + +interface PaletteCommand { + id: string + label: string + description: string + group: string + icon: LucideIcon + action: () => void + keywords: string[] +} + +const CommandPaletteContext = createContext(null) + +export function useCommandPalette() { + const ctx = useContext(CommandPaletteContext) + if (!ctx) { + throw new Error('useCommandPalette must be used within ') + } + return ctx +} + +function commandLabel(item: NavItem, t: (key: string) => string) { + return t(`nav.${item.labelKey}`) +} + +function buildRouteCommands( + hasPermission: (resource: string, action: string) => boolean, + t: (key: string) => string, + navigate: NavigateFunction, + close: () => void, +): PaletteCommand[] { + const visibleGroups = getVisibleNavGroups(hasPermission) + const byKey = new Map(visibleGroups.map((group) => [group.key, group])) + const commands: PaletteCommand[] = [] + + for (const group of NAV_GROUPS) { + const visible = byKey.get(group.key) + if (!visible) continue + + for (const item of visible.items) { + commands.push({ + id: `route:${item.to}`, + label: commandLabel(item, t), + description: item.to === '/' ? 
'Go to the main dashboard' : item.to, + group: t(`nav.groups.${group.key}`), + icon: item.icon, + keywords: [item.to, group.key, commandLabel(item, t), t(`nav.groups.${group.key}`)], + action: () => { + close() + navigate(item.to) + }, + }) + } + } + + commands.push({ + id: `route:${DOCS_NAV_ITEM.to}`, + label: t('nav.docs'), + description: 'Open the public documentation view', + group: 'Public', + icon: DOCS_NAV_ITEM.icon, + keywords: [DOCS_NAV_ITEM.to, 'docs', 'documentation'], + action: () => { + close() + navigate(DOCS_NAV_ITEM.to) + }, + }) + + return commands +} + +export function CommandPaletteProvider({ children }: { children: ReactNode }) { + const [isOpen, setIsOpen] = useState(false) + + const openCommandPalette = useCallback(() => setIsOpen(true), []) + const closeCommandPalette = useCallback(() => setIsOpen(false), []) + const toggleCommandPalette = useCallback(() => setIsOpen((prev) => !prev), []) + + useEffect(() => { + const onKeyDown = (event: KeyboardEvent) => { + const key = event.key.toLowerCase() + if ((event.metaKey || event.ctrlKey) && key === 'k') { + event.preventDefault() + setIsOpen(true) + } + if (event.key === 'Escape') { + setIsOpen(false) + } + } + + window.addEventListener('keydown', onKeyDown) + return () => window.removeEventListener('keydown', onKeyDown) + }, []) + + return ( + + {children} + + + ) +} + +function CommandPaletteDialog() { + const { isOpen, closeCommandPalette } = useCommandPalette() + const { theme, toggleTheme } = useTheme() + const { hasPermission } = useAuth() + const { t } = useTranslation() + const navigate = useNavigate() + const location = useLocation() + const inputRef = useRef(null) + const [query, setQuery] = useState('') + const [selectedIndex, setSelectedIndex] = useState(0) + + useEffect(() => { + if (!isOpen) { + setQuery('') + setSelectedIndex(0) + return + } + + const prevOverflow = document.body.style.overflow + document.body.style.overflow = 'hidden' + + const timer = window.setTimeout(() => { + inputRef.current?.focus() + inputRef.current?.select() + }, 0) + + return () => { + window.clearTimeout(timer) + document.body.style.overflow = prevOverflow + } + }, [isOpen]) + + const commands = useMemo(() => { + const routeCommands = buildRouteCommands(hasPermission, t, navigate, closeCommandPalette) + const actionCommands: PaletteCommand[] = [ + { + id: 'action:theme', + label: theme === 'dark' ? 'Switch to light theme' : 'Switch to dark theme', + description: 'Toggle the dashboard appearance', + group: 'Shell', + icon: theme === 'dark' ? 
Sun : Moon, + keywords: ['theme', 'dark', 'light', 'appearance'], + action: () => { + toggleTheme() + closeCommandPalette() + }, + }, + { + id: 'action:reload', + label: 'Reload app', + description: 'Refresh the current page', + group: 'Shell', + icon: RefreshCw, + keywords: ['reload', 'refresh', 'restart'], + action: () => window.location.reload(), + }, + ] + + return [...actionCommands, ...routeCommands] + }, [closeCommandPalette, hasPermission, navigate, t, theme, toggleTheme]) + + const filteredCommands = useMemo(() => { + const term = query.trim().toLowerCase() + if (!term) return commands + return commands.filter((command) => { + const haystack = [command.label, command.description, command.group, ...command.keywords].join(' ').toLowerCase() + return haystack.includes(term) + }) + }, [commands, query]) + + useEffect(() => { + setSelectedIndex((current) => { + if (filteredCommands.length === 0) return 0 + return Math.min(current, filteredCommands.length - 1) + }) + }, [filteredCommands.length]) + + if (!isOpen) return null + + const grouped = filteredCommands.reduce>((acc, command) => { + const bucket = acc[command.group] ?? [] + bucket.push(command) + acc[command.group] = bucket + return acc + }, {}) + + const groupOrder = Object.keys(grouped) + const flatCommands = groupOrder.flatMap((group) => grouped[group]) + + const runSelected = () => { + const command = flatCommands[selectedIndex] + if (!command) return + command.action() + } + + const handleKeyDown = (event: ReactKeyboardEvent) => { + if (event.key === 'ArrowDown') { + event.preventDefault() + setSelectedIndex((current) => (flatCommands.length === 0 ? 0 : (current + 1) % flatCommands.length)) + return + } + + if (event.key === 'ArrowUp') { + event.preventDefault() + setSelectedIndex((current) => (flatCommands.length === 0 ? 0 : (current - 1 + flatCommands.length) % flatCommands.length)) + return + } + + if (event.key === 'Enter') { + event.preventDefault() + runSelected() + return + } + + if (event.key === 'Escape') { + event.preventDefault() + closeCommandPalette() + } + } + + return ( +
+ +
+ +
+ {flatCommands.length === 0 ? ( +
+ No matches found +
+ ) : ( + groupOrder.map((group) => ( +
+
+ {group} +
+
+ {grouped[group].map((command) => { + const index = flatCommands.findIndex((item) => item.id === command.id) + const active = index === selectedIndex + + return ( + + ) + })} +
+
+ )) + )} +
+ + + ) +} + diff --git a/dashboard/frontend/src/components/PageStates.tsx b/dashboard/frontend/src/components/PageStates.tsx index 2fd856f0..9dba449a 100644 --- a/dashboard/frontend/src/components/PageStates.tsx +++ b/dashboard/frontend/src/components/PageStates.tsx @@ -12,6 +12,23 @@ export function FullPageLoader({ label = 'Loading...' }: { label?: string }) { ) } +export function PageSkeleton({ rows = 3, cards = 0 }: { rows?: number; cards?: number }) { + return ( +
+ {cards > 0 && ( +
+ {Array.from({ length: cards }).map((_, i) => ( +
+ ))} +
+ )} + {Array.from({ length: rows }).map((_, i) => ( +
+ ))} +
+ ) +} + export function SectionLoader({ label = 'Loading section...' }: { label?: string }) { return (
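PageSkeleton gives data-heavy pages a layout-shaped placeholder instead of a spinner. A minimal consumption sketch follows; the page name and endpoint are hypothetical, but Observability.tsx later in this diff imports the component for exactly this kind of loading state.

import { useEffect, useState } from 'react'
import { PageSkeleton } from '../components/PageStates'

// Hypothetical page, for illustration only: render the skeleton until the
// first fetch settles, then swap in the real content.
export default function ExamplePage() {
  const [items, setItems] = useState<string[] | null>(null)

  useEffect(() => {
    fetch('/api/example', { credentials: 'include' })
      .then((res) => res.json())
      .then((data) => setItems(data.items ?? []))
      .catch(() => setItems([]))
  }, [])

  if (items === null) return <PageSkeleton rows={4} cards={4} />

  return (
    <ul>
      {items.map((item) => (
        <li key={item}>{item}</li>
      ))}
    </ul>
  )
}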
diff --git a/dashboard/frontend/src/components/PluginInstallModal.tsx b/dashboard/frontend/src/components/PluginInstallModal.tsx index 834e3708..ccc32703 100644 --- a/dashboard/frontend/src/components/PluginInstallModal.tsx +++ b/dashboard/frontend/src/components/PluginInstallModal.tsx @@ -7,7 +7,10 @@ import SecurityScanSection, { type ScanVerdict, type ScanResult } from './Securi interface PreviewResult { manifest: Record warnings: string[] - conflicts?: Record + // Backend returns conflicts as a list of human-readable strings (not a dict). + // See plugin_loader.PluginInstaller.preview() — each blocker is a string + // appended to result["conflicts"]. + conflicts?: string[] } interface Props { @@ -140,7 +143,9 @@ export default function PluginInstallModal({ onClose, onInstalled }: Props) { const manifest = preview?.manifest ?? {} const warnings = preview?.warnings ?? [] - const conflicts = preview?.conflicts ? Object.keys(preview.conflicts) : [] + const conflicts: string[] = Array.isArray(preview?.conflicts) + ? (preview!.conflicts as string[]).filter((c): c is string => typeof c === 'string' && c.length > 0) + : [] // Install button is amber for WARN, normal green otherwise const installBtnClass = @@ -338,7 +343,11 @@ export default function PluginInstallModal({ onClose, onInstalled }: Props) {

   {t('plugins.conflicts')}
-  <div>{conflicts.join(', ')}</div>
+  <ul>
+    {conflicts.map((c, i) => (
+      <li key={i}>{c}</li>
+    ))}
+  </ul>
)} diff --git a/dashboard/frontend/src/components/PluginUninstall.tsx b/dashboard/frontend/src/components/PluginUninstall.tsx new file mode 100644 index 00000000..181a1f61 --- /dev/null +++ b/dashboard/frontend/src/components/PluginUninstall.tsx @@ -0,0 +1,320 @@ +/** + * PluginUninstall — B3 safe_uninstall 3-step wizard. + * + * Shown instead of window.confirm() when the plugin manifest declares + * safe_uninstall.enabled: true. + * + * Step 1 — Regulatory reason + "I accept responsibility" checkbox. + * Step 2 — ZIP password input (Vault B3.S5: AES-256 export encryption). + * Step 3 — Typed confirmation phrase + Uninstall button. + * + * For plugins without safe_uninstall (or safe_uninstall.enabled: false), + * render nothing — the caller falls back to the legacy window.confirm() path. + * + * Force-uninstall banner: if EVONEXUS_ALLOW_FORCE_UNINSTALL=1 is detected + * in the API response, a persistent orange alert is shown. + */ + +import { useState } from 'react' +import { AlertTriangle, Lock, Shield, Trash2, X } from 'lucide-react' +import { api } from '../lib/api' + +export interface SafeUninstallSpec { + enabled?: boolean + block_uninstall?: boolean + reason?: string + user_confirmation?: { + checkbox_label?: string + typed_phrase?: string + } + pre_uninstall_hook?: { + script?: string + output_dir?: string + timeout_seconds?: number + must_produce_file?: boolean + } + preserved_tables?: string[] +} + +interface Props { + slug: string + safeUninstall: SafeUninstallSpec + forceUninstallActive?: boolean + onClose: () => void + onUninstalled: () => void +} + +type Step = 1 | 2 | 3 + +export default function PluginUninstall({ + slug, + safeUninstall, + forceUninstallActive = false, + onClose, + onUninstalled, +}: Props) { + const [step, setStep] = useState(1) + const [checkboxChecked, setCheckboxChecked] = useState(false) + const [zipPassword, setZipPassword] = useState('') + const [zipPasswordConfirm, setZipPasswordConfirm] = useState('') + const [typedPhrase, setTypedPhrase] = useState('') + const [uninstalling, setUninstalling] = useState(false) + const [error, setError] = useState(null) + + const requiredPhrase = safeUninstall?.user_confirmation?.typed_phrase ?? '' + const checkboxLabel = + safeUninstall?.user_confirmation?.checkbox_label ?? + 'Tenho uma cópia dos dados exportados e assumo responsabilidade pela retenção legal.' + const reason = safeUninstall?.reason ?? '' + const preservedTables = safeUninstall?.preserved_tables ?? [] + + const phraseMatches = typedPhrase === requiredPhrase + const passwordsMatch = zipPassword === zipPasswordConfirm && zipPassword.length >= 8 + + async function handleUninstall() { + setUninstalling(true) + setError(null) + try { + const body: Record = { + confirmation_phrase: typedPhrase, + zip_password: zipPassword, + } + await api.delete(`/plugins/${slug}`, body) + onUninstalled() + } catch (e: unknown) { + setError(e instanceof Error ? e.message : 'Unexpected error during uninstall.') + setUninstalling(false) + } + } + + return ( +
+
+ {/* Header */} +
+
+ + Desinstalar plugin: {slug} +
+ +
+ + {/* Force-uninstall alert */} + {forceUninstallActive && ( +
+ ⚠ Force uninstall ATIVO — todas proteções desabilitadas +

+ EVONEXUS_ALLOW_FORCE_UNINSTALL=1 está definido. Esta ação ignora a confirmação e + preservação de dados. Todas as ações são auditadas. +

+
+ )} + +
+ {/* Step indicator */} +
+ {([1, 2, 3] as Step[]).map((s) => ( + s + ? 'bg-green-700 text-white' + : 'bg-neutral-700 text-neutral-400' + }`} + > + {s} + + ))} +
+ + {/* ── Step 1: Reason + checkbox ── */} + {step === 1 && ( +
+
+ +
+

Aviso regulatório

+

{reason}

+
+
+ + {preservedTables.length > 0 && ( +
+

+ Tabelas preservadas (renomeadas, não excluídas): +

+
    + {preservedTables.map((t) => ( +
  • + {t} → _orphan_{slug}_{t} +
  • + ))} +
+
+ )} + + + +
+ + +
+
+ )} + + {/* ── Step 2: ZIP password ── */} + {step === 2 && ( +
+
+ +
+

Senha do export (AES-256)

+

+ O arquivo de export será criptografado com esta senha. Anote em local seguro — + sem ela, o arquivo é inutilizável. +

+
+
+ +
+
+ + setZipPassword(e.target.value)} + placeholder="Senha do ZIP de export" + className="w-full rounded border border-neutral-700 bg-neutral-800 px-3 py-2 text-sm text-white placeholder-neutral-500 focus:border-[#00FFA7] focus:outline-none" + /> +
+
+ + setZipPasswordConfirm(e.target.value)} + placeholder="Repita a senha" + className="w-full rounded border border-neutral-700 bg-neutral-800 px-3 py-2 text-sm text-white placeholder-neutral-500 focus:border-[#00FFA7] focus:outline-none" + /> + {zipPassword && zipPasswordConfirm && !passwordsMatch && ( +

+ {zipPassword.length < 8 + ? 'Senha deve ter pelo menos 8 caracteres.' + : 'Senhas não coincidem.'} +

+ )} +
+
+ +
+ + +
+
+ )} + + {/* ── Step 3: Typed phrase confirmation ── */} + {step === 3 && ( +
+
+ +
+

+ Digite exatamente a frase abaixo para confirmar a desinstalação: +

+

+ {requiredPhrase} +

+
+
+ + setTypedPhrase(e.target.value)} + placeholder={requiredPhrase} + className="w-full rounded border border-neutral-700 bg-neutral-800 px-3 py-2 text-sm text-white placeholder-neutral-500 focus:border-[#00FFA7] focus:outline-none" + /> + {typedPhrase && !phraseMatches && ( +

+ Texto deve ser exatamente: {requiredPhrase} +

+ )} + + {error && ( +

+ {error} +

+ )} + +
+ + +
+
+ )} +
+
+
+ ) +} diff --git a/dashboard/frontend/src/components/Sidebar.tsx b/dashboard/frontend/src/components/Sidebar.tsx index 42bb21eb..3bb07035 100644 --- a/dashboard/frontend/src/components/Sidebar.tsx +++ b/dashboard/frontend/src/components/Sidebar.tsx @@ -73,6 +73,7 @@ const navGroups: NavGroup[] = [ { to: '/memory', labelKey: 'memory', icon: Brain, resource: 'memory' }, { to: '/mempalace', labelKey: 'mempalace', icon: Library, resource: 'mempalace' }, { to: '/knowledge', labelKey: 'knowledge', icon: Database, resource: 'knowledge' }, + { to: '/agent-knowledge', labelKey: 'agentKnowledge', icon: Bot, resource: 'knowledge' }, { to: '/costs', labelKey: 'costs', icon: DollarSign, resource: 'costs' }, ], }, diff --git a/dashboard/frontend/src/components/agent-chat/ChatBlocks.tsx b/dashboard/frontend/src/components/agent-chat/ChatBlocks.tsx new file mode 100644 index 00000000..7ca3050c --- /dev/null +++ b/dashboard/frontend/src/components/agent-chat/ChatBlocks.tsx @@ -0,0 +1,303 @@ +import { useState } from 'react' +import { + Ban, + Check, + CheckCircle2, + ChevronDown, + ChevronRight, + Edit2, + FileCode, + FileText, + ShieldAlert, + Terminal as TermIcon, +} from 'lucide-react' +import { AgentAvatar } from '../AgentAvatar' + +export function TypingIndicator({ accentColor, isThinking }: { accentColor: string; isThinking?: boolean }) { + return ( +
+
+ {[0, 1, 2].map((i) => ( + + ))} +
+ + {isThinking ? 'Thinking...' : 'Typing...'} + +
+ ) +} + +export function AgentInputToggle({ parsedInput, rawInput }: { parsedInput: any; rawInput: string }) { + const [showInput, setShowInput] = useState(false) + return ( +
+ + {showInput && ( +
+          {parsedInput ? JSON.stringify(parsedInput, null, 2) : rawInput}
+        
+ )} +
+ ) +} + +export function TypingIndicatorMini({ accentColor }: { accentColor: string }) { + return ( + + {[0, 1, 2].map((i) => ( + + ))} + + ) +} + +export function ToolCard({ block, accentColor }: { block: any; accentColor: string }) { + const [open, setOpen] = useState(false) + + let parsedInput: any = null + try { + parsedInput = JSON.parse(block.input) + } catch {} + + const isAgentTool = block.toolName === 'Agent' || block.toolName === 'SendMessage' + const subagentName = parsedInput?.subagent_type || parsedInput?.name || parsedInput?.to || '' + const subagentDesc = parsedInput?.description || parsedInput?.summary || block.subagentType || '' + + if (isAgentTool) { + const isRunning = block.subagentStatus === 'running' + const isDone = block.done || block.subagentStatus === 'completed' || block.subagentStatus === 'failed' + const subagentTools = block.subagentTools || [] + const toolCount = subagentTools.length + + const getToolIcon = (toolName: string) => { + if (toolName === 'Bash') return + if (toolName === 'Read') return + if (toolName === 'Edit' || toolName === 'Write') return + return + } + + return ( +
+ + + {open && ( +
+
+ {subagentTools.length === 0 ? ( +
No tools yet
+ ) : ( + subagentTools.map((tool: any, index: number) => { + let inputPreview = '' + try { + const parsed = JSON.parse(tool.input) + inputPreview = (parsed.command || parsed.file_path || parsed.path || parsed.pattern || parsed.description || tool.input).slice(0, 60) + } catch { + inputPreview = tool.input.slice(0, 60) + } + + return ( +
+ {getToolIcon(tool.toolName)} + {tool.toolName} + {inputPreview && {inputPreview}} +
+ ) + }) + )} +
+ {block.input && } +
+ )} +
+ ) + } + + if (block.toolName === 'TodoWrite' && Array.isArray(parsedInput?.todos)) { + const todos: Array<{ content: string; status: string; priority?: string; id?: string }> = parsedInput.todos + const completedCount = todos.filter((todo) => todo.status === 'completed').length + + return ( +
+ + +
+ {todos.map((todo, index) => { + const isPending = todo.status === 'pending' + const isInProgress = todo.status === 'in_progress' + const isCompleted = todo.status === 'completed' + const icon = isPending ? '○' : isInProgress ? '◐' : '●' + return ( +
+ + {icon} + + + {todo.content} + +
+ ) + })} +
+
+ ) + } + + const displayInfo = parsedInput + ? (parsedInput.command || parsedInput.file_path || parsedInput.path || parsedInput.pattern || parsedInput.description || '') + : '' + + return ( +
+ + + {open && block.input && ( +
+
+            {parsedInput ? JSON.stringify(parsedInput, null, 2) : block.input}
+          
+
+ )} +
+ ) +} + +export function ApprovalCard({ req, accentColor, onAllow, onDeny }: { req: any; accentColor: string; onAllow: () => void; onDeny: () => void }) { + let summary = '' + const inp = req.input as any + + if (req.toolName === 'Bash') { + summary = inp?.command ? String(inp.command).slice(0, 120) : '' + } else if (req.toolName === 'Write') { + const lines = inp?.content ? String(inp.content).split('\n').slice(0, 5).join('\n') : '' + summary = inp?.file_path ? `${inp.file_path}${lines ? '\n' + lines : ''}` : lines + } else if (req.toolName === 'Edit') { + summary = inp?.file_path ? String(inp.file_path) : '' + } else if (req.toolName === 'Agent') { + const agentName = inp?.subagent_type || inp?.agent || '' + const prompt = inp?.prompt || inp?.description || '' + summary = agentName ? `@${agentName}${prompt ? ' — ' + String(prompt).slice(0, 80) : ''}` : String(prompt).slice(0, 100) + } + if (!summary && req.title) summary = req.title + + return ( +
+ +
+
+ {req.toolName} + {summary && {summary}} +
+ {req.description &&

{req.description}

} +
+
+ + +
+
+ ) +} diff --git a/dashboard/frontend/src/context/ThemeContext.tsx b/dashboard/frontend/src/context/ThemeContext.tsx new file mode 100644 index 00000000..b94a2d8c --- /dev/null +++ b/dashboard/frontend/src/context/ThemeContext.tsx @@ -0,0 +1,52 @@ +import { createContext, useCallback, useContext, useEffect, useState, type ReactNode } from 'react' +import { applyTheme, persistTheme, resolveInitialTheme, type ThemeMode } from '../lib/theme' + +interface ThemeContextValue { + theme: ThemeMode + isDark: boolean + isLight: boolean + setTheme: (theme: ThemeMode) => void + toggleTheme: () => void +} + +const ThemeContext = createContext(null) + +export function ThemeProvider({ children }: { children: ReactNode }) { + const [theme, setThemeState] = useState(() => resolveInitialTheme()) + + useEffect(() => { + applyTheme(theme) + }, [theme]) + + const setTheme = useCallback((nextTheme: ThemeMode) => { + persistTheme(nextTheme) + setThemeState(nextTheme) + }, []) + + const toggleTheme = useCallback(() => { + setTheme(theme === 'dark' ? 'light' : 'dark') + }, [setTheme, theme]) + + return ( + + {children} + + ) +} + +export function useTheme() { + const ctx = useContext(ThemeContext) + if (!ctx) { + throw new Error('useTheme must be used within ') + } + return ctx +} + diff --git a/dashboard/frontend/src/i18n/locales/en-US/index.ts b/dashboard/frontend/src/i18n/locales/en-US/index.ts index 675e120e..2cd3b16e 100644 --- a/dashboard/frontend/src/i18n/locales/en-US/index.ts +++ b/dashboard/frontend/src/i18n/locales/en-US/index.ts @@ -196,6 +196,7 @@ const translations = { issues: 'Topics', reports: 'Reports', mempalace: 'MemPalace', + agentKnowledge: 'Agent RAG', systems: 'Systems', templates: 'Templates', shareLinks: 'Share Links', @@ -925,6 +926,9 @@ const translations = { configureFromScratch: 'Configure from scratch', restoreBrainRepo: 'Restore brain repo', restoreHint: "If you've previously set up EvoNexus with a brain repo, use \"Restore\" to recover your configuration.", + useExistingWorkspace: 'Use existing workspace', + workspaceReadyBanner: 'This workspace is already configured by your team. 
You can start using it right away — no setup needed.', + orSetupManually: 'or set up manually', }, provider: { title: 'Choose your AI provider', diff --git a/dashboard/frontend/src/i18n/locales/es/index.ts b/dashboard/frontend/src/i18n/locales/es/index.ts index 721276f4..26ff0555 100644 --- a/dashboard/frontend/src/i18n/locales/es/index.ts +++ b/dashboard/frontend/src/i18n/locales/es/index.ts @@ -184,6 +184,7 @@ const translations = { issues: 'Temas', reports: 'Informes', mempalace: 'MemPalace', + agentKnowledge: 'Base RAG', systems: 'Sistemas', templates: 'Plantillas', shareLinks: 'Enlaces compartidos', diff --git a/dashboard/frontend/src/i18n/locales/pt-BR/index.ts b/dashboard/frontend/src/i18n/locales/pt-BR/index.ts index 941c612f..aac13b98 100644 --- a/dashboard/frontend/src/i18n/locales/pt-BR/index.ts +++ b/dashboard/frontend/src/i18n/locales/pt-BR/index.ts @@ -191,6 +191,7 @@ const translations = { issues: 'Tópicos', reports: 'Relatórios', mempalace: 'MemPalace', + agentKnowledge: 'Base RAG', systems: 'Sistemas', templates: 'Modelos', shareLinks: 'Links compartilhados', diff --git a/dashboard/frontend/src/lib/api.ts b/dashboard/frontend/src/lib/api.ts index 213989d4..efc8c44a 100644 --- a/dashboard/frontend/src/lib/api.ts +++ b/dashboard/frontend/src/lib/api.ts @@ -15,7 +15,20 @@ async function buildError(res: Response): Promise { let detail = '' try { const data = await res.clone().json() - detail = data?.error || data?.description || data?.message || '' + // Try common error shapes first, then plugin-preview-shaped responses + // (`{conflicts: [...], manifest, ...}`). Without this, plugin install + // 409s surfaced as "409 CONFLICT" with no hint at the actual reason + // (e.g. version mismatch). + detail = + data?.error || + data?.description || + data?.message || + (Array.isArray(data?.conflicts) && data.conflicts.length > 0 + ? data.conflicts.join(' • ') + : '') || + (Array.isArray(data?.details) && data.details.length > 0 + ? data.details.join(' • ') + : '') } catch { try { const text = await res.text() @@ -73,11 +86,12 @@ export const api = { if (!res.ok) throw await buildError(res); return res.json(); }, - delete: async (path: string) => { + delete: async (path: string, body?: unknown) => { const res = await fetch(`${API}/api${path}`, { method: 'DELETE', - headers: { ...XHR_HEADER }, + headers: { 'Content-Type': 'application/json', ...XHR_HEADER }, credentials: 'include', + body: body ? 
JSON.stringify(body) : undefined, }); if (!res.ok) throw await buildError(res); return res.json(); diff --git a/dashboard/frontend/src/lib/navigation.ts b/dashboard/frontend/src/lib/navigation.ts new file mode 100644 index 00000000..a7455e7a --- /dev/null +++ b/dashboard/frontend/src/lib/navigation.ts @@ -0,0 +1,137 @@ +import type { LucideIcon } from 'lucide-react' +import { + Activity, + BookOpen, + Bot, + BarChart3, + Brain, + Calendar, + CalendarClock, + Cpu, + Database, + DollarSign, + FolderOpen, + HardDriveDownload, + Heart, + Layout, + LayoutDashboard, + Library, + Monitor, + Plug, + Puzzle, + ScrollText, + Settings, + Share2, + Shield, + Target, + Ticket, + Users, + Webhook, + Clock, + Zap, +} from 'lucide-react' + +export interface NavItem { + to: string + labelKey: string + icon: LucideIcon + resource: string | null + desktopOnly?: boolean +} + +export interface NavGroup { + key: string + collapsible: boolean + adminOnly?: boolean + items: NavItem[] +} + +export const NAV_GROUPS: NavGroup[] = [ + { + key: 'main', + collapsible: false, + items: [ + { to: '/', labelKey: 'overview', icon: LayoutDashboard, resource: null }, + ], + }, + { + key: 'operations', + collapsible: true, + items: [ + { to: '/agents', labelKey: 'agents', icon: Bot, resource: 'agents' }, + { to: '/skills', labelKey: 'skills', icon: Zap, resource: 'skills' }, + { to: '/routines', labelKey: 'routines', icon: Clock, resource: 'routines' }, + { to: '/tasks', labelKey: 'tasks', icon: CalendarClock, resource: 'tasks' }, + { to: '/triggers', labelKey: 'triggers', icon: Webhook, resource: 'triggers' }, + { to: '/heartbeats', labelKey: 'heartbeats', icon: Heart, resource: 'heartbeats' }, + { to: '/activity', labelKey: 'activity', icon: Activity, resource: 'scheduler' }, + { to: '/goals', labelKey: 'goals', icon: Target, resource: 'goals' }, + { to: '/topics', labelKey: 'issues', icon: Ticket, resource: 'tickets' }, + { to: '/templates', labelKey: 'templates', icon: Layout, resource: 'templates' }, + ], + }, + { + key: 'data', + collapsible: true, + items: [ + { to: '/workspace', labelKey: 'workspace', icon: FolderOpen, resource: 'workspace' }, + { to: '/shares', labelKey: 'shareLinks', icon: Share2, resource: 'workspace' }, + { to: '/memory', labelKey: 'memory', icon: Brain, resource: 'memory' }, + { to: '/mempalace', labelKey: 'mempalace', icon: Library, resource: 'mempalace' }, + { to: '/knowledge', labelKey: 'knowledge', icon: Database, resource: 'knowledge' }, + { to: '/agent-knowledge', labelKey: 'agentKnowledge', icon: Bot, resource: 'knowledge' }, + { to: '/costs', labelKey: 'costs', icon: DollarSign, resource: 'costs' }, + ], + }, + { + key: 'system', + collapsible: true, + items: [ + { to: '/settings', labelKey: 'settings', icon: Settings, resource: 'config' }, + { to: '/systems', labelKey: 'systems', icon: Monitor, resource: 'systems' }, + { to: '/observability', labelKey: 'observability', icon: BarChart3, resource: 'systems' }, + { to: '/providers', labelKey: 'providers', icon: Cpu, resource: 'config' }, + { to: '/plugins', labelKey: 'plugins', icon: Puzzle, resource: 'config' }, + { to: '/integrations', labelKey: 'integrations', icon: Plug, resource: 'integrations' }, + { to: '/scheduler', labelKey: 'scheduler', icon: Calendar, resource: 'scheduler' }, + { to: '/backups', labelKey: 'backups', icon: HardDriveDownload, resource: 'config' }, + ], + }, + { + key: 'admin', + collapsible: true, + adminOnly: true, + items: [ + { to: '/users', labelKey: 'users', icon: Users, resource: 'users' }, + { to: '/roles', 
labelKey: 'roles', icon: Shield, resource: 'users' }, + { to: '/audit', labelKey: 'audit', icon: ScrollText, resource: 'audit' }, + ], + }, +] + +export const DOCS_NAV_ITEM: NavItem = { + to: '/docs', + labelKey: 'docs', + icon: BookOpen, + resource: null, +} + +export function getVisibleNavGroups(hasPermission: (resource: string, action: string) => boolean): NavGroup[] { + return NAV_GROUPS + .map((group) => { + const items = group.items.filter((item) => item.resource === null || hasPermission(item.resource, 'view')) + if (items.length === 0) return null + + if (group.adminOnly) { + const hasAnyAdmin = group.items.some((item) => item.resource && hasPermission(item.resource, 'view')) + if (!hasAnyAdmin) return null + } + + return { ...group, items } + }) + .filter((group): group is NavGroup => group !== null) +} + +export function getVisibleNavItems(hasPermission: (resource: string, action: string) => boolean): NavItem[] { + return [...getVisibleNavGroups(hasPermission).flatMap((group) => group.items), DOCS_NAV_ITEM] +} diff --git a/dashboard/frontend/src/lib/pwa.ts b/dashboard/frontend/src/lib/pwa.ts new file mode 100644 index 00000000..18b96849 --- /dev/null +++ b/dashboard/frontend/src/lib/pwa.ts @@ -0,0 +1,18 @@ +export function registerServiceWorker() { + if (!('serviceWorker' in navigator)) return + if (!import.meta.env.PROD) return + + const register = () => { + navigator.serviceWorker.register('/sw.js').catch((error) => { + console.warn('[pwa] service worker registration failed', error) + }) + } + + if (document.readyState === 'complete') { + register() + return + } + + window.addEventListener('load', register, { once: true }) +} + diff --git a/dashboard/frontend/src/lib/terminal-url.ts b/dashboard/frontend/src/lib/terminal-url.ts index fe1f3bc7..7dcba017 100644 --- a/dashboard/frontend/src/lib/terminal-url.ts +++ b/dashboard/frontend/src/lib/terminal-url.ts @@ -1,27 +1,52 @@ /** * Shared terminal-server URL constants. - * Exported here so AgentChat, useGlobalNotifications, and any future consumer - * all resolve the same base URL without duplication. + * Exported here so AgentDetail, AgentTerminal, AgentChat, + * useGlobalNotifications, and any future consumer all resolve the same base + * URL without duplication. * * The dashboard backend mounts an HTTP+WebSocket proxy at /terminal that - * forwards to the local terminal-server. Going through it (rather than - * hitting :32352 directly) wins on three fronts: - * 1. Same-origin requests pass the dashboard's CSP `connect-src 'self'` - * directive — direct cross-port fetches are blocked even from localhost. - * 2. CORS preflights are avoided (same origin). - * 3. Browsers behind SSH tunnels, Tailscale Funnel, or any reverse proxy - * that only exposes the dashboard port still get terminal access. + * forwards to the local terminal-server. Going through it keeps terminal + * access on the same origin as the dashboard, which works behind SSH + * tunnels, Tailscale Funnel, and reverse proxies that expose only the + * dashboard port. * - * The vite dev server (port 5173) does not yet proxy /terminal, so in DEV - * mode we fall back to a direct connection — that path is local-only by - * definition. + * Escape hatch: VITE_TERMINAL_URL can point at a separate terminal-server + * base URL. It accepts http(s):// or ws(s):// and maps to both schemes. + * + * The Vite dev server (port 5173) does not proxy /terminal, so in DEV mode + * we fall back to a direct connection to terminal-server. 
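+ *
+ * A concrete resolution example (hypothetical host, for illustration):
+ * VITE_TERMINAL_URL=https://tunnel.example.com/terminal/ yields the HTTP
+ * base https://tunnel.example.com/terminal and the WS base
+ * wss://tunnel.example.com/terminal. The trailing slash is stripped, and
+ * the ws/wss scheme is derived from http/https, so a single URL
+ * configures both transports.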
*/ const isViteDev = import.meta.env.DEV +const rawOverride = (import.meta.env.VITE_TERMINAL_URL as string | undefined)?.trim() +const terminalOverride = rawOverride ? rawOverride.replace(/\/+$/, '') : null + +function resolveOverride(raw: string): { http: string; ws: string } | null { + try { + const url = new URL(raw) + const isSecure = url.protocol === 'https:' || url.protocol === 'wss:' + const httpProtocol = isSecure ? 'https:' : 'http:' + const wsProtocol = isSecure ? 'wss:' : 'ws:' + const path = url.pathname.replace(/\/+$/, '') + url.search + + return { + http: `${httpProtocol}//${url.host}${path}`, + ws: `${wsProtocol}//${url.host}${path}`, + } + } catch { + return null + } +} + +const override = terminalOverride ? resolveOverride(terminalOverride) : null -export const TS_HTTP = isViteDev - ? `http://${window.location.hostname}:32352` - : `${window.location.protocol}//${window.location.host}/terminal` +export const TS_HTTP = override + ? override.http + : isViteDev + ? `http://${window.location.hostname}:32352` + : `${window.location.protocol}//${window.location.host}/terminal` -export const TS_WS = isViteDev - ? `ws://${window.location.hostname}:32352` - : `${window.location.protocol === 'https:' ? 'wss:' : 'ws:'}//${window.location.host}/terminal` +export const TS_WS = override + ? override.ws + : isViteDev + ? `ws://${window.location.hostname}:32352` + : `${window.location.protocol === 'https:' ? 'wss:' : 'ws:'}//${window.location.host}/terminal` diff --git a/dashboard/frontend/src/lib/theme.ts b/dashboard/frontend/src/lib/theme.ts new file mode 100644 index 00000000..fbed42a4 --- /dev/null +++ b/dashboard/frontend/src/lib/theme.ts @@ -0,0 +1,44 @@ +export type ThemeMode = 'dark' | 'light' + +export const THEME_STORAGE_KEY = 'evonexus.theme' + +function getStoredTheme(): ThemeMode | null { + if (typeof window === 'undefined') return null + try { + const value = window.localStorage.getItem(THEME_STORAGE_KEY) + if (value === 'dark' || value === 'light') return value + } catch {} + return null +} + +function getPreferredTheme(): ThemeMode { + if (typeof window === 'undefined') return 'dark' + return window.matchMedia?.('(prefers-color-scheme: light)').matches ? 'light' : 'dark' +} + +export function resolveInitialTheme(): ThemeMode { + return getStoredTheme() ?? getPreferredTheme() +} + +export function applyTheme(theme: ThemeMode) { + if (typeof document === 'undefined') return + + const root = document.documentElement + root.dataset.theme = theme + root.style.colorScheme = theme + + const body = document.body + body.dataset.theme = theme + + const themeColor = theme === 'light' ? 
'#f5f7fb' : '#0C111D' + const meta = document.querySelector('meta[name="theme-color"]') as HTMLMetaElement | null + if (meta) meta.content = themeColor +} + +export function persistTheme(theme: ThemeMode) { + if (typeof window === 'undefined') return + try { + window.localStorage.setItem(THEME_STORAGE_KEY, theme) + } catch {} +} + diff --git a/dashboard/frontend/src/pages/AgentDetail.tsx b/dashboard/frontend/src/pages/AgentDetail.tsx index 803f5cba..f5a80f88 100644 --- a/dashboard/frontend/src/pages/AgentDetail.tsx +++ b/dashboard/frontend/src/pages/AgentDetail.tsx @@ -11,6 +11,7 @@ import { trackAgentVisit } from './Agents' import { AgentAvatar } from '../components/AgentAvatar' import { useAuth } from '../context/AuthContext' import { useNotificationBadge } from '../hooks/useNotificationBadge' +import { TS_HTTP } from '../lib/terminal-url' interface MemoryFile { name: string @@ -20,16 +21,6 @@ interface MemoryFile { type Tab = 'sessions' | 'profile' | 'memory' -// Terminal-server URL — always go through the dashboard's /terminal proxy in -// production builds. Direct cross-port fetches (e.g. localhost:32352 from a -// page served at localhost:8080) are blocked by the dashboard's -// `connect-src 'self'` CSP directive even when the network path works. -// In Vite dev mode (no proxy mounted) we fall back to a direct connection. -const isViteDev = import.meta.env.DEV -const TS_HTTP = isViteDev - ? `http://${window.location.hostname}:32352` - : `${window.location.origin}/terminal` - interface TerminalTab { id: string // sessionId name: string // display name diff --git a/dashboard/frontend/src/pages/AgentKnowledge.tsx b/dashboard/frontend/src/pages/AgentKnowledge.tsx new file mode 100644 index 00000000..6696166d --- /dev/null +++ b/dashboard/frontend/src/pages/AgentKnowledge.tsx @@ -0,0 +1,670 @@ +import { useCallback, useEffect, useMemo, useRef, useState } from 'react' +import { + AlertCircle, + Bot, + CheckCircle2, + Database, + FileText, + Loader2, + RefreshCw, + Search, + Trash2, + Upload, +} from 'lucide-react' +import { useNavigate } from 'react-router-dom' +import { api } from '../lib/api' +import { useAuth } from '../context/AuthContext' + +const API_BASE = import.meta.env.DEV ? 
'http://localhost:8080' : '' + +interface KnowledgeConnection { + id: string + slug: string + name: string + status: string +} + +interface Division { + slug: string + label: string + agent: string | null + description: string + color: string + ready: boolean + documents_count: number + chunks_count: number + space?: { id: string; slug: string; name: string } | null +} + +interface DivisionResponse { + connections: KnowledgeConnection[] + active_connection_id: string | null + divisions: Division[] + ready: boolean + message?: string +} + +interface KnowledgeDocument { + id: string + title: string + status: string + created_at?: string + chunks_count?: number + pages_count?: number + division: string + division_label: string +} + +interface UploadItem { + id: string + filename: string + documentId?: string + division: string + phase: string + error?: string +} + +interface SearchHit { + chunk_id: string + document_id: string + doc_title?: string + content_type?: string + content: string + final_score?: number + division: string + division_label: string + chunk_metadata?: Record +} + +function formatDate(value?: string) { + if (!value) return '-' + try { + return new Intl.DateTimeFormat('pt-BR', { dateStyle: 'short', timeStyle: 'short' }).format(new Date(value)) + } catch { + return value + } +} + +function phaseLabel(phase: string) { + const normalized = phase || 'pending' + const labels: Record = { + queued: 'Na fila', + pending: 'Pendente', + scanning: 'Analisando', + parsing: 'Lendo arquivo', + chunking: 'Quebrando em trechos', + embedding: 'Gerando vetores', + storing: 'Gravando', + classifying: 'Classificando', + processing: 'Processando', + ready: 'Pronto', + done: 'Pronto', + error: 'Erro', + } + return labels[normalized] || normalized +} + +function statusColor(status: string) { + if (['ready', 'done'].includes(status)) return '#00FFA7' + if (status === 'error') return '#F87171' + return '#FBBF24' +} + +export default function AgentKnowledge() { + const navigate = useNavigate() + const { hasPermission } = useAuth() + const canManage = hasPermission('knowledge', 'manage') + + const [loading, setLoading] = useState(true) + const [bootstrapping, setBootstrapping] = useState(false) + const [error, setError] = useState(null) + const [connections, setConnections] = useState([]) + const [activeConnectionId, setActiveConnectionId] = useState(null) + const [divisions, setDivisions] = useState([]) + const [selectedDivision, setSelectedDivision] = useState('geral') + const [documents, setDocuments] = useState([]) + const [documentsLoading, setDocumentsLoading] = useState(false) + + const [files, setFiles] = useState([]) + const [dragging, setDragging] = useState(false) + const [uploading, setUploading] = useState(false) + const [uploads, setUploads] = useState([]) + const fileInputRef = useRef(null) + const pollers = useRef>>({}) + + const [query, setQuery] = useState('') + const [searchScope, setSearchScope] = useState('all') + const [searching, setSearching] = useState(false) + const [searchResults, setSearchResults] = useState(null) + + const selected = useMemo( + () => divisions.find((d) => d.slug === selectedDivision) || divisions[0], + [divisions, selectedDivision] + ) + + const allReady = divisions.length > 0 && divisions.every((d) => d.ready) + + const loadDivisions = useCallback(async (connectionId?: string | null) => { + setError(null) + const suffix = connectionId ? 
`?connection_id=${encodeURIComponent(connectionId)}` : '' + const data: DivisionResponse = await api.get(`/agent-knowledge/divisions${suffix}`) + setConnections(data.connections || []) + setActiveConnectionId(data.active_connection_id) + setDivisions(data.divisions || []) + if (!selectedDivision && data.divisions?.[0]) setSelectedDivision(data.divisions[0].slug) + return data + }, [selectedDivision]) + + const loadDocuments = useCallback(async () => { + if (!activeConnectionId || !selectedDivision) return + setDocumentsLoading(true) + try { + const params = new URLSearchParams({ + connection_id: activeConnectionId, + division: selectedDivision, + limit: '25', + }) + const data = await api.get(`/agent-knowledge/documents?${params}`) + setDocuments(data.documents || []) + } catch { + setDocuments([]) + } finally { + setDocumentsLoading(false) + } + }, [activeConnectionId, selectedDivision]) + + useEffect(() => { + loadDivisions() + .catch((e) => setError(e instanceof Error ? e.message : 'Falha ao carregar a base RAG.')) + .finally(() => setLoading(false)) + }, [loadDivisions]) + + useEffect(() => { + loadDocuments() + }, [loadDocuments]) + + useEffect(() => { + return () => { + Object.values(pollers.current).forEach(clearInterval) + } + }, []) + + async function handleBootstrap() { + if (!activeConnectionId) return + setBootstrapping(true) + setError(null) + try { + const data = await api.post('/agent-knowledge/bootstrap', { connection_id: activeConnectionId }) + setDivisions(data.divisions || []) + await loadDocuments() + } catch (e) { + setError(e instanceof Error ? e.message : 'Falha ao preparar as divisões RAG.') + } finally { + setBootstrapping(false) + } + } + + function addFiles(nextFiles: File[]) { + if (nextFiles.length === 0) return + setFiles((prev) => [...prev, ...nextFiles]) + } + + function updateUpload(id: string, patch: Partial) { + setUploads((prev) => prev.map((item) => item.id === id ? { ...item, ...patch } : item)) + } + + function pollStatus(localId: string, documentId: string) { + const interval = setInterval(async () => { + try { + const data = await api.get(`/agent-knowledge/documents/${documentId}/status`) + const phase = data.phase || data.status || 'processing' + updateUpload(localId, { phase, error: data.error || data.error_message }) + if (['done', 'ready', 'error'].includes(phase)) { + clearInterval(interval) + delete pollers.current[localId] + await loadDivisions(activeConnectionId) + await loadDocuments() + } + } catch { + updateUpload(localId, { phase: 'error', error: 'Falha ao consultar status.' 
}) + clearInterval(interval) + delete pollers.current[localId] + } + }, 2000) + pollers.current[localId] = interval + } + + async function handleUpload() { + if (!activeConnectionId || !selected || files.length === 0) return + setUploading(true) + setError(null) + try { + const body = new FormData() + body.append('connection_id', activeConnectionId) + body.append('division', selected.slug) + files.forEach((file) => body.append('files', file)) + + const res = await fetch(`${API_BASE}/api/agent-knowledge/upload`, { + method: 'POST', + credentials: 'include', + headers: { 'X-Requested-With': 'XMLHttpRequest' }, + body, + }) + if (!res.ok) { + const data = await res.json().catch(() => ({})) + throw new Error(data.message || data.error || `Upload falhou: ${res.status}`) + } + const data = await res.json() + const items: UploadItem[] = (data.uploads || []).map((upload: any) => ({ + id: `${upload.document_id}-${Math.random()}`, + filename: upload.filename, + documentId: upload.document_id, + division: upload.division, + phase: upload.status?.phase || upload.document?.status || 'pending', + })) + setUploads((prev) => [...items, ...prev]) + setFiles([]) + items.forEach((item) => { + if (item.documentId) pollStatus(item.id, item.documentId) + }) + await loadDivisions(activeConnectionId) + await loadDocuments() + } catch (e) { + setError(e instanceof Error ? e.message : 'Falha ao enviar arquivos.') + } finally { + setUploading(false) + } + } + + async function handleSearch() { + if (!activeConnectionId || !query.trim()) return + setSearching(true) + setError(null) + try { + const params = new URLSearchParams({ + connection_id: activeConnectionId, + division: searchScope, + q: query.trim(), + top_k: '12', + }) + const data = await api.get(`/agent-knowledge/search?${params}`) + setSearchResults(data.results || []) + } catch (e) { + setError(e instanceof Error ? e.message : 'Falha ao buscar na base RAG.') + setSearchResults([]) + } finally { + setSearching(false) + } + } + + async function handleDelete(documentId: string) { + if (!activeConnectionId) return + try { + await api.delete(`/agent-knowledge/documents/${documentId}?connection_id=${encodeURIComponent(activeConnectionId)}`) + await loadDivisions(activeConnectionId) + await loadDocuments() + } catch (e) { + setError(e instanceof Error ? e.message : 'Falha ao excluir documento.') + } + } + + if (loading) { + return ( +
+ Carregando base RAG... +
+ ) + } + + if (!activeConnectionId) { + return ( +
+
+ +

Base RAG dos Agentes

+

+ Nenhuma conexão pgvector pronta foi encontrada. +

+ +
+
+ ) + } + + return ( +
+
+
+
+
+ +
+
+

Base RAG dos Agentes

+

Conhecimento pgvector dividido por agente contábil

+
+
+
+ +
+ {connections.length > 1 && ( + + )} + +
+
+ + {error && ( +
+ + {error} +
+ )} + + {!allReady && ( +
+
+

Divisões RAG pendentes

+

Crie os Spaces pgvector: Geral e os seis agentes customizados.

+
+ {canManage && ( + + )} +
+ )} + +
+
+ {divisions.map((division) => ( + + ))} +
+ +
+
+
+
+

Upload para {selected?.label}

+

{selected?.agent || 'Conhecimento comum dos agentes'}

+
+ {files.length > 0 && ( + + )} +
+ +
{ e.preventDefault(); setDragging(true) }} + onDragLeave={() => setDragging(false)} + onDrop={(e) => { + e.preventDefault() + setDragging(false) + addFiles(Array.from(e.dataTransfer.files)) + }} + onClick={() => fileInputRef.current?.click()} + className={`border-2 border-dashed rounded-xl p-8 text-center cursor-pointer transition-colors ${ + dragging ? 'border-[#00FFA7] bg-[#00FFA7]/5' : 'border-[#344054] hover:border-[#00FFA7]/40' + }`} + > + +

Arraste arquivos ou clique para selecionar

+

PDF, DOCX, PPTX, XLSX, HTML, imagens, TXT, MD, CSV e JSON

+ { + if (e.target.files) addFiles(Array.from(e.target.files)) + e.currentTarget.value = '' + }} + /> +
+ + {files.length > 0 && ( +
+
+ {files.map((file, idx) => ( + + {file.name} + + ))} +
+ + {!canManage &&

Seu papel precisa de permissão knowledge:manage para upload.

} + {selected && !selected.ready &&

Prepare a divisão antes de enviar arquivos.

} +
+ )} + + {uploads.length > 0 && ( +
+ {uploads.slice(0, 6).map((item) => ( +
+
+

{item.filename}

+ {item.error &&

{item.error}

} +
+ + {phaseLabel(item.phase)} + +
+ ))} +
+ )} +
+ +
+
+
+

Busca RAG

+

Consulta híbrida vetorial + BM25 nos chunks prontos

+
+ +
+ +
+
+ + setQuery(e.target.value)} + onKeyDown={(e) => e.key === 'Enter' && handleSearch()} + placeholder="Buscar instruções, regras, prazos ou documentos..." + className="w-full bg-[#0C111D] border border-[#344054] rounded-lg pl-9 pr-3 py-2.5 text-sm text-[#F9FAFB] placeholder-[#667085] focus:border-[#00FFA7] focus:outline-none" + /> +
+ +
+ + {searchResults && ( +
+ {searchResults.length === 0 ? ( +

Nenhum resultado encontrado.

+ ) : ( + searchResults.map((hit) => ( +
+
+
+

{hit.doc_title || 'Documento'}

+

{hit.division_label} {hit.content_type ? `- ${hit.content_type}` : ''}

+
+ {hit.final_score != null && ( + + {hit.final_score.toFixed(4)} + + )} +
+

{hit.content}

+
+ )) + )} +
+ )} +
+ +
+
+
+

Documentos em {selected?.label}

+

Últimos arquivos enviados para esta divisão

+
+ {documentsLoading && } +
+ + {documents.length === 0 ? ( +

Nenhum documento nesta divisão.

+ ) : ( +
+            <table>
+              <thead>
+                <tr>
+                  <th>Documento</th>
+                  <th>Status</th>
+                  <th>Chunks</th>
+                  <th>Criado</th>
+                  <th />
+                </tr>
+              </thead>
+              <tbody>
+                {documents.map((doc) => (
+                  <tr key={doc.id}>
+                    <td>{doc.title}</td>
+                    <td>
+                      <span style={{ color: statusColor(doc.status) }}>{phaseLabel(doc.status)}</span>
+                    </td>
+                    <td>{doc.chunks_count ?? 0}</td>
+                    <td>{formatDate(doc.created_at)}</td>
+                    <td>
+                      {canManage && (
+                        <button onClick={() => handleDelete(doc.id)} title="Excluir documento">
+                          <Trash2 className="w-4 h-4" />
+                        </button>
+                      )}
+                    </td>
+                  </tr>
+                ))}
+              </tbody>
+            </table>
+
+ )} +
+
+
+
+ ) +} diff --git a/dashboard/frontend/src/pages/Knowledge/Settings.tsx b/dashboard/frontend/src/pages/Knowledge/Settings.tsx index 582afb85..67fb0790 100644 --- a/dashboard/frontend/src/pages/Knowledge/Settings.tsx +++ b/dashboard/frontend/src/pages/Knowledge/Settings.tsx @@ -55,7 +55,7 @@ const EMBEDDER_OPTIONS: Array<{ value: 'gemini', label: 'Google Gemini', desc: '768 / 1536 / 3072 dims (MRL) · Requires a Gemini API key · generous free tier', - defaultModel: 'gemini-embedding-001', + defaultModel: 'gemini-embedding-2', }, ] diff --git a/dashboard/frontend/src/pages/Observability.tsx b/dashboard/frontend/src/pages/Observability.tsx new file mode 100644 index 00000000..2667a22d --- /dev/null +++ b/dashboard/frontend/src/pages/Observability.tsx @@ -0,0 +1,243 @@ +import { useEffect, useState } from 'react' +import { Activity, RefreshCw, ShieldCheck, Database, Plug, Clock3, AlertTriangle } from 'lucide-react' +import { api } from '../lib/api' +import { PageSkeleton } from '../components/PageStates' + +type Summary = any + +function MetricCard({ + icon: Icon, + label, + value, + detail, +}: { + icon: any + label: string + value: string + detail?: string +}) { + return ( +
+
+
+

{label}

+
{value}
+
+
+ +
+
+ {detail &&

{detail}

} +
+ ) +} + +export default function Observability() { + const [summary, setSummary] = useState(null) + const [loading, setLoading] = useState(true) + const [error, setError] = useState(null) + + const load = async () => { + setLoading(true) + setError(null) + try { + const data = await api.get('/observability/summary') + setSummary(data) + } catch { + setError('Failed to load observability summary') + } finally { + setLoading(false) + } + } + + useEffect(() => { + load() + }, []) + + if (loading && !summary) { + return ( +
+
+
+

Observability

+

Platform health, provider metrics, and runtime state.

+
+
+ +
+    )
+  }
+
+  const providerMetrics = summary?.provider_metrics?.providers || []
+  const routing = summary?.provider_config?.routing?.failover_order || []
+
+  return (
+
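+      {/* Page sections: header with refresh, error banner, metric cards, provider metrics table,
+          routing failover order, system snapshot, and recent events */}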
+
+
+

Observability

+

Platform health, provider metrics, queue state, and cache usage.

+
+ +
+ + {error && ( +
+ {error} +
+ )} + +
+ + + + +
+ +
+
+
+
+

Provider metrics

+

Success rate and latency by provider.

+
+
+ {summary?.provider_metrics?.total_events || 0} events +
+
+ +
+ + + + + + + + + + + + {providerMetrics.map((row: any) => ( + + + + + + + + ))} + {providerMetrics.length === 0 && ( + + + + )} + +
Provider
Events
Success
Latency
Last event
{row.provider_id}
{row.events}
{row.success_rate != null ? `${row.success_rate}%` : 'n/a'}
{row.avg_latency_ms != null ? `${row.avg_latency_ms} ms` : 'n/a'}
{row.last_event?.event || 'n/a'}
+ No provider metrics yet. +
+
+
+ +
+
+

Routing

+

Current failover order used by the terminal server.

+
+ {routing.map((item: string) => ( + + {item} + + ))} +
+
+ +
+

System snapshot

+
+
+ Cache + {summary?.cache?.backend || 'unknown'} +
+
+ Queue + {summary?.queue?.backend || 'unknown'} +
+
+ Generated at + {summary?.generated_at ? new Date(summary.generated_at).toLocaleString() : 'n/a'} +
+
+
+
+
+ +
+
+
+

Recent events

+

Queue and platform activity from the last snapshot.

+
+
+ + {summary?.recent_events?.length || 0} +
+
+ +
+ {(summary?.recent_events || []).slice().reverse().map((event: any, index: number) => ( +
+
+ +
+
+
+ {event.topic || 'event'} + {event.source || 'dashboard'} +
+

+ {JSON.stringify(event.payload || {}).slice(0, 220)} +

+
+ + {event.ts ? new Date(event.ts).toLocaleTimeString() : ''} + +
+ ))} + {(summary?.recent_events || []).length === 0 && ( +
+ No recent events recorded. +
+ )} +
+
+
+  )
+}
diff --git a/dashboard/frontend/src/pages/PluginDetail.tsx b/dashboard/frontend/src/pages/PluginDetail.tsx
index 01b3b993..efeb8d59 100644
--- a/dashboard/frontend/src/pages/PluginDetail.tsx
+++ b/dashboard/frontend/src/pages/PluginDetail.tsx
@@ -9,6 +9,7 @@ import {
 import { api } from '../lib/api'
 import type { Plugin } from '../components/PluginCard'
 import UpdatePreviewModal from '../components/UpdatePreviewModal'
+import PluginUninstall, { type SafeUninstallSpec } from '../components/PluginUninstall'

 interface HealthResult {
   slug: string
@@ -147,6 +148,9 @@ export default function PluginDetail() {
   // Wave 2.0 — Icon fallback state
   const [iconError, setIconError] = useState(false)

+  // B3 — Safe uninstall wizard state
+  const [showUninstallWizard, setShowUninstallWizard] = useState(false)
+
   // Wave 2.3 — MCP restart banner dismiss (persisted via localStorage)
   const mcpBannerKey = `mcp-restart-dismissed-${slug}`
   const [mcpBannerDismissed, setMcpBannerDismissed] = useState(
@@ -191,16 +195,24 @@ export default function PluginDetail() {
     }
   }

-  async function handleUninstall() {
-    if (!slug || !window.confirm(t('plugins.confirmUninstall'))) return
-    setRemoving(true)
-    try {
-      await api.delete(`/plugins/${slug}`)
-      navigate('/plugins')
-    } catch (e: unknown) {
-      setError(e instanceof Error ? e.message : t('common.unexpectedError'))
-      setRemoving(false)
+  function handleUninstall() {
+    if (!slug) return
+    // B3: If plugin declares safe_uninstall.enabled, open the wizard instead of window.confirm.
+    const manifest = (plugin as unknown as Record<string, unknown> | null)?.manifest_json as Record<string, unknown> | undefined
+    const safeUninstall = (manifest?.safe_uninstall ?? {}) as SafeUninstallSpec
+    if (safeUninstall?.enabled) {
+      setShowUninstallWizard(true)
+      return
     }
+    // Legacy path: simple confirm dialog
+    if (!window.confirm(t('plugins.confirmUninstall'))) return
+    setRemoving(true)
+    api.delete(`/plugins/${slug}`)
+      .then(() => navigate('/plugins'))
+      .catch((e: unknown) => {
+        setError(e instanceof Error ? e.message : t('common.unexpectedError'))
+        setRemoving(false)
+      })
   }

   async function handleToggle() {
@@ -425,7 +437,21 @@
     mcpItems.length > 0 ||
     integrationItems.length > 0

+  // B3: Extract safe_uninstall spec from manifest for the wizard
+  const _manifest = (plugin as unknown as Record<string, unknown> | null)?.manifest_json as Record<string, unknown> | undefined
+  const _safeUninstallSpec = (_manifest?.safe_uninstall ?? {}) as SafeUninstallSpec
+
   return (
+    <>
+      {/* B3: Safe uninstall wizard overlay */}
+      {showUninstallWizard && slug && (
+          setShowUninstallWizard(false)}
+          onUninstalled={() => navigate('/plugins')}
+        />
+      )}
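+      {/* Plugins that do not declare safe_uninstall.enabled keep the legacy
+          window.confirm path in handleUninstall above */}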
{/* Back */}
+  )
+}
diff --git a/dashboard/frontend/src/pages/SettingsSecurityTab.tsx b/dashboard/frontend/src/pages/SettingsSecurityTab.tsx
new file mode 100644
index 00000000..a25c229d
--- /dev/null
+++ b/dashboard/frontend/src/pages/SettingsSecurityTab.tsx
@@ -0,0 +1,257 @@
+import { useCallback, useEffect, useState } from 'react'
+import { AlertTriangle, Copy, KeyRound, QrCode, ShieldCheck } from 'lucide-react'
+import { useAuth } from '../context/AuthContext'
+import { api } from '../lib/api'
+
+type ToastType = 'success' | 'error' | 'info'
+
+const inp = 'w-full px-4 py-3 rounded-lg bg-[#0f1520] border border-[#1e2a3a] text-[#e2e8f0] placeholder-[#3d4f65] text-sm transition-colors duration-200 focus:outline-none focus:border-[#00FFA7]/60 focus:ring-1 focus:ring-[#00FFA7]/20'
+const lbl = 'block text-[11px] font-semibold text-[#5a6b7f] mb-1.5 tracking-[0.08em] uppercase'
+
+interface SecurityTabProps {
+  showToast: (msg: string, type?: ToastType) => void
+}
+
+export default function SecurityTab({ showToast }: SecurityTabProps) {
+  const { hasPermission, user } = useAuth()
+  const canManage = hasPermission('config', 'manage')
+  const [status, setStatus] = useState(null)
+  const [enrollment, setEnrollment] = useState(null)
+  const [verificationCode, setVerificationCode] = useState('')
+  const [password, setPassword] = useState('')
+  const [loading, setLoading] = useState(true)
+  const [saving, setSaving] = useState(false)
+
+  const load = useCallback(async () => {
+    setLoading(true)
+    try {
+      const data = await api.get('/auth/2fa/status')
+      setStatus(data)
+    } catch (error) {
+      showToast(error instanceof Error ? error.message : 'Failed to load 2FA status', 'error')
+    } finally {
+      setLoading(false)
+    }
+  }, [showToast])
+
+  useEffect(() => {
+    load()
+  }, [load])
+
+  const startSetup = async () => {
+    if (!canManage) {
+      showToast('Only admin accounts can manage 2FA', 'error')
+      return
+    }
+    setSaving(true)
+    try {
+      const data = await api.post('/auth/2fa/setup')
+      setEnrollment(data)
+      setVerificationCode('')
+      showToast('2FA enrollment started')
+      await load()
+    } catch (error) {
+      showToast(error instanceof Error ? error.message : 'Failed to start 2FA setup', 'error')
+    } finally {
+      setSaving(false)
+    }
+  }
+
+  const confirmSetup = async () => {
+    if (!canManage) {
+      showToast('Only admin accounts can manage 2FA', 'error')
+      return
+    }
+    if (!verificationCode.trim()) {
+      showToast('Enter the verification code from your authenticator app', 'error')
+      return
+    }
+    setSaving(true)
+    try {
+      await api.post('/auth/2fa/confirm', { code: verificationCode.trim() })
+      setEnrollment(null)
+      setVerificationCode('')
+      setPassword('')
+      showToast('Two-factor authentication enabled')
+      await load()
+    } catch (error) {
+      showToast(error instanceof Error ? error.message : 'Failed to confirm 2FA', 'error')
+    } finally {
+      setSaving(false)
+    }
+  }
+
+  const disableTwoFactor = async () => {
+    if (!canManage) {
+      showToast('Only admin accounts can manage 2FA', 'error')
+      return
+    }
+    if (!password.trim()) {
+      showToast('Enter your current password to disable 2FA', 'error')
+      return
+    }
+    setSaving(true)
+    try {
+      await api.post('/auth/2fa/disable', { password, totp_code: verificationCode.trim() })
+      setEnrollment(null)
+      setVerificationCode('')
+      setPassword('')
+      showToast('Two-factor authentication disabled')
+      await load()
+    } catch (error) {
+      showToast(error instanceof Error ? error.message : 'Failed to disable 2FA', 'error')
+    } finally {
+      setSaving(false)
+    }
+  }
+
+  const secret = enrollment?.secret || (status?.enrollment_pending ? 'Enrollment pending in session' : '')
+  const provisioningUri = enrollment?.otpauth_uri || ''
+
+  return (
+
+
+
+
+

+ + Two-factor authentication +

+

+ Protect admin access with a TOTP authenticator app. +

+
+
+
+ {status?.enabled ? 'Enabled' : 'Disabled'} +
+
{user?.role || 'user'}
+
+
+ + {loading ? ( +
+ {[...Array(3)].map((_, i) =>
)} +
+ ) : ( +
+
+
+ + Current status +
+

+ {status?.enabled + ? `Enabled${status?.confirmed_at ? ` since ${status.confirmed_at}` : ''}` + : '2FA is not enabled for this account.'} +

+ {status?.last_used_step ? ( +

Last accepted time-step: {status.last_used_step}

+ ) : null} +
+ + {!canManage && ( +
+ + Only admin accounts can enroll or disable 2FA. +
+ )} + + {secret && ( +
+
+ + Enrollment secret +
+
+ {secret} +
+ {provisioningUri && ( +
+ {provisioningUri} +
+ )} + +
+ )} + + {canManage && ( +
+
+ + +
+ +
+
+ + setVerificationCode(e.target.value)} + className={inp} + placeholder="123456" + inputMode="numeric" + autoComplete="one-time-code" + /> +
+
+ + setPassword(e.target.value)} + className={inp} + placeholder="••••••••" + autoComplete="current-password" + /> +
+
+ + +
+ )} +
+ )} +
+
+  )
+}
+
diff --git a/dashboard/frontend/src/pages/Users.tsx b/dashboard/frontend/src/pages/Users.tsx
index e0c8ad84..7501cf10 100644
--- a/dashboard/frontend/src/pages/Users.tsx
+++ b/dashboard/frontend/src/pages/Users.tsx
@@ -1,7 +1,7 @@
 import { useEffect, useState, useCallback } from 'react'
 import { useConfirm } from '../components/ConfirmDialog'
 import { api } from '../lib/api'
-import { Users as UsersIcon, Plus, Pencil, Trash2, X } from 'lucide-react'
+import { Users as UsersIcon, Plus, Pencil, Trash2, X, RotateCcw } from 'lucide-react'

 interface User {
   id: string
@@ -139,7 +139,7 @@ export default function UsersPage() {
   const handleDeactivate = async (u: User) => {
     const ok = await confirm({
       title: 'Desativar usuário',
-      description: `Desativar "${u.username}"?`,
+      description: `Desativar "${u.username}"? O usuário não poderá fazer login, mas seus dados serão preservados.`,
       confirmText: 'Desativar',
       variant: 'danger',
     })
@@ -147,8 +147,23 @@
     if (!ok) return
     try {
       await api.delete(`/users/${u.id}`)
       fetchUsers()
-    } catch {
-      /* ignore */
+    } catch (ex: unknown) {
+      setError(ex instanceof Error ? ex.message : 'Falha ao desativar')
+    }
+  }
+
+  const handleReactivate = async (u: User) => {
+    const ok = await confirm({
+      title: 'Reativar usuário',
+      description: `Reativar "${u.username}"? O usuário poderá fazer login novamente.`,
+      confirmText: 'Reativar',
+    })
+    if (!ok) return
+    try {
+      await api.post(`/users/${u.id}/reactivate`)
+      fetchUsers()
+    } catch (ex: unknown) {
+      setError(ex instanceof Error ? ex.message : 'Falha ao reativar')
     }
   }

@@ -172,6 +187,14 @@
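+      {/* Failures from handleDeactivate/handleReactivate above surface here
+          instead of being silently ignored as before */}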
+      {/* Global error banner */}
+      {error && !modalOpen && (
+
+          {error}
+
+
+      )}
+
+      {loading ? (
+
{[...Array(3)].map((_, i) =>
)}
@@ -229,18 +252,26 @@
-                {u.is_active && (
+                {u.is_active ? (
+                ) : (
+                )}
diff --git a/dashboard/frontend/src/pages/integrations/CustomIntegrationModal.tsx b/dashboard/frontend/src/pages/integrations/CustomIntegrationModal.tsx
new file mode 100644
index 00000000..f9efd4a1
--- /dev/null
+++ b/dashboard/frontend/src/pages/integrations/CustomIntegrationModal.tsx
@@ -0,0 +1,300 @@
+import { useEffect, useRef, useState } from 'react'
+import { Eye, EyeOff, Loader2, Plus, X } from 'lucide-react'
+import { api } from '../../lib/api'
+import { CATEGORY_OPTIONS, EMPTY_FORM, type CustomIntegrationForm, slugify } from './types'
+
+interface CustomModalProps {
+  open: boolean
+  initial?: CustomIntegrationForm & { slug: string }
+  isEdit: boolean
+  onClose: () => void
+  onSaved: (envWritten?: boolean) => void
+}
+
+export function CustomIntegrationModal({ open, initial, isEdit, onClose, onSaved }: CustomModalProps) {
+  const [form, setForm] = useState(EMPTY_FORM)
+  const [slugManual, setSlugManual] = useState(false)
+  const [errors, setErrors] = useState<Partial<Record<keyof CustomIntegrationForm, string>>>({})
+  const [saving, setSaving] = useState(false)
+  const [visibleRows, setVisibleRows] = useState<Set<number>>(new Set())
+  const overlayRef = useRef(null)
+
+  useEffect(() => {
+    if (open) {
+      const baseForm = initial
+        ? {
+            ...initial,
+            envKeys: (initial.envKeys as unknown as (string | { name: string; value: string })[]).map((k) =>
+              typeof k === 'string' ? { name: k, value: '' } : k
+            ),
+          }
+        : EMPTY_FORM
+      setForm(baseForm)
+      setSlugManual(isEdit)
+      setErrors({})
+      setVisibleRows(new Set())
+      setSaving(false)
+    }
+  }, [open, initial, isEdit])
+
+  useEffect(() => {
+    if (!open) return
+    const handler = (e: KeyboardEvent) => {
+      if (e.key === 'Escape') onClose()
+    }
+    document.addEventListener('keydown', handler)
+    return () => document.removeEventListener('keydown', handler)
+  }, [open, onClose])
+
+  const setField = <K extends keyof CustomIntegrationForm>(key: K, value: CustomIntegrationForm[K]) => {
+    setForm((prev) => {
+      const next = { ...prev, [key]: value }
+      if (key === 'displayName' && !slugManual) {
+        next.slug = slugify(value as string)
+      }
+      return next
+    })
+    setErrors((prev) => ({ ...prev, [key]: undefined }))
+  }
+
+  const validate = (): boolean => {
+    const errs: Partial<Record<keyof CustomIntegrationForm, string>> = {}
+    if (!form.displayName.trim()) errs.displayName = 'Required'
+    if (!form.slug.trim()) {
+      errs.slug = 'Required'
+    } else if (!/^[a-z0-9][a-z0-9-]*[a-z0-9]$/.test(form.slug)) {
+      errs.slug = 'Lowercase letters, digits and hyphens only'
+    }
+    setErrors(errs)
+    return Object.keys(errs).length === 0
+  }
+
+  const handleSave = async () => {
+    if (!validate()) return
+    setSaving(true)
+    try {
+      const envKeyNames = form.envKeys.map((r) => r.name).filter((n) => n.trim())
+      const envValues: Record<string, string> = {}
+      for (const row of form.envKeys) {
+        if (row.name.trim() && row.value.trim()) {
+          envValues[row.name.trim()] = row.value.trim()
+        }
+      }
+      const hasEnvValues = Object.keys(envValues).length > 0
+
+      if (isEdit && initial?.slug) {
+        await api.patch(`/integrations/custom/${initial.slug}`, {
+          displayName: form.displayName,
+          description: form.description,
+          category: form.category,
+          envKeys: envKeyNames,
+          ...(hasEnvValues ? { envValues } : {}),
+        })
+      } else {
+        await api.post('/integrations/custom', {
+          slug: form.slug,
+          displayName: form.displayName,
+          description: form.description,
+          category: form.category,
+          envKeys: envKeyNames,
+          ...(hasEnvValues ? { envValues } : {}),
+        })
+      }
+      onSaved(hasEnvValues)
+      onClose()
+    } catch (e: unknown) {
+      const msg = e instanceof Error ? e.message : 'Error saving'
+      setErrors({ displayName: msg })
+    } finally {
+      setSaving(false)
+    }
+  }
+
+  const addEnvRow = () => {
+    setField('envKeys', [...form.envKeys, { name: '', value: '' }])
+  }
+
+  const removeEnvRow = (idx: number) => {
+    setField('envKeys', form.envKeys.filter((_, i) => i !== idx))
+    setVisibleRows((prev) => {
+      const next = new Set(prev)
+      next.delete(idx)
+      return next
+    })
+  }
+
+  const updateEnvRow = (idx: number, field: 'name' | 'value', val: string) => {
+    const next = form.envKeys.map((r, i) =>
+      i === idx ? { ...r, [field]: field === 'name' ? val.toUpperCase() : val } : r
+    )
+    setField('envKeys', next)
+  }
+
+  const toggleRowVisibility = (idx: number) => {
+    setVisibleRows((prev) => {
+      const next = new Set(prev)
+      if (next.has(idx)) next.delete(idx)
+      else next.add(idx)
+      return next
+    })
+  }
+
+  if (!open) return null
+
+  return (
+
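+      {/* Overlay closes on Escape via the keydown effect above; env-key names are
+          upper-cased on input and only rows with non-empty values are sent as envValues */}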
+
+
+
+

+ {isEdit ? 'Edit Custom Integration' : 'New Custom Integration'} +

+ +
+ +
+
+
+                setField('displayName', e.target.value)}
+              placeholder="My Custom API"
+              className="w-full rounded-lg border border-[#21262d] bg-[#161b22] px-3 py-2 text-sm text-[#e6edf3] placeholder-[#3F3F46] focus:outline-none focus:border-[#00FFA7]/50 transition-colors"
+            />
+            {errors.displayName &&

{errors.displayName}

} +
+ +
+ +
+                custom-int-
+                {
+                  setSlugManual(true)
+                  setField('slug', e.target.value)
+                }}
+                disabled={isEdit}
+                placeholder="my-api"
+                className="flex-1 bg-transparent px-1 py-2 text-sm text-[#e6edf3] placeholder-[#3F3F46] focus:outline-none disabled:opacity-50"
+              />
+
+ {errors.slug &&

{errors.slug}

} +
+ +
+ +