// Reusable Content Pages for LineageLens

function ContentLayout({ title, eyebrow, accentColor, children }) {
  return (
    <div style={{ paddingTop: 120, paddingBottom: 120, minHeight: '80vh' }}>
      <div className="container">
        <div style={{ maxWidth: 800, margin: '0 auto' }}>
          {eyebrow && (
            <div className="eyebrow" style={{ '--accent': accentColor, marginBottom: 16 }}>
              <span className="dot" style={{ background: accentColor }} />
              {eyebrow.toUpperCase()}
            </div>
          )}
          <h1 style={{ fontSize: 48, marginBottom: 40, letterSpacing: '-0.03em' }}>{title}</h1>
          <div className="content-body" style={{ fontSize: 16, lineHeight: 1.7, color: 'var(--text)', whiteSpace: 'pre-line' }}>
            {children}
          </div>
        </div>
      </div>
    </div>
  );
}

// Static content registry keyed by page id (the id ContentRenderer receives).
// Each record has the shape:
//   { title: string, eyebrow: string, accentColor: string (CSS var), content: string }
// `content` is a plain-text template literal; blank lines inside it become
// paragraph breaks because ContentLayout renders with white-space: pre-line.
const PAGES = {
  // Release notes for all LineageLens components.
  changelog: {
    title: 'Changelog',
    eyebrow: 'Product Updates',
    accentColor: 'var(--solo)',
    content: `Every release of LineageLens — the VS Code extension, the npm CLI, the backend API, and the proxy — is documented here in full detail. We ship continuously and we believe in transparent versioning: every breaking change is called out explicitly, every new capture field is listed, every bug fix includes the root cause. If something in your audit trail looks different after an update, the answer is here.

Subscribe to the RSS feed or watch the GitHub repository to get notified the moment a new version ships. We follow semantic versioning strictly: patch releases are safe to auto-update, minor releases add features without breaking existing data, major releases require a migration step that is always documented with a one-command script.`
  },
  // Live uptime / incident page for all infrastructure components.
  status: {
    title: 'Status',
    eyebrow: 'System Health',
    accentColor: 'var(--team)',
    content: `Live operational status for all LineageLens infrastructure: the npm registry package availability, the Docker Hub images for the Plus and Max backend and proxy, the browser dashboard health endpoint, and the proxy capture pipeline. 

Each component shows current status (Operational / Degraded / Outage), rolling 90-day uptime percentage, and a log of every incident with root cause and resolution time. If your lineagelens start command is hanging, your dashboard isn't loading, or your proxy is dropping captures, check here first before opening a support ticket. We update this page in real time during incidents and post a full post-mortem within 48 hours of resolution.`
  },
  // Product docs landing page: install, configure, operate.
  documentation: {
    title: 'Documentation',
    eyebrow: 'Resources',
    accentColor: 'var(--solo)',
    content: `Everything you need to install, configure, and operate LineageLens — from a five-minute quickstart to a full reference of every configuration option, environment variable, and API endpoint. 

The documentation is organized by tier: Base users start with the VS Code extension install guide; Plus and Max users follow the CLI quickstart that takes you from zero to a running dashboard in under three minutes. 

Beyond setup, the docs cover the proxy architecture (how LineageLens sits transparently between your AI tools and providers without modifying any requests), the capture data model (exactly what fields are recorded for every AI interaction), the risk scoring algorithm (how CRITICAL vs HIGH vs LOW is calculated), and the compliance export format (field-by-field breakdown of the audit JSON). If you are evaluating LineageLens for a security or compliance requirement, the documentation is the right place to start your technical due diligence.`
  },
  // REST API reference (Plus / Max tiers only).
  api: {
    title: 'API Reference',
    eyebrow: 'Developer Tools',
    accentColor: 'var(--team)',
    content: `Complete reference for the LineageLens REST API, available in Plus and Max. Every endpoint is documented with its full request schema, response schema, authentication requirements, example curl commands, and error codes. 

Key endpoints include: POST /search for semantic search across captured interactions, GET /provenance/{uuid} to fetch the full record for a single AI interaction, POST /insights/dashboard to pull aggregated metrics for any time window, GET /export/audit to generate a compliance-ready export, POST /team/invite to add team members, and GET /health for liveness checks. 

The API uses JWT authentication with a short-lived access token and a longer-lived refresh token. All tokens are scoped to a single workspace. The API is designed to be queried directly from CI/CD pipelines, SIEM tools, and compliance dashboards — not just from the browser UI.`
  },
  // Proxy configuration guide for supported AI tools.
  proxy: {
    title: 'Proxy Setup',
    eyebrow: 'Integration',
    accentColor: 'var(--ent)',
    content: `LineageLens captures AI interactions by running a transparent proxy between your AI tools and their upstream providers (OpenAI, Anthropic, Azure OpenAI, and any OpenAI-compatible endpoint). 

This page explains exactly how the proxy works, how to point your tools at it, and what it does and does not intercept. The proxy runs at localhost:8788 and requires a single environment variable change in your AI tool: set the base URL to the proxy address. The proxy forwards every request unmodified to the real provider and records the prompt, response, model, latency, and token count to the LineageLens backend — with zero added latency overhead in the capture path. 

This guide covers setup for Cursor, GitHub Copilot, Continue.dev, and any tool that accepts a custom API base URL, plus how to verify the proxy is capturing correctly using the dashboard's live feed.`
  },
  // Summary for security reviewers / auditors mapping to compliance frameworks.
  compliance: {
    title: 'Compliance Brief',
    eyebrow: 'Security & Legal',
    accentColor: 'var(--ent)',
    content: `A plain-language technical summary of what LineageLens captures, how it stores data, and how it maps to common compliance frameworks — written for security reviewers, compliance officers, and legal teams evaluating LineageLens for enterprise deployment. 

Covers: the full data model of a captured AI interaction (what is stored, where, for how long, and who can access it); how LineageLens supports SOC 2 Type II evidence collection for AI-related controls; how the audit export maps to ISO 27001 Annex A controls; GDPR considerations for teams with EU developers (prompt content is stored locally in your own Postgres instance — LineageLens never has access to your data); and the security architecture of the proxy (TLS passthrough, no MITM on the provider connection, secrets stored in ~/.lineagelens/ with restricted file permissions). 

This brief is designed to be handed directly to your security team or external auditor.`
  },
  // Editorial / tutorial content.
  blog: {
    title: 'Blog',
    eyebrow: 'Thoughts & Tutorials',
    accentColor: 'var(--solo)',
    content: `Thinking out loud about AI governance, code provenance, and what it actually means to run AI responsibly in a software team. 

We write about real incidents — the kind where an AI-generated change made it to production and nobody could explain why — and what the engineering response looks like. We cover the evolving landscape of AI compliance requirements: what SOC 2 auditors are starting to ask about AI-generated code, how ISO 27001 is adapting, and what "AI Bill of Materials" means in practice. 

We write tutorials on querying the LineageLens API from your existing observability stack, building custom risk dashboards, and integrating AI provenance data into your CI/CD pipeline as a quality gate. No growth-hacking content, no fluff. Just the technical reality of governing AI in a production engineering organization.`
  },
  // Company story / mission page.
  about: {
    title: 'About',
    eyebrow: 'Company',
    accentColor: 'var(--solo)',
    content: `LineageLens exists because AI coding assistants are now writing production code, and the engineering industry has no standard way to track what they wrote, why, or what happened as a result. We built LineageLens after watching teams struggle to answer basic post-incident questions — which AI tool generated this function, which developer accepted it, which model version was running that week — questions that have clear answers in the git history for human-written code but are completely invisible for AI-generated code today. 

LineageLens is a solo-built product, which means every design decision is made by someone who has personally felt this problem. We are not a VC-funded team optimizing for growth metrics. We are building the tool we needed and making it available to teams who need it too. The product is honest about what it captures, transparent about its limitations, and priced so that a single developer can afford it on a personal card before they ever have to justify it to a finance team.`
  },
  // Security architecture overview + responsible disclosure policy.
  security: {
    title: 'Security',
    eyebrow: 'Company',
    accentColor: 'var(--team)',
    content: `Security is not a feature of LineageLens — it is the prerequisite. We are a tool that handles sensitive engineering data: prompts that may contain proprietary code, API keys accidentally pasted into a chat, architectural decisions made in a conversation with an AI. We take that responsibility seriously. 

This page covers the full security architecture: how secrets are generated and stored locally (never transmitted to LineageLens servers), how the proxy handles TLS (passthrough — we never terminate your connection to the AI provider), how JWT tokens are scoped and rotated, how the Postgres database is secured in the Plus/Max Docker deployment, and our responsible disclosure policy for security researchers who find vulnerabilities. 

We also cover what LineageLens does NOT do: we do not store your data in our cloud, we do not have access to your prompts, and we do not phone home with telemetry beyond anonymous version-check pings that can be disabled in one environment variable.`
  },
  // Privacy policy (local-first storage model, GDPR/CCPA rights).
  privacy: {
    title: 'Privacy',
    eyebrow: 'Company',
    accentColor: 'var(--ent)',
    content: `LineageLens is architecturally private by design. In Plus and Max, your captured AI interactions are stored in a Postgres database running in Docker on your own machine or your own server — LineageLens the company has zero access to that data. The proxy runs locally. The dashboard runs locally. Nothing leaves your network unless you explicitly use the compliance export and send it somewhere yourself. 

For the Base VS Code extension, captured records are stored in VS Code's local storage on the developer's machine. This privacy policy covers: what data the VS Code extension stores and where, what data the Plus/Max backend stores and where, the one category of data that does touch our servers (license key validation for paid plans, which contains only your email address and a workspace identifier), how to delete your data entirely, and your rights under GDPR and CCPA. 

If your legal team needs a Data Processing Agreement for enterprise procurement, contact us and we will turn one around within 48 hours.`
  },
  // Terms of service summary.
  terms: {
    title: 'Terms of Service',
    eyebrow: 'Company',
    accentColor: 'var(--text-muted)',
    content: `The terms of service for LineageLens — plain language where possible, legalese only where necessary. 

Key points: Base is free forever with no usage limits. Paid plans (Plus and Max) are billed monthly and can be cancelled at any time with no penalty; cancellation takes effect at the end of the current billing period and your data remains accessible until then. You own your data — LineageLens makes no claim on the prompts, responses, or provenance records captured by your installation. We do not sell your data. We do not use your captured data to train models. 

The software is provided as-is for the Base tier; Plus and Max include a commercially reasonable uptime commitment for the Docker images and backend API. Misuse of the proxy to circumvent AI provider terms of service is prohibited and will result in account termination. 

Full legal terms follow below.`
  }
};

function ContentRenderer({ pageId }) {
  const page = PAGES[pageId];
  if (!page) return <ContentLayout title="Not Found">Page not found.</ContentLayout>;
  
  return (
    <ContentLayout title={page.title} eyebrow={page.eyebrow} accentColor={page.accentColor}>
      {page.content}
    </ContentLayout>
  );
}

// Expose the components as globals — this file is loaded as a plain browser
// script (no module system), so consumers read them off `window`.
window.ContentLayout = ContentLayout;
window.ContentRenderer = ContentRenderer;
