--
-- PostgreSQL database dump
--

\restrict 5381xbeZ2vw2jRRWovquyTeURjFjgi44iJV2qUglA1SOb6qlHfJXZ5NrEeEFsYD

-- Dumped from database version 17.6 (Debian 17.6-2.pgdg13+1)
-- Dumped by pg_dump version 17.6 (Debian 17.6-2.pgdg13+1)

SET statement_timeout = 0;
SET lock_timeout = 0;
SET idle_in_transaction_session_timeout = 0;
SET transaction_timeout = 0;
SET client_encoding = 'UTF8';
SET standard_conforming_strings = on;
SELECT pg_catalog.set_config('search_path', '', false);
SET check_function_bodies = false;
SET xmloption = content;
SET client_min_messages = warning;
SET row_security = off;

SET default_tablespace = '';

SET default_table_access_method = heap;

--
-- Name: opc; Type: TABLE; Schema: public; Owner: postgres
--

CREATE TABLE public.opc (
    id uuid DEFAULT gen_random_uuid() NOT NULL,
    number character varying(20) NOT NULL,
    title character varying(500) NOT NULL,
    description text DEFAULT ''::text NOT NULL,
    type character varying(50) DEFAULT 'General'::character varying NOT NULL,
    status character varying(50) DEFAULT 'New'::character varying NOT NULL,
    priority character varying(20) DEFAULT 'Medium'::character varying NOT NULL,
    assignee character varying(200) DEFAULT ''::character varying NOT NULL,
    created_at timestamp with time zone DEFAULT now() NOT NULL,
    updated_at timestamp with time zone DEFAULT now() NOT NULL
);


ALTER TABLE public.opc OWNER TO postgres;

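
--
-- Example (illustrative sketch): list work items that are still open, newest first.
-- Uses only the columns defined on public.opc above; treating 'Done' as the closed
-- status is an assumption, since the data in this dump only contains 'New'.
--

SELECT number, title, type, status, priority, assignee, created_at
FROM public.opc
WHERE status <> 'Done'
ORDER BY created_at DESC;
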

--
-- Name: opc_artifact; Type: TABLE; Schema: public; Owner: postgres
--

CREATE TABLE public.opc_artifact (
    id uuid DEFAULT gen_random_uuid() NOT NULL,
    opc_id uuid NOT NULL,
    artifact_type character varying(50) NOT NULL,
    title character varying(500) DEFAULT ''::character varying NOT NULL,
    content text DEFAULT ''::text NOT NULL,
    created_at timestamp with time zone DEFAULT now() NOT NULL,
    updated_at timestamp with time zone DEFAULT now() NOT NULL
);


ALTER TABLE public.opc_artifact OWNER TO postgres;

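
--
-- Example (illustrative sketch): artifacts attached to each work item, joined on the
-- opc_id relationship that opc_artifact_opc_id_fkey declares later in this dump.
-- Uses only columns defined above.
--

SELECT o.number, a.artifact_type, a.title, a.updated_at
FROM public.opc_artifact a
JOIN public.opc o ON o.id = a.opc_id
ORDER BY o.number, a.updated_at DESC;
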

--
-- Name: opc_note; Type: TABLE; Schema: public; Owner: postgres
--

CREATE TABLE public.opc_note (
    id uuid DEFAULT gen_random_uuid() NOT NULL,
    opc_id uuid NOT NULL,
    author character varying(200) NOT NULL,
    content text NOT NULL,
    created_at timestamp with time zone DEFAULT now() NOT NULL
);


ALTER TABLE public.opc_note OWNER TO postgres;

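
--
-- Example (illustrative sketch): discussion notes per work item, newest first.
-- Uses only columns defined above.
--

SELECT o.number, n.author, n.content, n.created_at
FROM public.opc_note n
JOIN public.opc o ON o.id = n.opc_id
ORDER BY o.number, n.created_at DESC;
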

--
-- Name: opc_pinned_commit; Type: TABLE; Schema: public; Owner: postgres
--

CREATE TABLE public.opc_pinned_commit (
    opc_id uuid NOT NULL,
    hash character varying(40) NOT NULL,
    short_hash character varying(10) DEFAULT ''::character varying NOT NULL,
    subject character varying(1000) DEFAULT ''::character varying NOT NULL,
    author character varying(200) DEFAULT ''::character varying NOT NULL,
    pinned_at timestamp with time zone DEFAULT now() NOT NULL,
    pinned_by character varying(200) DEFAULT ''::character varying NOT NULL
);


ALTER TABLE public.opc_pinned_commit OWNER TO postgres;

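
--
-- Example (illustrative sketch): commits pinned to each work item. short_hash is the
-- abbreviated form stored alongside the full hash; only columns defined above are used.
--

SELECT o.number, p.short_hash, p.subject, p.pinned_by, p.pinned_at
FROM public.opc_pinned_commit p
JOIN public.opc o ON o.id = p.opc_id
ORDER BY p.pinned_at DESC;
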

--
-- Data for Name: opc; Type: TABLE DATA; Schema: public; Owner: postgres
--

COPY public.opc (id, number, title, description, type, status, priority, assignee, created_at, updated_at) FROM stdin;
cc52d1cc-5580-44e4-ba97-abb9244b04d3 OPC # 0001 Create Online Project Communication System Design and implement the OPC (Online Project Communication) system - the core tracking mechanism for change orders, tasks, QA, and business requirements across the Clarity platform. Includes the database schema (opc, opc_note, opc_artifact, opc_pinned_commit tables), the ControlPlane.Api OpcService and endpoints, and the full OpcPage.tsx UI with drawer, notes, artifacts, AI assist, and commit linking. Feature New High amadzarak 2026-04-25 17:41:49.187986+00 2026-04-25 17:41:49.187986+00
459000f4-da28-41d7-a6b7-525f3d51273c OPC # 0022 Git Commit Diff Viewer Implement a diff viewer drawer in the Control Plane that lets you inspect any commit's file changes inline. Accessible from the OPC Commits tab by clicking a commit hash. Uses LibGit2Sharp on the API side to read the diff and renders it in a styled drawer on the frontend. Feature New Medium amadzarak 2026-04-25 17:41:49.254562+00 2026-04-25 17:41:49.254562+00
ab2a1db3-20ac-4153-830c-360f8c7f5d80 OPC # 0023 Make OPC Landing Screen For Control Plane Set OPC as the default landing page in App.tsx (previously Deployments/Dashboard). Also fixed the title casing bug where .NET became .Net, and added data volume persistence and auto-schema migration for the controlplanedb Postgres container. Feature New Medium amadzarak 2026-04-25 17:41:49.261513+00 2026-04-25 17:41:49.261513+00
9232f83c-7c1a-45d0-88ac-708a186e2922 OPC # 0024 Environment Awareness in Control Plane Right now environment (prod/uat/fdev) is just a tag on a tenant card. Bake explicit environment context throughout the Control Plane - environment-scoped views, environment indicators on every action surface, and prevent cross-environment accidents (e.g. deploying a fdev image to prod). The deploy wizard, pipelines, and branch ladder should all be environment-aware. Feature New High amadzarak 2026-04-25 17:46:18.965319+00 2026-04-25 17:46:18.965319+00
b36c7607-7a59-43ce-9988-95b78292ef0b OPC # 0025 Infrastructure Health Dashboard Aspire provides a live health dashboard locally. Prod needs an equivalent in Control Plane: are containers running? Is Postgres accepting connections? What is the RabbitMQ queue depth? Are Vault and MinIO healthy? Build a real-time infra health view that surfaces container status, service reachability, and key metrics - replacing the need to SSH or check Docker directly in production. Feature New High amadzarak 2026-04-25 17:46:18.977691+00 2026-04-25 17:46:18.977691+00
b6a26529-c0d0-4b97-9639-fc93177fcdb1 OPC # 0026 Vault Secrets Management UI Vault runs locally via Aspire and is accessed directly in dev. In production, Control Plane should be the surface for secrets operations: rotate secrets, view active policies, grant/revoke access, and audit secret reads. Operators should never need to open the raw Vault UI. Integrates with the audit trail OPC. Feature New Medium amadzarak 2026-04-25 17:46:18.985147+00 2026-04-25 17:46:18.985147+00
6c4c8d28-28da-4869-96ea-e7886aa08250 OPC # 0027 Database Migration Gating in Deploy Wizard Currently MigrationService runs automatically on Aspire startup for local dev. In production, Control Plane should gate deployments behind a migration step - show pending migrations, require explicit approval to run them, and block the deploy if migrations fail. This replaces the fire-and-forget Aspire migration pattern with a controlled, visible prod workflow. Feature New Medium amadzarak 2026-04-25 17:46:18.991472+00 2026-04-25 17:46:18.991472+00
a10d126f-d56f-45b8-a369-9263ca5b6518 OPC # 0028 Tenant Rollback Action Aspire doesn't need rollback - you just restart. Production does. Add a rollback action per tenant that redeploys the previous known-good image. Should show the current image tag, the previous image tag, and require confirmation. Rollback event should be logged to the audit trail and optionally linked to an OPC. Feature New High amadzarak 2026-04-25 17:46:18.997405+00 2026-04-25 17:46:18.997405+00
0160ebc8-d64a-44d0-adb6-5d7ddb12ea73 OPC # 0029 Production Action Audit Trail Every significant action in Control Plane (deploy, rollback, config change, secret rotation, migration run) should be logged to an immutable audit trail - who did it, when, what changed, and optionally which OPC it was linked to. OPC already provides work-item traceability; this complements it with a low-level ops log. Store in Postgres, surface in the Control Plane UI with filtering by tenant, environment, and action type. Feature New High amadzarak 2026-04-25 17:46:19.004992+00 2026-04-25 17:46:19.004992+00
3ef08b4d-6ec6-4620-91e5-0a81da31e19b OPC # 0030 Audit.NET Configuration For a compliant application we need a robust audit logging system General New Medium amadzarak 2026-04-25 17:51:54.548141+00 2026-04-25 17:51:54.548141+00
3797e30f-0591-4aa9-a29c-fb0d875a7021 OPC # 0031 Gitea Container and Bootstrap Wizard Add Gitea as a persistent container in ControlPlane.AppHost alongside Postgres, RabbitMQ, Vault etc. Build a one-time Bootstrap Wizard screen in ControlPlane that handles the full Gitea setup: admin account creation (credentials stored in Vault), organisation creation, repository creation with develop as default branch, branch protection rules for master/uat/develop, webhook registration pointing to ControlPlane build trigger, and deploy key generation for the build pipeline. After bootstrap, ControlPlane holds the Gitea admin token in Vault and owns all further git server management. Gitea exposed at gitea.clarity.test via nginx/dnsmasq. Feature New High amadzarak 2026-04-25 17:59:58.429769+00 2026-04-25 17:59:58.429769+00
27c09a91-33f6-4a42-a3a3-f602728f9018 OPC # 0032 Git Workflow Integration in ControlPlane Integrate day-to-day git workflow into ControlPlane UI so operators never need to open Gitea directly. Includes: branch creation from an OPC (auto-named feature/OPC-XXXX-slug), PR list screen showing open PRs with status and diff viewer, branch ladder promotion wizard wired to Gitea merge API (feature -> develop -> uat -> master), release tag cutting from master auto-named from closed OPCs, and OPC card showing linked branch + PR status inline. Gitea is the engine; ControlPlane is the face. Feature New High amadzarak 2026-04-25 17:59:58.436163+00 2026-04-25 17:59:58.436163+00
e900e6ef-2c50-4309-b2ef-803dfaaae889 OPC # 0033 Platform Infrastructure Separation - ControlPlane vs Shared Platform Layer Currently ControlPlane.AppHost owns and spins up all infrastructure (Postgres, Keycloak, Vault, MinIO, Gitea, Nginx, RabbitMQ) as a single unit. This is acceptable for local dev but needs to be rethought before cloud deployment.\n\nThe correct production model is three distinct layers:\n\n1. PLATFORM INFRASTRUCTURE (pre-exists, lives independently): Shared Postgres server (keycloakdb, giteadb, clarityshareddb), Keycloak (auth for platform + shared-tier tenants), Vault (platform secrets + tenant namespaces), MinIO (object storage), Nginx (reverse proxy / wildcard gateway), Gitea (Git server, SDLC trigger).\n\n2. CONTROLPLANE (connects TO platform, does not own it): Only owns controlplanedb + RabbitMQ. ControlPlane.Api + ControlPlane.Worker connect to platform infra via ClarityInfraOptions config. Does not bootstrap platform services in production.\n\n3. TENANT INSTANCES (provisioned on demand by ControlPlane.Worker): Shared tier carves a logical slice (realm, schema, bucket, Vault path) out of Platform Infra. Dedicated tier provisions new containers on the platform network.\n\nWork items when ready: Split AppHost.cs into platform layer vs ControlPlane layer. Create a docker-compose.yml or Platform.AppHost for long-lived platform infra. Thin out ControlPlane.AppHost to only own controlplanedb + RabbitMQ. Ensure ClarityInfraOptions drives all platform URL config. Validate provisioning saga steps respect TenantTier (Shared/Isolated/Dedicated).\n\nRelated files: ControlPlane.AppHost/AppHost.cs, ControlPlane.Core/Config/ClarityInfraOptions.cs, ControlPlane.Core/Models/TenantTier.cs, ControlPlane.Worker/Steps/*.cs Tech Debt New Medium amadzarak 2026-04-25 18:50:23.462667+00 2026-04-25 18:50:23.462667+00
e8a62bdb-d4fb-4c65-8e09-a853b80f84f4 OPC # 0034 Separate ControlPlane into its own Gitea repository Currently both Clarity (the product) and ControlPlane (the ops brain) live in the same git repository: https://opc.clarity.test/Clarity/Clarity. This creates several critical problems:\n\n1. SELF-REFERENTIAL DEPLOYMENT: Gitea webhooks on the Clarity repo trigger ControlPlane CI/CD pipelines, but ControlPlane itself lives in that same repo. ControlPlane cannot manage its own deployments through the same trigger mechanism.\n\n2. MIXED GIT HISTORY: Product feature commits and platform ops commits share one log. No independent versioning or release cadence.\n\n3. DEPLOYMENT COUPLING: You cannot deploy Clarity without carrying ControlPlane code changes along for the ride and vice versa.\n\n4. CONCEPTUAL MISMATCH: ControlPlane is the thing that manages Clarity. Having it live inside Clarity is architecturally circular.\n\nTarget end state:\n- Repo 1: https://opc.clarity.test/Clarity/Clarity (product only: Clarity.AppHost, Clarity.Server, Clarity.MigrationService, Clarity.ServiceDefaults, clarity.controlplane folder, frontend)\n- Repo 2: https://opc.clarity.test/Clarity/ControlPlane (ops brain only: ControlPlane.Api, ControlPlane.Worker, ControlPlane.Core, ControlPlane.AppHost, ControlPlane.Gateway)\n\nMigration steps:\n- Create new ControlPlane repo on Gitea\n- Use git filter-repo or subtree split to extract ControlPlane history cleanly\n- Update all Gitea webhook targets\n- Bootstrap ControlPlane deployment separately (not through itself)\n- Remove ControlPlane projects from Clarity.slnx and Clarity repo\n\nRisk: Medium - git history surgery, webhook reconfiguration, potential shared config/ClientAssets path dependencies between the two solutions. Tech Debt New High amadzarak 2026-04-25 18:55:59.735921+00 2026-04-25 18:55:59.735921+00
8441c91d-a476-4bd5-b171-40dfe05e26ff OPC # 0035 Local VM Simulation for DedicatedVM Tier Testing using Vagrant + VirtualBox When implementing the DedicatedVM provisioning tier, use Vagrant + VirtualBox for local simulation before testing against a real cloud provider (Hetzner, etc.).\n\nSetup:\n- winget install Oracle.VirtualBox\n- winget install HashiCorp.Vagrant\n- Define a Vagrantfile that mirrors exactly what Pulumi would provision on a real VPS (Ubuntu 24.04, Docker installed, SSH key auth)\n\nGoal: Pulumi DedicatedVM saga step should target a Vagrant VM locally with zero code changes vs production - only the IP/host in config changes.\n\nVagrantfile should define:\n- Ubuntu 24.04\n- 2 CPUs, 2GB RAM, 20GB disk\n- Static private network IP (e.g. 192.168.56.10) for predictable Pulumi targeting\n- Docker pre-installed via shell provisioner\n- SSH key provisioned for ControlPlane.Worker to use\n\nThis gives clean slate testing: vagrant destroy && vagrant up = fresh VM in minutes.\n\nRelated: DedicatedVM tier implementation, PulumiStep in ControlPlane.Worker saga, OPC #0033 (platform infra separation) Tech Debt New Low amadzarak 2026-04-25 19:18:08.56663+00 2026-04-25 19:18:08.56663+00
72340872-e8eb-4a68-88f9-10f0f8a32c92 OPC # 0036 The Full Component Mode Options ├── Postgres → [ Shared Platform | Own Container | Bundled | Own VPS (Docker) | Own VPS (Bare Metal) ]\n├── Keycloak → [ Shared Platform | Own Container | Bundled | Own VPS (Docker) | Own VPS (Bare Metal) ]\n├── Vault → [ Shared Platform | Own Container | Bundled | Own VPS (Docker) | Own VPS (Bare Metal) ]\n└── MinIO → [ Shared Platform | Own Container | Bundled | Own VPS (Docker) | Own VPS (Bare Metal) ] General New Medium amadzarak 2026-04-25 19:23:02.142866+00 2026-04-25 19:23:02.142866+00
67029a11-dbc9-486e-9b73-0e7778cd2db6 OPC # 0037 Cherry-Pick Promotion Model - OPC-to-Commit Environment Chain Tracking Current PromotionRequest model is branch-to-branch (merge style). Need to add cherry-pick support to match GSI/TFS changeset promotion workflow.\n\nGSI workflow being replicated:\n- Each OPC has associated changesets\n- OPC page shows which environments each changeset has reached (fdev/uat/prod)\n- Promotion = cherry-pick specific commit from one environment branch to the next\n- Can track exactly how far along the chain each OPC codes has gone\n\nChanges needed:\n1. PromotionRequest.cs - add fields:\n - OpcNumber (string?) - which OPC this promotion belongs to\n - CommitSha (string?) - specific commit to cherry-pick (null = full branch merge)\n - Mode (PromotionMode enum) - CherryPick | BranchMerge\n - TargetEnvironment (string?) - fdev | uat | prod\n\n2. OPC model - add environment chain tracking:\n - Track which commits are in which environments per OPC\n - Equivalent of TFS changeset chain view on OPC page\n\n3. PromotionService.cs - implement actual cherry-pick via LibGit2Sharp\n - Cherry-pick CommitSha onto TargetEnvironment branch\n - Update OPC environment chain on success\n\n4. BranchPage.tsx - already exists as the branch ladder view, wire up cherry-pick promotions\n\n5. OpcPage.tsx - show per-OPC environment chain (fdev ✅ / uat ✅ / prod ❌)\n\nRelated files: ControlPlane.Core/Models/PromotionRequest.cs, ControlPlane.Api/Services/PromotionService.cs, ControlPlane.Api/Endpoints/PromotionEndpoints.cs, clarity.controlplane/src/pages/BranchPage.tsx, ControlPlane.Core/Models/OpcModels.cs Feature New High amadzarak 2026-04-25 20:33:26.921638+00 2026-04-25 20:33:26.921638+00
d682d570-f4da-4f25-91d8-bfd6c87983d2 OPC # 0038 Self-Contained ControlPlane Single Container / One-Click Boot ControlPlane currently requires Aspire orchestration to boot (opc-postgres, RabbitMQ, Gitea, API, Worker, UI all wired via AppHost). For cloud deployment and simplicity, explore packaging ControlPlane as a single self-contained unit that boots with one command - no Aspire required in production.\n\nOptions to explore:\n1. docker-compose.yml for ControlPlane itself (opc-postgres + rabbitmq + gitea + api + worker + UI nginx) - mirrors what Aspire does locally but as a compose file for production\n2. Single container with supervisord running all processes (heavier but truly one container)\n3. Keep Aspire for local dev, use compose for production deployment\n\nGoal: `docker compose up -d` in the ControlPlane repo = entire platform is running. No dotnet tooling required on the server.\n\nRelated: OPC #0033 (platform infra separation), OPC #0034 (repo separation) Feature New Medium amadzarak 2026-04-25 20:36:03.918009+00 2026-04-25 20:36:03.918009+00
8efcf2fb-91a7-4a0e-a6f9-a2626563a6be OPC # 0039 Extract Nginx + Dnsmasq into standalone /gateway unit Nginx and Dnsmasq should be extracted out of /infra into their own standalone compose unit (e.g. /gateway or /clarity-gateway). These services are not platform-layer concerns - they are the ingress and DNS gateway for the entire Clarity operation, sitting above all layers (OPC, client sites, internal tooling). Keeping them in /infra couples the gateway lifecycle to the platform stack lifecycle.\n\nGoals:\n- Separate failure domain: OPC or platform infra can go down without affecting client site routing\n- Cleaner deploys: Nginx rule changes for new client provisioning do not require touching /infra at all\n- Treat gateway as infrastructure-of-infrastructure - near-zero downtime, almost never redeployed\n\nScope:\n- Create /gateway/docker-compose.yml with just nginx + dnsmasq\n- Move nginx/conf.d and dnsmasq config bind mounts to /gateway\n- Remove nginx and dnsmasq from /infra/docker-compose.yml\n- Decide on config management pattern for conf.d (file-based bind mount vs API-driven)\n- Update InfraEndpoints.cs / InfraPage.tsx if gateway services need separate status tracking\n- Consider a dedicated GatewayEndpoints.cs for gateway-specific compose up/down/status Tech Debt New High amadzarak 2026-04-25 20:51:38.333162+00 2026-04-25 20:51:38.333162+00
\.


--
-- Data for Name: opc_artifact; Type: TABLE DATA; Schema: public; Owner: postgres
--

COPY public.opc_artifact (id, opc_id, artifact_type, title, content, created_at, updated_at) FROM stdin;
d3cac504-7d5d-4bf1-bbed-654727080767 e8a62bdb-d4fb-4c65-8e09-a853b80f84f4 Spec Why It's Not Just "Move Some Folders" 1. 📁 Path Dependencies (the obvious one)\nClientAssets, .gitkeep, VaultData, KeycloakConfig are all referenced via Path.GetFullPath(../...) relative to the repo root. ControlPlane in the cloud has no filesystem relationship to the Clarity repo anymore.\n2. 🐳 The Docker Socket Problem (the big one)\nClarityContainerService provisions tenant containers by talking to the local Docker daemon. It spins up clarity-net containers and writes nginx configs to a bind-mounted folder. This only works if ControlPlane.Worker is running on the same host as the Docker containers it's managing. Move ControlPlane to a cloud server and it needs to reach a remote Docker host — or you change the model entirely.\n3. 🌐 Network Topology\nRight now everything assumes clarity-net — a local Docker bridge network. Tenant containers, Nginx, Vault, Keycloak all talk to each other over it. In production, what is clarity-net? A VPS Docker network? A VPN? Kubernetes?\n4. 🔑 Secrets & Vault\nWorker reads init.json (the unseal keys) from a local file path. Cloud-hosted ControlPlane needs a different secret bootstrap strategy.\n---\nThe Core Question\nWhen ControlPlane is cloud-hosted and provisions a new tenant — where do those tenant containers actually run?\n•\tOn the same cloud server as ControlPlane? (simplest)\n•\tOn a separate dedicated host per client?\n•\tEventually Kubernetes?\nThat answer determines everything else about how the networking and provisioning model needs to change. What's your thinking there? 2026-04-25 19:01:55.475537+00 2026-04-25 19:01:55.475537+00
969b3677-4987-43df-8550-b1ae59aab013 e8a62bdb-d4fb-4c65-8e09-a853b80f84f4 BusinessRequirement The Clarity Tenant Tier Spectrum 🥉 Shared (Base)\n•\tPostgres: Shared server, CREATE DATABASE clarity_{tenant}\n•\tKeycloak: Shared instance, dedicated realm\n•\tVault: Shared instance, dedicated namespace/path\n•\tMinIO: Shared instance, dedicated bucket\n•\tApp: Clarity.Server container on shared Docker host\n•\tInfra managed by: ControlPlane carving logical slices\n🥈 Dedicated Container\n•\tPostgres: Own container, still on ControlPlane's Docker host\n•\tKeycloak: TBD — own container or still shared?\n•\tVault: TBD — own container or still shared?\n•\tMinIO: TBD\n•\tApp: Own Clarity.Server container\n•\tInfra managed by: ControlPlane spinning up containers remotely\n🥇 Dedicated VM (Docker)\n•\tPostgres: Own container but running on a provisioned VM\n•\tEntire Clarity stack: Docker containers on that VM\n•\tInfra managed by: ControlPlane needs to provision a VM first, then deploy to it\n•\tThis is where Pulumi enters — VM creation is IaC territory\n💎 Bare Metal\n•\tPostgres: Native OS install, no Docker\n•\tEntire Clarity stack: Running as OS processes/services\n•\tInfra managed by: Pulumi + something like Ansible for process management\n•\tNo containers whatsoever\n---\nWhere Pulumi Fits\nPulumi starts mattering the moment you cross the VM boundary — anything below Dedicated Container tier requires:\n1.\tProvisioning compute (VM, bare metal)\n2.\tConfiguring networking (firewall, DNS, VPN)\n3.\tInstalling runtime dependencies (Docker, Postgres, .NET runtime)\n4.\tDeploying the stack onto it\nThat's exactly what Pulumi does — it's infrastructure-as-code in actual C#, which means ControlPlane.Worker could literally call Pulumi programs as part of the provisioning saga.\n---\nThe Questions You Need to Answer Per Tier\nFor each tier, you need to define:\nQuestion\tShared\tDedicated Container\tDedicated VM\tBare Metal\nWhere does compute live?\tControlPlane host\tControlPlane host\tNew VM\tPhysical server\nWho provisions the VM?\tN/A\tN/A\tPulumi\tPulumi/manual\nKeycloak shared or own?\tShared\t?\tOwn\tOwn\nVault shared or own?\tShared\t?\tOwn\tOwn\nMinIO shared or own?\tShared\t?\tOwn\tOwn\nHow does ControlPlane reach it?\tLocal Docker\tLocal Docker\tSSH/Docker remote\tSSH\n---\nThe Dedicated Container tier has the most ambiguity right now — specifically whether Keycloak/Vault/MinIO follow Postgres into isolation or stay shared. That decision defines how complex the middle tier saga needs to be.\nWhat's your instinct on that middle tier — fully isolated containers for everything, or just Postgres + App?\n 2026-04-25 19:05:15.517737+00 2026-04-25 19:05:15.517737+00
\.


--
-- Data for Name: opc_note; Type: TABLE DATA; Schema: public; Owner: postgres
--

COPY public.opc_note (id, opc_id, author, content, created_at) FROM stdin;
7c85ac30-dfb2-47ab-9102-1230477c7dd1 27c09a91-33f6-4a42-a3a3-f602728f9018 amadzarak I migrated all ControlPlane projects to a separate .slnx called ControlPlane.slnx 2026-04-25 18:41:26.931768+00
\.


--
-- Data for Name: opc_pinned_commit; Type: TABLE DATA; Schema: public; Owner: postgres
--

COPY public.opc_pinned_commit (opc_id, hash, short_hash, subject, author, pinned_at, pinned_by) FROM stdin;
\.


--
-- Name: opc_artifact opc_artifact_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres
--

ALTER TABLE ONLY public.opc_artifact
    ADD CONSTRAINT opc_artifact_pkey PRIMARY KEY (id);


--
-- Name: opc_note opc_note_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres
--

ALTER TABLE ONLY public.opc_note
    ADD CONSTRAINT opc_note_pkey PRIMARY KEY (id);


--
-- Name: opc opc_number_key; Type: CONSTRAINT; Schema: public; Owner: postgres
--

ALTER TABLE ONLY public.opc
    ADD CONSTRAINT opc_number_key UNIQUE (number);


--
-- Name: opc_pinned_commit opc_pinned_commit_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres
--

ALTER TABLE ONLY public.opc_pinned_commit
    ADD CONSTRAINT opc_pinned_commit_pkey PRIMARY KEY (opc_id, hash);


--
-- Name: opc opc_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres
--

ALTER TABLE ONLY public.opc
    ADD CONSTRAINT opc_pkey PRIMARY KEY (id);


--
-- Name: ix_opc_artifact_opc_id; Type: INDEX; Schema: public; Owner: postgres
--

CREATE INDEX ix_opc_artifact_opc_id ON public.opc_artifact USING btree (opc_id);


--
-- Name: ix_opc_artifact_type; Type: INDEX; Schema: public; Owner: postgres
--

CREATE INDEX ix_opc_artifact_type ON public.opc_artifact USING btree (opc_id, artifact_type);


--
-- Name: ix_opc_note_opc_id; Type: INDEX; Schema: public; Owner: postgres
--

CREATE INDEX ix_opc_note_opc_id ON public.opc_note USING btree (opc_id);


--
-- Name: ix_opc_number; Type: INDEX; Schema: public; Owner: postgres
--

CREATE INDEX ix_opc_number ON public.opc USING btree (number);

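
--
-- Example (illustrative sketch): point lookup by work-item number, served by
-- ix_opc_number and the opc_number_key unique constraint above. The literal assumes
-- the 'OPC # 0033' spacing shown in the COPY data; adjust if the stored format differs.
--

SELECT id, number, title, status
FROM public.opc
WHERE number = 'OPC # 0033';
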

--
-- Name: ix_opc_pinned_commit_opc_id; Type: INDEX; Schema: public; Owner: postgres
--

CREATE INDEX ix_opc_pinned_commit_opc_id ON public.opc_pinned_commit USING btree (opc_id);


--
-- Name: opc_artifact opc_artifact_opc_id_fkey; Type: FK CONSTRAINT; Schema: public; Owner: postgres
--

ALTER TABLE ONLY public.opc_artifact
    ADD CONSTRAINT opc_artifact_opc_id_fkey FOREIGN KEY (opc_id) REFERENCES public.opc(id) ON DELETE CASCADE;


--
-- Name: opc_note opc_note_opc_id_fkey; Type: FK CONSTRAINT; Schema: public; Owner: postgres
--

ALTER TABLE ONLY public.opc_note
    ADD CONSTRAINT opc_note_opc_id_fkey FOREIGN KEY (opc_id) REFERENCES public.opc(id) ON DELETE CASCADE;


--
-- Name: opc_pinned_commit opc_pinned_commit_opc_id_fkey; Type: FK CONSTRAINT; Schema: public; Owner: postgres
--

ALTER TABLE ONLY public.opc_pinned_commit
    ADD CONSTRAINT opc_pinned_commit_opc_id_fkey FOREIGN KEY (opc_id) REFERENCES public.opc(id) ON DELETE CASCADE;

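
--
-- Example (illustrative sketch): child-row counts per work item. Because the three
-- foreign keys above are declared ON DELETE CASCADE, deleting a row from public.opc
-- also removes its rows in opc_note, opc_artifact, and opc_pinned_commit.
--

SELECT o.number,
       (SELECT count(*) FROM public.opc_note n WHERE n.opc_id = o.id) AS notes,
       (SELECT count(*) FROM public.opc_artifact a WHERE a.opc_id = o.id) AS artifacts,
       (SELECT count(*) FROM public.opc_pinned_commit p WHERE p.opc_id = o.id) AS pinned_commits
FROM public.opc o
ORDER BY o.number;
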

--
-- PostgreSQL database dump complete
--

\unrestrict 5381xbeZ2vw2jRRWovquyTeURjFjgi44iJV2qUglA1SOb6qlHfJXZ5NrEeEFsYD