OPC # 0001: Extract OPC into standalone repo

This commit is contained in:
amadzarak
2026-04-25 17:26:42 -04:00
commit 42383bdc03
170 changed files with 21365 additions and 0 deletions
+12
View File
@@ -0,0 +1,12 @@
# Keep the Docker build context lean: exclude VCS metadata, IDE state,
# build output, dependency caches, local env files, and Docker's own files.
**/.vs
**/.git
**/.idea
**/bin
**/obj
**/node_modules
**/.env
**/npm-debug.log
**/.dockerignore
**/Dockerfile*
**/docker-compose*
# Client assets are built separately and must not leak into the API image.
ClientAssets/
+366
View File
@@ -0,0 +1,366 @@
## Ignore Visual Studio temporary files, build results, and
## files generated by popular Visual Studio add-ons.
##
## Get latest from https://github.com/github/gitignore/blob/master/VisualStudio.gitignore
# User-specific files
*.rsuser
*.suo
*.user
*.userosscache
*.sln.docstates
# User-specific files (MonoDevelop/Xamarin Studio)
*.userprefs
# Mono auto generated files
mono_crash.*
# Build results
[Dd]ebug/
[Dd]ebugPublic/
[Rr]elease/
[Rr]eleases/
x64/
x86/
[Ww][Ii][Nn]32/
[Aa][Rr][Mm]/
[Aa][Rr][Mm]64/
bld/
[Bb]in/
[Oo]bj/
[Oo]ut/
[Ll]og/
[Ll]ogs/
# Visual Studio 2015/2017 cache/options directory
.vs/
# Uncomment if you have tasks that create the project's static files in wwwroot
#wwwroot/
# Visual Studio 2017 auto generated files
Generated\ Files/
# MSTest test Results
[Tt]est[Rr]esult*/
[Bb]uild[Ll]og.*
# NUnit
*.VisualState.xml
TestResult.xml
nunit-*.xml
# Build Results of an ATL Project
[Dd]ebugPS/
[Rr]eleasePS/
dlldata.c
# Benchmark Results
BenchmarkDotNet.Artifacts/
# .NET Core
project.lock.json
project.fragment.lock.json
artifacts/
# ASP.NET Scaffolding
ScaffoldingReadMe.txt
# StyleCop
StyleCopReport.xml
# Files built by Visual Studio
*_i.c
*_p.c
*_h.h
*.ilk
*.meta
*.obj
*.iobj
*.pch
*.pdb
*.ipdb
*.pgc
*.pgd
*.rsp
*.sbr
*.tlb
*.tli
*.tlh
*.tmp
*.tmp_proj
*_wpftmp.csproj
*.log
*.vspscc
*.vssscc
.builds
*.pidb
*.svclog
*.scc
# Chutzpah Test files
_Chutzpah*
# Visual C++ cache files
ipch/
*.aps
*.ncb
*.opendb
*.opensdf
*.sdf
*.cachefile
*.VC.db
*.VC.VC.opendb
# Visual Studio profiler
*.psess
*.vsp
*.vspx
*.sap
# Visual Studio Trace Files
*.e2e
# TFS 2012 Local Workspace
$tf/
# Guidance Automation Toolkit
*.gpState
# ReSharper is a .NET coding add-in
_ReSharper*/
*.[Rr]e[Ss]harper
*.DotSettings.user
# TeamCity is a build add-in
_TeamCity*
# DotCover is a Code Coverage Tool
*.dotCover
# AxoCover is a Code Coverage Tool
.axoCover/*
!.axoCover/settings.json
# Coverlet is a free, cross platform Code Coverage Tool
coverage*.json
coverage*.xml
coverage*.info
# Visual Studio code coverage results
*.coverage
*.coveragexml
# NCrunch
_NCrunch_*
.*crunch*.local.xml
nCrunchTemp_*
# MightyMoose
*.mm.*
AutoTest.Net/
# Web workbench (sass)
.sass-cache/
# Installshield output folder
[Ee]xpress/
# DocProject is a documentation generator add-in
DocProject/buildhelp/
DocProject/Help/*.HxT
DocProject/Help/*.HxC
DocProject/Help/*.hhc
DocProject/Help/*.hhk
DocProject/Help/*.hhp
DocProject/Help/Html2
DocProject/Help/html
# Click-Once directory
publish/
# Publish Web Output
*.[Pp]ublish.xml
*.azurePubxml
# Note: Comment the next line if you want to checkin your web deploy settings,
# but database connection strings (with potential passwords) will be unencrypted
*.pubxml
*.publishproj
# Microsoft Azure Web App publish settings. Comment the next line if you want to
# checkin your Azure Web App publish settings, but sensitive information contained
# in these scripts will be unencrypted
PublishScripts/
# NuGet Packages
*.nupkg
# NuGet Symbol Packages
*.snupkg
# The packages folder can be ignored because of Package Restore
**/[Pp]ackages/*
# except build/, which is used as an MSBuild target.
!**/[Pp]ackages/build/
# Uncomment if necessary however generally it will be regenerated when needed
#!**/[Pp]ackages/repositories.config
# NuGet v3's project.json files produces more ignorable files
*.nuget.props
*.nuget.targets
# Microsoft Azure Build Output
csx/
*.build.csdef
# Microsoft Azure Emulator
ecf/
rcf/
# Windows Store app package directories and files
AppPackages/
BundleArtifacts/
Package.StoreAssociation.xml
_pkginfo.txt
*.appx
*.appxbundle
*.appxupload
# Visual Studio cache files
# files ending in .cache can be ignored
*.[Cc]ache
# but keep track of directories ending in .cache
!?*.[Cc]ache/
# Others
ClientBin/
~$*
*~
*.dbmdl
*.dbproj.schemaview
*.jfm
*.pfx
*.publishsettings
orleans.codegen.cs
# Including strong name files can present a security risk
# (https://github.com/github/gitignore/pull/2483#issue-259490424)
#*.snk
# Since there are multiple workflows, uncomment next line to ignore bower_components
# (https://github.com/github/gitignore/pull/1529#issuecomment-104372622)
#bower_components/
# RIA/Silverlight projects
Generated_Code/
# Backup & report files from converting an old project file
# to a newer Visual Studio version. Backup files are not needed,
# because we have git ;-)
_UpgradeReport_Files/
Backup*/
UpgradeLog*.XML
UpgradeLog*.htm
ServiceFabricBackup/
*.rptproj.bak
# SQL Server files
*.mdf
*.ldf
*.ndf
# Business Intelligence projects
*.rdl.data
*.bim.layout
*.bim_*.settings
*.rptproj.rsuser
*- [Bb]ackup.rdl
*- [Bb]ackup ([0-9]).rdl
*- [Bb]ackup ([0-9][0-9]).rdl
# Microsoft Fakes
FakesAssemblies/
# GhostDoc plugin setting file
*.GhostDoc.xml
# Node.js Tools for Visual Studio
.ntvs_analysis.dat
node_modules/
# Visual Studio 6 build log
*.plg
# Visual Studio 6 workspace options file
*.opt
# Visual Studio 6 auto-generated workspace file (contains which files were open etc.)
*.vbw
# Visual Studio LightSwitch build output
**/*.HTMLClient/GeneratedArtifacts
**/*.DesktopClient/GeneratedArtifacts
**/*.DesktopClient/ModelManifest.xml
**/*.Server/GeneratedArtifacts
**/*.Server/ModelManifest.xml
_Pvt_Extensions
# Paket dependency manager
.paket/paket.exe
paket-files/
# FAKE - F# Make
.fake/
# CodeRush personal settings
.cr/personal
# Python Tools for Visual Studio (PTVS)
__pycache__/
*.pyc
# Cake - Uncomment if you are using it
# tools/**
# !tools/packages.config
# Tabs Studio
*.tss
# Telerik's JustMock configuration file
*.jmconfig
# BizTalk build output
*.btp.cs
*.btm.cs
*.odx.cs
*.xsd.cs
# OpenCover UI analysis results
OpenCover/
# Azure Stream Analytics local run output
ASALocalRun/
# MSBuild Binary and Structured Log
*.binlog
# NVidia Nsight GPU debugger configuration file
*.nvuser
# MFractors (Xamarin productivity tool) working folder
.mfractor/
# Local History for Visual Studio
.localhistory/
# BeatPulse healthcheck temp database
healthchecksdb
# Backup folder for Package Reference Convert tool in Visual Studio 2017
MigrationBackup/
# Ionide (cross platform F# VS Code tools) working folder
.ionide/
# Fody - auto-generated XML schema
FodyWeavers.xsd
VaultData/
ClientAssets/
@@ -0,0 +1,14 @@
using ControlPlane.Api.Services;
using ControlPlane.Core.Messages;
using MassTransit;
namespace ControlPlane.Api.Consumers;
/// <summary>
/// MassTransit consumer that bridges <see cref="ProvisioningProgressEvent"/>
/// messages from the bus into the in-process <see cref="SseEventBus"/>, which
/// fans them out to connected SSE clients.
/// </summary>
public sealed class ProvisioningProgressConsumer(SseEventBus bus) : IConsumer<ProvisioningProgressEvent>
{
    public Task Consume(ConsumeContext<ProvisioningProgressEvent> context)
    {
        // Publish is not awaited here — presumably SseEventBus.Publish is a
        // synchronous fire-and-forget hand-off; confirm in SseEventBus.
        bus.Publish(context.Message);
        return Task.CompletedTask;
    }
}
+30
View File
@@ -0,0 +1,30 @@
<Project Sdk="Microsoft.NET.Sdk.Web">
  <PropertyGroup>
    <TargetFramework>net10.0</TargetFramework>
    <ImplicitUsings>enable</ImplicitUsings>
    <Nullable>enable</Nullable>
  </PropertyGroup>
  <ItemGroup>
    <!-- No Version attributes: versions are presumably centrally managed
         (Directory.Packages.props) — confirm at the repo root. -->
    <!-- API surface + Aspire service discovery -->
    <PackageReference Include="Microsoft.AspNetCore.OpenApi" />
    <PackageReference Include="Microsoft.Extensions.ServiceDiscovery" />
    <!-- Messaging: MassTransit over RabbitMQ (Aspire-wired client) -->
    <PackageReference Include="MassTransit" />
    <PackageReference Include="MassTransit.RabbitMQ" />
    <PackageReference Include="Aspire.RabbitMQ.Client" />
    <!-- Container control, Postgres access, and local git inspection -->
    <PackageReference Include="Docker.DotNet" />
    <PackageReference Include="Npgsql" />
    <PackageReference Include="LibGit2Sharp" />
    <!-- OpenTelemetry export + instrumentation -->
    <PackageReference Include="OpenTelemetry.Exporter.OpenTelemetryProtocol" />
    <PackageReference Include="OpenTelemetry.Extensions.Hosting" />
    <PackageReference Include="OpenTelemetry.Instrumentation.AspNetCore" />
    <PackageReference Include="OpenTelemetry.Instrumentation.Http" />
    <PackageReference Include="OpenTelemetry.Instrumentation.Runtime" />
  </ItemGroup>
  <ItemGroup>
    <ProjectReference Include="..\Clarity.ServiceDefaults\Clarity.ServiceDefaults.csproj" />
    <ProjectReference Include="..\ControlPlane.Core\ControlPlane.Core.csproj" />
  </ItemGroup>
</Project>
+108
View File
@@ -0,0 +1,108 @@
-- =============================================================================
-- OPC Seed Script — seeded from TODO.md backlog
-- Run against the ControlPlane database.
-- OPC # 0001 is already live; this starts at 0002.
-- Column order: (id, number, title, description, type, status, priority,
--                assignee, created_at, updated_at)
-- NOTE(review): gen_random_uuid() is built in on PostgreSQL 13+; older servers
-- need the pgcrypto extension — confirm the target server version.
-- =============================================================================
INSERT INTO opc (id, number, title, description, type, status, priority, assignee, created_at, updated_at)
VALUES
-- ── Keycloak / Auth ───────────────────────────────────────────────────────────
(
    gen_random_uuid(),
    'OPC # 0002',
    'Fix KeycloakStep 401 on realm provisioning',
    'KeycloakStep is the current blocker in the provisioning saga. The step returns 401 when attempting to create the tenant realm. Investigate the admin-client credentials, token scope, and the endpoint URL used inside the Docker network.',
    'Bug',
    'In Progress',
    'High',
    'amadzarak',
    NOW(), NOW()
),
(
    gen_random_uuid(),
    'OPC # 0003',
    'KeycloakStep: full realm + user provisioning flow',
    'After the 401 is resolved, implement the full flow: create realm {subdomain}.clarity.io, create the admin role, create the day-zero admin user from AdminEmail, assign the admin role, and trigger execute-actions-email (verify email + set password).',
    'Feature',
    'New',
    'High',
    'amadzarak',
    NOW(), NOW()
),
(
    gen_random_uuid(),
    'OPC # 0004',
    'Keycloak JWT backchannel issuer cleanup',
    'Keycloak advertises its issuer based on the incoming request URL. When the backchannel hits http://keycloak:8080 directly it returns http://keycloak.clarity.test:8080 as the issuer, forcing layered workarounds in ValidIssuers and the rewrite handler. Clean fix: boot Keycloak with KC_HOSTNAME_URL=https://keycloak.clarity.test, verify via /.well-known/openid-configuration, then simplify ValidIssuers back to two entries. Deferred until next planned maintenance window (requires nuke to apply env var).',
    'Tech Debt',
    'New',
    'Medium',
    'amadzarak',
    NOW(), NOW()
),
-- ── VaultStep ─────────────────────────────────────────────────────────────────
(
    gen_random_uuid(),
    'OPC # 0005',
    'VaultStep: read root token and write initial secrets',
    'Read the root token from /vault/file/init.json, enable KV-v2 secrets engine at {subdomain}/, then write the initial secrets: DB connection string and Keycloak client secret.',
    'Feature',
    'New',
    'High',
    'amadzarak',
    NOW(), NOW()
),
-- ── MigrationStep ─────────────────────────────────────────────────────────────
(
    gen_random_uuid(),
    'OPC # 0006',
    'MigrationStep: run EF Core migrations per provisioning mode',
    'Wire up EF Core migrations inside MigrationStep for all three provisioning modes. Shared: run against the shared DB scoped to the tenant schema. Isolated: run against the dedicated Postgres container registered in SagaContext. Dedicated: run against the full dedicated Postgres instance.',
    'Feature',
    'New',
    'Medium',
    'amadzarak',
    NOW(), NOW()
),
-- ── HandoffStep ───────────────────────────────────────────────────────────────
(
    gen_random_uuid(),
    'OPC # 0007',
    'HandoffStep: send magic-link email and mark saga complete',
    'Send a magic-link / welcome email to AdminEmail via SMTP or SendGrid, then mark CompletedSteps.HandoffSent on the provisioning job. Blocked until SMTP is wired (currently SendRequiredActionsEmailAsync is commented out in KeycloakStep.cs).',
    'Feature',
    'New',
    'Medium',
    'amadzarak',
    NOW(), NOW()
),
-- ── Observability ─────────────────────────────────────────────────────────────
(
    gen_random_uuid(),
    'OPC # 0008',
    'Stream tenant container logs into Aspire dashboard',
    'Use the Docker SDK to tail fdev-app-clarity-* container logs and forward them to Aspire''s structured log stream. Currently these logs are only visible via docker logs on the host.',
    'Feature',
    'New',
    'Low',
    'amadzarak',
    NOW(), NOW()
),
-- ── Kubernetes (backburner) ───────────────────────────────────────────────────
(
    gen_random_uuid(),
    'OPC # 0009',
    'Kubernetes migration path evaluation',
    'Currently managing containers directly via Docker.DotNet. Evaluate k8s when: scheduling across multiple nodes is needed, rolling deploys are required, or client count exceeds single-host capacity. Options: k3s (self-hosted), AKS/EKS (cloud), or keep Docker Compose per host for mid-scale. ClarityContainerService abstraction is intentional swap Docker.DotNet for a k8s client without changing the saga.',
    'General',
    'New',
    'Low',
    'amadzarak',
    NOW(), NOW()
);
+119
View File
@@ -0,0 +1,119 @@
-- =============================================================================
-- OPC Seed Script 2 — completed work from TODO.md
-- Run against the ControlPlane database.
-- Picks up numbering at 0010 (0001–0009 covered in seed_opc.sql).
-- All rows below are status 'Done': this backfills the historical record.
-- Column order: (id, number, title, description, type, status, priority,
--                assignee, created_at, updated_at)
-- =============================================================================
INSERT INTO opc (id, number, title, description, type, status, priority, assignee, created_at, updated_at)
VALUES
-- ── Infrastructure bring-up ───────────────────────────────────────────────────
(
    gen_random_uuid(),
    'OPC # 0010',
    'Aspire AppHost wired: Vault, MinIO, RabbitMQ, Postgres, Keycloak, Worker, API, UI',
    'Full Aspire AppHost configuration completed. All infrastructure services (Vault, MinIO, RabbitMQ, Postgres, Keycloak) and application services (Worker, API, UI) are registered and wired in the AppHost project.',
    'Feature',
    'Done',
    'High',
    'amadzarak',
    NOW(), NOW()
),
(
    gen_random_uuid(),
    'OPC # 0011',
    'Fix CRLF → LF on entrypoint.sh (was breaking Vault container)',
    'entrypoint.sh had Windows-style CRLF line endings which caused the Vault container to fail on startup. Fixed by enforcing LF via .gitattributes.',
    'Bug',
    'Done',
    'High',
    'amadzarak',
    NOW(), NOW()
),
(
    gen_random_uuid(),
    'OPC # 0012',
    'Vault initialises and unseals on first run',
    'Vault container now correctly initialises (generates root token + unseal keys) and auto-unseals on first run. Init output is written to /vault/file/init.json.',
    'Feature',
    'Done',
    'High',
    'amadzarak',
    NOW(), NOW()
),
(
    gen_random_uuid(),
    'OPC # 0013',
    'Pin Keycloak bootstrap password (fix persistent container password drift)',
    'Keycloak was experiencing password drift between container restarts due to the bootstrap admin credentials not being pinned. Fixed by explicitly setting the admin password so it persists across restarts.',
    'Bug',
    'Done',
    'High',
    'amadzarak',
    NOW(), NOW()
),
(
    gen_random_uuid(),
    'OPC # 0014',
    'Fix Keycloak endpoint name: tcp → http',
    'Keycloak Aspire resource was registered with a tcp endpoint name instead of http, causing service discovery failures. Renamed to http to align with the rest of the stack.',
    'Bug',
    'Done',
    'Medium',
    'amadzarak',
    NOW(), NOW()
),
(
    gen_random_uuid(),
    'OPC # 0015',
    'Worker starts and correctly waits for all dependencies',
    'The Worker service was starting before infrastructure dependencies were healthy. Implemented proper wait/health-check logic so the Worker blocks until Postgres, Keycloak, Vault, RabbitMQ, and MinIO are all ready.',
    'Bug',
    'Done',
    'High',
    'amadzarak',
    NOW(), NOW()
),
-- ── Saga + eventing pipeline ──────────────────────────────────────────────────
(
    gen_random_uuid(),
    'OPC # 0016',
    'MassTransit saga pipeline with compensation',
    'Implemented the full MassTransit-based provisioning saga with forward steps and compensating transactions. Each step registers its rollback so a mid-saga failure cleanly tears down any already-provisioned resources.',
    'Feature',
    'Done',
    'High',
    'amadzarak',
    NOW(), NOW()
),
(
    gen_random_uuid(),
    'OPC # 0017',
    'SSE progress stream: Worker → RabbitMQ → API → browser',
    'Implemented a real-time Server-Sent Events pipeline. The Worker publishes step progress to RabbitMQ, the API consumes and streams events via SSE, and the browser receives live updates without polling.',
    'Feature',
    'Done',
    'High',
    'amadzarak',
    NOW(), NOW()
),
(
    gen_random_uuid(),
    'OPC # 0018',
    'Frontend Diagnostics tab with full stack traces from worker',
    'Added a Diagnostics tab to the frontend that displays structured error messages and full stack traces forwarded from the Worker service, making provisioning failures debuggable directly in the UI.',
    'Feature',
    'Done',
    'Medium',
    'amadzarak',
    NOW(), NOW()
),
-- ── Repo hygiene ──────────────────────────────────────────────────────────────
(
    gen_random_uuid(),
    'OPC # 0019',
    'Enforce LF line endings for *.sh and *.hcl via .gitattributes',
    'Added .gitattributes rules to enforce LF line endings for all *.sh and *.hcl files. Prevents CRLF issues from reappearing when contributors commit from Windows machines.',
    'Tech Debt',
    'Done',
    'Low',
    'amadzarak',
    NOW(), NOW()
);
+203
View File
@@ -0,0 +1,203 @@
using ControlPlane.Core.Models;
using LibGit2Sharp;
namespace ControlPlane.Api.Endpoints;
/// <summary>
/// Read-only git inspection endpoints backed by LibGit2Sharp: commit log,
/// single-commit detail (with per-file patches), local branch listing, and
/// "branch coverage" — which local branches contain a given set of commits.
/// </summary>
public static class GitEndpoints
{
    public static IEndpointRouteBuilder MapGitEndpoints(this IEndpointRouteBuilder app)
    {
        app.MapGet("/api/git/log", GetLog);
        app.MapGet("/api/git/commits/{hash}", GetCommit);
        app.MapGet("/api/git/branches", GetBranches);
        app.MapGet("/api/git/branch-coverage", GetBranchCoverage);
        return app;
    }
    // GET /api/git/log?grep=OPC+%23+0001&limit=50
    /// <summary>
    /// Lists commits reachable from every branch tip (HEAD when the repo has no
    /// branches), topologically then time sorted, optionally filtered to messages
    /// containing <paramref name="grep"/> (case-insensitive), capped at
    /// <paramref name="limit"/>.
    /// </summary>
    private static IResult GetLog(
        IConfiguration config,
        string? grep = null,
        int limit = 50)
    {
        var repoPath = ResolveRepo(config);
        if (repoPath is null)
            return Results.Problem("Could not locate a git repository. Set Git:RepoRoot in appsettings.");
        using var repo = new Repository(repoPath);
        // Walk from ALL branch tips so commits reachable only from non-HEAD
        // branches still appear in the log.
        var tips = repo.Branches
            .Where(b => b.Tip != null)
            .Select(b => (GitObject)b.Tip)
            .ToList();
        var filter = new CommitFilter
        {
            SortBy = CommitSortStrategies.Topological | CommitSortStrategies.Time,
            // IncludeReachableFrom accepts either a single object or a list,
            // hence the (object) cast for the HEAD fallback.
            IncludeReachableFrom = tips.Count > 0 ? tips : (object)repo.Head,
        };
        IEnumerable<Commit> query = repo.Commits.QueryBy(filter);
        if (!string.IsNullOrWhiteSpace(grep))
            query = query.Where(c => c.Message.Contains(grep, StringComparison.OrdinalIgnoreCase));
        var commits = query
            .Take(limit)
            .Select(c => ToGitCommit(repo, c))
            .ToList();
        return Results.Ok(commits);
    }
    // GET /api/git/commits/{hash}
    /// <summary>
    /// Returns full detail for one commit: author/date metadata plus, per changed
    /// file, its status, line counts, and unified diff text. 404 when the hash
    /// does not resolve to a commit.
    /// </summary>
    private static IResult GetCommit(string hash, IConfiguration config)
    {
        var repoPath = ResolveRepo(config);
        if (repoPath is null)
            return Results.Problem("Could not locate a git repository.");
        using var repo = new Repository(repoPath);
        var commit = repo.Lookup<Commit>(hash);
        if (commit is null) return Results.NotFound();
        // First parent only; for a root commit parentTree is null and the diff
        // is taken against the empty tree (everything shows as Added).
        var parentTree = commit.Parents.FirstOrDefault()?.Tree;
        var changes = repo.Diff.Compare<TreeChanges>(parentTree, commit.Tree);
        var patch = repo.Diff.Compare<Patch>(parentTree, commit.Tree);
        var files = changes.Select(c => new
        {
            path = c.Path,
            oldPath = c.OldPath,
            status = c.Status.ToString(),
            // Patch is indexed by the (new) path; missing entries fall back to 0 / empty.
            additions = patch[c.Path]?.LinesAdded ?? 0,
            deletions = patch[c.Path]?.LinesDeleted ?? 0,
            patch = patch[c.Path]?.Patch ?? string.Empty,
        }).ToList();
        return Results.Ok(new
        {
            hash = commit.Sha,
            shortHash = commit.Sha[..7],
            author = commit.Author.Name,
            email = commit.Author.Email,
            date = commit.Author.When.ToString("yyyy-MM-dd HH:mm:ss zzz"),
            subject = commit.MessageShort,
            body = commit.Message,
            files,
        });
    }
    // GET /api/git/branches
    /// <summary>Lists local branches with tip metadata, sorted by branch name.</summary>
    private static IResult GetBranches(IConfiguration config)
    {
        var repoPath = ResolveRepo(config);
        if (repoPath is null)
            return Results.Problem("Could not locate a git repository.");
        using var repo = new Repository(repoPath);
        var branches = repo.Branches
            .Where(b => !b.IsRemote && b.Tip != null)
            .Select(b => new
            {
                name = b.FriendlyName,
                hash = b.Tip.Sha,
                shortHash = b.Tip.Sha[..7],
                subject = b.Tip.MessageShort,
                author = b.Tip.Author.Name,
                date = b.Tip.Author.When.ToString("yyyy-MM-dd HH:mm:ss zzz"),
                isHead = b.IsCurrentRepositoryHead,
            })
            .OrderBy(b => b.name)
            .ToList();
        return Results.Ok(branches);
    }
    // GET /api/git/branch-coverage?commits=hash1,hash2,hash3
    // Returns each local branch and whether it contains ALL of the given commits.
    /// <summary>
    /// For each local branch, reports whether every commit in the comma-separated
    /// <paramref name="commits"/> list is an ancestor of the branch tip. Hashes
    /// that do not resolve are silently dropped; an empty/unresolvable list
    /// returns an empty array.
    /// </summary>
    private static IResult GetBranchCoverage(IConfiguration config, string? commits = null)
    {
        if (string.IsNullOrWhiteSpace(commits)) return Results.Ok(Array.Empty<object>());
        var hashes = commits.Split(',', StringSplitOptions.RemoveEmptyEntries | StringSplitOptions.TrimEntries);
        if (hashes.Length == 0) return Results.Ok(Array.Empty<object>());
        var repoPath = ResolveRepo(config);
        if (repoPath is null)
            return Results.Problem("Could not locate a git repository.");
        using var repo = new Repository(repoPath);
        var targetCommits = hashes
            .Select(h => repo.Lookup<Commit>(h))
            .Where(c => c is not null)
            .ToList();
        if (targetCommits.Count == 0) return Results.Ok(Array.Empty<object>());
        var result = repo.Branches
            .Where(b => !b.IsRemote && b.Tip != null)
            .Select(b =>
            {
                var contains = targetCommits.All(tc =>
                {
                    // If merge base of branch tip and target == target, then target is an ancestor
                    var mergeBase = repo.ObjectDatabase.FindMergeBase(b.Tip, tc!);
                    return mergeBase?.Sha == tc!.Sha;
                });
                return new
                {
                    branch = b.FriendlyName,
                    contains,
                    tipHash = b.Tip.Sha[..7],
                    isHead = b.IsCurrentRepositoryHead,
                };
            })
            .OrderBy(b => b.branch)
            .ToList();
        return Results.Ok(result);
    }
    // ── Helpers ───────────────────────────────────────────────────────────────
    /// Resolves the repo root: explicit config overrides, otherwise auto-discover
    /// from the running assembly directory upward via LibGit2Sharp.
    private static string? ResolveRepo(IConfiguration config)
    {
        var configured = config["Git:RepoRoot"] ?? config["Docker:RepoRoot"];
        if (!string.IsNullOrWhiteSpace(configured) && Directory.Exists(configured))
            return configured;
        // Auto-discover: walk up from the app's own directory
        var startPath = AppContext.BaseDirectory;
        var discovered = Repository.Discover(startPath);
        if (discovered is null) return null;
        // Repository.Discover returns the .git directory path; get the working dir
        using var probe = new Repository(discovered);
        return probe.Info.WorkingDirectory;
    }
    /// <summary>
    /// Maps a LibGit2Sharp commit to the lightweight <see cref="GitCommit"/> DTO.
    /// Changed-file paths are best-effort: any diff failure yields an empty list
    /// rather than failing the whole log request.
    /// </summary>
    private static GitCommit ToGitCommit(Repository repo, Commit c)
    {
        string[] files;
        try
        {
            var parentTree = c.Parents.FirstOrDefault()?.Tree;
            var changes = repo.Diff.Compare<TreeChanges>(parentTree, c.Tree);
            files = changes.Select(ch => ch.Path).ToArray();
        }
        catch { files = []; }
        return new GitCommit(
            Hash: c.Sha,
            ShortHash: c.Sha[..7],
            Author: c.Author.Name,
            Date: c.Author.When.ToString("yyyy-MM-dd HH:mm:ss zzz"),
            Subject: c.MessageShort,
            Files: files
        );
    }
}
@@ -0,0 +1,79 @@
using ControlPlane.Api.Services;
using ControlPlane.Core.Models;
namespace ControlPlane.Api.Endpoints;
/// <summary>
/// Minimal-API endpoints that proxy Gitea operations through
/// <see cref="GiteaService"/>: repo metadata, branches, pull requests, tags,
/// and webhooks. Write operations return 400 when the service yields null.
/// </summary>
public static class GiteaEndpoints
{
    public static IEndpointRouteBuilder MapGiteaEndpoints(this IEndpointRouteBuilder app)
    {
        var group = app.MapGroup("/api/gitea").WithTags("Gitea");
        // Repository metadata
        group.MapGet("/repo", GetRepo);
        // Branches
        group.MapGet("/branches", ListBranches);
        group.MapPost("/branches", CreateBranch);
        // Pull requests
        group.MapGet("/pulls", ListPulls);
        group.MapGet("/pulls/{number:long}", GetPull);
        group.MapPost("/pulls", CreatePull);
        // Tags
        group.MapGet("/tags", ListTags);
        group.MapPost("/tags", CreateTag);
        // Webhooks
        group.MapGet("/webhooks", ListWebhooks);
        group.MapPost("/webhooks", RegisterWebhook);
        return app;
    }
    /// <summary>503 when Gitea is unreachable (service returned null).</summary>
    private static async Task<IResult> GetRepo(GiteaService svc, CancellationToken ct)
    {
        var repo = await svc.GetRepoAsync(ct);
        if (repo is null) return Results.StatusCode(503);
        return Results.Ok(repo);
    }
    private static async Task<IResult> ListBranches(GiteaService svc, CancellationToken ct)
    {
        var branches = await svc.ListBranchesAsync(ct);
        return Results.Ok(branches);
    }
    private static async Task<IResult> CreateBranch(
        CreateBranchRequest req, GiteaService svc, CancellationToken ct)
    {
        var created = await svc.CreateBranchAsync(req, ct);
        if (created is null) return Results.BadRequest("Failed to create branch in Gitea.");
        return Results.Ok(created);
    }
    /// <summary>Defaults to open PRs; pass ?state= to override.</summary>
    private static async Task<IResult> ListPulls(
        GiteaService svc, string state = "open", CancellationToken ct = default)
    {
        var pulls = await svc.ListPullRequestsAsync(state, ct);
        return Results.Ok(pulls);
    }
    private static async Task<IResult> GetPull(
        long number, GiteaService svc, CancellationToken ct)
    {
        var pull = await svc.GetPullRequestAsync(number, ct);
        if (pull is null) return Results.NotFound();
        return Results.Ok(pull);
    }
    private static async Task<IResult> CreatePull(
        CreatePullRequestRequest req, GiteaService svc, CancellationToken ct)
    {
        var created = await svc.CreatePullRequestAsync(req, ct);
        if (created is null) return Results.BadRequest("Failed to create PR in Gitea.");
        return Results.Ok(created);
    }
    private static async Task<IResult> ListTags(GiteaService svc, CancellationToken ct)
    {
        var tags = await svc.ListTagsAsync(ct);
        return Results.Ok(tags);
    }
    private static async Task<IResult> CreateTag(
        CreateTagRequest req, GiteaService svc, CancellationToken ct)
    {
        var created = await svc.CreateTagAsync(req, ct);
        if (created is null) return Results.BadRequest("Failed to create tag in Gitea.");
        return Results.Ok(created);
    }
    private static async Task<IResult> ListWebhooks(GiteaService svc, CancellationToken ct)
    {
        var hooks = await svc.ListWebhooksAsync(ct);
        return Results.Ok(hooks);
    }
    private static async Task<IResult> RegisterWebhook(
        CreateWebhookRequest req, GiteaService svc, CancellationToken ct)
    {
        var registered = await svc.RegisterWebhookAsync(req, ct);
        if (registered is null) return Results.BadRequest("Failed to register webhook in Gitea.");
        return Results.Ok(registered);
    }
}
@@ -0,0 +1,75 @@
using ControlPlane.Api.Services;
using System.Text.Json;
namespace ControlPlane.Api.Endpoints;
/// <summary>
/// Endpoints for the tenant image build: last-known status, plus an SSE
/// endpoint that triggers a docker build and streams output line-by-line.
/// </summary>
public static class ImageBuildEndpoints
{
    private static readonly JsonSerializerOptions JsonOpts = new(JsonSerializerDefaults.Web);
    public static IEndpointRouteBuilder MapImageBuildEndpoints(this IEndpointRouteBuilder app)
    {
        var group = app.MapGroup("/api/image").WithTags("Image");
        group.MapGet("/status", GetStatus);
        group.MapPost("/build", TriggerBuild);
        return app;
    }
    /// <summary>Returns the last known build status without triggering a new build.</summary>
    private static async Task<IResult> GetStatus(ImageBuildService svc) =>
        Results.Ok(await svc.GetStatusAsync());
    /// <summary>
    /// Triggers a docker build and streams the output line-by-line as SSE.
    /// The build context is the repo root, which must be configured via
    /// Docker:RepoRoot in appsettings / environment.
    /// On success the stream ends with {"done":true}; on build failure it ends
    /// with an {"error":...} event so the client can tell them apart.
    /// </summary>
    private static async Task TriggerBuild(
        HttpContext ctx,
        ImageBuildService svc,
        IConfiguration config,
        CancellationToken ct)
    {
        var repoRoot = config["Docker:RepoRoot"];
        if (string.IsNullOrWhiteSpace(repoRoot) || !Directory.Exists(repoRoot))
        {
            ctx.Response.StatusCode = 400;
            await ctx.Response.WriteAsJsonAsync(new
            {
                error = "Docker:RepoRoot is not configured or does not exist.",
                hint = "Add Docker__RepoRoot to the worker environment pointing at the repo root directory.",
            }, ct);
            return;
        }
        ctx.Response.Headers.ContentType = "text/event-stream";
        ctx.Response.Headers.CacheControl = "no-cache";
        ctx.Response.Headers.Connection = "keep-alive";
        // Use a Channel so the Progress<T> callback (sync) can safely hand lines
        // to the async SSE writer without blocking the Docker build thread.
        var channel = System.Threading.Channels.Channel.CreateUnbounded<string?>(
            new System.Threading.Channels.UnboundedChannelOptions { SingleWriter = true, SingleReader = true });
        void OnLine(string line) => channel.Writer.TryWrite(line);
        // Run the build on a background thread so we can drain the channel here.
        // BUG FIX: the original awaited the ContinueWith continuation instead of
        // the build task itself, so BuildAsync faults were never observed and a
        // failed build was still reported as {"done":true}. Keep a handle to the
        // build task and observe it after the drain; the continuation only
        // closes the channel.
        var buildTask = Task.Run(() => svc.BuildAsync(repoRoot, OnLine, ct), ct);
        _ = buildTask.ContinueWith(_ => channel.Writer.TryComplete(), TaskScheduler.Default);
        await foreach (var line in channel.Reader.ReadAllAsync(ct))
        {
            var json = JsonSerializer.Serialize(new { line }, JsonOpts);
            await ctx.Response.WriteAsync($"data: {json}\n\n", ct);
            await ctx.Response.Body.FlushAsync(ct);
        }
        try
        {
            await buildTask; // surfaces build failures instead of swallowing them
        }
        catch (OperationCanceledException)
        {
            return; // request aborted / client disconnected — nothing left to report
        }
        catch (Exception ex)
        {
            // Report the failure as a terminal SSE event rather than a bare cutoff.
            var errJson = JsonSerializer.Serialize(new { error = ex.Message }, JsonOpts);
            await ctx.Response.WriteAsync($"data: {errJson}\n\n", ct);
            await ctx.Response.Body.FlushAsync(ct);
            return;
        }
        // Signal stream end
        await ctx.Response.WriteAsync("data: {\"done\":true}\n\n", ct);
        await ctx.Response.Body.FlushAsync(ct);
    }
}
@@ -0,0 +1,232 @@
using System.Diagnostics;
using System.Text.Json;
using System.Text.Json.Serialization;
namespace ControlPlane.Api.Endpoints;
public static class InfraEndpoints
{
/// <summary>
/// Registers the /api/infra endpoint group: container status snapshot,
/// per-container lifecycle actions, and SSE streams for compose up/down.
/// </summary>
public static IEndpointRouteBuilder MapInfraEndpoints(this IEndpointRouteBuilder app)
{
    var group = app.MapGroup("/api/infra").WithTags("Infrastructure");
    // Status snapshot of all known platform containers.
    group.MapGet("/status", GetStatus);
    // Lifecycle actions — one POST route per verb, all funnelled into ServiceAction.
    foreach (var action in new[] { "start", "stop", "restart" })
    {
        var verb = action;
        group.MapPost($"/{{container}}/{verb}", (string container) => ServiceAction(container, verb));
    }
    // Long-running compose operations, streamed to the client as SSE.
    group.MapGet("/compose/up/stream", ComposeUpStream);
    group.MapGet("/compose/down/stream", ComposeDownStream);
    return app;
}
// ── Known platform services ───────────────────────────────────────────────
// Allow-list of platform containers this API may inspect or control;
// ServiceAction rejects any container name not in this list.
private static readonly string[] PlatformContainers =
[
    "clarity-postgres",
    "clarity-keycloak",
    "clarity-vault",
    "clarity-minio",
    "clarity-gitea",
    "clarity-nginx",
    "clarity-dnsmasq",
];
// ── Handlers ─────────────────────────────────────────────────────────────
/// <summary>
/// GET /api/infra/status — runs `docker inspect` for every known platform
/// container and reports a normalized status, published ports, and uptime.
/// Containers that cannot be inspected are reported as "stopped"; malformed
/// inspect output is reported as "unknown" rather than failing the call.
/// </summary>
private static async Task<IResult> GetStatus()
{
    var services = new List<InfraService>();
    foreach (var container in PlatformContainers)
    {
        // {{{{json .}}}} interpolates to {{json .}}, i.e. docker's
        // --format '{{json .}}' — dump the whole inspect document as JSON.
        var (code, output) = await DockerAsync(
            $"inspect --format={{{{json .}}}} {container}");
        if (code != 0 || string.IsNullOrWhiteSpace(output))
        {
            services.Add(new InfraService(container, container, "stopped", [], null));
            continue;
        }
        try
        {
            using var doc = JsonDocument.Parse(output.Trim());
            var root = doc.RootElement;
            var state = root.GetProperty("State").GetProperty("Status").GetString() ?? "unknown";
            // State.Health only exists when the image defines a HEALTHCHECK.
            var health = root.GetProperty("State").TryGetProperty("Health", out var h)
                ? h.GetProperty("Status").GetString()
                : null;
            // Collapse docker's (state, health) pair into one status string.
            var status = (state, health) switch
            {
                ("running", "unhealthy") => "unhealthy",
                ("running", _) => "running",
                ("exited", _) => "stopped",
                _ => state
            };
            // Ports: keep only published ports; "8080/tcp" -> "8080".
            var ports = new List<string>();
            if (root.TryGetProperty("NetworkSettings", out var ns) &&
                ns.TryGetProperty("Ports", out var portsEl))
            {
                foreach (var port in portsEl.EnumerateObject())
                {
                    // A null value means the port is exposed but not bound to the host.
                    if (port.Value.ValueKind != JsonValueKind.Null)
                        ports.Add(port.Name.Split('/')[0]);
                }
            }
            // Uptime: human-readable elapsed time since StartedAt, running containers only.
            string? uptime = null;
            if (root.GetProperty("State").TryGetProperty("StartedAt", out var startedAt))
            {
                if (DateTime.TryParse(startedAt.GetString(), out var started) && state == "running")
                {
                    var elapsed = DateTime.UtcNow - started.ToUniversalTime();
                    uptime = elapsed.TotalDays >= 1
                        ? $"{(int)elapsed.TotalDays}d {elapsed.Hours}h"
                        : elapsed.TotalHours >= 1
                            ? $"{(int)elapsed.TotalHours}h {elapsed.Minutes}m"
                            : $"{elapsed.Minutes}m";
                }
            }
            // Friendly name: docker prefixes container names with '/'.
            var name = root.TryGetProperty("Name", out var n)
                ? n.GetString()?.TrimStart('/') ?? container
                : container;
            services.Add(new InfraService(name, container, status, ports, uptime));
        }
        catch
        {
            // Unexpected inspect shape — degrade to "unknown" for this container only.
            services.Add(new InfraService(container, container, "unknown", [], null));
        }
    }
    return Results.Ok(new InfraStatusResponse(services, DateTimeOffset.UtcNow));
}
/// <summary>
/// Runs `docker start|stop|restart` on a single platform container.
/// Rejects container names outside the PlatformContainers allow-list.
/// </summary>
private static async Task<IResult> ServiceAction(string container, string action)
{
    var isKnown = PlatformContainers.Contains(container);
    if (!isKnown)
        return Results.BadRequest($"Unknown platform container: {container}");
    var (exitCode, output) = await DockerAsync($"{action} {container}");
    if (exitCode == 0)
        return Results.Ok();
    return Results.Problem(output ?? "Docker command failed", statusCode: 500);
}
// GET /api/infra/compose/up/stream — `docker compose up` (pulling images only
// when missing), output streamed to the client as SSE.
private static Task ComposeUpStream(HttpContext ctx, IConfiguration config, CancellationToken ct) =>
    StreamComposeOutput(ctx, config, "up --pull missing", ct);
// GET /api/infra/compose/down/stream — `docker compose down`, streamed as SSE.
private static Task ComposeDownStream(HttpContext ctx, IConfiguration config, CancellationToken ct) =>
    StreamComposeOutput(ctx, config, "down", ct);
/// <summary>
/// Spawns `docker compose -f &lt;infra&gt;/docker-compose.yml &lt;composeArgs&gt;` and
/// streams its combined stdout/stderr to the client as SSE, followed by a
/// final line reporting the exit code.
/// </summary>
private static async Task StreamComposeOutput(
    HttpContext ctx, IConfiguration config, string composeArgs, CancellationToken ct)
{
    var infraDir = ResolveInfraPath(config);
    ctx.Response.Headers.ContentType = "text/event-stream";
    ctx.Response.Headers.CacheControl = "no-cache";
    ctx.Response.Headers.Connection = "keep-alive";
    var channel = System.Threading.Channels.Channel.CreateUnbounded<string?>(
        new System.Threading.Channels.UnboundedChannelOptions { SingleWriter = false, SingleReader = true });
    var psi = new ProcessStartInfo("docker",
        $"compose -f \"{Path.Combine(infraDir, "docker-compose.yml")}\" {composeArgs}")
    {
        RedirectStandardOutput = true,
        RedirectStandardError = true,
        UseShellExecute = false,
        CreateNoWindow = true,
        WorkingDirectory = infraDir,
    };
    // BUG FIX: the original only called proc.Dispose() on the success path, so
    // a client disconnect or a failed SSE write mid-stream leaked the Process
    // handle. `using` guarantees disposal on every exit path.
    // NOTE(review): disposal releases the handle but does not kill a still-running
    // compose process — confirm whether kill-on-disconnect is desired.
    using var proc = Process.Start(psi)!;
    // Read stdout + stderr concurrently into the channel.
    var stdoutTask = Task.Run(async () =>
    {
        while (await proc.StandardOutput.ReadLineAsync(ct) is { } line)
            channel.Writer.TryWrite(line);
    }, ct);
    var stderrTask = Task.Run(async () =>
    {
        while (await proc.StandardError.ReadLineAsync(ct) is { } line)
            channel.Writer.TryWrite(line);
    }, ct);
    // Close the channel once both pipes are drained so the reader loop ends.
    _ = Task.WhenAll(stdoutTask, stderrTask)
        .ContinueWith(_ => channel.Writer.TryComplete(), TaskScheduler.Default);
    // Stream lines to client as SSE
    await foreach (var line in channel.Reader.ReadAllAsync(ct))
    {
        if (line is null) continue;
        await ctx.Response.WriteAsync($"data: {line}\n\n", ct);
        await ctx.Response.Body.FlushAsync(ct);
    }
    await proc.WaitForExitAsync(ct);
    var exitLine = proc.ExitCode == 0 ? "data: ✔ Done." : $"data: ✖ Exited with code {proc.ExitCode}";
    await ctx.Response.WriteAsync($"{exitLine}\n\n", ct);
    await ctx.Response.Body.FlushAsync(ct);
}
// ── Helpers ───────────────────────────────────────────────────────────────
/// <summary>
/// Resolves the absolute path of the repository's "infra" directory: an explicit
/// Docker:RepoRoot setting wins; otherwise walk four levels up from the app base dir.
/// </summary>
private static string ResolveInfraPath(IConfiguration config)
{
    var configuredRoot = config["Docker:RepoRoot"];
    var repoRoot = configuredRoot
        ?? Path.GetFullPath(Path.Combine(AppContext.BaseDirectory, "..", "..", "..", ".."));
    return Path.GetFullPath(Path.Combine(repoRoot, "infra"));
}
/// <summary>Runs `docker &lt;args&gt;` with no working-directory override.</summary>
private static Task<(int Code, string? Output)> DockerAsync(string args)
{
    return RunAsync("docker", args, null);
}
/// <summary>
/// Runs `docker compose -f &lt;infraDir&gt;/docker-compose.yml &lt;args&gt;` with
/// infraDir as the working directory. Returns the task directly (no redundant
/// async/await state machine), matching DockerAsync's pass-through style.
/// </summary>
private static Task<(int Code, string? Output)> ComposeAsync(string args, string infraDir) =>
    RunAsync("docker", $"compose -f \"{Path.Combine(infraDir, "docker-compose.yml")}\" {args}", infraDir);
/// <summary>
/// Runs an external process and returns (exit code, output). Output is stdout
/// when non-blank, otherwise stderr. Returns (-1, null) if the process fails to start.
/// </summary>
private static async Task<(int Code, string? Output)> RunAsync(
    string exe, string args, string? workingDir)
{
    var psi = new ProcessStartInfo(exe, args)
    {
        RedirectStandardOutput = true,
        RedirectStandardError = true,
        UseShellExecute = false,
        CreateNoWindow = true,
    };
    if (workingDir is not null) psi.WorkingDirectory = workingDir;

    using var proc = Process.Start(psi);
    if (proc is null) return (-1, null);

    // Drain stdout and stderr concurrently: reading them one after the other can
    // deadlock when the child fills the unread pipe's buffer and blocks writing
    // while we are still waiting on the other stream.
    var outputTask = proc.StandardOutput.ReadToEndAsync();
    var errorTask = proc.StandardError.ReadToEndAsync();
    await Task.WhenAll(outputTask, errorTask);
    await proc.WaitForExitAsync();

    var output = await outputTask;
    var err = await errorTask;
    return (proc.ExitCode, string.IsNullOrWhiteSpace(output) ? err : output);
}
// ── Response models ───────────────────────────────────────────────────────
/// <summary>One platform container's observed state as reported by docker.</summary>
/// <param name="Name">Display name — the inspected "Name" with any leading '/' stripped, falling back to the container name.</param>
/// <param name="Container">Raw docker container name used for lookups and actions.</param>
/// <param name="Status">Docker status string; "unknown" when inspection failed.</param>
/// <param name="Ports">Published port mappings; empty when inspection failed.</param>
/// <param name="Uptime">Uptime text when available, otherwise null.</param>
public record InfraService(
    string Name,
    string Container,
    string Status,
    List<string> Ports,
    string? Uptime);
/// <summary>Snapshot of all platform containers plus the UTC time it was taken.</summary>
/// <param name="Services">Per-container states, one entry per platform container.</param>
/// <param name="CheckedAt">UTC timestamp of this status check.</param>
public record InfraStatusResponse(
    List<InfraService> Services,
    DateTimeOffset CheckedAt);
}
+244
View File
@@ -0,0 +1,244 @@
using ControlPlane.Api.Services;
using ControlPlane.Core.Models;
using LibGit2Sharp;
using System.Net.Http.Headers;
using System.Text;
using System.Text.Json;
namespace ControlPlane.Api.Endpoints;
/// <summary>
/// Minimal-API endpoints for OPC records: CRUD, notes, artifacts, pinned commits,
/// and an AI-assist proxy that keeps the OpenRouter API key on the server.
/// </summary>
public static class OpcEndpoints
{
    public static IEndpointRouteBuilder MapOpcEndpoints(this IEndpointRouteBuilder app)
    {
        var g = app.MapGroup("/api/opc").WithTags("OPC");
        // ── OPC CRUD ──────────────────────────────────────────────────────────
        g.MapGet   ("",             ListOpcs);
        g.MapGet   ("/next-number", GetNextNumber);
        g.MapPost  ("",             CreateOpc);
        g.MapGet   ("/{id:guid}",   GetOpc);
        g.MapPatch ("/{id:guid}",   UpdateOpc);
        g.MapDelete("/{id:guid}",   DeleteOpc);
        // ── Notes ─────────────────────────────────────────────────────────────
        g.MapGet   ("/{id:guid}/notes", ListNotes);
        g.MapPost  ("/{id:guid}/notes", AddNote);
        // ── Artifacts ─────────────────────────────────────────────────────────
        g.MapGet   ("/{id:guid}/artifacts",          ListArtifacts);
        g.MapPost  ("/{id:guid}/artifacts",          CreateArtifact);
        g.MapPatch ("/artifacts/{artifactId:guid}",  UpdateArtifact);
        g.MapDelete("/artifacts/{artifactId:guid}",  DeleteArtifact);
        // ── Pinned commits ────────────────────────────────────────────────────
        g.MapGet   ("/{id:guid}/pinned-commits",        ListPinnedCommits);
        g.MapPost  ("/{id:guid}/pinned-commits",        PinCommit);
        g.MapDelete("/{id:guid}/pinned-commits/{hash}", UnpinCommit);
        // ── AI assist (proxies to OpenRouter, key stays on server) ────────────
        g.MapPost  ("/ai-assist", AiAssist);
        return app;
    }

    // ── OPC handlers ──────────────────────────────────────────────────────────

    /// <summary>Lists OPCs, optionally filtered by type and/or status.</summary>
    private static async Task<IResult> ListOpcs(
        OpcService svc,
        string? type = null, string? status = null,
        CancellationToken ct = default)
    {
        var list = await svc.ListAsync(type, status, ct);
        return Results.Ok(list);
    }

    /// <summary>Returns the next available OPC number.</summary>
    private static async Task<IResult> GetNextNumber(
        OpcService svc, CancellationToken ct)
    {
        var number = await svc.NextNumberAsync(ct);
        return Results.Ok(new { number });
    }

    /// <summary>Creates an OPC; 201 with a Location pointing at the new record.</summary>
    private static async Task<IResult> CreateOpc(
        CreateOpcRequest req, OpcService svc, CancellationToken ct)
    {
        var opc = await svc.CreateAsync(req, ct);
        return Results.Created($"/api/opc/{opc.Id}", opc);
    }

    /// <summary>Fetches one OPC by id; 404 when missing.</summary>
    private static async Task<IResult> GetOpc(
        Guid id, OpcService svc, CancellationToken ct)
    {
        var opc = await svc.GetAsync(id, ct);
        return opc is null ? Results.NotFound() : Results.Ok(opc);
    }

    /// <summary>Applies a partial update to an OPC; 404 when missing.</summary>
    private static async Task<IResult> UpdateOpc(
        Guid id, UpdateOpcRequest req, OpcService svc, CancellationToken ct)
    {
        var opc = await svc.UpdateAsync(id, req, ct);
        return opc is null ? Results.NotFound() : Results.Ok(opc);
    }

    /// <summary>Deletes an OPC; 204 on success, 404 when missing.</summary>
    private static async Task<IResult> DeleteOpc(
        Guid id, OpcService svc, CancellationToken ct)
    {
        return await svc.DeleteAsync(id, ct) ? Results.NoContent() : Results.NotFound();
    }

    // ── Note handlers ─────────────────────────────────────────────────────────

    /// <summary>Lists the notes attached to an OPC.</summary>
    private static async Task<IResult> ListNotes(
        Guid id, OpcService svc, CancellationToken ct)
    {
        var notes = await svc.ListNotesAsync(id, ct);
        return Results.Ok(notes);
    }

    /// <summary>Appends a note to an OPC; 201 with the created note.</summary>
    private static async Task<IResult> AddNote(
        Guid id, AddNoteRequest req, OpcService svc, CancellationToken ct)
    {
        var note = await svc.AddNoteAsync(id, req, ct);
        return Results.Created($"/api/opc/{id}/notes/{note.Id}", note);
    }

    // ── Artifact handlers ─────────────────────────────────────────────────────

    /// <summary>Lists an OPC's artifacts, optionally filtered by artifact type.</summary>
    private static async Task<IResult> ListArtifacts(
        Guid id, OpcService svc,
        string? type = null, CancellationToken ct = default)
    {
        var artifacts = await svc.ListArtifactsAsync(id, type, ct);
        return Results.Ok(artifacts);
    }

    /// <summary>Creates (upserts) an artifact under an OPC; 201 with the result.</summary>
    private static async Task<IResult> CreateArtifact(
        Guid id, UpsertArtifactRequest req, OpcService svc, CancellationToken ct)
    {
        var artifact = await svc.UpsertArtifactAsync(id, req, ct);
        return Results.Created($"/api/opc/{id}/artifacts/{artifact.Id}", artifact);
    }

    /// <summary>Updates an artifact by its own id; 404 when missing.</summary>
    private static async Task<IResult> UpdateArtifact(
        Guid artifactId, UpsertArtifactRequest req, OpcService svc, CancellationToken ct)
    {
        var artifact = await svc.UpdateArtifactAsync(artifactId, req, ct);
        return artifact is null ? Results.NotFound() : Results.Ok(artifact);
    }

    /// <summary>Deletes an artifact; 204 on success, 404 when missing.</summary>
    private static async Task<IResult> DeleteArtifact(
        Guid artifactId, OpcService svc, CancellationToken ct)
    {
        return await svc.DeleteArtifactAsync(artifactId, ct)
            ? Results.NoContent()
            : Results.NotFound();
    }

    // ── Pinned commit handlers ────────────────────────────────────────────────

    /// <summary>Lists the commits pinned to an OPC.</summary>
    private static async Task<IResult> ListPinnedCommits(
        Guid id, OpcService svc, CancellationToken ct)
    {
        var commits = await svc.ListPinnedCommitsAsync(id, ct);
        return Results.Ok(commits);
    }

    /// <summary>
    /// Pins a commit to an OPC. When Docker:RepoRoot points at a local clone the
    /// hash is resolved via LibGit2Sharp (full sha, subject, author); otherwise the
    /// request's hash is stored as-is with empty metadata.
    /// NOTE(review): Docker:RepoRoot doubles as the git repository path here — confirm
    /// that is intentional for all deployments.
    /// </summary>
    private static async Task<IResult> PinCommit(
        Guid id, PinCommitRequest req, OpcService svc, IConfiguration config, CancellationToken ct)
    {
        var repoPath = config["Docker:RepoRoot"];
        string fullHash = req.Hash;
        string shortHash = req.Hash.Length >= 7 ? req.Hash[..7] : req.Hash;
        string subject = string.Empty;
        string author = string.Empty;
        if (!string.IsNullOrWhiteSpace(repoPath) && Directory.Exists(repoPath))
        {
            using var repo = new Repository(repoPath);
            var commit = repo.Lookup<Commit>(req.Hash);
            if (commit is null) return Results.NotFound("Commit not found in repository.");
            fullHash = commit.Sha;
            shortHash = commit.Sha[..7];
            subject = commit.MessageShort;
            author = commit.Author.Name;
        }
        var pinned = await svc.PinCommitAsync(id, fullHash, shortHash, subject, author, req.PinnedBy, ct);
        return pinned is null
            ? Results.NotFound()
            : Results.Created($"/api/opc/{id}/pinned-commits/{fullHash}", pinned);
    }

    /// <summary>Unpins a commit from an OPC; 204 on success, 404 when missing.</summary>
    private static async Task<IResult> UnpinCommit(
        Guid id, string hash, OpcService svc, CancellationToken ct)
    {
        return await svc.UnpinCommitAsync(id, hash, ct) ? Results.NoContent() : Results.NotFound();
    }

    // ── AI assist ─────────────────────────────────────────────────────────────

    /// <summary>
    /// Proxies a chat-completion request to OpenRouter so the API key never reaches
    /// the browser. Returns { text } with the model's plain-text reply.
    /// </summary>
    private static async Task<IResult> AiAssist(
        AiAssistRequest req,
        IConfiguration config,
        IHttpClientFactory http,
        CancellationToken ct)
    {
        var apiKey = config["OpenRouter:ApiKey"];
        if (string.IsNullOrWhiteSpace(apiKey))
            return Results.Problem("OpenRouter API key not configured. Add OpenRouter:ApiKey to appsettings.");
        var systemPrompt =
            "You are an assistant helping a software engineering team write clear, concise " +
            "OPC (Online Project Communication) content — requirements, change descriptions, " +
            "QA test paths, and specifications. Be direct, structured, and professional. " +
            "Respond with plain text only (no markdown wrapping).";
        var messages = new List<object>
        {
            new { role = "system", content = systemPrompt },
        };
        if (!string.IsNullOrWhiteSpace(req.Context))
            messages.Add(new { role = "user", content = $"Context:\n{req.Context}" });
        messages.Add(new { role = "user", content = req.Prompt });
        var body = new
        {
            model = "anthropic/claude-3.5-haiku",
            messages,
            max_tokens = 1024,
        };
        var client = http.CreateClient("openrouter");
        // Attach auth + attribution headers per request instead of mutating the
        // client's DefaultRequestHeaders: keeps the named client reusable and makes
        // the header scope explicit.
        using var request = new HttpRequestMessage(
            HttpMethod.Post, "https://openrouter.ai/api/v1/chat/completions")
        {
            Content = new StringContent(JsonSerializer.Serialize(body), Encoding.UTF8, "application/json"),
        };
        request.Headers.Authorization = new AuthenticationHeaderValue("Bearer", apiKey);
        request.Headers.Add("HTTP-Referer", "https://controlplane.clarity.internal");
        request.Headers.Add("X-Title", "Clarity ControlPlane OPC");
        using var response = await client.SendAsync(request, ct);
        if (!response.IsSuccessStatusCode)
        {
            var err = await response.Content.ReadAsStringAsync(ct);
            return Results.Problem($"OpenRouter error {response.StatusCode}: {err}");
        }
        var json = await response.Content.ReadAsStringAsync(ct);
        using var doc = JsonDocument.Parse(json);
        var text = doc.RootElement
            .GetProperty("choices")[0]
            .GetProperty("message")
            .GetProperty("content")
            .GetString() ?? string.Empty;
        return Results.Ok(new { text });
    }
}
@@ -0,0 +1,65 @@
using ControlPlane.Api.Services;
using ControlPlane.Core.Services;
using System.Text.Json;
namespace ControlPlane.Api.Endpoints;
/// <summary>
/// Endpoints for the project build monitor: list known projects, fetch build
/// history, and trigger a build whose log output is streamed back as SSE.
/// </summary>
public static class ProjectBuildEndpoints
{
    // Web-style JSON with enums serialized as strings, used for SSE payloads.
    private static readonly JsonSerializerOptions JsonOpts = new(JsonSerializerDefaults.Web)
    {
        Converters = { new System.Text.Json.Serialization.JsonStringEnumConverter() },
    };

    public static IEndpointRouteBuilder MapProjectBuildEndpoints(this IEndpointRouteBuilder app)
    {
        var group = app.MapGroup("/api/builds").WithTags("Builds");
        group.MapGet("/projects", GetProjects);
        group.MapGet("/history", GetHistory);
        group.MapPost("/{projectName}", TriggerProjectBuild);
        return app;
    }

    /// <summary>Returns the list of known projects the build monitor can track.</summary>
    private static IResult GetProjects(ProjectBuildService svc) =>
        Results.Ok(svc.GetProjects());

    /// <summary>Returns the persisted build history.</summary>
    private static async Task<IResult> GetHistory(BuildHistoryService history) =>
        Results.Ok(await history.GetBuildsAsync());

    /// <summary>
    /// Triggers a build for a named project and streams SSE output.
    /// projectName must match one of the names returned by GET /api/builds/projects.
    /// </summary>
    private static async Task TriggerProjectBuild(
        string projectName,
        HttpContext ctx,
        ProjectBuildService svc,
        CancellationToken ct)
    {
        if (string.IsNullOrWhiteSpace(svc.RepoRoot))
        {
            ctx.Response.StatusCode = 503;
            await ctx.Response.WriteAsJsonAsync(
                new { error = "Docker:RepoRoot is not configured on the server." }, ct);
            return;
        }
        ctx.Response.Headers.ContentType = "text/event-stream";
        ctx.Response.Headers.CacheControl = "no-cache";
        ctx.Response.Headers.Connection = "keep-alive";

        async Task Send(object payload)
        {
            var json = JsonSerializer.Serialize(payload, JsonOpts);
            await ctx.Response.WriteAsync($"data: {json}\n\n", ct);
            await ctx.Response.Body.FlushAsync(ct);
        }

        // The build callback fires on worker threads; the previous
        // Send(...).GetAwaiter().GetResult() blocked those threads on response
        // writes (sync-over-async) and allowed interleaved writes. Queue lines
        // into a channel and drain it on the request thread instead — the same
        // pattern PromotionEndpoints uses.
        var channel = System.Threading.Channels.Channel.CreateUnbounded<string>(
            new System.Threading.Channels.UnboundedChannelOptions { SingleWriter = false, SingleReader = true });
        void OnLine(string line) => channel.Writer.TryWrite(line);

        var buildTask = Task.Run(() => svc.BuildProjectAsync(projectName, OnLine, ct), ct);
        _ = buildTask.ContinueWith(t => channel.Writer.TryComplete(t.Exception), TaskScheduler.Default);

        await foreach (var line in channel.Reader.ReadAllAsync(ct))
            await Send(new { line });

        var record = await buildTask;
        await Send(new { done = true, build = record });
    }
}
@@ -0,0 +1,73 @@
using ControlPlane.Api.Services;
using ControlPlane.Core.Models;
using System.Text.Json;
namespace ControlPlane.Api.Endpoints;
/// <summary>
/// Endpoints for branch-ladder promotions: ladder status, history, and a
/// streaming promote operation (SSE log lines, then a final {done, promotion}).
/// </summary>
public static class PromotionEndpoints
{
    // Web-style (camelCase) JSON for SSE payloads.
    private static readonly JsonSerializerOptions JsonOpts = new(JsonSerializerDefaults.Web);

    public static IEndpointRouteBuilder MapPromotionEndpoints(this IEndpointRouteBuilder app)
    {
        var g = app.MapGroup("/api/promotions").WithTags("Promotions");
        // GET /api/promotions/ladder — branch status for all 4 ladder branches
        g.MapGet("/ladder", async (PromotionService svc, CancellationToken ct) =>
            Results.Ok(await svc.GetLadderStatusAsync(ct)));
        // GET /api/promotions/history
        g.MapGet("/history", async (PromotionService svc) =>
            Results.Ok(await svc.GetHistoryAsync()));
        // POST /api/promotions/promote — body: { from, to, requestedBy, note }
        // Streams SSE log lines then sends {done, promotion} when complete
        g.MapPost("/promote", async (
            HttpContext ctx,
            PromotionService svc,
            PromoteRequest req,
            CancellationToken ct) =>
        {
            // Validate ladder step: only adjacent hops (index i → i+1) are allowed.
            var ladder = PromotionService.Ladder;
            var fi = Array.IndexOf(ladder, req.From);
            var ti = Array.IndexOf(ladder, req.To);
            if (fi < 0 || ti < 0 || ti != fi + 1)
            {
                ctx.Response.StatusCode = 400;
                await ctx.Response.WriteAsJsonAsync(
                    new { error = $"Invalid promotion step: {req.From} → {req.To}. Must be adjacent in ladder." }, ct);
                return;
            }
            ctx.Response.Headers.ContentType = "text/event-stream";
            ctx.Response.Headers.CacheControl = "no-cache";
            ctx.Response.Headers.Connection = "keep-alive";
            var channel = System.Threading.Channels.Channel.CreateUnbounded<string?>(
                new System.Threading.Channels.UnboundedChannelOptions { SingleWriter = true, SingleReader = true });
            void OnLine(string line) => channel.Writer.TryWrite(line);
            // BUG FIX: keep a handle on the promotion task itself. The previous code
            // awaited the ContinueWith continuation, whose result is TryComplete's
            // bool — so the final SSE payload carried `promotion: true` instead of
            // the promotion record.
            var promoteTask = Task.Run(() =>
                svc.PromoteAsync(req.From, req.To, req.RequestedBy ?? "system", req.Note, OnLine, ct), ct);
            _ = promoteTask.ContinueWith(t => channel.Writer.TryComplete(t.Exception), TaskScheduler.Default);
            await foreach (var line in channel.Reader.ReadAllAsync(ct))
            {
                var json = JsonSerializer.Serialize(new { line }, JsonOpts);
                await ctx.Response.WriteAsync($"data: {json}\n\n", ct);
                await ctx.Response.Body.FlushAsync(ct);
            }
            var promotion = await promoteTask;
            var doneJson = JsonSerializer.Serialize(new { done = true, promotion }, JsonOpts);
            await ctx.Response.WriteAsync($"data: {doneJson}\n\n", ct);
            await ctx.Response.Body.FlushAsync(ct);
        });
        return app;
    }
}
/// <summary>Request body for POST /api/promotions/promote: an adjacent ladder hop From → To, with optional requester and note.</summary>
public record PromoteRequest(string From, string To, string? RequestedBy, string? Note);
@@ -0,0 +1,106 @@
using ControlPlane.Api.Services;
using ControlPlane.Core.Messages;
using ControlPlane.Core.Models;
using ControlPlane.Core.Services;
using MassTransit;
using System.Text.Json;
namespace ControlPlane.Api.Endpoints;
/// <summary>
/// Endpoints for queueing client-provisioning jobs, polling job status, and
/// streaming per-job progress events as SSE; also exposes the tenant list.
/// </summary>
public static class ProvisioningEndpoints
{
    // Web-style (camelCase) options for serializing SSE event payloads.
    private static readonly JsonSerializerOptions JsonOpts = new(JsonSerializerDefaults.Web);

    /// <summary>Maps /api/provision (queue, status, stream) and GET /api/tenants.</summary>
    public static IEndpointRouteBuilder MapProvisioningEndpoints(this IEndpointRouteBuilder app)
    {
        var group = app.MapGroup("/api/provision").WithTags("Provisioning");
        group.MapPost("/", QueueProvisioningJob);
        group.MapGet("/{id:guid}", GetJobStatus);
        group.MapGet("/{id:guid}/stream", StreamJobEvents);
        app.MapGet("/api/tenants", GetTenants).WithTags("Tenants");
        return app;
    }

    /// <summary>
    /// Records a new Pending job in the in-memory store, publishes a
    /// ProvisionClientCommand for the worker, and returns 202 with the job id.
    /// NOTE(review): `jobs` is a shared plain Dictionary written here and read by
    /// other handlers (and presumably by bus consumers) without synchronization —
    /// consider ConcurrentDictionary; confirm against the consumer code.
    /// </summary>
    private static async Task<IResult> QueueProvisioningJob(
        ProvisioningRequest request,
        Dictionary<Guid, ProvisioningJob> jobs,
        IPublishEndpoint bus)
    {
        var job = new ProvisioningJob
        {
            ClientName = request.ClientName,
            StateCode = request.StateCode.ToUpperInvariant(), // normalize to upper-case
            Subdomain = request.Subdomain,
            AdminEmail = request.AdminEmail,
            SiteCode = request.SiteCode,
            Environment = request.Environment,
            Tier = request.Tier,
            Status = ProvisioningStatus.Pending
        };
        jobs[job.Id] = job;
        await bus.Publish(new ProvisionClientCommand
        {
            JobId = job.Id,
            ClientName = job.ClientName,
            StateCode = job.StateCode,
            Subdomain = job.Subdomain,
            AdminEmail = job.AdminEmail,
            SiteCode = job.SiteCode,
            Environment = job.Environment,
            Tier = job.Tier
        });
        return Results.Accepted($"/api/provision/{job.Id}", new { job.Id, job.Status });
    }

    /// <summary>Returns the stored job, or 404 if the id is unknown.</summary>
    private static IResult GetJobStatus(Guid id, Dictionary<Guid, ProvisioningJob> jobs) =>
        jobs.TryGetValue(id, out var job) ? Results.Ok(job) : Results.NotFound();

    /// <summary>Returns every tenant known to the registry.</summary>
    private static IResult GetTenants(TenantRegistryService registry) =>
        Results.Ok(registry.GetAll());

    /// <summary>
    /// Streams a job's progress events as SSE until a terminal event
    /// ("job_complete" / "job_failed") arrives or the client disconnects.
    /// </summary>
    private static async Task StreamJobEvents(
        Guid id,
        SseEventBus bus,
        Dictionary<Guid, ProvisioningJob> jobs,
        HttpContext ctx,
        CancellationToken cancellationToken)
    {
        if (!jobs.ContainsKey(id))
        {
            ctx.Response.StatusCode = 404;
            return;
        }
        ctx.Response.Headers.ContentType = "text/event-stream";
        ctx.Response.Headers.CacheControl = "no-cache";
        ctx.Response.Headers.Connection = "keep-alive";
        var channel = bus.Subscribe(id);
        try
        {
            await foreach (var evt in channel.Reader.ReadAllAsync(cancellationToken))
            {
                var json = JsonSerializer.Serialize(evt, JsonOpts);
                await ctx.Response.WriteAsync($"data: {json}\n\n", cancellationToken);
                await ctx.Response.Body.FlushAsync(cancellationToken);
                if (evt.Type is "job_complete" or "job_failed") break;
            }
        }
        catch (OperationCanceledException)
        {
            // Client disconnected (e.g. browser refresh) — not an error.
        }
        finally
        {
            // Always detach so the event bus does not leak per-job channels.
            bus.Unsubscribe(id, channel);
        }
    }
}
@@ -0,0 +1,62 @@
using ControlPlane.Api.Services;
using ControlPlane.Core.Services;
using System.Text.Json;
namespace ControlPlane.Api.Endpoints;
/// <summary>
/// Endpoints for triggering environment releases and fetching release history.
/// Release output is streamed back to the caller as SSE.
/// </summary>
public static class ReleaseEndpoints
{
    // Web-style JSON with enums serialized as strings, used for SSE payloads.
    private static readonly JsonSerializerOptions JsonOpts = new(JsonSerializerDefaults.Web)
    {
        Converters = { new System.Text.Json.Serialization.JsonStringEnumConverter() },
    };

    public static IEndpointRouteBuilder MapReleaseEndpoints(this IEndpointRouteBuilder app)
    {
        var group = app.MapGroup("/api/release").WithTags("Release");
        group.MapGet("/history", GetHistory);
        group.MapPost("/{env}", TriggerRelease);
        return app;
    }

    /// <summary>Returns the persisted release history.</summary>
    private static async Task<IResult> GetHistory(BuildHistoryService history) =>
        Results.Ok(await history.GetReleasesAsync());

    /// <summary>
    /// Triggers a rolling redeploy of all managed containers in the target env.
    /// Streams SSE lines until release is complete.
    /// Valid env values: fdev | uat | prod | all
    /// </summary>
    private static async Task TriggerRelease(
        string env,
        HttpContext ctx,
        ReleaseService releases,
        CancellationToken ct)
    {
        var valid = new[] { "fdev", "uat", "prod", "all" };
        if (!valid.Contains(env, StringComparer.OrdinalIgnoreCase))
        {
            ctx.Response.StatusCode = 400;
            await ctx.Response.WriteAsJsonAsync(
                new { error = $"Invalid environment '{env}'. Valid: fdev, uat, prod, all." }, ct);
            return;
        }
        ctx.Response.Headers.ContentType = "text/event-stream";
        ctx.Response.Headers.CacheControl = "no-cache";
        ctx.Response.Headers.Connection = "keep-alive";

        async Task Send(object payload)
        {
            var json = JsonSerializer.Serialize(payload, JsonOpts);
            await ctx.Response.WriteAsync($"data: {json}\n\n", ct);
            await ctx.Response.Body.FlushAsync(ct);
        }

        // The release callback fires on worker threads; the previous
        // Send(...).GetAwaiter().GetResult() blocked those threads on response
        // writes (sync-over-async) and allowed interleaved writes. Queue lines
        // into a channel and drain it on the request thread instead — the same
        // pattern PromotionEndpoints uses.
        var channel = System.Threading.Channels.Channel.CreateUnbounded<string>(
            new System.Threading.Channels.UnboundedChannelOptions { SingleWriter = false, SingleReader = true });
        void OnLine(string line) => channel.Writer.TryWrite(line);

        var releaseTask = Task.Run(() => releases.ReleaseAsync(env, OnLine, ct), ct);
        _ = releaseTask.ContinueWith(t => channel.Writer.TryComplete(t.Exception), TaskScheduler.Default);

        await foreach (var line in channel.Reader.ReadAllAsync(ct))
            await Send(new { line });

        var record = await releaseTask;
        await Send(new { done = true, release = record });
    }
}
@@ -0,0 +1,108 @@
using Docker.DotNet;
using Docker.DotNet.Models;
using ControlPlane.Core.Services;
namespace ControlPlane.Api.Endpoints;
/// <summary>
/// Streams a tenant container's docker logs (last 200 lines, then live follow)
/// to the client as SSE, demultiplexing stdout/stderr from the Docker API.
/// </summary>
public static class TenantLogEndpoints
{
    public static IEndpointRouteBuilder MapTenantLogEndpoints(this IEndpointRouteBuilder app)
    {
        app.MapGet("/api/tenants/{subdomain}/logs", StreamTenantLogs).WithTags("Tenants");
        return app;
    }

    /// <summary>
    /// Follows the tenant's container logs and relays each non-blank line as an SSE
    /// event. Responds 404 when the tenant or its container name is unknown.
    /// </summary>
    private static async Task StreamTenantLogs(
        string subdomain,
        IConfiguration config,
        TenantRegistryService registry,
        HttpContext ctx,
        CancellationToken cancellationToken)
    {
        var tenant = registry.GetAll().FirstOrDefault(t => t.Subdomain == subdomain);
        if (tenant is null)
        {
            ctx.Response.StatusCode = 404;
            return;
        }
        var containerName = tenant.ContainerName;
        if (string.IsNullOrWhiteSpace(containerName))
        {
            ctx.Response.StatusCode = 404;
            return;
        }
        ctx.Response.Headers.ContentType = "text/event-stream";
        ctx.Response.Headers.CacheControl = "no-cache";
        ctx.Response.Headers.Connection = "keep-alive";

        var socketUri = config["Docker:Socket"] ?? "npipe://./pipe/docker_engine";
        using var docker = new DockerClientConfiguration(new Uri(socketUri)).CreateClient();
        var logParams = new ContainerLogsParameters
        {
            ShowStdout = true,
            ShowStderr = true,
            Follow = true,
            Tail = "200",
            Timestamps = true,
        };
        try
        {
            using var stream = await docker.Containers.GetContainerLogsAsync(
                containerName, tty: false, logParams, cancellationToken);
            // With Follow=true the multiplexed stream never completes on its own, so
            // demultiplex stdout/stderr into pipes and read them line by line.
            // (The original also allocated two MemoryStream buffers here that were
            // never used — removed as dead code.)
            var stdoutPipe = new System.IO.Pipelines.Pipe();
            var stderrPipe = new System.IO.Pipelines.Pipe();
            _ = Task.Run(async () =>
            {
                try
                {
                    await stream.CopyOutputToAsync(
                        System.IO.Stream.Null,
                        stdoutPipe.Writer.AsStream(),
                        stderrPipe.Writer.AsStream(),
                        cancellationToken);
                }
                finally
                {
                    // Completing the writers lets the line readers finish cleanly.
                    stdoutPipe.Writer.Complete();
                    stderrPipe.Writer.Complete();
                }
            }, cancellationToken);
            // NOTE(review): stdout and stderr are relayed concurrently; WriteAsync on
            // the same response from two tasks is not synchronized — consider merging
            // both streams through a single channel.
            using var stdoutReader = new System.IO.StreamReader(stdoutPipe.Reader.AsStream());
            using var stderrReader = new System.IO.StreamReader(stderrPipe.Reader.AsStream());
            var stdoutTask = ReadLinesAsync(stdoutReader, ctx, cancellationToken);
            var stderrTask = ReadLinesAsync(stderrReader, ctx, cancellationToken);
            await Task.WhenAll(stdoutTask, stderrTask);
        }
        catch (OperationCanceledException) { /* client disconnected — normal */ }
    }

    /// <summary>Relays each non-blank line from the reader as an SSE data event until EOF or cancellation.</summary>
    private static async Task ReadLinesAsync(
        System.IO.StreamReader reader,
        HttpContext ctx,
        CancellationToken ct)
    {
        while (!ct.IsCancellationRequested)
        {
            var line = await reader.ReadLineAsync(ct);
            if (line is null) break;
            if (string.IsNullOrWhiteSpace(line)) continue;
            await ctx.Response.WriteAsync($"data: {line}\n\n", ct);
            await ctx.Response.Body.FlushAsync(ct);
        }
    }
}
+131
View File
@@ -0,0 +1,131 @@
using ControlPlane.Api.Consumers;
using ControlPlane.Api.Endpoints;
using ControlPlane.Api.Services;
using ControlPlane.Core.Models;
using ControlPlane.Core.Services;
using MassTransit;
using Npgsql;
// Composition root for the ControlPlane API: DI registrations, MassTransit bus,
// endpoint mapping, then an idempotent OPC schema bootstrap before listening.
var builder = WebApplication.CreateBuilder(args);
builder.AddServiceDefaults();
builder.Services.AddOpenApi();
// Serialize enums as strings in all minimal-API JSON responses.
builder.Services.ConfigureHttpJsonOptions(o =>
    o.SerializerOptions.Converters.Add(new System.Text.Json.Serialization.JsonStringEnumConverter()));
// In-memory job store - swap for EF Core post-MVP
// NOTE(review): a plain Dictionary singleton is not thread-safe; it is shared by
// concurrent request handlers (and presumably bus consumers) — consider
// ConcurrentDictionary.
builder.Services.AddSingleton<Dictionary<Guid, ProvisioningJob>>();
// Tenant registry - reads ClientAssets/{subdomain}.xml files
builder.Services.AddSingleton<TenantRegistryService>();
// SSE event bus - ProgressConsumer writes here, SSE endpoint reads
builder.Services.AddSingleton<SseEventBus>();
// Build + release pipeline services
builder.Services.AddSingleton<BuildHistoryService>();
builder.Services.AddSingleton<ImageBuildService>();
builder.Services.AddSingleton<ReleaseService>();
builder.Services.AddSingleton<ProjectBuildService>();
builder.Services.AddSingleton<PromotionService>();
// OPC persistence (raw Npgsql)
// NOTE(review): the fallback connection string embeds a dev password — confirm it
// can never be reached outside local development.
var opcConnStr = builder.Configuration.GetConnectionString("opcdb");
if (!string.IsNullOrWhiteSpace(opcConnStr))
    builder.Services.AddSingleton(NpgsqlDataSource.Create(opcConnStr));
else
    builder.Services.AddSingleton(NpgsqlDataSource.Create("Host=localhost;Database=opcdb;Username=postgres;Password=controlplane-dev"));
builder.Services.AddScoped<OpcService>();
// Named HttpClient for OpenRouter AI assist proxy
builder.Services.AddHttpClient("openrouter");
// Gitea integration
// NOTE(review): TLS certificate validation is disabled for this client —
// acceptable only for internal/self-signed Gitea hosts; verify the deployment.
builder.Services.AddHttpClient("gitea").ConfigurePrimaryHttpMessageHandler(() =>
    new HttpClientHandler { ServerCertificateCustomValidationCallback = HttpClientHandler.DangerousAcceptAnyServerCertificateValidator });
builder.Services.AddScoped<GiteaService>();
builder.Services.AddMassTransit(x =>
{
    x.SetKebabCaseEndpointNameFormatter();
    // Receives ProvisioningProgressEvent from Worker and pushes to SSE
    x.AddConsumer<ProvisioningProgressConsumer>();
    x.UsingRabbitMq((ctx, cfg) =>
    {
        // Host comes from the "rabbitmq" connection string; MassTransit falls back
        // to its default host when it is absent.
        var connStr = builder.Configuration.GetConnectionString("rabbitmq");
        if (!string.IsNullOrWhiteSpace(connStr))
            cfg.Host(new Uri(connStr));
        cfg.ConfigureEndpoints(ctx);
    });
});
var app = builder.Build();
app.MapDefaultEndpoints();
// Expose the OpenAPI document only in Development.
if (app.Environment.IsDevelopment())
    app.MapOpenApi();
// Feature endpoint groups (each maps its own route group + tags).
app.MapProvisioningEndpoints();
app.MapTenantLogEndpoints();
app.MapImageBuildEndpoints();
app.MapReleaseEndpoints();
app.MapProjectBuildEndpoints();
app.MapGitEndpoints();
app.MapPromotionEndpoints();
app.MapOpcEndpoints();
app.MapGiteaEndpoints();
app.MapInfraEndpoints();
// Ensure OPC tables exist (idempotent — IF NOT EXISTS)
var ds = app.Services.GetRequiredService<NpgsqlDataSource>();
await using (var cmd = ds.CreateCommand("""
CREATE TABLE IF NOT EXISTS opc (
    id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    number VARCHAR(20) NOT NULL UNIQUE,
    title VARCHAR(500) NOT NULL,
    description TEXT NOT NULL DEFAULT '',
    type VARCHAR(50) NOT NULL DEFAULT 'General',
    status VARCHAR(50) NOT NULL DEFAULT 'New',
    priority VARCHAR(20) NOT NULL DEFAULT 'Medium',
    assignee VARCHAR(200) NOT NULL DEFAULT '',
    created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
CREATE TABLE IF NOT EXISTS opc_note (
    id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    opc_id UUID NOT NULL REFERENCES opc(id) ON DELETE CASCADE,
    author VARCHAR(200) NOT NULL,
    content TEXT NOT NULL,
    created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
CREATE TABLE IF NOT EXISTS opc_artifact (
    id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    opc_id UUID NOT NULL REFERENCES opc(id) ON DELETE CASCADE,
    artifact_type VARCHAR(50) NOT NULL,
    title VARCHAR(500) NOT NULL DEFAULT '',
    content TEXT NOT NULL DEFAULT '',
    created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
CREATE TABLE IF NOT EXISTS opc_pinned_commit (
    opc_id UUID NOT NULL REFERENCES opc(id) ON DELETE CASCADE,
    hash VARCHAR(40) NOT NULL,
    short_hash VARCHAR(10) NOT NULL DEFAULT '',
    subject VARCHAR(1000) NOT NULL DEFAULT '',
    author VARCHAR(200) NOT NULL DEFAULT '',
    pinned_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    pinned_by VARCHAR(200) NOT NULL DEFAULT '',
    PRIMARY KEY (opc_id, hash)
);
CREATE INDEX IF NOT EXISTS ix_opc_number ON opc(number);
CREATE INDEX IF NOT EXISTS ix_opc_note_opc_id ON opc_note(opc_id);
CREATE INDEX IF NOT EXISTS ix_opc_artifact_opc_id ON opc_artifact(opc_id);
CREATE INDEX IF NOT EXISTS ix_opc_artifact_type ON opc_artifact(opc_id, artifact_type);
CREATE INDEX IF NOT EXISTS ix_opc_pinned_commit_opc_id ON opc_pinned_commit(opc_id);
"""))
    await cmd.ExecuteNonQueryAsync();
app.Run();
@@ -0,0 +1,12 @@
{
"profiles": {
"ControlPlane.Api": {
"commandName": "Project",
"launchBrowser": true,
"environmentVariables": {
"ASPNETCORE_ENVIRONMENT": "Development"
},
"applicationUrl": "https://localhost:7280;http://localhost:5280"
}
}
}
+216
View File
@@ -0,0 +1,216 @@
using System.Net.Http.Headers;
using System.Net.Http.Json;
using System.Text;
using System.Text.Json;
using System.Text.Json.Serialization;
using ControlPlane.Core.Models;
namespace ControlPlane.Api.Services;
/// <summary>
/// Thin wrapper around the Gitea REST API v1.
/// Configured via Gitea__BaseUrl, Gitea__Owner, and Gitea__Token in appsettings.
/// </summary>
public class GiteaService
{
private readonly HttpClient _http;
private readonly string _owner;
private readonly string _repo;
private readonly ILogger<GiteaService> _log;
private static readonly JsonSerializerOptions JsonOpts = new(JsonSerializerDefaults.Web);
public GiteaService(IHttpClientFactory factory, IConfiguration cfg, ILogger<GiteaService> log)
{
_log = log;
_owner = cfg["Gitea:Owner"] ?? "Clarity";
_repo = cfg["Gitea:Repo"] ?? "Clarity";
var baseUrl = cfg["Gitea:BaseUrl"] ?? "https://opc.clarity.test";
var token = cfg["Gitea:Token"] ?? string.Empty;
_http = factory.CreateClient("gitea");
_http.BaseAddress = new Uri(baseUrl.TrimEnd('/') + "/api/v1/");
_http.DefaultRequestHeaders.Accept.Add(new MediaTypeWithQualityHeaderValue("application/json"));
if (!string.IsNullOrWhiteSpace(token))
_http.DefaultRequestHeaders.Authorization = new AuthenticationHeaderValue("token", token);
}
// ── Repos ─────────────────────────────────────────────────────────────────
public async Task<GiteaRepo?> GetRepoAsync(CancellationToken ct = default)
{
try
{
return await _http.GetFromJsonAsync<GiteaRepo>($"repos/{_owner}/{_repo}", JsonOpts, ct);
}
catch (Exception ex) { _log.LogWarning(ex, "Gitea GetRepo failed"); return null; }
}
// ── Branches ──────────────────────────────────────────────────────────────
public async Task<List<GiteaBranch>> ListBranchesAsync(CancellationToken ct = default)
{
try
{
return await _http.GetFromJsonAsync<List<GiteaBranch>>(
$"repos/{_owner}/{_repo}/branches?limit=50", JsonOpts, ct) ?? [];
}
catch (Exception ex) { _log.LogWarning(ex, "Gitea ListBranches failed"); return []; }
}
public async Task<GiteaBranch?> CreateBranchAsync(CreateBranchRequest req, CancellationToken ct = default)
{
// Slugify: "OPC # 0032" + title → "feature/OPC-0032-git-workflow-integration"
var slug = SlugifyTitle(req.OpcTitle);
var num = req.OpcNumber.Replace("OPC # ", "OPC-").Replace(" ", "");
var branchName = $"feature/{num}-{slug}";
var body = JsonSerializer.Serialize(new
{
new_branch_name = branchName,
old_branch_name = req.From,
}, JsonOpts);
var res = await _http.PostAsync(
$"repos/{_owner}/{_repo}/branches",
new StringContent(body, Encoding.UTF8, "application/json"), ct);
if (!res.IsSuccessStatusCode)
{
var err = await res.Content.ReadAsStringAsync(ct);
_log.LogWarning("Gitea CreateBranch failed {Status}: {Error}", res.StatusCode, err);
return null;
}
return await res.Content.ReadFromJsonAsync<GiteaBranch>(JsonOpts, ct);
}
// ── Pull Requests ─────────────────────────────────────────────────────────
public async Task<List<GiteaPullRequest>> ListPullRequestsAsync(
string state = "open", CancellationToken ct = default)
{
try
{
return await _http.GetFromJsonAsync<List<GiteaPullRequest>>(
$"repos/{_owner}/{_repo}/pulls?state={state}&limit=50", JsonOpts, ct) ?? [];
}
catch (Exception ex) { _log.LogWarning(ex, "Gitea ListPRs failed"); return []; }
}
public async Task<GiteaPullRequest?> GetPullRequestAsync(long number, CancellationToken ct = default)
{
try
{
return await _http.GetFromJsonAsync<GiteaPullRequest>(
$"repos/{_owner}/{_repo}/pulls/{number}", JsonOpts, ct);
}
catch (Exception ex) { _log.LogWarning(ex, "Gitea GetPR failed"); return null; }
}
public async Task<GiteaPullRequest?> CreatePullRequestAsync(
CreatePullRequestRequest req, CancellationToken ct = default)
{
var body = JsonSerializer.Serialize(new
{
title = req.Title,
head = req.Head,
@base = req.Base,
body = req.Body,
}, JsonOpts);
var res = await _http.PostAsync(
$"repos/{_owner}/{_repo}/pulls",
new StringContent(body, Encoding.UTF8, "application/json"), ct);
if (!res.IsSuccessStatusCode)
{
var err = await res.Content.ReadAsStringAsync(ct);
_log.LogWarning("Gitea CreatePR failed {Status}: {Error}", res.StatusCode, err);
return null;
}
return await res.Content.ReadFromJsonAsync<GiteaPullRequest>(JsonOpts, ct);
}
// ── Tags ──────────────────────────────────────────────────────────────────
public async Task<List<GiteaTag>> ListTagsAsync(CancellationToken ct = default)
{
try
{
return await _http.GetFromJsonAsync<List<GiteaTag>>(
$"repos/{_owner}/{_repo}/tags?limit=20", JsonOpts, ct) ?? [];
}
catch (Exception ex) { _log.LogWarning(ex, "Gitea ListTags failed"); return []; }
}
/// <summary>
/// Creates an annotated tag at <c>req.CommitSha</c>.
/// Returns the created tag, or null on failure (status and body are logged).
/// </summary>
public async Task<GiteaTag?> CreateTagAsync(CreateTagRequest req, CancellationToken ct = default)
{
    var payload = new
    {
        tag_name = req.TagName,
        message = req.Message,
        target = req.CommitSha,
    };
    var content = new StringContent(
        JsonSerializer.Serialize(payload, JsonOpts), Encoding.UTF8, "application/json");
    var res = await _http.PostAsync($"repos/{_owner}/{_repo}/tags", content, ct);
    if (res.IsSuccessStatusCode)
        return await res.Content.ReadFromJsonAsync<GiteaTag>(JsonOpts, ct);
    var err = await res.Content.ReadAsStringAsync(ct);
    _log.LogWarning("Gitea CreateTag failed {Status}: {Error}", res.StatusCode, err);
    return null;
}
// ── Webhooks ──────────────────────────────────────────────────────────────
/// <summary>Lists all webhooks registered on the repo; empty list on failure.</summary>
public async Task<List<GiteaWebhook>> ListWebhooksAsync(CancellationToken ct = default)
{
    var url = $"repos/{_owner}/{_repo}/hooks";
    try
    {
        var hooks = await _http.GetFromJsonAsync<List<GiteaWebhook>>(url, JsonOpts, ct);
        return hooks ?? [];
    }
    catch (Exception ex)
    {
        _log.LogWarning(ex, "Gitea ListWebhooks failed");
        return [];
    }
}
/// <summary>
/// Registers a JSON webhook pointing at <c>req.TargetUrl</c> for the given events.
/// Returns the created hook, or null on failure (status and body are logged).
/// </summary>
public async Task<GiteaWebhook?> RegisterWebhookAsync(
    CreateWebhookRequest req, CancellationToken ct = default)
{
    var payload = new
    {
        type = "gitea",
        active = true,
        config = new { url = req.TargetUrl, content_type = "json" },
        events = req.Events,
    };
    var content = new StringContent(
        JsonSerializer.Serialize(payload, JsonOpts), Encoding.UTF8, "application/json");
    var res = await _http.PostAsync($"repos/{_owner}/{_repo}/hooks", content, ct);
    if (res.IsSuccessStatusCode)
        return await res.Content.ReadFromJsonAsync<GiteaWebhook>(JsonOpts, ct);
    var err = await res.Content.ReadAsStringAsync(ct);
    _log.LogWarning("Gitea RegisterWebhook failed {Status}: {Error}", res.StatusCode, err);
    return null;
}
// ── Helpers ───────────────────────────────────────────────────────────────
/// <summary>
/// Converts a title into a URL/branch-safe slug: lowercase, runs of non-alphanumerics
/// collapsed to single dashes, leading/trailing dashes trimmed, capped at 40 characters.
/// </summary>
private static string SlugifyTitle(string title)
{
    var slug = System.Text.RegularExpressions.Regex
        .Replace(title.ToLowerInvariant(), @"[^a-z0-9]+", "-")
        .Trim('-');
    // Cap by the SLUG's length, not the title's: replacement + trimming can shrink the
    // string, and slicing with the original title length then throws
    // ArgumentOutOfRangeException (e.g. "a!!" → slug "a", but title.Length == 3).
    return slug[..Math.Min(40, slug.Length)];
}
}
@@ -0,0 +1,144 @@
using ControlPlane.Core.Models;
using ControlPlane.Core.Services;
using Docker.DotNet;
using Docker.DotNet.Models;
namespace ControlPlane.Api.Services;
/// <summary>
/// Drives `docker build` for the clarity-server image via the Docker SDK.
/// Streams each build log line to the provided callback so the API endpoint
/// can forward it as SSE to the control plane UI in real time.
/// Persists build history via BuildHistoryService.
/// </summary>
public class ImageBuildService(
    IConfiguration config,
    BuildHistoryService history,
    ILogger<ImageBuildService> logger)
{
    // Single-slot gate: only one image build may run at a time, process-wide.
    private static readonly SemaphoreSlim _lock = new(1, 1);

    /// <summary>True while a build currently holds the gate.</summary>
    public bool IsBuilding => _lock.CurrentCount == 0;

    /// <summary>Image reference to build; overridable via Docker:ClarityServerImage.</summary>
    public string ImageName => config["Docker:ClarityServerImage"] ?? "clarity-server:latest";

    /// <summary>Returns the last docker-image build outcome plus whether one is running now.</summary>
    public async Task<ImageBuildStatus> GetStatusAsync()
    {
        var builds = await history.GetBuildsAsync();
        var last = builds.FirstOrDefault(b => b.Kind == BuildKind.DockerImage);
        return new ImageBuildStatus(
            last?.Target,
            last?.FinishedAt,
            last?.Status.ToString() ?? "Never built",
            IsBuilding);
    }

    /// <summary>
    /// Runs docker build and streams each log line to <paramref name="onLine"/>.
    /// Returns true on success, false if the build failed or was already running.
    /// </summary>
    public async Task<bool> BuildAsync(
        string repoRoot,
        Action<string> onLine,
        CancellationToken ct)
    {
        // Non-blocking acquire: refuse rather than queue a second concurrent build.
        if (!await _lock.WaitAsync(TimeSpan.Zero, ct))
        {
            onLine("⚠️ A build is already in progress.");
            return false;
        }
        var record = await history.CreateBuildAsync(BuildKind.DockerImage, ImageName);
        try
        {
            var socketUri = config["Docker:Socket"] ?? "npipe://./pipe/docker_engine";
            using var docker = new DockerClientConfiguration(new Uri(socketUri)).CreateClient();
            var (repo, tag) = SplitImageTag(ImageName);
            var dockerfilePath = "Clarity.Server/Dockerfile";
            // Every line is both streamed to the caller and captured in the build record.
            void Log(string line) { onLine(line); record.Log.Add(line); }
            Log($"▶ Building {ImageName} from {repoRoot}");
            Log($" Dockerfile: {dockerfilePath}");
            Log("──────────────────────────────────────");
            var buildParams = new ImageBuildParameters
            {
                Dockerfile = dockerfilePath,
                Tags = [$"{repo}:{tag}"],
                Remove = true,
                ForceRemove = true,
            };
            bool success = true;
            string? errorDetail = null;
            await docker.Images.BuildImageFromDockerfileAsync(
                buildParams,
                await CreateTarballAsync(repoRoot, ct),
                authConfigs: null,
                headers: null,
                new Progress<JSONMessage>(msg =>
                {
                    if (!string.IsNullOrWhiteSpace(msg.Stream))
                        Log(msg.Stream.TrimEnd('\n'));
                    if (msg.Error is not null)
                    {
                        success = false;
                        errorDetail = msg.Error.Message;
                        Log($"✖ {msg.Error.Message}");
                    }
                }),
                ct);
            Log("──────────────────────────────────────");
            if (success) Log($"✔ {ImageName} built successfully at {DateTimeOffset.UtcNow:u}");
            else Log($"✖ Build failed: {errorDetail}");
            await history.CompleteBuildAsync(record, success ? BuildStatus.Succeeded : BuildStatus.Failed);
            logger.LogInformation("Image build {Result} for {Image}", success ? "succeeded" : "failed", ImageName);
            return success;
        }
        catch (Exception ex)
        {
            record.Log.Add($"Exception: {ex.Message}");
            await history.CompleteBuildAsync(record, BuildStatus.Failed);
            onLine($"✖ Exception during build: {ex.Message}");
            logger.LogError(ex, "Image build threw an exception.");
            return false;
        }
        finally
        {
            _lock.Release();
        }
    }

    /// <summary>
    /// Packs the entire repo root into a tar stream for the Docker build context.
    /// NOTE(review): buffers the whole context in memory — fine for small repos,
    /// confirm acceptable for this repo's size.
    /// </summary>
    private static async Task<Stream> CreateTarballAsync(string repoRoot, CancellationToken ct)
    {
        // Use docker's own CLI to create the tarball via stdin — avoids reimplementing
        // .dockerignore parsing. Fall back to a pure managed tar if CLI isn't available.
        // For simplicity we use a managed approach: stream the directory as a tar.
        var ms = new MemoryStream();
        await Task.Run(() => TarHelper.Pack(repoRoot, ms), ct);
        ms.Position = 0;
        return ms;
    }

    /// <summary>
    /// Splits an image reference into (repo, tag), defaulting the tag to "latest".
    /// </summary>
    private static (string repo, string tag) SplitImageTag(string image)
    {
        var colon = image.LastIndexOf(':');
        // A colon only separates the tag when it appears after the last '/'. Otherwise it
        // is a registry port (e.g. "localhost:5000/clarity-server" has no tag) and slicing
        // at it would produce repo "localhost", tag "5000/clarity-server".
        if (colon < 0 || image.IndexOf('/', colon) >= 0)
            return (image, "latest");
        return (image[..colon], image[(colon + 1)..]);
    }
}
/// <summary>
/// Snapshot of the docker-image build state returned to the control plane UI:
/// the last built image/target (null if never built), when it finished, the last
/// recorded status message, and whether a build is running right now.
/// </summary>
public record ImageBuildStatus(
    string? ImageName,
    DateTimeOffset? BuiltAt,
    string LastMessage,
    bool IsBuilding);
+297
View File
@@ -0,0 +1,297 @@
using ControlPlane.Core.Models;
using Npgsql;
namespace ControlPlane.Api.Services;
/// <summary>
/// Data access for OPC records and their child notes, artifacts, and pinned commits,
/// backed by Npgsql positional parameters ($1, $2, …).
/// </summary>
public class OpcService(NpgsqlDataSource db)
{
    // ── Helpers ──────────────────────────────────────────────────────────────

    // Column order must match every SELECT/RETURNING list in this class:
    // id, number, title, description, type, status, priority, assignee, created_at, updated_at.
    private static OpcRecord ReadOpc(NpgsqlDataReader r) => new(
        r.GetGuid(0),
        r.GetString(1),
        r.GetString(2),
        r.GetString(3),
        r.GetString(4),
        r.GetString(5),
        r.GetString(6),
        r.GetString(7),
        r.GetDateTime(8),
        r.GetDateTime(9)
    );

    // Columns: id, opc_id, author, content, created_at.
    private static OpcNote ReadNote(NpgsqlDataReader r) => new(
        r.GetGuid(0),
        r.GetGuid(1),
        r.GetString(2),
        r.GetString(3),
        r.GetDateTime(4)
    );

    // Columns: id, opc_id, artifact_type, title, content, created_at, updated_at.
    private static OpcArtifact ReadArtifact(NpgsqlDataReader r) => new(
        r.GetGuid(0),
        r.GetGuid(1),
        r.GetString(2),
        r.GetString(3),
        r.GetString(4),
        r.GetDateTime(5),
        r.GetDateTime(6)
    );

    // ── Next OPC number ───────────────────────────────────────────────────────

    /// <summary>
    /// Computes the next sequential number ("OPC # 0001", "OPC # 0002", …) by reading the
    /// numerically-highest existing number.
    /// NOTE(review): read-then-increment is not concurrency-safe — two simultaneous creates
    /// can mint the same number; confirm whether a unique constraint on opc.number guards this.
    /// </summary>
    public async Task<string> NextNumberAsync(CancellationToken ct = default)
    {
        await using var cmd = db.CreateCommand(
            "SELECT number FROM opc ORDER BY CAST(TRIM(SUBSTRING(number FROM 7)) AS INTEGER) DESC LIMIT 1");
        var last = await cmd.ExecuteScalarAsync(ct) as string;
        if (last is null) return "OPC # 0001";
        // "OPC # " is 6 characters; guard the slice so a malformed row can't throw
        // ArgumentOutOfRangeException on last[6..].
        if (last.Length > 6 && int.TryParse(last[6..], out var n))
            return $"OPC # {n + 1:D4}";
        return "OPC # 0001";
    }

    // ── OPC CRUD ──────────────────────────────────────────────────────────────

    /// <summary>
    /// Lists OPC records, newest first, optionally filtered by type and/or status.
    /// Null filters match all values (handled by the $n::text IS NULL guards in SQL).
    /// </summary>
    public async Task<List<OpcRecord>> ListAsync(
        string? typeFilter = null, string? statusFilter = null,
        CancellationToken ct = default)
    {
        var sql = """
            SELECT id, number, title, description, type, status, priority, assignee,
                   created_at, updated_at
            FROM opc
            WHERE ($1::text IS NULL OR type = $1)
              AND ($2::text IS NULL OR status = $2)
            ORDER BY created_at DESC
            """;
        await using var cmd = db.CreateCommand(sql);
        cmd.Parameters.AddWithValue(typeFilter ?? (object)DBNull.Value);
        cmd.Parameters.AddWithValue(statusFilter ?? (object)DBNull.Value);
        await using var r = await cmd.ExecuteReaderAsync(ct);
        var list = new List<OpcRecord>();
        while (await r.ReadAsync(ct)) list.Add(ReadOpc(r));
        return list;
    }

    /// <summary>Fetches one OPC by id, or null when it does not exist.</summary>
    public async Task<OpcRecord?> GetAsync(Guid id, CancellationToken ct = default)
    {
        await using var cmd = db.CreateCommand(
            "SELECT id, number, title, description, type, status, priority, assignee, created_at, updated_at FROM opc WHERE id = $1");
        cmd.Parameters.AddWithValue(id);
        await using var r = await cmd.ExecuteReaderAsync(ct);
        return await r.ReadAsync(ct) ? ReadOpc(r) : null;
    }

    /// <summary>Creates a new OPC with the next sequential number and status 'New'.</summary>
    public async Task<OpcRecord> CreateAsync(CreateOpcRequest req, CancellationToken ct = default)
    {
        var number = await NextNumberAsync(ct);
        var sql = """
            INSERT INTO opc (number, title, description, type, status, priority, assignee)
            VALUES ($1, $2, $3, $4, 'New', $5, $6)
            RETURNING id, number, title, description, type, status, priority, assignee,
                      created_at, updated_at
            """;
        await using var cmd = db.CreateCommand(sql);
        cmd.Parameters.AddWithValue(number);
        cmd.Parameters.AddWithValue(req.Title);
        cmd.Parameters.AddWithValue(req.Description);
        cmd.Parameters.AddWithValue(req.Type);
        cmd.Parameters.AddWithValue(req.Priority);
        cmd.Parameters.AddWithValue(req.Assignee);
        await using var r = await cmd.ExecuteReaderAsync(ct);
        // INSERT ... RETURNING always yields exactly one row.
        await r.ReadAsync(ct);
        return ReadOpc(r);
    }

    /// <summary>
    /// Partially updates an OPC: null request fields keep the current value (COALESCE).
    /// Returns the updated record, or null when the id does not exist.
    /// </summary>
    public async Task<OpcRecord?> UpdateAsync(Guid id, UpdateOpcRequest req, CancellationToken ct = default)
    {
        var sql = """
            UPDATE opc SET
                title = COALESCE($2, title),
                description = COALESCE($3, description),
                type = COALESCE($4, type),
                status = COALESCE($5, status),
                priority = COALESCE($6, priority),
                assignee = COALESCE($7, assignee),
                updated_at = NOW()
            WHERE id = $1
            RETURNING id, number, title, description, type, status, priority, assignee,
                      created_at, updated_at
            """;
        await using var cmd = db.CreateCommand(sql);
        cmd.Parameters.AddWithValue(id);
        cmd.Parameters.AddWithValue(req.Title ?? (object)DBNull.Value);
        cmd.Parameters.AddWithValue(req.Description ?? (object)DBNull.Value);
        cmd.Parameters.AddWithValue(req.Type ?? (object)DBNull.Value);
        cmd.Parameters.AddWithValue(req.Status ?? (object)DBNull.Value);
        cmd.Parameters.AddWithValue(req.Priority ?? (object)DBNull.Value);
        cmd.Parameters.AddWithValue(req.Assignee ?? (object)DBNull.Value);
        await using var r = await cmd.ExecuteReaderAsync(ct);
        return await r.ReadAsync(ct) ? ReadOpc(r) : null;
    }

    /// <summary>Deletes an OPC; returns true when a row was removed.</summary>
    public async Task<bool> DeleteAsync(Guid id, CancellationToken ct = default)
    {
        await using var cmd = db.CreateCommand("DELETE FROM opc WHERE id = $1");
        cmd.Parameters.AddWithValue(id);
        return await cmd.ExecuteNonQueryAsync(ct) > 0;
    }

    // ── Notes ──────────────────────────────────────────────────────────────────

    /// <summary>Lists an OPC's notes, oldest first.</summary>
    public async Task<List<OpcNote>> ListNotesAsync(Guid opcId, CancellationToken ct = default)
    {
        await using var cmd = db.CreateCommand(
            "SELECT id, opc_id, author, content, created_at FROM opc_note WHERE opc_id = $1 ORDER BY created_at ASC");
        cmd.Parameters.AddWithValue(opcId);
        await using var r = await cmd.ExecuteReaderAsync(ct);
        var list = new List<OpcNote>();
        while (await r.ReadAsync(ct)) list.Add(ReadNote(r));
        return list;
    }

    /// <summary>Appends a note to an OPC and returns the stored row.</summary>
    public async Task<OpcNote> AddNoteAsync(Guid opcId, AddNoteRequest req, CancellationToken ct = default)
    {
        var sql = """
            INSERT INTO opc_note (opc_id, author, content)
            VALUES ($1, $2, $3)
            RETURNING id, opc_id, author, content, created_at
            """;
        await using var cmd = db.CreateCommand(sql);
        cmd.Parameters.AddWithValue(opcId);
        cmd.Parameters.AddWithValue(req.Author);
        cmd.Parameters.AddWithValue(req.Content);
        await using var r = await cmd.ExecuteReaderAsync(ct);
        await r.ReadAsync(ct);
        return ReadNote(r);
    }

    // ── Artifacts ─────────────────────────────────────────────────────────────

    /// <summary>Lists an OPC's artifacts, oldest first, optionally filtered by type.</summary>
    public async Task<List<OpcArtifact>> ListArtifactsAsync(Guid opcId, string? artifactType = null, CancellationToken ct = default)
    {
        var sql = """
            SELECT id, opc_id, artifact_type, title, content, created_at, updated_at
            FROM opc_artifact
            WHERE opc_id = $1
              AND ($2::text IS NULL OR artifact_type = $2)
            ORDER BY created_at ASC
            """;
        await using var cmd = db.CreateCommand(sql);
        cmd.Parameters.AddWithValue(opcId);
        cmd.Parameters.AddWithValue(artifactType ?? (object)DBNull.Value);
        await using var r = await cmd.ExecuteReaderAsync(ct);
        var list = new List<OpcArtifact>();
        while (await r.ReadAsync(ct)) list.Add(ReadArtifact(r));
        return list;
    }

    /// <summary>
    /// Inserts a new artifact for an OPC. ON CONFLICT DO NOTHING suppresses the insert when
    /// a conflicting artifact already exists, in which case RETURNING yields no row — we now
    /// surface that as a descriptive InvalidOperationException instead of letting the reader
    /// throw an opaque "no row" error. For updates use <see cref="UpdateArtifactAsync"/>.
    /// </summary>
    public async Task<OpcArtifact> UpsertArtifactAsync(Guid opcId, UpsertArtifactRequest req, CancellationToken ct = default)
    {
        var sql = """
            INSERT INTO opc_artifact (opc_id, artifact_type, title, content)
            VALUES ($1, $2, $3, $4)
            ON CONFLICT DO NOTHING
            RETURNING id, opc_id, artifact_type, title, content, created_at, updated_at
            """;
        // Simple insert; for updates use artifact id endpoint
        await using var cmd = db.CreateCommand(sql);
        cmd.Parameters.AddWithValue(opcId);
        cmd.Parameters.AddWithValue(req.ArtifactType);
        cmd.Parameters.AddWithValue(req.Title);
        cmd.Parameters.AddWithValue(req.Content);
        await using var r = await cmd.ExecuteReaderAsync(ct);
        if (!await r.ReadAsync(ct))
            throw new InvalidOperationException(
                $"Artifact insert for OPC {opcId} returned no row — a conflicting artifact already exists; use the artifact update endpoint instead.");
        return ReadArtifact(r);
    }

    /// <summary>
    /// Replaces an artifact's title and content by artifact id.
    /// Returns the updated row, or null when the id does not exist.
    /// </summary>
    public async Task<OpcArtifact?> UpdateArtifactAsync(Guid artifactId, UpsertArtifactRequest req, CancellationToken ct = default)
    {
        var sql = """
            UPDATE opc_artifact SET
                title = $2,
                content = $3,
                updated_at = NOW()
            WHERE id = $1
            RETURNING id, opc_id, artifact_type, title, content, created_at, updated_at
            """;
        await using var cmd = db.CreateCommand(sql);
        cmd.Parameters.AddWithValue(artifactId);
        cmd.Parameters.AddWithValue(req.Title);
        cmd.Parameters.AddWithValue(req.Content);
        await using var r = await cmd.ExecuteReaderAsync(ct);
        return await r.ReadAsync(ct) ? ReadArtifact(r) : null;
    }

    /// <summary>Deletes an artifact by id; returns true when a row was removed.</summary>
    public async Task<bool> DeleteArtifactAsync(Guid artifactId, CancellationToken ct = default)
    {
        await using var cmd = db.CreateCommand("DELETE FROM opc_artifact WHERE id = $1");
        cmd.Parameters.AddWithValue(artifactId);
        return await cmd.ExecuteNonQueryAsync(ct) > 0;
    }

    // ── Pinned commits ────────────────────────────────────────────────────────

    // Columns: opc_id, hash, short_hash, subject, author, pinned_at, pinned_by.
    private static OpcPinnedCommit ReadPinnedCommit(NpgsqlDataReader r) => new(
        r.GetGuid(0),
        r.GetString(1),
        r.GetString(2),
        r.GetString(3),
        r.GetString(4),
        r.GetDateTime(5),
        r.GetString(6)
    );

    /// <summary>Lists an OPC's pinned commits, most recently pinned first.</summary>
    public async Task<List<OpcPinnedCommit>> ListPinnedCommitsAsync(Guid opcId, CancellationToken ct = default)
    {
        await using var cmd = db.CreateCommand(
            "SELECT opc_id, hash, short_hash, subject, author, pinned_at, pinned_by FROM opc_pinned_commit WHERE opc_id = $1 ORDER BY pinned_at DESC");
        cmd.Parameters.AddWithValue(opcId);
        await using var r = await cmd.ExecuteReaderAsync(ct);
        var list = new List<OpcPinnedCommit>();
        while (await r.ReadAsync(ct)) list.Add(ReadPinnedCommit(r));
        return list;
    }

    /// <summary>
    /// Pins a commit to an OPC, re-pinning (refreshing metadata and pinned_at) when the
    /// same (opc_id, hash) pair is already pinned. Returns null when the OPC does not exist.
    /// </summary>
    public async Task<OpcPinnedCommit?> PinCommitAsync(
        Guid opcId, string hash, string shortHash, string subject, string author, string pinnedBy,
        CancellationToken ct = default)
    {
        // Verify the OPC exists
        await using var existsCmd = db.CreateCommand("SELECT 1 FROM opc WHERE id = $1");
        existsCmd.Parameters.AddWithValue(opcId);
        var exists = await existsCmd.ExecuteScalarAsync(ct);
        if (exists is null) return null;
        var sql = """
            INSERT INTO opc_pinned_commit (opc_id, hash, short_hash, subject, author, pinned_by)
            VALUES ($1, $2, $3, $4, $5, $6)
            ON CONFLICT (opc_id, hash) DO UPDATE SET
                short_hash = EXCLUDED.short_hash,
                subject = EXCLUDED.subject,
                author = EXCLUDED.author,
                pinned_by = EXCLUDED.pinned_by,
                pinned_at = NOW()
            RETURNING opc_id, hash, short_hash, subject, author, pinned_at, pinned_by
            """;
        await using var cmd = db.CreateCommand(sql);
        cmd.Parameters.AddWithValue(opcId);
        cmd.Parameters.AddWithValue(hash);
        cmd.Parameters.AddWithValue(shortHash);
        cmd.Parameters.AddWithValue(subject);
        cmd.Parameters.AddWithValue(author);
        cmd.Parameters.AddWithValue(pinnedBy);
        await using var r = await cmd.ExecuteReaderAsync(ct);
        return await r.ReadAsync(ct) ? ReadPinnedCommit(r) : null;
    }

    /// <summary>Removes a pinned commit; returns true when a row was removed.</summary>
    public async Task<bool> UnpinCommitAsync(Guid opcId, string hash, CancellationToken ct = default)
    {
        await using var cmd = db.CreateCommand(
            "DELETE FROM opc_pinned_commit WHERE opc_id = $1 AND hash = $2");
        cmd.Parameters.AddWithValue(opcId);
        cmd.Parameters.AddWithValue(hash);
        return await cmd.ExecuteNonQueryAsync(ct) > 0;
    }
}
@@ -0,0 +1,127 @@
using ControlPlane.Core.Models;
using ControlPlane.Core.Services;
using Microsoft.Extensions.Configuration;
using Microsoft.Extensions.Logging;
using System.Diagnostics;
namespace ControlPlane.Api.Services;
/// <summary>
/// Runs dotnet build or npm run build for individual projects in the repo.
/// Used by the Build Monitor tab in the control plane UI.
/// </summary>
public class ProjectBuildService(
    IConfiguration config,
    BuildHistoryService history,
    ILogger<ProjectBuildService> logger)
{
    // Repo checkout root; empty string when Docker:RepoRoot is not configured.
    public string RepoRoot => config["Docker:RepoRoot"] ?? string.Empty;

    /// <summary>Known projects in the solution, returned to the UI for the build monitor grid.</summary>
    /// <remarks>Returns an empty list when no repo root is configured, so the UI shows nothing buildable.</remarks>
    public IReadOnlyList<ProjectDefinition> GetProjects()
    {
        if (string.IsNullOrWhiteSpace(RepoRoot)) return [];
        return
        [
            new("Clarity.Server", BuildKind.DotnetProject, "Clarity.Server/Clarity.Server.csproj"),
            new("Clarity.ServiceDefaults", BuildKind.DotnetProject, "Clarity.ServiceDefaults/Clarity.ServiceDefaults.csproj"),
            new("frontend (Clarity.Server)", BuildKind.NpmProject, "frontend"),
        ];
    }

    /// <summary>
    /// Builds a single project and streams output to <paramref name="onLine"/>.
    /// </summary>
    /// <param name="projectName">Must match a name from <see cref="GetProjects"/>; unknown
    /// names return a Failed record without starting a process.</param>
    /// <returns>The persisted build record with full log and final status.</returns>
    public async Task<BuildRecord> BuildProjectAsync(
        string projectName,
        Action<string> onLine,
        CancellationToken ct)
    {
        var projects = GetProjects();
        var def = projects.FirstOrDefault(p => p.Name == projectName);
        if (def is null)
        {
            // Unknown project: synthesize a failed record (not persisted via CreateBuildAsync).
            var err = new BuildRecord { Kind = BuildKind.DotnetProject, Target = projectName, Status = BuildStatus.Failed };
            err.Log.Add($"Unknown project: {projectName}");
            return err;
        }
        var record = await history.CreateBuildAsync(def.Kind, def.RelativePath);
        record.Log.Add($"▶ Building {def.Name} [{def.Kind}]");
        record.Log.Add($" Path: {def.RelativePath}");
        record.Log.Add("──────────────────────────────────────");
        onLine($"▶ Building {def.Name}");
        try
        {
            // Pick the toolchain command by project kind (dotnet build vs npm run build).
            var (exe, args, workDir) = def.Kind == BuildKind.NpmProject
                ? BuildNpmCommand(def.RelativePath)
                : BuildDotnetCommand(def.RelativePath);
            var psi = new ProcessStartInfo(exe, args)
            {
                WorkingDirectory = workDir,
                RedirectStandardOutput = true,
                RedirectStandardError = true,
                UseShellExecute = false,
                CreateNoWindow = true,
            };
            using var proc = new Process { StartInfo = psi, EnableRaisingEvents = true };
            // Shared handler for stdout and stderr: capture, stream, and flush to history.
            void HandleLine(string? line)
            {
                if (line is null) return;
                record.Log.Add(line);
                onLine(line);
                // Non-blocking fire-and-forget flush
                _ = history.AppendBuildLogAsync(record, line);
            }
            proc.OutputDataReceived += (_, e) => HandleLine(e.Data);
            proc.ErrorDataReceived += (_, e) => HandleLine(e.Data);
            proc.Start();
            proc.BeginOutputReadLine();
            proc.BeginErrorReadLine();
            // NOTE(review): if ct fires, WaitForExitAsync throws but the child process is
            // left running — confirm whether cancellation should also Kill() the process.
            await proc.WaitForExitAsync(ct);
            var status = proc.ExitCode == 0 ? BuildStatus.Succeeded : BuildStatus.Failed;
            var summary = proc.ExitCode == 0 ? "✔ Build succeeded." : $"✖ Build failed (exit {proc.ExitCode}).";
            onLine("──────────────────────────────────────");
            onLine(summary);
            record.Log.Add(summary);
            await history.CompleteBuildAsync(record, status);
            logger.LogInformation("Project build [{Name}] {Status}", def.Name, status);
            return record;
        }
        catch (Exception ex)
        {
            onLine($"✖ Exception: {ex.Message}");
            record.Log.Add($"Exception: {ex.Message}");
            await history.CompleteBuildAsync(record, BuildStatus.Failed);
            logger.LogError(ex, "Project build [{Name}] threw.", def.Name);
            return record;
        }
    }

    // Builds the `dotnet build` invocation for a .csproj path relative to RepoRoot.
    private (string exe, string args, string workDir) BuildDotnetCommand(string relativePath)
    {
        var fullPath = Path.Combine(RepoRoot, relativePath.Replace('/', Path.DirectorySeparatorChar));
        return ("dotnet", $"build \"{fullPath}\" --configuration Release --nologo", RepoRoot);
    }

    // Builds the `npm run build` invocation, run from the project directory itself.
    private (string exe, string args, string workDir) BuildNpmCommand(string relativePath)
    {
        var workDir = Path.Combine(RepoRoot, relativePath.Replace('/', Path.DirectorySeparatorChar));
        // npm on Windows needs cmd /c
        return (OperatingSystem.IsWindows() ? "cmd" : "sh",
                OperatingSystem.IsWindows() ? "/c npm run build" : "-c \"npm run build\"",
                workDir);
    }
}
/// <summary>A buildable project: display name, build kind (dotnet/npm), and path relative to the repo root.</summary>
public record ProjectDefinition(string Name, BuildKind Kind, string RelativePath);
@@ -0,0 +1,283 @@
using ControlPlane.Core.Models;
using Microsoft.Extensions.Configuration;
using Microsoft.Extensions.Logging;
using System.Diagnostics;
using System.Text.Json;
namespace ControlPlane.Api.Services;
/// <summary>
/// Handles all git operations for the promotion workflow:
/// branch status, diff summaries, merge + push, and promotion history persistence.
/// All git commands run against the repo root configured in Docker:RepoRoot.
/// </summary>
public class PromotionService(IConfiguration config, ILogger<PromotionService> logger)
{
    // The ordered promotion ladder — each step is a valid promotion.
    public static readonly string[] Ladder = ["develop", "staging", "uat", "master"];

    private string RepoRoot => config["Docker:RepoRoot"] ?? string.Empty;

    // Single-slot gate: only one promotion may mutate the working tree at a time.
    private static readonly SemaphoreSlim _lock = new(1, 1);

    private static readonly JsonSerializerOptions JsonOpts = new()
    {
        WriteIndented = true,
        PropertyNamingPolicy = JsonNamingPolicy.CamelCase,
        Converters = { new System.Text.Json.Serialization.JsonStringEnumConverter() },
    };

    // ── Branch status ────────────────────────────────────────────────────────

    /// <summary>
    /// Returns status for all ladder branches: last commit info + ahead/behind counts vs next branch.
    /// </summary>
    public async Task<List<BranchStatus>> GetLadderStatusAsync(CancellationToken ct = default)
    {
        var result = new List<BranchStatus>();
        // Fetch to get up-to-date remote state, but don't fail if we're offline
        await RunGitAsync("fetch --all --quiet", ct, swallowErrors: true);
        foreach (var branch in Ladder)
        {
            var exists = await BranchExistsAsync(branch, ct);
            if (!exists)
            {
                result.Add(new BranchStatus(branch, false, null, null, 0, 0, []));
                continue;
            }
            // Last commit on this branch
            var lastCommit = await GitOutputAsync($"log {branch} -1 --format=%h|%an|%ad|%s --date=short", ct);
            string? shortHash = null, author = null, date = null, subject = null;
            if (!string.IsNullOrWhiteSpace(lastCommit))
            {
                // Split into at most 4 parts so '|' inside the subject is preserved.
                var p = lastCommit.Trim().Split('|', 4);
                if (p.Length == 4) (shortHash, author, date, subject) = (p[0], p[1], p[2], p[3]);
            }
            // Ahead/behind vs the NEXT branch in the ladder
            int ahead = 0, behind = 0;
            var nextIdx = Array.IndexOf(Ladder, branch) + 1;
            if (nextIdx < Ladder.Length)
            {
                var next = Ladder[nextIdx];
                if (await BranchExistsAsync(next, ct))
                {
                    // --left-right --count prints "<behind>\t<ahead>" relative to next...branch.
                    var counts = await GitOutputAsync($"rev-list --left-right --count {next}...{branch}", ct);
                    if (!string.IsNullOrWhiteSpace(counts))
                    {
                        var parts = counts.Trim().Split('\t');
                        if (parts.Length == 2)
                        {
                            int.TryParse(parts[0], out behind);
                            int.TryParse(parts[1], out ahead);
                        }
                    }
                }
            }
            // Unreleased commit summaries (commits in this branch not yet in next)
            string[] unreleasedLines = [];
            if (ahead > 0 && nextIdx < Ladder.Length && await BranchExistsAsync(Ladder[nextIdx], ct))
            {
                var log = await GitOutputAsync($"log {Ladder[nextIdx]}..{branch} --oneline --no-decorate", ct);
                unreleasedLines = log.Split('\n', StringSplitOptions.RemoveEmptyEntries);
            }
            result.Add(new BranchStatus(branch, true, shortHash, $"{author} · {date} · {subject}",
                ahead, behind, unreleasedLines));
        }
        return result;
    }

    // ── Promotion ────────────────────────────────────────────────────────────

    /// <summary>
    /// Merges <paramref name="from"/> into <paramref name="to"/> with a no-fast-forward merge commit,
    /// then pushes. Streams progress lines to <paramref name="onLine"/>.
    /// NOTE(review): branch names are interpolated into git arguments; confirm callers only
    /// pass ladder/known branch names.
    /// </summary>
    public async Task<PromotionRequest> PromoteAsync(
        string from,
        string to,
        string requestedBy,
        string? note,
        Action<string> onLine,
        CancellationToken ct)
    {
        if (!await _lock.WaitAsync(TimeSpan.Zero, ct))
        {
            var busy = new PromotionRequest { FromBranch = from, ToBranch = to, Status = PromotionStatus.Failed };
            busy.Log.Add("⚠️ Another promotion is already in progress.");
            return busy;
        }
        var req = new PromotionRequest
        {
            FromBranch = from,
            ToBranch = to,
            RequestedBy = requestedBy,
            Note = note,
            Status = PromotionStatus.Running,
        };
        void Log(string line) { req.Log.Add(line); onLine(line); }
        try
        {
            Log($"▶ Promoting {from} → {to}");
            if (!string.IsNullOrWhiteSpace(note)) Log($" Note: {note}");
            Log("──────────────────────────────────────");
            // 1. Fetch latest
            Log(" git fetch --all");
            await RunGitAsync("fetch --all --quiet", ct);
            // 2. Checkout target branch
            Log($" git checkout {to}");
            await RunGitAsync($"checkout {to}", ct);
            // 3. Pull target to latest
            Log($" git pull origin {to}");
            await RunGitAsync($"pull origin {to} --quiet", ct);
            // 4. Count commits being promoted
            var logOutput = await GitOutputAsync($"log {to}..{from} --oneline --no-decorate", ct);
            var commitLines = logOutput.Split('\n', StringSplitOptions.RemoveEmptyEntries);
            req.CommitCount = commitLines.Length;
            req.CommitLines = commitLines;
            Log($" Merging {commitLines.Length} commit(s) from {from}:");
            foreach (var cl in commitLines) Log($" {cl}");
            // 5. Merge with --no-ff for a clean promotion commit.
            // Replace double quotes in the free-text note with single quotes: the message is
            // passed inside a double-quoted -m argument, and an embedded '"' would terminate
            // the argument and corrupt the git command line.
            var safeNote = note?.Replace('"', '\'');
            var mergeMsg = $"chore: promote {from} → {to}" + (safeNote != null ? $" — {safeNote}" : "");
            Log($" git merge --no-ff {from}");
            await RunGitAsync($"merge --no-ff {from} -m \"{mergeMsg}\"", ct);
            // 6. Push
            Log($" git push origin {to}");
            await RunGitAsync($"push origin {to}", ct);
            // 7. Return to develop so the working tree stays clean
            await RunGitAsync("checkout develop", ct, swallowErrors: true);
            Log("──────────────────────────────────────");
            Log($"✔ {from} → {to} promoted successfully at {DateTimeOffset.UtcNow:u}");
            req.Status = PromotionStatus.Succeeded;
            req.CompletedAt = DateTimeOffset.UtcNow;
        }
        catch (Exception ex)
        {
            Log($"✖ Promotion failed: {ex.Message}");
            req.Status = PromotionStatus.Failed;
            req.CompletedAt = DateTimeOffset.UtcNow;
            // Try to abort any broken merge state
            await RunGitAsync("merge --abort", ct, swallowErrors: true);
            await RunGitAsync("checkout develop", ct, swallowErrors: true);
            logger.LogError(ex, "Promotion {From}→{To} failed", from, to);
        }
        finally
        {
            await SaveAsync(req);
            _lock.Release();
        }
        return req;
    }

    // ── History persistence ──────────────────────────────────────────────────

    // promotions.json lives in the ClientAssets folder (env var or config key),
    // falling back to ../ClientAssets next to the app base directory.
    private string HistoryPath
    {
        get
        {
            var folder = config["ClientAssets__Folder"] ?? config["ClientAssets:Folder"]
                ?? Path.GetFullPath(Path.Combine(AppContext.BaseDirectory, "..", "ClientAssets"));
            Directory.CreateDirectory(folder);
            return Path.Combine(folder, "promotions.json");
        }
    }

    // Serializes access to promotions.json across concurrent readers/writers.
    private static readonly SemaphoreSlim _fileLock = new(1, 1);

    /// <summary>Upserts a promotion record into history, capping history at 100 entries.</summary>
    private async Task SaveAsync(PromotionRequest req)
    {
        await _fileLock.WaitAsync();
        try
        {
            var all = LoadHistory();
            var idx = all.FindIndex(r => r.Id == req.Id);
            if (idx >= 0) all[idx] = req; else all.Insert(0, req);
            if (all.Count > 100) all = all[..100];
            await File.WriteAllTextAsync(HistoryPath, JsonSerializer.Serialize(all, JsonOpts));
        }
        finally { _fileLock.Release(); }
    }

    /// <summary>Returns the persisted promotion history, newest first.</summary>
    public async Task<List<PromotionRequest>> GetHistoryAsync()
    {
        await _fileLock.WaitAsync();
        try { return LoadHistory(); }
        finally { _fileLock.Release(); }
    }

    // Best-effort load: a missing or corrupt history file yields an empty list.
    private List<PromotionRequest> LoadHistory()
    {
        if (!File.Exists(HistoryPath)) return [];
        try { return JsonSerializer.Deserialize<List<PromotionRequest>>(File.ReadAllText(HistoryPath), JsonOpts) ?? []; }
        catch { return []; }
    }

    // ── Git helpers ──────────────────────────────────────────────────────────

    // True when `git branch --list <name>` prints anything (local branch exists).
    private async Task<bool> BranchExistsAsync(string branch, CancellationToken ct)
    {
        var output = await GitOutputAsync($"branch --list {branch}", ct);
        return !string.IsNullOrWhiteSpace(output);
    }

    /// <summary>Runs git and returns its stdout; exit code is ignored.</summary>
    private async Task<string> GitOutputAsync(string args, CancellationToken ct)
    {
        var psi = MakePsi(args);
        using var proc = Process.Start(psi) ?? throw new InvalidOperationException("Failed to start git");
        // Drain BOTH redirected pipes before waiting: reading only one can deadlock when
        // the other pipe's buffer fills (git blocks writing, we block waiting).
        var stdoutTask = proc.StandardOutput.ReadToEndAsync(ct);
        var stderrTask = proc.StandardError.ReadToEndAsync(ct);
        await proc.WaitForExitAsync(ct);
        await stderrTask;
        return await stdoutTask;
    }

    /// <summary>Runs git for its side effects; throws on non-zero exit unless swallowErrors.</summary>
    private async Task RunGitAsync(string args, CancellationToken ct, bool swallowErrors = false)
    {
        var psi = MakePsi(args);
        using var proc = Process.Start(psi) ?? throw new InvalidOperationException("Failed to start git");
        // Drain both pipes to avoid the full-buffer deadlock (see GitOutputAsync).
        var stdoutTask = proc.StandardOutput.ReadToEndAsync(ct);
        var stderrTask = proc.StandardError.ReadToEndAsync(ct);
        await proc.WaitForExitAsync(ct);
        await stdoutTask;
        var stderr = await stderrTask;
        if (!swallowErrors && proc.ExitCode != 0)
            throw new InvalidOperationException($"git {args} exited {proc.ExitCode}: {stderr.Trim()}");
        logger.LogDebug("git {Args} → exit {Code}", args, proc.ExitCode);
    }

    // Common process setup: run git in RepoRoot with both streams captured.
    private ProcessStartInfo MakePsi(string args) => new("git", args)
    {
        WorkingDirectory = RepoRoot,
        RedirectStandardOutput = true,
        RedirectStandardError = true,
        UseShellExecute = false,
        CreateNoWindow = true,
    };
}
/// <summary>Current status of a single branch in the promotion ladder.</summary>
/// <remarks>
/// Produced by PromotionService.GetLadderStatusAsync; when <c>Exists</c> is false the
/// commit fields are null and the counts are zero.
/// </remarks>
public record BranchStatus(
    string Branch,
    bool Exists,
    string? ShortHash,
    string? LastCommitSummary,
    int AheadOfNext, // commits this branch has that the next doesn't
    int BehindNext, // commits next has that this branch doesn't (shouldn't happen in clean flow)
    string[] UnreleasedLines // oneline log of the ahead commits
);
+191
View File
@@ -0,0 +1,191 @@
using ControlPlane.Core.Models;
using ControlPlane.Core.Services;
using Docker.DotNet;
using Docker.DotNet.Models;
using Microsoft.Extensions.Configuration;
using Microsoft.Extensions.Logging;
namespace ControlPlane.Api.Services;
/// <summary>
/// Orchestrates a release: finds all managed tenant containers matching the target
/// environment, removes each one, and restarts it from the latest clarity-server image.
/// Does NOT re-run Keycloak/Vault/DB steps — the container env vars are preserved from
/// the original provisioning and re-injected from the XML registry.
/// </summary>
public class ReleaseService(
IConfiguration config,
TenantRegistryService registry,
BuildHistoryService history,
ILogger<ReleaseService> logger)
{
private static readonly SemaphoreSlim _lock = new(1, 1);
public bool IsReleasing => _lock.CurrentCount == 0;
public string ImageName => config["Docker:ClarityServerImage"] ?? "clarity-server:latest";
/// <summary>
/// Runs a release for the given environment and streams status lines to <paramref name="onLine"/>.
/// </summary>
public async Task<ReleaseRecord> ReleaseAsync(
string targetEnv,
Action<string> onLine,
CancellationToken ct)
{
if (!await _lock.WaitAsync(TimeSpan.Zero, ct))
{
onLine("⚠️ A release is already in progress.");
var blocked = new ReleaseRecord
{
Environment = targetEnv,
ImageName = ImageName,
Status = ReleaseStatus.Failed,
FinishedAt = DateTimeOffset.UtcNow,
};
blocked.Tenants.Add(new TenantReleaseResult
{
Subdomain = "*", ContainerName = "*",
Success = false, Error = "Release already in progress.",
});
return blocked;
}
var record = await history.CreateReleaseAsync(targetEnv, ImageName);
try
{
onLine($"▶ Release to [{targetEnv}] using {ImageName}");
onLine("──────────────────────────────────────");
var socketUri = config["Docker:Socket"] ?? "npipe://./pipe/docker_engine";
using var docker = new DockerClientConfiguration(new Uri(socketUri)).CreateClient();
// Find all managed tenant containers for this environment
var filterEnv = targetEnv == "all"
? new Dictionary<string, IDictionary<string, bool>>
{
["label"] = new Dictionary<string, bool> { ["clarity.managed=true"] = true },
}
: new Dictionary<string, IDictionary<string, bool>>
{
["label"] = new Dictionary<string, bool>
{
["clarity.managed=true"] = true,
[$"clarity.env={targetEnv}"] = true,
},
};
var containers = await docker.Containers.ListContainersAsync(
new ContainersListParameters { All = true, Filters = filterEnv }, ct);
if (containers.Count == 0)
{
onLine($" No managed containers found for environment [{targetEnv}].");
record.Status = ReleaseStatus.Succeeded;
record.FinishedAt = DateTimeOffset.UtcNow;
await history.UpdateReleaseAsync(record);
return record;
}
onLine($" Found {containers.Count} container(s) to redeploy.");
onLine("");
int succeeded = 0, failed = 0;
foreach (var container in containers)
{
var name = container.Names.FirstOrDefault()?.TrimStart('/') ?? container.ID[..12];
var tenantResult = new TenantReleaseResult
{
ContainerName = name,
Subdomain = container.Labels.TryGetValue("clarity.subdomain", out var sub) ? sub : name,
};
record.Tenants.Add(tenantResult);
try
{
onLine($" → {name}");
// Read env vars from existing container — preserve Keycloak/Vault/DB config
var inspect = await docker.Containers.InspectContainerAsync(container.ID, ct);
var env = inspect.Config.Env;
var labels = inspect.Config.Labels;
var network = inspect.HostConfig.NetworkMode;
// Stop and remove old container
onLine($" Stopping...");
try
{
await docker.Containers.StopContainerAsync(
container.ID, new ContainerStopParameters { WaitBeforeKillSeconds = 8 }, ct);
await docker.Containers.RemoveContainerAsync(
container.ID, new ContainerRemoveParameters { Force = true }, ct);
}
catch (Exception ex)
{
logger.LogWarning(ex, "Stop/remove failed for {Name}, forcing removal.", name);
await docker.Containers.RemoveContainerAsync(
container.ID, new ContainerRemoveParameters { Force = true }, ct);
}
// Create fresh container from latest image, preserving all env vars and labels
onLine($" Creating from {ImageName}...");
var created = await docker.Containers.CreateContainerAsync(
new CreateContainerParameters
{
Name = name,
Image = ImageName,
Env = env,
Labels = labels,
HostConfig = new HostConfig
{
NetworkMode = network,
RestartPolicy = new RestartPolicy { Name = RestartPolicyKind.UnlessStopped },
},
}, ct);
// Start it
var started = await docker.Containers.StartContainerAsync(created.ID, null, ct);
if (!started) throw new InvalidOperationException("Docker returned false for start.");
onLine($" ✔ {name} redeployed.");
tenantResult.Success = true;
succeeded++;
}
catch (Exception ex)
{
logger.LogError(ex, "Failed to redeploy {Name}.", name);
onLine($" ✖ {name} failed: {ex.Message}");
tenantResult.Success = false;
tenantResult.Error = ex.Message;
failed++;
}
await history.UpdateReleaseAsync(record);
}
record.Status = failed == 0 ? ReleaseStatus.Succeeded
: succeeded == 0 ? ReleaseStatus.Failed
: ReleaseStatus.PartialFailure;
record.FinishedAt = DateTimeOffset.UtcNow;
onLine("");
onLine("──────────────────────────────────────");
onLine($"{(record.Status == ReleaseStatus.Succeeded ? "" : "")} Release complete — {succeeded} succeeded, {failed} failed.");
}
catch (Exception ex)
{
logger.LogError(ex, "Release to [{Env}] threw an unhandled exception.", targetEnv);
record.Status = ReleaseStatus.Failed;
record.FinishedAt = DateTimeOffset.UtcNow;
onLine($"✖ Release aborted: {ex.Message}");
}
finally
{
await history.UpdateReleaseAsync(record);
_lock.Release();
}
return record;
}
}
+38
View File
@@ -0,0 +1,38 @@
using ControlPlane.Core.Messages;
using System.Collections.Concurrent;
using System.Threading.Channels;
namespace ControlPlane.Api.Services;
/// <summary>
/// Thin in-process pub/sub for SSE. MassTransit consumer writes here;
/// the SSE endpoint reads and streams to the browser.
/// </summary>
public sealed class SseEventBus
{
    // JobId → live subscriber channels. Each List is its own lock object:
    // every read/write of a list's contents must hold `lock (channels)`.
    private readonly ConcurrentDictionary<Guid, List<Channel<ProvisioningProgressEvent>>> _subs = new();

    /// <summary>Fans <paramref name="evt"/> out to every subscriber of its JobId. No-op when nobody listens.</summary>
    public void Publish(ProvisioningProgressEvent evt)
    {
        if (!_subs.TryGetValue(evt.JobId, out var channels)) return;
        lock (channels)
        {
            // TryWrite never blocks: channels are unbounded, so it only fails
            // after the writer has been completed by Unsubscribe.
            foreach (var ch in channels)
                ch.Writer.TryWrite(evt);
        }
    }

    /// <summary>
    /// Registers a new unbounded channel for <paramref name="jobId"/> and returns it.
    /// Caller must pair with <see cref="Unsubscribe"/> to release it.
    /// </summary>
    public Channel<ProvisioningProgressEvent> Subscribe(Guid jobId)
    {
        var ch = Channel.CreateUnbounded<ProvisioningProgressEvent>();
        while (true)
        {
            var channels = _subs.GetOrAdd(jobId, _ => []);
            // BUGFIX: the add must happen under the same lock Publish iterates
            // under — the previous lock-free Add raced with the foreach above.
            lock (channels)
            {
                // Unsubscribe may have pruned this list from the dictionary
                // between GetOrAdd and acquiring the lock; retry so we never
                // add to an orphaned list that Publish can no longer see.
                if (_subs.TryGetValue(jobId, out var current) && ReferenceEquals(current, channels))
                {
                    channels.Add(ch);
                    return ch;
                }
            }
        }
    }

    /// <summary>Removes <paramref name="channel"/> and completes its writer so the SSE read loop ends.</summary>
    public void Unsubscribe(Guid jobId, Channel<ProvisioningProgressEvent> channel)
    {
        if (_subs.TryGetValue(jobId, out var channels))
        {
            lock (channels)
            {
                channels.Remove(channel);
                // BUGFIX: prune empty lists, otherwise one List per JobId is
                // retained forever and the dictionary grows without bound.
                if (channels.Count == 0)
                    _subs.TryRemove(jobId, out _);
            }
            channel.Writer.TryComplete();
        }
    }
}
+84
View File
@@ -0,0 +1,84 @@
using System.Formats.Tar;
using System.IO.Compression;
namespace ControlPlane.Api.Services;
/// <summary>
/// Creates a gzipped tar stream from a directory, respecting .dockerignore rules.
/// Used to supply the Docker build context to the Docker SDK.
/// </summary>
internal static class TarHelper
{
    // Always-excluded patterns, applied even when no .dockerignore exists.
    private static readonly string[] DefaultIgnore =
    [
        ".git", ".vs", ".vscode", "node_modules", "bin", "obj",
        "VaultData", "*.user", "*.suo",
    ];

    /// <summary>
    /// Writes a gzip-compressed GNU tar archive of <paramref name="root"/> to
    /// <paramref name="destination"/>, skipping ignored paths.
    /// <paramref name="destination"/> is left open for the caller (leaveOpen: true).
    /// </summary>
    public static void Pack(string root, Stream destination)
    {
        var ignorePatterns = LoadDockerIgnore(root);
        using var gz = new GZipStream(destination, CompressionLevel.Fastest, leaveOpen: true);
        using var tar = new TarWriter(gz, TarEntryFormat.Gnu, leaveOpen: false);
        foreach (var file in Directory.EnumerateFiles(root, "*", SearchOption.AllDirectories))
        {
            // Tar entry names always use forward slashes, regardless of host OS.
            var relative = Path.GetRelativePath(root, file).Replace('\\', '/');
            if (ShouldIgnore(relative, ignorePatterns))
                continue;

            // BUGFIX: dispose each source stream once its entry is written.
            // TarWriter does not close an entry's DataStream, so the previous
            // code leaked one open file handle per file in the build context.
            using var data = File.OpenRead(file);
            var entry = new GnuTarEntry(TarEntryType.RegularFile, relative)
            {
                DataStream = data,
            };
            tar.WriteEntry(entry);
        }
    }

    /// <summary>
    /// Returns the default ignore patterns plus any non-comment, non-blank
    /// lines from <c>root/.dockerignore</c> (if present).
    /// </summary>
    private static List<string> LoadDockerIgnore(string root)
    {
        var path = Path.Combine(root, ".dockerignore");
        var patterns = new List<string>(DefaultIgnore);
        if (!File.Exists(path)) return patterns;
        foreach (var line in File.ReadAllLines(path))
        {
            var trimmed = line.Trim();
            if (!string.IsNullOrEmpty(trimmed) && !trimmed.StartsWith('#'))
                patterns.Add(trimmed);
        }
        return patterns;
    }

    /// <summary>
    /// Simplified .dockerignore matching: supports "*suffix" globs, exact or
    /// root-anchored prefixes, and bare names matched against any path segment.
    /// NOTE(review): "!" negation patterns are not supported — confirm callers
    /// never rely on them.
    /// </summary>
    private static bool ShouldIgnore(string relativePath, List<string> patterns)
    {
        var segments = relativePath.Split('/');
        foreach (var pattern in patterns)
        {
            var p = pattern.TrimStart('/').TrimEnd('/');
            // Glob suffix match (e.g. *.user)
            if (p.StartsWith('*'))
            {
                if (relativePath.EndsWith(p[1..], StringComparison.OrdinalIgnoreCase))
                    return true;
                continue;
            }
            // Exact full-path match or root-anchored prefix (e.g. .git, .vs)
            if (relativePath.Equals(p, StringComparison.OrdinalIgnoreCase))
                return true;
            if (relativePath.StartsWith(p + "/", StringComparison.OrdinalIgnoreCase))
                return true;
            // Match any path segment so that nested bin/, obj/, node_modules/ etc. are caught
            // regardless of which project subdirectory they live in.
            if (segments.Any(seg => seg.Equals(p, StringComparison.OrdinalIgnoreCase)))
                return true;
        }
        return false;
    }
}
+21
View File
@@ -0,0 +1,21 @@
{
"Logging": {
"LogLevel": {
"Default": "Information",
"Microsoft.AspNetCore": "Warning"
}
},
"AllowedHosts": "*",
"OpenRouter": {
"ApiKey": "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"
},
"Git": {
"RepoRoot": "C:\\Users\\amadzarak\\source\\repos\\Clarity"
},
"Gitea": {
"BaseUrl": "https://opc.clarity.test",
"Owner": "Clarity",
"Repo": "Clarity",
"Token": "2ef325f682915c5959bf6a0dc73cec7034fcd2a2"
}
}
+160
View File
@@ -0,0 +1,160 @@
using Scalar.Aspire;

var builder = DistributedApplication.CreateBuilder(args);

// ─────────────────────────────────────────────────────────────────────────────
// Platform infrastructure (Keycloak, Vault, MinIO, Nginx, Dnsmasq) is
// managed by infra/docker-compose.yml — NOT Aspire.
// Run `docker compose up -d` from the infra/ folder before starting this host.
//
// Fixed dev URLs (hardcoded to match infra/docker-compose.yml):
//   Keycloak → http://localhost:8080
//   Vault    → http://localhost:8200
//   MinIO    → http://localhost:9000
//
// ControlPlane owns: opc-postgres (opcdb + giteadb), RabbitMQ, Gitea.
// ─────────────────────────────────────────────────────────────────────────────

// Shared paths, resolved relative to this AppHost project's directory so the
// API/Worker processes and the compose-managed nginx see the same folders.
var clientAssetsPath = Path.GetFullPath(Path.Combine(builder.AppHostDirectory, "..", "ClientAssets"));
var nginxConfDPath = Path.GetFullPath(Path.Combine(builder.AppHostDirectory, "..", "infra", "nginx", "conf.d"));
var vaultKeysFile = Path.GetFullPath(Path.Combine(builder.AppHostDirectory, "..", "infra", "vault", "data", "init.json"));

#region CONTROLPLANE POSTGRES
// ControlPlane owns this — isolated from platform infra postgres.
// Override via: dotnet user-secrets set "Parameters:cp-postgres-password" "yourpassword"
var cpPostgresPassword = builder.AddParameter("cp-postgres-password", "controlplane-dev", secret: true);
var cpPostgres = builder.AddPostgres("opc-postgres", password: cpPostgresPassword)
    .WithLifetime(ContainerLifetime.Persistent)
    .WithDataVolume("opc-postgres-data")
    // Fixed host port so Gitea (a plain container, no Aspire service discovery)
    // can reach it via host.docker.internal:5433 below.
    .WithHostPort(5433)
    .WithPgAdmin();
var controlPlaneDb = cpPostgres.AddDatabase("opcdb");
var giteaDb = cpPostgres.AddDatabase("giteadb");
#endregion

#region GITEA
// Gitea is ControlPlane's code management component — owns its own DB on opc-postgres.
// NOTE(review): GITEA__database__PASSWD below repeats the literal default of the
// cp-postgres-password parameter; overriding that parameter via user-secrets
// will NOT update this value — they must be kept in sync manually.
var gitea = builder.AddContainer("gitea", "gitea/gitea", "latest")
    .WithHttpEndpoint(port: 3000, targetPort: 3000, name: "http")
    .WithEndpoint(port: 2222, targetPort: 22, name: "ssh")
    .WithVolume("clarity-gitea-data", "/data")
    .WithEnvironment("GITEA__database__DB_TYPE", "postgres")
    .WithEnvironment("GITEA__database__HOST", "host.docker.internal:5433")
    .WithEnvironment("GITEA__database__NAME", "giteadb")
    .WithEnvironment("GITEA__database__USER", "postgres")
    .WithEnvironment("GITEA__database__PASSWD", "controlplane-dev")
    .WithEnvironment("GITEA__server__DOMAIN", "opc.clarity.test")
    .WithEnvironment("GITEA__server__ROOT_URL", "http://opc.clarity.test")
    .WithEnvironment("GITEA__server__SSH_DOMAIN", "opc.clarity.test")
    .WithEnvironment("GITEA__server__SSH_PORT", "2222")
    .WithEnvironment("GITEA__service__DISABLE_REGISTRATION", "true")
    .WaitFor(giteaDb)
    .WithLifetime(ContainerLifetime.Persistent);
#endregion

#region RABBITMQ
var rabbitPassword = builder.AddParameter("rabbitmq-password", "clarity-rabbit", secret: true);
var rabbit = builder.AddRabbitMQ("rabbitmq", password: rabbitPassword)
    .WithLifetime(ContainerLifetime.Persistent)
    .WithManagementPlugin();
#endregion

#region CONTROLPLANE API
var api = builder.AddProject<Projects.ControlPlane_Api>("controlplane-api")
    .WithReference(rabbit)
    .WaitFor(rabbit)
    .WithReference(controlPlaneDb)
    .WaitFor(controlPlaneDb)
    .WithEnvironment("Gitea__BaseUrl", gitea.GetEndpoint("http"))
    .WithEnvironment("ClientAssets__Folder", clientAssetsPath)
    // NOTE(review): this Replace assumes the AppHost folder is literally named
    // "ControlPlane.AppHost" — renaming the project breaks Docker__RepoRoot silently.
    .WithEnvironment("Docker__RepoRoot", builder.AppHostDirectory.Replace("ControlPlane.AppHost", "").TrimEnd('\\', '/'))
    .WithExternalHttpEndpoints();
#endregion

#region PROVISIONING WORKER
builder.AddProject<Projects.ControlPlane_Worker>("controlplane-worker")
    .WithReference(rabbit)
    .WaitFor(rabbit)
    // Vault — fixed dev address from infra/docker-compose.yml
    .WithEnvironment("Vault__Address", "http://localhost:8200")
    .WithEnvironment("Vault__ContainerAddress", "http://vault:8200")
    .WithEnvironment("Vault__KeysFile", vaultKeysFile)
    // Keycloak — fixed dev address from infra/docker-compose.yml
    .WithEnvironment("Keycloak__AuthServerUrl", "http://localhost:8080")
    .WithEnvironment("Keycloak__ContainerUrl", "https://keycloak.clarity.test")
    .WithEnvironment("Keycloak__Realm", "master")
    .WithEnvironment("Keycloak__Resource", "admin-cli")
    .WithEnvironment("Keycloak__AdminUser", "admin")
    // NOTE(review): hardcoded admin password — acceptable only because this
    // entire host is dev-only; move to a secret parameter if that changes.
    .WithEnvironment("Keycloak__AdminPassword", "Admin1234!")
    // Gateway
    .WithEnvironment("Gateway__TenantBaseUrl", "https://{subdomain}.clarity.test")
    // ClarityInfraOptions
    .WithEnvironment("Clarity__Domain", "clarity.test")
    .WithEnvironment("Clarity__Network", "clarity-net")
    .WithEnvironment("Clarity__KeycloakPublicUrl", "https://keycloak.clarity.test")
    .WithEnvironment("Clarity__KeycloakInternalUrl", "http://keycloak:8080")
    .WithEnvironment("Clarity__VaultInternalUrl", "http://vault:8200")
    .WithEnvironment("Clarity__NginxCertPath", "/etc/nginx/certs/clarity.test.crt")
    .WithEnvironment("Clarity__NginxCertKeyPath", "/etc/nginx/certs/clarity.test.key")
    // Nginx conf.d — points to infra/nginx/conf.d so platform nginx picks up tenant configs
    .WithEnvironment("Nginx__ConfDPath", nginxConfDPath)
    .WithEnvironment("ClientAssets__Folder", clientAssetsPath)
    // Platform Postgres connection string for tenant database provisioning (infra/docker-compose.yml)
    .WithEnvironment("ConnectionStrings__platformdb",
        "Host=localhost;Port=5432;Username=postgres;Password=postgres")
    .WithReference(controlPlaneDb)
    .WaitFor(controlPlaneDb);
#endregion

#region CONTROLPLANE UI
builder.AddViteApp("controlplane-ui", "../clarity.controlplane")
    // Cast narrows the project resource to the service-discovery interface the
    // Vite reference overload expects.
    .WithReference((IResourceBuilder<IResourceWithServiceDiscovery>)api)
    .WaitFor(api);
#endregion

#region CLARITY-NET connect RabbitMQ to platform network
// Ensures RabbitMQ (the one container Aspire owns) is reachable from tenant containers
// on clarity-net. All other platform containers are already on clarity-net via docker-compose.
builder.Eventing.Subscribe<AfterResourcesCreatedEvent>(async (@event, ct) =>
{
    const string network = "clarity-net";
    // Arbitrary settle delay — presumably gives the RabbitMQ container time to
    // appear in `docker ps`; a readiness poll would be more robust. TODO confirm.
    await Task.Delay(TimeSpan.FromSeconds(4), ct);
    var (inspectCode, _) = await DockerOutputAsync($"network inspect {network}", ct);
    if (inspectCode != 0)
        await DockerOutputAsync($"network create {network}", ct);
    var (idCode, idOut) = await DockerOutputAsync("ps --filter name=rabbitmq --format {{.ID}}", ct);
    if (idCode == 0 && !string.IsNullOrWhiteSpace(idOut))
    {
        // Take only the first matching container id; the name filter is a
        // substring match and may return more than one line.
        var containerId = idOut.Trim().Split('\n')[0].Trim();
        await DockerOutputAsync($"network connect --alias rabbitmq {network} {containerId}", ct);
    }
});
#endregion

#region SCALAR API DOCS
var scalar = builder.AddScalarApiReference();
scalar.WithApiReference(api);
#endregion

builder.Build().Run();
/// <summary>
/// Runs `docker &lt;args&gt;` and returns its exit code and captured stdout.
/// Stderr is drained but discarded — callers only branch on ExitCode/stdout.
/// </summary>
static async Task<(int ExitCode, string Output)> DockerOutputAsync(string args, CancellationToken ct)
{
    var psi = new System.Diagnostics.ProcessStartInfo("docker", args)
    {
        RedirectStandardOutput = true,
        RedirectStandardError = true,
        UseShellExecute = false
    };
    using var proc = System.Diagnostics.Process.Start(psi)!;
    // BUGFIX: drain BOTH pipes concurrently. Stderr was redirected but never
    // read, so a child that writes enough to fill the stderr pipe buffer would
    // block forever and deadlock WaitForExitAsync.
    var stdoutTask = proc.StandardOutput.ReadToEndAsync(ct);
    var stderrTask = proc.StandardError.ReadToEndAsync(ct);
    await proc.WaitForExitAsync(ct);
    var output = await stdoutTask;
    _ = await stderrTask; // drained only to prevent the deadlock
    return (proc.ExitCode, output);
}
@@ -0,0 +1,38 @@
<Project Sdk="Aspire.AppHost.Sdk/13.2.2">
<PropertyGroup>
<OutputType>Exe</OutputType>
<TargetFramework>net10.0</TargetFramework>
<ImplicitUsings>enable</ImplicitUsings>
<Nullable>enable</Nullable>
<UserSecretsId>controlplane-apphost-$(MSBuildProjectName)</UserSecretsId>
</PropertyGroup>
<ItemGroup>
<ProjectReference Include="..\ControlPlane.Api\ControlPlane.Api.csproj" />
<ProjectReference Include="..\ControlPlane.Worker\ControlPlane.Worker.csproj" />
</ItemGroup>
<ItemGroup>
<PackageReference Include="Aspire.Hosting.JavaScript" />
<PackageReference Include="Aspire.Hosting.PostgreSQL" />
<PackageReference Include="Aspire.Hosting.Keycloak" />
<PackageReference Include="Aspire.Hosting.RabbitMQ" />
<PackageReference Include="CommunityToolkit.Aspire.Hosting.Minio" />
<PackageReference Include="Scalar.Aspire" />
</ItemGroup>
<ItemGroup>
<None Update="KeycloakConfig\realm-export.json">
<CopyToOutputDirectory>PreserveNewest</CopyToOutputDirectory>
</None>
<None Update="VaultConfig\vault.hcl">
<CopyToOutputDirectory>PreserveNewest</CopyToOutputDirectory>
</None>
<None Update="VaultConfig\entrypoint.sh">
<CopyToOutputDirectory>PreserveNewest</CopyToOutputDirectory>
</None>
</ItemGroup>
</Project>
@@ -0,0 +1,17 @@
# Resolve all *.clarity.test subdomains to the loopback address.
# nginx (bound to ports 80/443 on the host) then routes by subdomain to the correct tenant container.
address=/.clarity.test/127.0.0.1
# Don't read /etc/resolv.conf or /etc/hosts from the container — we are the resolver
no-resolv
no-hosts
# Forward everything that isn't clarity.test to public resolvers (Cloudflare 1.1.1.1, Google 8.8.8.8)
server=1.1.1.1
server=8.8.8.8
# Listen on all interfaces inside the container
listen-address=0.0.0.0
# Log queries — useful during initial setup, can be removed later
log-queries
File diff suppressed because it is too large Load Diff
@@ -0,0 +1,19 @@
-----BEGIN CERTIFICATE-----
MIIDGzCCAgOgAwIBAgIUS0kgcdXIrlOk/K6g2bfLDRycqk8wDQYJKoZIhvcNAQEL
BQAwGTEXMBUGA1UEAwwOKi5jbGFyaXR5LnRlc3QwHhcNMjYwNDI0MjIwMDUzWhcN
MjgwNzI3MjIwMDUzWjAZMRcwFQYDVQQDDA4qLmNsYXJpdHkudGVzdDCCASIwDQYJ
KoZIhvcNAQEBBQADggEPADCCAQoCggEBAMWAJ62tsrnMaMnF3NR2Yfv1LKS9IRfm
sTtTWba7D8fcs9JXGlEn+vMa10AjV91yaSQoQdwLCOwkF58CmLBs0K+vvPoLgvcZ
BQxVrBj0t1YlTwLcez8vEgb2tHKGo914T/YLh+clF8oig9tIIiTNbngUGabpWUym
vPllDQ8nB0m4IkHbMAhgdDUG9X5Vc/lWHW6gxhRiUQt7HLqWJ2lLleQR5qEqRQx+
RmtseS11jhzwDYf1VVzQ2AE2tUaq82p0cZAF8uFZnESuv1Hcu+1KBfjCaGXJ/485
gg1q01sYhAkX0LAK/CqRBOd7zp9cDm3NX0tLBj4Gek6h0kFGkmRtAmcCAwEAAaNb
MFkwHQYDVR0OBBYEFJNI82Atz7k2pa2IZECO9aG30dnHMA8GA1UdEwEB/wQFMAMB
Af8wJwYDVR0RBCAwHoIOKi5jbGFyaXR5LnRlc3SCDGNsYXJpdHkudGVzdDANBgkq
hkiG9w0BAQsFAAOCAQEAO5MyjFXcOZeEwPJRel8Mvg1HRwu97tL/BB9Hb13JWzdx
FBBqwOdRrG8IB7byXLjH1ng4xMM+WI9yeZ29bV/PcrZwermGNzU+ob1SrvJYh0hb
sX0zeXKjKDGMsdlyZAERnvGOxlPzNtYRpeSD7h3qKtuzJiReCNdGzSh+2bLfxEIb
wTJJNgnXRA4GGK5zghmzOEpq/w8sqpB4hLz9OK8a33QOKp79LrfyT1B9uZq4uHZ8
SvTX89KZOGmUQraF/6QvL3CcMutwzf4unKxyaStflrcGjCn/eEe8Ea3IWL1EwU8K
9JvyDvWgv7oib7FA2BZGbYvT+wsFjiFBzTcWUX132g==
-----END CERTIFICATE-----
@@ -0,0 +1,28 @@
-----BEGIN PRIVATE KEY-----
MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDFgCetrbK5zGjJ
xdzUdmH79SykvSEX5rE7U1m2uw/H3LPSVxpRJ/rzGtdAI1fdcmkkKEHcCwjsJBef
ApiwbNCvr7z6C4L3GQUMVawY9LdWJU8C3Hs/LxIG9rRyhqPdeE/2C4fnJRfKIoPb
SCIkzW54FBmm6VlMprz5ZQ0PJwdJuCJB2zAIYHQ1BvV+VXP5Vh1uoMYUYlELexy6
lidpS5XkEeahKkUMfkZrbHktdY4c8A2H9VVc0NgBNrVGqvNqdHGQBfLhWZxErr9R
3LvtSgX4wmhlyf+POYINatNbGIQJF9CwCvwqkQTne86fXA5tzV9LSwY+BnpOodJB
RpJkbQJnAgMBAAECggEAGc9MICXNb/t3DDtHxxorZuZc7bBrpTh4G9UiKb+badZ9
R3UrksSDRobQ72hPALkFZXy/Upa8lUOINLb9pjyqLvNr4k9jz4/c+YYupdpBJUhd
4XVXw+OOWwudfEP9ISGqbXCHU50k1T0adysfjyirkZSq34WqLlqx4nOit8K1cJwc
5+jvApwOPz6zf9kFJYjybbUSPO8bFLVTpjs3hgUzaCMkYMn6R/5bR5SMeqCbZILB
fkGm+KaeS3cIY7PhDhSoiWJUR5/ZsaoT5s1IM5aGTe62XVY5eoMixYEibx/e68XC
eL3eWO304QU6AgMKHFhtTKFpnJHlyV/gu084/xWC7QKBgQD9lrkRgDDMXfuDtFRr
LiQ3QFEmmj0m2ekHIpdZDY3rJ0bbQzTw4cqWs437qMKcTczK70mfxp/IjPoky+8i
bSlm/pR+U/YwsgK0dxGLzHbIQYYQdI4BjBsysNOvxnKUxRciAMpIW5ULGKYUkCde
dhH5c2Rmve0yq6MYJ8DCOTXCwwKBgQDHYOd50Tjw5i+a5wcHEsfY+r/Vsu1u1BrS
/sdpJ+dKxx50TQO4F7tnrugwJ9cvxPDGQApDHFbIwn70zQuDNvYLD2CTtwHoJHx/
wuP3p0Rw3DmhKI9CN0oXclqNV3PZ54PZ2M5HEl0zkpoIse4YtWc0uyO6RKVHHtPr
jGjTKeZ/jQKBgAc7XinGmx2o7HxUDzhDR5sfxXCxY18RRdkDPoe2oD59j0K/hun7
tnhXxIvRw0ML4PREoLfixTnF83hLLJWxwUWDqx5zLIk0+mjFIIX5HcYWQEmF2Wrn
4PqwGklgAnKFsGQy25H2sqhvWoUpm0XRXi/b/5gCgJo6VNtiftfLI+JbAoGAC496
3H1dJ9qw9/JdXfOg0tv3M5TkX4C87W8IcPh3WMai5Wtxw8Lcgu6JWAF3YLWyoEwm
TC3gelOMuPUKrdkJ+yoxF1+NJMC410+dmEaCmWirjsSjSdua2DExPvDLLt9VrdP8
YfKWpN7jP43RmG0sRspzD+HbE3yeHRJPIa9URiECgYEAyxOOXDCQSPifgIRZe5hr
u+WsMukUypizXq36/ydCfMD7HcPOgO6bNkNsh6WlaaNrFQwR2O96V0BvrSAI242a
bTEyUx7fTwoZmn/8O6/WIwkyYolixNYbClcAIopbOXxJ9bJ1KqS47mHv1RrQ8FqN
OpJWMvrAktqNT5tjDeIj6mc=
-----END PRIVATE KEY-----
@@ -0,0 +1,2 @@
# Placeholder so the conf.d directory is tracked by git and exists at container mount time.
# The provisioning worker writes per-tenant .conf files here at runtime.
@@ -0,0 +1,19 @@
# Auto-generated by ControlPlane.Worker — do not edit manually.
# Tenant: fdev-app-clarity-01000000
server {
listen 443 ssl;
server_name fdev-app-clarity-01000000.clarity.test;
ssl_certificate /etc/nginx/certs/clarity.test.crt;
ssl_certificate_key /etc/nginx/certs/clarity.test.key;
location / {
# Docker DNS resolves the container name on the managed network
set $upstream http://fdev-app-clarity-01000000:8080;
proxy_pass $upstream;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
}
}
@@ -0,0 +1,19 @@
# Auto-generated by ControlPlane.Worker — do not edit manually.
# Tenant: fdev-app-clarity-02000000
server {
listen 443 ssl;
server_name fdev-app-clarity-02000000.clarity.test;
ssl_certificate /etc/nginx/certs/clarity.test.crt;
ssl_certificate_key /etc/nginx/certs/clarity.test.key;
location / {
# Docker DNS resolves the container name on the managed network
set $upstream http://fdev-app-clarity-02000000:8080;
proxy_pass $upstream;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
}
}
@@ -0,0 +1,19 @@
# Auto-generated by ControlPlane.Worker — do not edit manually.
# Tenant: fdev-app-clarity-03000000
server {
listen 443 ssl;
server_name fdev-app-clarity-03000000.clarity.test;
ssl_certificate /etc/nginx/certs/clarity.test.crt;
ssl_certificate_key /etc/nginx/certs/clarity.test.key;
location / {
# Docker DNS resolves the container name on the managed network
set $upstream http://fdev-app-clarity-03000000:8080;
proxy_pass $upstream;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
}
}
@@ -0,0 +1,19 @@
# Auto-generated by ControlPlane.Worker — do not edit manually.
# Tenant: fdev-app-clarity-04000000
server {
listen 443 ssl;
server_name fdev-app-clarity-04000000.clarity.test;
ssl_certificate /etc/nginx/certs/clarity.test.crt;
ssl_certificate_key /etc/nginx/certs/clarity.test.key;
location / {
# Docker DNS resolves the container name on the managed network
set $upstream http://fdev-app-clarity-04000000:8080;
proxy_pass $upstream;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
}
}
@@ -0,0 +1,21 @@
server {
listen 443 ssl;
server_name opc.clarity.test;
ssl_certificate /etc/nginx/certs/clarity.test.crt;
ssl_certificate_key /etc/nginx/certs/clarity.test.key;
# Git over HTTP needs larger body and longer timeouts
client_max_body_size 100m;
proxy_read_timeout 300s;
proxy_send_timeout 300s;
location / {
set $upstream http://gitea:3000;
proxy_pass $upstream;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
}
}
@@ -0,0 +1,16 @@
server {
listen 443 ssl;
server_name keycloak.clarity.test;
ssl_certificate /etc/nginx/certs/clarity.test.crt;
ssl_certificate_key /etc/nginx/certs/clarity.test.key;
location / {
set $upstream http://keycloak:8080;
proxy_pass $upstream;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
}
}
@@ -0,0 +1,27 @@
events {
worker_connections 1024;
}
http {
# Use Docker's embedded DNS resolver so container names resolve dynamically.
# This is critical — without it nginx resolves upstream names at startup only
# and won't pick up newly provisioned tenant containers.
resolver 127.0.0.11 valid=5s ipv6=off;
# Shared log format
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent"';
access_log /var/log/nginx/access.log main;
error_log /var/log/nginx/error.log warn;
# Redirect all HTTP → HTTPS
server {
listen 80 default_server;
return 301 https://$host$request_uri;
}
# Pick up per-tenant server blocks dropped by the provisioning worker
include /etc/nginx/conf.d/*.conf;
}
@@ -0,0 +1,31 @@
{
"$schema": "https://json.schemastore.org/launchsettings.json",
"profiles": {
"https": {
"commandName": "Project",
"dotnetRunMessages": true,
"launchBrowser": true,
"applicationUrl": "https://controlplane.dev.localhost:17000;http://controlplane.dev.localhost:15000",
"environmentVariables": {
"ASPNETCORE_ENVIRONMENT": "Development",
"DOTNET_ENVIRONMENT": "Development",
"ASPIRE_DASHBOARD_OTLP_ENDPOINT_URL": "https://localhost:21000",
"ASPIRE_DASHBOARD_MCP_ENDPOINT_URL": "https://localhost:21001",
"ASPIRE_RESOURCE_SERVICE_ENDPOINT_URL": "https://localhost:21002"
}
},
"http": {
"commandName": "Project",
"dotnetRunMessages": true,
"launchBrowser": true,
"applicationUrl": "http://controlplane.dev.localhost:15000",
"environmentVariables": {
"ASPNETCORE_ENVIRONMENT": "Development",
"DOTNET_ENVIRONMENT": "Development",
"ASPIRE_DASHBOARD_OTLP_ENDPOINT_URL": "http://localhost:21000",
"ASPIRE_DASHBOARD_MCP_ENDPOINT_URL": "http://localhost:21001",
"ASPIRE_RESOURCE_SERVICE_ENDPOINT_URL": "http://localhost:21002"
}
}
}
}
@@ -0,0 +1,48 @@
#!/bin/sh
# Dev-only Vault bootstrap: start the server, initialise it on first run,
# unseal it with the key persisted on the data volume, and enable the Transit
# engine + master key. Storing the unseal key and root token as plain JSON on
# disk is a dev convenience only — never do this in production.
set -e

KEYS_FILE="/vault/file/init.json"
VAULT_ADDR="http://127.0.0.1:8200"
export VAULT_ADDR

# Start Vault server in the background
vault server -config=/vault/config/vault.hcl &
VAULT_PID=$!

# Wait for Vault to be ready.
# `vault status` exits non-zero while sealed/uninitialised, so also accept any
# output mentioning Sealed/Initialized — the server is up even if not unsealed.
echo "[vault-init] Waiting for Vault to start..."
until vault status > /dev/null 2>&1 || vault status 2>&1 | grep -q "Sealed\|Initialized"; do
  sleep 1
done
echo "[vault-init] Vault is up."

# Check if already initialised.
# NOTE(review): grep-based JSON probing assumes `-format=json` pretty-prints
# one field per line (true of the current Vault CLI) — jq would be more robust.
# The trailing `|| true` keeps `set -e` from aborting when grep matches nothing.
INIT_STATUS=$(vault status -format=json 2>/dev/null | grep '"initialized"' | grep -c "true" || true)
if [ "$INIT_STATUS" = "0" ]; then
echo "[vault-init] First run — initialising Vault..."
vault operator init -key-shares=1 -key-threshold=1 -format=json > "$KEYS_FILE"
echo "[vault-init] Keys saved to $KEYS_FILE"
fi

# Unseal using saved key.
# Same fragile line-oriented JSON parsing as above: takes the first array
# element after "unseal_keys_b64" (there is exactly one — key-shares=1).
UNSEAL_KEY=$(grep '"unseal_keys_b64"' "$KEYS_FILE" -A1 | grep '"' | tail -1 | tr -d ' ",' )
ROOT_TOKEN=$(grep '"root_token"' "$KEYS_FILE" | sed 's/.*: *"\(.*\)".*/\1/')
echo "[vault-init] Unsealing..."
vault operator unseal "$UNSEAL_KEY"
echo "[vault-init] Vault is unsealed. Root token is stored in $KEYS_FILE"

# Authenticate and bootstrap Transit engine + master key (idempotent: both
# commands tolerate "already exists" by falling through to the echo).
export VAULT_TOKEN="$ROOT_TOKEN"
echo "[vault-init] Enabling Transit secrets engine..."
vault secrets enable -path=clarity-transit transit 2>/dev/null || echo "[vault-init] clarity-transit already enabled."
echo "[vault-init] Creating master-key..."
vault write -f clarity-transit/keys/master-key 2>/dev/null || echo "[vault-init] master-key already exists."
echo "[vault-init] Vault bootstrap complete."

# Keep container alive by waiting on the Vault process
wait $VAULT_PID
@@ -0,0 +1,13 @@
storage "file" {
path = "/vault/file"
}
listener "tcp" {
address = "0.0.0.0:8200"
tls_disable = true
}
ui = true
disable_mlock = true
# Dev-only setup: no auto-unseal is configured here — unsealing is performed by
# entrypoint.sh using the single shamir key saved at first init. Never use in prod.
@@ -0,0 +1,19 @@
-----BEGIN CERTIFICATE-----
MIIDDTCCAfWgAwIBAgIURU3028kH3veUBjTtDis5N5SYI9AwDQYJKoZIhvcNAQEL
BQAwGTEXMBUGA1UEAwwOKi5sb2NhbHRlc3QubWUwHhcNMjYwNDI0MTYwNzU3WhcN
MzYwNDIxMTYwNzU3WjAZMRcwFQYDVQQDDA4qLmxvY2FsdGVzdC5tZTCCASIwDQYJ
KoZIhvcNAQEBBQADggEPADCCAQoCggEBALiZjuDCZ7uBicnk1ko6nlJIf/Zn2thr
ArBA9FD1wtMm0tWMA66fQ+STlkTw2LOlsjIk9d4A3s7jGhVyAikLqylm8in3WVWT
X4Ms5FB7lXqGEsuMI6Fq8l+Xw5boWE15XRGoOEPqaazfIvy4utF9Dk1TLXAv+Svv
dTTek7phU3hzWxzOTdk9fVhHdYqJy0ZjaxJxyUbTDPRf+IHad/0iWWpZaRuP5QEz
J0zujXEvJdFUVXOcPqSs0SdkaKqYbxegHwUK5ALQSVzH7CYHR4+Np6ChUw8+RFid
b9dQH2pzm9h7iaKD58AWLLB/D2uHBnSPkOahWY8oizlNRxsSuY7/x4cCAwEAAaNN
MEswJwYDVR0RBCAwHoIOKi5sb2NhbHRlc3QubWWCDGxvY2FsdGVzdC5tZTALBgNV
HQ8EBAMCBaAwEwYDVR0lBAwwCgYIKwYBBQUHAwEwDQYJKoZIhvcNAQELBQADggEB
ALZ/RP2JFDz4QODzy+ESg5DlgQQ3CTyDn9DwR8Pojzpq+MdJQ3+g48qsCS2FwR8W
h18DCfeemrutGHGBcX6dNbjy43oFwbvdDEaK1/m82Rmr4F/u3AdpxJpXXGEBoO9O
rg2+nXQEGFwZapUnAVGUB3Iihx5FRw1Rbi910aF6TN67Og6pUf/8Jut/M5TzAiDN
scil2PpC2mWvHzGV+gBZT0lOpfo+dRlE+zzEBWt4WpZWj3bF+WbwzR2bsd2JGZsp
OtV4ErupppsGYliKi2cJG9ceqG0zEc/hUtG2SfmZvfKOxZ2p0M6SXJDHueoAOkh1
zu/AQ0cjPBLoOy6ahVHvg20=
-----END CERTIFICATE-----
@@ -0,0 +1,28 @@
-----BEGIN PRIVATE KEY-----
MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQC4mY7gwme7gYnJ
5NZKOp5SSH/2Z9rYawKwQPRQ9cLTJtLVjAOun0Pkk5ZE8NizpbIyJPXeAN7O4xoV
cgIpC6spZvIp91lVk1+DLORQe5V6hhLLjCOhavJfl8OW6FhNeV0RqDhD6mms3yL8
uLrRfQ5NUy1wL/kr73U03pO6YVN4c1sczk3ZPX1YR3WKictGY2sScclG0wz0X/iB
2nf9IllqWWkbj+UBMydM7o1xLyXRVFVznD6krNEnZGiqmG8XoB8FCuQC0Elcx+wm
B0ePjaegoVMPPkRYnW/XUB9qc5vYe4mig+fAFiywfw9rhwZ0j5DmoVmPKIs5TUcb
ErmO/8eHAgMBAAECggEAAnNe5AnCZXYCbBpQhv9XcG6BZgJRksJZd4D7Fm62G3XB
T1pCs9IvwRujj8gsN6kIn1NI2xNOZWNZ7QpITovP6HOSRYbsElL34BXzQPiZT5gc
ePtiR+0VQkt8vxf6lHNRWmDAPREQ3UxDs7zKhEqBCLzslXYkSH0892Ibf6nImF8w
7meMsH4SxPFY16WBxWjyJNdy+TVw0BYFdPiUxE52PaIplgVZJqvmmuMUYcmOVale
lXGeWGMdvFp3Tilbj2rpnJ5p7I5av59TmIzXon/bGguhYhwus+1e8rs3WYWqibHf
bwB03kuGFaiSvuVncX3DvdBnvrz9tlCaipU+aciGUQKBgQDaQ378oDbmX1gk96/7
3ZiU67Vqnone4X88SxiYOafwmT5NVnJYMjtbN775NCUK4aR7lYo2lodl9CW096UN
Xic186jFGey3NoqCLoVodeFe/XscZMSS+TE5FLi4B4Ih/bgpcDzDQ8++5oRYiwWk
Z1/GKOc8MxXhhZDf9wOhgWBfVwKBgQDYhBPbeJJaE3k7pREBF7abDERFbfruC4Xh
181kCIZ3oMKGj4YKtIjoLnCocOAo/uhM9DnY/cBvR+CykWpH0nBfcDE9lknvpxUn
fTitwytfjKWwE3/Z9BRK/ieBaYXwEn38KgYZJJNseZLlYTgDfAKKt4tppAQ3Tdww
9DFo47IrUQKBgQCPSWBEWKmx80XafwB5SLCyk0s2A35fY4oz+tjaln855GCSRP4s
CE4PRDmLQEBRNHDW8QUbcRbSR8W5WBpy/CyhrqRNQQe1/4hOjlvmh/y8b4wyx7SF
CDLYVlIt/j/gMMCF87jwN8RaftrDhgDePT8SyCeFzcO/mf/SCEfJ7zVlYQKBgH5A
be/RG83ogw3Tj9nKQRGiEoFFw0dhcr0hgEOvcPF6zVN3h1rgsOBqjAi8YQmmskCF
POIZ/Ucma5DUmFuvCxWrrxrRcuWK0RwIua8hGj6KHedRR4EJAXhFQTYGGTLHJa2P
t6SbnldngM++Y9IsUrMeme2M1WSGQzpMei9GbpMxAoGBAIgFav2bGZCScvp+Y917
j5rKMLv8AN6nC3BQoraxMKl0YpCS8F58YHAfxKlmgR1Ll16reJLv5qAzSM7jViII
7vmiPGrpRnz1mUHVhVBfNF1UKIRmmJKtARlrbrVibGtLubtzBZOLh0bfzmnYH9Z8
ncBozZmPeJAtzGfvw+7BNoM9
-----END PRIVATE KEY-----
+16
View File
@@ -0,0 +1,16 @@
[req]
distinguished_name = req_distinguished_name
x509_extensions = v3_req
prompt = no
[req_distinguished_name]
CN = *.localtest.me
[v3_req]
subjectAltName = @alt_names
keyUsage = digitalSignature, keyEncipherment
extendedKeyUsage = serverAuth
[alt_names]
DNS.1 = *.localtest.me
DNS.2 = localtest.me
@@ -0,0 +1,51 @@
namespace ControlPlane.Core.Config;
/// <summary>
/// Strongly-typed infrastructure settings for the control plane: domain names,
/// Docker network, Keycloak/Vault URLs, and nginx TLS cert paths.
/// Bound from the "Clarity" section of appsettings.json or AppHost environment
/// variables, replacing hardcoded strings across Worker, AppHost, and generated configs.
/// </summary>
public sealed class ClarityInfraOptions
{
    /// <summary>Name of the configuration section this type binds from.</summary>
    public const string Section = "Clarity";

    /// <summary>Base DNS domain under which every tenant subdomain lives, e.g. "clarity.test".</summary>
    public string Domain { get; set; } = "clarity.test";

    /// <summary>Docker network that all managed containers join.</summary>
    public string Network { get; set; } = "clarity-net";

    /// <summary>Browser-facing Keycloak URL — appears in redirect URIs and the JWT iss claim.</summary>
    public string KeycloakPublicUrl { get; set; } = "https://keycloak.clarity.test";

    /// <summary>Keycloak URL resolvable via Docker DNS for server-side calls (sidesteps the self-signed cert).</summary>
    public string KeycloakInternalUrl { get; set; } = "http://keycloak:8080";

    /// <summary>Vault URL resolvable via Docker DNS — injected into tenant containers.</summary>
    public string VaultInternalUrl { get; set; } = "http://vault:8200";

    /// <summary>Location of the wildcard TLS certificate inside the nginx container.</summary>
    public string NginxCertPath { get; set; } = "/etc/nginx/certs/clarity.test.crt";

    /// <summary>Location of the wildcard TLS private key inside the nginx container.</summary>
    public string NginxCertKeyPath { get; set; } = "/etc/nginx/certs/clarity.test.key";

    /// <summary>Public https URL for the given tenant subdomain.</summary>
    public string TenantPublicUrl(string subdomain)
    {
        return "https://" + subdomain + "." + Domain;
    }

    /// <summary>Browser-facing Keycloak realm URL.</summary>
    public string KeycloakRealmPublicUrl(string realm)
    {
        return string.Concat(KeycloakPublicUrl, "/realms/", realm);
    }

    /// <summary>Server-side (Docker-internal) Keycloak realm URL.</summary>
    public string KeycloakRealmInternalUrl(string realm)
    {
        return string.Concat(KeycloakInternalUrl, "/realms/", realm);
    }
}
@@ -0,0 +1,14 @@
<Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<TargetFramework>net10.0</TargetFramework>
<ImplicitUsings>enable</ImplicitUsings>
<Nullable>enable</Nullable>
</PropertyGroup>
<ItemGroup>
<PackageReference Include="Microsoft.Extensions.Configuration.Abstractions" />
<PackageReference Include="Microsoft.Extensions.Logging.Abstractions" />
</ItemGroup>
</Project>
@@ -0,0 +1,8 @@
namespace ControlPlane.Core.Interfaces;

/// <summary>
/// One unit of work in the provisioning saga. Steps execute against a shared
/// <see cref="SagaContext"/>; each step can also undo its own work via
/// <see cref="CompensateAsync"/> (presumably invoked in reverse order when a
/// later step fails — confirm against the saga runner).
/// </summary>
public interface ISagaStep
{
    /// <summary>Identifier for this step — presumably surfaced in progress events (ProvisioningProgressEvent.Step); verify against the runner.</summary>
    string StepName { get; }

    /// <summary>Performs the step's forward action, reading inputs from and writing outputs to <paramref name="context"/>.</summary>
    Task ExecuteAsync(SagaContext context, CancellationToken cancellationToken);

    /// <summary>Rolls back whatever <see cref="ExecuteAsync"/> did for this step.</summary>
    Task CompensateAsync(SagaContext context, CancellationToken cancellationToken);
}
@@ -0,0 +1,30 @@
using ControlPlane.Core.Models;

namespace ControlPlane.Core.Interfaces;

/// <summary>
/// Mutable context bag passed through every saga step.
/// Steps read inputs and write outputs here so downstream steps can consume them.
/// All output properties are nullable: null means the producing step has not run
/// (or does not apply to this tenant's tier).
/// </summary>
public class SagaContext
{
    /// <summary>The job being provisioned. Set once at saga start; never reassigned (init-only).</summary>
    public ProvisioningJob Job { get; init; } = default!;

    /// <summary>Written by DatabaseStep — connection string for the tenant's Postgres (shared or own).</summary>
    public string? TenantConnectionString { get; set; }

    /// <summary>Written by DatabaseStep — NOTE(review): exact semantics not visible here; confirm against DatabaseStep.</summary>
    public string? TenantStackName { get; set; }

    /// <summary>Written by KeycloakStep — subject id of the day-zero admin user.</summary>
    public string? DayZeroUserSubjectId { get; set; }

    /// <summary>Written by KeycloakStep — one-time sign-in link for the day-zero user.</summary>
    public string? MagicLink { get; set; }

    /// <summary>Written by LaunchStep or PulumiStep — base URL for the provisioned tenant.</summary>
    public string? TenantApiBaseUrl { get; set; }

    /// <summary>Written by LaunchStep — primary app container name.</summary>
    public string? ContainerName { get; set; }

    /// <summary>Written by PulumiStep (DedicatedVM/Enterprise tier) — target host IP for subsequent steps.</summary>
    public string? VmIpAddress { get; set; }

    /// <summary>Written by PulumiStep (DedicatedVM/Enterprise tier) — path to the SSH key for the target host.</summary>
    public string? VmSshKeyPath { get; set; }
}
@@ -0,0 +1,38 @@
using ControlPlane.Core.Models;
namespace ControlPlane.Core.Messages;
/// <summary>API -> Worker: kick off the saga.</summary>
public record ProvisionClientCommand
{
/// <summary>Correlation id for the whole saga; becomes ProvisioningJob.Id.</summary>
public Guid JobId { get; init; }
public string ClientName { get; init; } = string.Empty;
public string StateCode { get; init; } = string.Empty;
/// <summary>Tenant subdomain; also keys the per-tenant registry XML file.</summary>
public string Subdomain { get; init; } = string.Empty;
public string AdminEmail { get; init; } = string.Empty;
public string SiteCode { get; init; } = string.Empty;
/// <summary>Target deployment environment; defaults to "fdev".</summary>
public string Environment { get; init; } = "fdev";
/// <summary>Requested billing/isolation tier; defaults to Shared.</summary>
public TenantTier Tier { get; init; } = TenantTier.Shared;
}
/// <summary>Worker -> API/Gateway: one log event per saga step transition.</summary>
public record ProvisioningProgressEvent
{
public Guid JobId { get; init; }
public string Type { get; init; } = string.Empty; // step_started | step_complete | step_failed | job_complete | job_failed | diagnostic | compensation_started | compensation_complete
/// <summary>Step name; null for job-level events (job_complete / job_failed).</summary>
public string? Step { get; init; }
/// <summary>Human-readable progress message.</summary>
public string? Message { get; init; }
/// <summary>Full exception string (stack trace) for diagnostic events.</summary>
public string? Detail { get; init; }
/// <summary>Event creation time (UTC), stamped when the record is constructed.</summary>
public DateTimeOffset Timestamp { get; init; } = DateTimeOffset.UtcNow;
}
/// <summary>Worker -> Gateway: published once when a job completes successfully. Triggers route registration.</summary>
public record TenantProvisionedEvent
{
/// <summary>Correlation id of the completed provisioning job.</summary>
public Guid JobId { get; init; }
public string Subdomain { get; init; } = string.Empty;
public TenantTier Tier { get; init; }
/// <summary>Base URL of the API instance for this tenant. For Shared/Isolated this is the shared API. For Dedicated it is the per-tenant instance.</summary>
public string ApiBaseUrl { get; init; } = string.Empty;
}
+24
View File
@@ -0,0 +1,24 @@
using System.Text.Json;
using System.Text.Json.Serialization;
namespace ControlPlane.Core.Models;
// Lifecycle of a build run; records start as Running and end Succeeded or Failed.
public enum BuildStatus { Running, Succeeded, Failed }
// What kind of artifact the run produced.
public enum BuildKind { DockerImage, DotnetProject, NpmProject }
/// <summary>
/// Persisted record of a single build run — image build, dotnet build, or npm build.
/// Stored in ClientAssets/builds.json.
/// </summary>
public class BuildRecord
{
// Short 8-char hex id derived from a fresh GUID.
public string Id { get; set; } = Guid.NewGuid().ToString("N")[..8];
public BuildKind Kind { get; set; }
public string Target { get; set; } = string.Empty; // image name or project path
public BuildStatus Status { get; set; } = BuildStatus.Running;
public DateTimeOffset StartedAt { get; set; } = DateTimeOffset.UtcNow;
public DateTimeOffset? FinishedAt { get; set; }
// Wall-clock duration, computed on completion from FinishedAt - StartedAt.
public int? DurationMs { get; set; }
public string? ImageDigest { get; set; } // populated for DockerImage builds
// Captured output lines; flushed to disk periodically by BuildHistoryService.
public List<string> Log { get; set; } = [];
}
+26
View File
@@ -0,0 +1,26 @@
using System.Text.Json.Serialization;
namespace ControlPlane.Core.Models;
/// <summary>
/// Defines where a specific infrastructure component (Postgres, Keycloak, Vault, MinIO)
/// is hosted for a given tenant. Each component in a StackConfig is configured independently.
/// </summary>
// Serialized by name (not ordinal) in JSON payloads.
[JsonConverter(typeof(JsonStringEnumConverter))]
public enum ComponentMode
{
/// <summary>Shared platform instance — logical slice only (realm, schema, bucket, namespace).</summary>
SharedPlatform,
/// <summary>Baked into the app image itself via supervisord. Trial tier only.</summary>
Bundled,
/// <summary>Own sidecar container on ControlPlane's shared Docker host.</summary>
OwnContainer,
/// <summary>Own VM with the component running inside Docker on it.</summary>
VpsDocker,
/// <summary>Own VM with the component running as a native OS process (no Docker).</summary>
VpsBareMetal
}
+10
View File
@@ -0,0 +1,10 @@
namespace ControlPlane.Core.Models;
/// <summary>
/// A single git commit with the file paths it touched.
/// NOTE(review): Date is kept as a raw string — presumably the formatted git log
/// output rather than a parsed timestamp; confirm against the producer.
/// </summary>
public record GitCommit(
string Hash,
string ShortHash,
string Author,
string Date,
string Subject,
string[] Files
);
+63
View File
@@ -0,0 +1,63 @@
namespace ControlPlane.Core.Models;
// ── Repository ────────────────────────────────────────────────────────────────
/// <summary>A Gitea repository as returned by the Gitea API.</summary>
public record GiteaRepo(
long Id,
string Name,
string FullName,
string DefaultBranch,
string CloneUrl,
string SshUrl,
bool Private
);
// ── Branch ────────────────────────────────────────────────────────────────────
/// <summary>A branch with its tip commit SHA and protection flag.</summary>
public record GiteaBranch(
string Name,
string CommitSha,
bool Protected
);
// ── Pull Request ──────────────────────────────────────────────────────────────
/// <summary>A pull request; CreatedAt/UpdatedAt are kept as raw strings from the API.</summary>
public record GiteaPullRequest(
long Number,
string Title,
string State, // open | closed | merged
string HeadBranch,
string BaseBranch,
string HtmlUrl,
string CreatedAt,
string UpdatedAt,
GiteaUser? User,
GiteaMergeInfo? MergeInfo
);
/// <summary>PR author identity.</summary>
public record GiteaUser(string Login, string AvatarUrl);
/// <summary>Merge state of a PR: whether it can be merged and whether/when it was.</summary>
public record GiteaMergeInfo(bool Mergeable, bool Merged, string? MergedAt);
// ── Tag ───────────────────────────────────────────────────────────────────────
/// <summary>A tag with its target commit and downloadable zip URL.</summary>
public record GiteaTag(string Name, string CommitSha, string ZipUrl);
// ── Webhook ───────────────────────────────────────────────────────────────────
/// <summary>A configured webhook and the event names it fires on.</summary>
public record GiteaWebhook(long Id, string Url, bool Active, string[] Events);
// ── Request shapes ────────────────────────────────────────────────────────────
/// <summary>Creates an OPC work branch; defaults to branching from "master".</summary>
public record CreateBranchRequest(string OpcNumber, string OpcTitle, string From = "master");
/// <summary>Opens a pull request from Head into Base.</summary>
public record CreatePullRequestRequest(
string Title,
string Head,
string Base,
string Body
);
/// <summary>Creates an annotated tag at the given commit.</summary>
public record CreateTagRequest(string TagName, string Message, string CommitSha);
/// <summary>Registers a webhook pointing at TargetUrl for the given events.</summary>
public record CreateWebhookRequest(string TargetUrl, string[] Events);
+73
View File
@@ -0,0 +1,73 @@
namespace ControlPlane.Core.Models;
/// <summary>A tracked OPC (work item) with its classification and audit timestamps.</summary>
public record OpcRecord(
Guid Id,
string Number,
string Title,
string Description,
string Type,
string Status,
string Priority,
string Assignee,
DateTime CreatedAt,
DateTime UpdatedAt
);
/// <summary>A free-text note attached to an OPC.</summary>
public record OpcNote(
Guid Id,
Guid OpcId,
string Author,
string Content,
DateTime CreatedAt
);
/// <summary>A typed artifact (document) attached to an OPC.</summary>
public record OpcArtifact(
Guid Id,
Guid OpcId,
string ArtifactType,
string Title,
string Content,
DateTime CreatedAt,
DateTime UpdatedAt
);
// Request / response shapes used by the API endpoints
/// <summary>Creates a new OPC; Number/Id/Status are assigned server-side.</summary>
public record CreateOpcRequest(
string Title,
string Type,
string Priority,
string Assignee,
string Description
);
/// <summary>Partial update: null fields are left unchanged.</summary>
public record UpdateOpcRequest(
string? Title,
string? Description,
string? Type,
string? Status,
string? Priority,
string? Assignee
);
/// <summary>Appends a note to an OPC.</summary>
public record AddNoteRequest(string Author, string Content);
/// <summary>Creates or replaces an artifact of the given type on an OPC.</summary>
public record UpsertArtifactRequest(
string ArtifactType,
string Title,
string Content
);
/// <summary>Prompt for the AI-assist endpoint, with optional extra context.</summary>
public record AiAssistRequest(string Prompt, string? Context);
/// <summary>A git commit pinned to an OPC, with who pinned it and when.</summary>
public record OpcPinnedCommit(
Guid OpcId,
string Hash,
string ShortHash,
string Subject,
string Author,
DateTime PinnedAt,
string PinnedBy
);
/// <summary>Pins the commit with the given hash to an OPC.</summary>
public record PinCommitRequest(string Hash, string PinnedBy);
@@ -0,0 +1,22 @@
namespace ControlPlane.Core.Models;
// Lifecycle of a promotion: created Pending, then Running, ending Succeeded or Failed.
public enum PromotionStatus { Pending, Running, Succeeded, Failed }
/// <summary>
/// Represents a request to promote (merge) one environment branch into the next.
/// e.g. develop → staging, staging → uat, uat → main
/// </summary>
public class PromotionRequest
{
// Short 8-char hex id derived from a fresh GUID.
public string Id { get; set; } = Guid.NewGuid().ToString("N")[..8];
public string FromBranch { get; set; } = string.Empty;
public string ToBranch { get; set; } = string.Empty;
public string RequestedBy { get; set; } = "system";
// Optional operator note attached to the promotion.
public string? Note { get; set; }
public PromotionStatus Status { get; set; } = PromotionStatus.Pending;
public DateTimeOffset CreatedAt { get; set; } = DateTimeOffset.UtcNow;
public DateTimeOffset? CompletedAt { get; set; }
// Captured command/progress output for this promotion run.
public List<string> Log { get; set; } = [];
public int CommitCount { get; set; } // commits in from that are not in to
public string[] CommitLines { get; set; } = []; // oneline summary of those commits
}
@@ -0,0 +1,47 @@
namespace ControlPlane.Core.Models;
// Saga lifecycle: Pending → Running → (Compensating → Failed) | Completed.
public enum ProvisioningStatus
{
Pending,
Running,
Compensating,
Failed,
Completed
}
// Bit flags marking which saga milestones have completed.
// NOTE(review): nothing in the worker saga visibly sets these flags — confirm
// they are updated elsewhere before relying on them.
[Flags]
public enum CompletedSteps
{
None = 0,
InfrastructureProvisioned = 1 << 0,
KeycloakProvisioned = 1 << 1,
VaultVerified = 1 << 2,
DatabaseMigrated = 1 << 3,
HandoffSent = 1 << 4
}
/// <summary>Mutable state for one provisioning saga run, created from a ProvisionClientCommand.</summary>
public class ProvisioningJob
{
public Guid Id { get; set; } = Guid.NewGuid();
public string ClientName { get; set; } = string.Empty;
public string StateCode { get; set; } = string.Empty;
public string Subdomain { get; set; } = string.Empty;
public string AdminEmail { get; set; } = string.Empty;
public string SiteCode { get; set; } = string.Empty;
public string Environment { get; set; } = "fdev";
public TenantTier Tier { get; set; } = TenantTier.Shared;
/// <summary>
/// Snapshot of the StackConfig at the time provisioning was requested.
/// Immutable after the job is created.
/// </summary>
public StackConfig StackConfig { get; set; } = StackConfig.DefaultForTier(TenantTier.Shared);
public ProvisioningStatus Status { get; set; } = ProvisioningStatus.Pending;
public CompletedSteps CompletedSteps { get; set; } = CompletedSteps.None;
// Set when a step fails: "{StepName}: {exception message}".
public string? FailureReason { get; set; }
public DateTimeOffset CreatedAt { get; set; } = DateTimeOffset.UtcNow;
public DateTimeOffset? CompletedAt { get; set; }
}
@@ -0,0 +1,18 @@
namespace ControlPlane.Core.Models;
/// <summary>Inbound API shape used to request provisioning of a new tenant.</summary>
public class ProvisioningRequest
{
public string ClientName { get; set; } = string.Empty;
public string StateCode { get; set; } = string.Empty;
public string Subdomain { get; set; } = string.Empty;
public string AdminEmail { get; set; } = string.Empty;
public string SiteCode { get; set; } = string.Empty;
public string Environment { get; set; } = "fdev";
public TenantTier Tier { get; set; } = TenantTier.Shared;
/// <summary>
/// Per-component infrastructure configuration. Defaults to the standard profile
/// for the selected tier if not explicitly specified.
/// NOTE: the initializer always uses the Shared profile regardless of Tier —
/// callers selecting another tier must set this explicitly (or the server must derive it).
/// </summary>
public StackConfig StackConfig { get; set; } = StackConfig.DefaultForTier(TenantTier.Shared);
}
+27
View File
@@ -0,0 +1,27 @@
namespace ControlPlane.Core.Models;
// PartialFailure: some tenant containers redeployed successfully, others did not.
public enum ReleaseStatus { Running, Succeeded, PartialFailure, Failed }
/// <summary>
/// Persisted record of a release — a coordinated redeploy of all tenant containers
/// in a target environment to the latest clarity-server image.
/// Stored in ClientAssets/releases.json.
/// </summary>
public class ReleaseRecord
{
// Short 8-char hex id derived from a fresh GUID.
public string Id { get; set; } = Guid.NewGuid().ToString("N")[..8];
public string Environment { get; set; } = string.Empty; // fdev | uat | prod | all
public string ImageName { get; set; } = string.Empty;
public ReleaseStatus Status { get; set; } = ReleaseStatus.Running;
public DateTimeOffset StartedAt { get; set; } = DateTimeOffset.UtcNow;
public DateTimeOffset? FinishedAt { get; set; }
// One entry per tenant container touched by this release.
public List<TenantReleaseResult> Tenants { get; set; } = [];
}
/// <summary>Outcome of redeploying a single tenant's container during a release.</summary>
public class TenantReleaseResult
{
public string Subdomain { get; set; } = string.Empty;
public string ContainerName { get; set; } = string.Empty;
public bool Success { get; set; }
// Populated only when Success is false.
public string? Error { get; set; }
}
+51
View File
@@ -0,0 +1,51 @@
namespace ControlPlane.Core.Models;
/// <summary>
/// Defines the exact infrastructure composition for a provisioned tenant.
/// Each component is configured independently — the TenantTier gates which
/// ComponentMode values are available in the UI.
///
/// Allowed modes per tier:
///
/// | Trial | Shared | Dedicated | Enterprise |
/// SharedPlatform | ✅ | ✅ | ✅ | ✅ |
/// Bundled | ✅ | ❌ | ❌ | ❌ |
/// OwnContainer | ❌ | ❌ | ✅ | ✅ |
/// VpsDocker | ❌ | ❌ | ❌ | ✅ |
/// VpsBareMetal | ❌ | ❌ | ❌ | ✅ |
/// </summary>
public class StackConfig
{
    // Every component defaults to a logical slice of the shared platform.
    public ComponentMode Postgres { get; set; } = ComponentMode.SharedPlatform;
    public ComponentMode Keycloak { get; set; } = ComponentMode.SharedPlatform;
    public ComponentMode Vault { get; set; } = ComponentMode.SharedPlatform;
    public ComponentMode Minio { get; set; } = ComponentMode.SharedPlatform;

    /// <summary>Returns a default StackConfig for the given tier.</summary>
    public static StackConfig DefaultForTier(TenantTier tier)
    {
        switch (tier)
        {
            case TenantTier.Trial:
                // Trial bakes Postgres into the app image; the rest stay on the shared platform.
                return new StackConfig { Postgres = ComponentMode.Bundled };
            case TenantTier.Dedicated:
                // Every component gets its own sidecar container on the shared host.
                return Uniform(ComponentMode.OwnContainer);
            case TenantTier.Enterprise:
                // Every component gets its own VM running Docker.
                return Uniform(ComponentMode.VpsDocker);
            default:
                // Shared tier (and any unrecognized value) uses the all-SharedPlatform defaults.
                return new StackConfig();
        }
    }

    /// <summary>Builds a config where all four components use the same mode.</summary>
    private static StackConfig Uniform(ComponentMode mode) => new()
    {
        Postgres = mode,
        Keycloak = mode,
        Vault = mode,
        Minio = mode
    };
}
+135
View File
@@ -0,0 +1,135 @@
using System.Xml.Serialization;
namespace ControlPlane.Core.Models;
/// <summary>
/// Per-tenant configuration persisted as an XML file (ClientAssets/{subdomain}.xml),
/// modeled after a classic web.config: identity fields plus ConnectionStrings and
/// AppSettings sections made of name/value "add" entries.
/// </summary>
[XmlRoot("Tenant")]
public class TenantRecord
{
// ── Identity ──────────────────────────────────────────────────────────
[XmlAttribute]
public string Subdomain { get; set; } = string.Empty;
[XmlElement]
public string ClientName { get; set; } = string.Empty;
[XmlElement]
public string StateCode { get; set; } = string.Empty;
[XmlElement]
public string AdminEmail { get; set; } = string.Empty;
[XmlElement]
public string SiteCode { get; set; } = string.Empty;
[XmlElement]
public string Environment { get; set; } = "fdev";
[XmlElement]
public string Tier { get; set; } = string.Empty;
[XmlElement]
public string Status { get; set; } = "Provisioning";
// ISO-8601 ("o") round-trip timestamp, stored as a string for XML stability.
[XmlElement]
public string ProvisionedAt { get; set; } = DateTimeOffset.UtcNow.ToString("o");
[XmlElement]
public string JobId { get; set; } = string.Empty;
// ── Container (written by InfrastructureStep / LaunchStep) ────────────
[XmlElement(IsNullable = true)]
public string? ContainerName { get; set; }
[XmlElement(IsNullable = true)]
public string? ContainerPort { get; set; }
[XmlElement(IsNullable = true)]
public string? ContainerImage { get; set; }
[XmlElement(IsNullable = true)]
public string? ContainerNetwork { get; set; }
[XmlElement(IsNullable = true)]
public string? NginxConfPath { get; set; }
[XmlElement(IsNullable = true)]
public string? ApiBaseUrl { get; set; }
[XmlElement(IsNullable = true)]
public string? PublicUrl { get; set; }
[XmlElement(IsNullable = true)]
public string? LastProvisioningStep { get; set; }
[XmlElement(IsNullable = true)]
public string? ProvisioningNotes { get; set; }
// ── web.config-style sections ─────────────────────────────────────────
[XmlElement("ConnectionStrings")]
public ConnectionStringsSection ConnectionStrings { get; set; } = new();
[XmlElement("AppSettings")]
public AppSettingsSection AppSettings { get; set; } = new();
// ── Helpers ───────────────────────────────────────────────────────────
/// <summary>Upserts the named connection string entry (updates in place if the name exists).</summary>
public void SetConnectionString(string name, string connectionString)
{
var existing = ConnectionStrings.Entries.FirstOrDefault(e => e.Name == name);
if (existing is not null)
existing.ConnectionString = connectionString;
else
ConnectionStrings.Entries.Add(new ConnectionStringEntry { Name = name, ConnectionString = connectionString });
}
/// <summary>Returns the named connection string, or null if no entry matches.</summary>
public string? GetConnectionString(string name) =>
ConnectionStrings.Entries.FirstOrDefault(e => e.Name == name)?.ConnectionString;
/// <summary>Upserts the app setting with the given key (updates in place if the key exists).</summary>
public void SetAppSetting(string key, string value)
{
var existing = AppSettings.Entries.FirstOrDefault(e => e.Key == key);
if (existing is not null)
existing.Value = value;
else
AppSettings.Entries.Add(new AppSettingEntry { Key = key, Value = value });
}
/// <summary>Returns the app setting value for the key, or null if no entry matches.</summary>
public string? GetAppSetting(string key) =>
AppSettings.Entries.FirstOrDefault(e => e.Key == key)?.Value;
}
// ── Section types ──────────────────────────────────────────────────────────
/// <summary>web.config-style &lt;ConnectionStrings&gt; section; each child serializes as an &lt;add&gt; element.</summary>
public class ConnectionStringsSection
{
[XmlElement("add")]
public List<ConnectionStringEntry> Entries { get; set; } = [];
}
/// <summary>web.config-style &lt;AppSettings&gt; section; each child serializes as an &lt;add&gt; element.</summary>
public class AppSettingsSection
{
[XmlElement("add")]
public List<ConnectionStringEntry> Entries { get; set; } = [];
}
/// <summary>One &lt;add name="…" connectionString="…" providerName="…"/&gt; entry.</summary>
public class ConnectionStringEntry
{
[XmlAttribute("name")]
public string Name { get; set; } = string.Empty;
[XmlAttribute("connectionString")]
public string ConnectionString { get; set; } = string.Empty;
// Defaults to the classic SqlClient provider name for web.config compatibility.
[XmlAttribute("providerName")]
public string ProviderName { get; set; } = "System.Data.SqlClient";
}
/// <summary>One &lt;add key="…" value="…"/&gt; entry.</summary>
public class AppSettingEntry
{
[XmlAttribute("key")]
public string Key { get; set; } = string.Empty;
[XmlAttribute("value")]
public string Value { get; set; } = string.Empty;
}
+21
View File
@@ -0,0 +1,21 @@
using System.Text.Json.Serialization;
namespace ControlPlane.Core.Models;
/// <summary>
/// Defines the billing and support level for a provisioned tenant.
/// The tier gates which ComponentMode values are available per component in the StackConfig.
///
/// Trial - ephemeral sandbox, all-in-one image, no persistent data guarantee.
/// Shared - real production data, shared platform infrastructure (logical slices only).
/// Dedicated - full container isolation per component, still on ControlPlane's shared host.
/// Enterprise - full VM isolation per component (VpsDocker or VpsBareMetal), Pulumi provisioned.
/// </summary>
// Serialized by name (not ordinal) in JSON payloads.
[JsonConverter(typeof(JsonStringEnumConverter))]
public enum TenantTier
{
/// <summary>Ephemeral sandbox on the all-in-one image; no persistence guarantee.</summary>
Trial,
/// <summary>Production data on shared platform infrastructure (logical slices only).</summary>
Shared,
/// <summary>Per-component container isolation on the shared host.</summary>
Dedicated,
/// <summary>Per-component VM isolation, Pulumi provisioned.</summary>
Enterprise
}
@@ -0,0 +1,138 @@
using System.Text.Json;
using ControlPlane.Core.Models;
using Microsoft.Extensions.Configuration;
using Microsoft.Extensions.Logging;
namespace ControlPlane.Core.Services;
/// <summary>
/// Persists build and release history to JSON files in the ClientAssets folder.
/// Thread-safe — all writes go through a single lock per file.
/// </summary>
public class BuildHistoryService
{
// Absolute paths to the two JSON history files under the ClientAssets folder.
private readonly string _buildsPath;
private readonly string _releasesPath;
private readonly ILogger<BuildHistoryService> _logger;
// Static so file access is serialized even if multiple service instances exist in-process.
private static readonly SemaphoreSlim _buildLock = new(1, 1);
private static readonly SemaphoreSlim _releaseLock = new(1, 1);
// Indented camelCase JSON with enums written as string names.
private static readonly JsonSerializerOptions JsonOpts = new()
{
WriteIndented = true,
PropertyNamingPolicy = JsonNamingPolicy.CamelCase,
Converters = { new System.Text.Json.Serialization.JsonStringEnumConverter() },
};
/// <summary>
/// Resolves the ClientAssets folder from configuration (env-var style "ClientAssets__Folder"
/// or colon-delimited "ClientAssets:Folder"), falling back to ../ClientAssets relative to the
/// app base directory, and ensures the folder exists.
/// </summary>
public BuildHistoryService(IConfiguration config, ILogger<BuildHistoryService> logger)
{
var folder = config["ClientAssets__Folder"] ?? config["ClientAssets:Folder"]
?? Path.GetFullPath(Path.Combine(AppContext.BaseDirectory, "..", "ClientAssets"));
Directory.CreateDirectory(folder);
_buildsPath = Path.Combine(folder, "builds.json");
_releasesPath = Path.Combine(folder, "releases.json");
_logger = logger;
}
// ── Builds ──────────────────────────────────────────────────────────────
/// <summary>Creates a new Running build record and persists it immediately.</summary>
public async Task<BuildRecord> CreateBuildAsync(BuildKind kind, string target)
{
var record = new BuildRecord { Kind = kind, Target = target };
await SaveBuildAsync(record);
return record;
}
/// <summary>Marks the build finished, computes its duration, and persists the final state.</summary>
public async Task CompleteBuildAsync(BuildRecord record, BuildStatus status, string? digest = null)
{
record.Status = status;
record.FinishedAt = DateTimeOffset.UtcNow;
record.DurationMs = (int)(record.FinishedAt.Value - record.StartedAt).TotalMilliseconds;
// NOTE(review): a null digest overwrites any digest set earlier — confirm intended.
record.ImageDigest = digest;
await SaveBuildAsync(record);
}
/// <summary>Appends one log line; flushes to disk every 20th line to bound I/O.</summary>
public async Task AppendBuildLogAsync(BuildRecord record, string line)
{
// NOTE(review): record.Log is mutated outside the file lock; safe only if a single
// producer appends per record — confirm callers.
record.Log.Add(line);
// Flush to disk every 20 lines to avoid excessive I/O but keep reasonable freshness
if (record.Log.Count % 20 == 0)
await SaveBuildAsync(record);
}
/// <summary>Returns all persisted build records (newest first, as saved).</summary>
public async Task<List<BuildRecord>> GetBuildsAsync()
{
await _buildLock.WaitAsync();
try { return LoadJson<BuildRecord>(_buildsPath); }
finally { _buildLock.Release(); }
}
/// <summary>Upserts the record into builds.json under the build lock; list is capped at 100.</summary>
private async Task SaveBuildAsync(BuildRecord record)
{
await _buildLock.WaitAsync();
try
{
var all = LoadJson<BuildRecord>(_buildsPath);
var idx = all.FindIndex(b => b.Id == record.Id);
if (idx >= 0) all[idx] = record;
else all.Insert(0, record);
// Keep last 100 builds
if (all.Count > 100) all = all[..100];
await File.WriteAllTextAsync(_buildsPath, JsonSerializer.Serialize(all, JsonOpts));
}
finally { _buildLock.Release(); }
}
// ── Releases ────────────────────────────────────────────────────────────
/// <summary>Creates a new Running release record and persists it immediately.</summary>
public async Task<ReleaseRecord> CreateReleaseAsync(string environment, string imageName)
{
var record = new ReleaseRecord { Environment = environment, ImageName = imageName };
await SaveReleaseAsync(record);
return record;
}
/// <summary>Stamps FinishedAt and persists the record's current state.</summary>
public async Task UpdateReleaseAsync(ReleaseRecord record)
{
record.FinishedAt = DateTimeOffset.UtcNow;
await SaveReleaseAsync(record);
}
/// <summary>Returns all persisted release records (newest first, as saved).</summary>
public async Task<List<ReleaseRecord>> GetReleasesAsync()
{
await _releaseLock.WaitAsync();
try { return LoadJson<ReleaseRecord>(_releasesPath); }
finally { _releaseLock.Release(); }
}
/// <summary>Upserts the record into releases.json under the release lock; list is capped at 50.</summary>
private async Task SaveReleaseAsync(ReleaseRecord record)
{
await _releaseLock.WaitAsync();
try
{
var all = LoadJson<ReleaseRecord>(_releasesPath);
var idx = all.FindIndex(r => r.Id == record.Id);
if (idx >= 0) all[idx] = record;
else all.Insert(0, record);
if (all.Count > 50) all = all[..50];
await File.WriteAllTextAsync(_releasesPath, JsonSerializer.Serialize(all, JsonOpts));
}
finally { _releaseLock.Release(); }
}
// ── Helpers ─────────────────────────────────────────────────────────────
/// <summary>Loads a JSON list from disk; missing or malformed files yield an empty list.</summary>
private static List<T> LoadJson<T>(string path)
{
if (!File.Exists(path)) return [];
try
{
var json = File.ReadAllText(path);
return JsonSerializer.Deserialize<List<T>>(json, JsonOpts) ?? [];
}
catch { return []; }
}
}
@@ -0,0 +1,77 @@
using System.Xml.Serialization;
using ControlPlane.Core.Models;
using Microsoft.Extensions.Configuration;
using Microsoft.Extensions.Logging;
namespace ControlPlane.Core.Services;
/// <summary>
/// Reads and writes per-tenant XML config files under the ClientAssets folder.
/// One file per tenant: {subdomain}.xml
/// Thread-safe for concurrent reads; writes are serialized per subdomain via per-file locking.
/// </summary>
public class TenantRegistryService
{
// Folder holding one {subdomain}.xml file per tenant.
private readonly string _folder;
private readonly ILogger<TenantRegistryService> _logger;
// XmlSerializer is thread-safe for Serialize/Deserialize, so one shared instance suffices.
private static readonly XmlSerializer Serializer = new(typeof(TenantRecord));
// One lock object per subdomain so writes to different tenants never block each other
private readonly System.Collections.Concurrent.ConcurrentDictionary<string, object> _locks = new(StringComparer.OrdinalIgnoreCase);
/// <summary>
/// Resolves the ClientAssets folder from configuration (env-var style or colon key),
/// falling back to ../ClientAssets next to the app base directory, and ensures it exists.
/// </summary>
public TenantRegistryService(IConfiguration configuration, ILogger<TenantRegistryService> logger)
{
_folder = configuration["ClientAssets__Folder"] ?? configuration["ClientAssets:Folder"]
?? Path.GetFullPath(Path.Combine(AppContext.BaseDirectory, "..", "ClientAssets"));
_logger = logger;
Directory.CreateDirectory(_folder);
}
// -- Write --
/// <summary>Serializes the record to {subdomain}.xml, overwriting any existing file, under the per-subdomain lock.</summary>
public void Save(TenantRecord record)
{
var path = FilePath(record.Subdomain);
var gate = _locks.GetOrAdd(record.Subdomain, _ => new object());
lock (gate)
{
using var writer = new StreamWriter(path, append: false, System.Text.Encoding.UTF8);
Serializer.Serialize(writer, record);
}
_logger.LogInformation("Saved tenant record: {Path}", path);
}
// -- Read --
/// <summary>Loads one tenant record by subdomain, or null if no file exists.</summary>
public TenantRecord? TryGet(string subdomain)
{
var path = FilePath(subdomain);
if (!File.Exists(path)) return null;
using var reader = new StreamReader(path, System.Text.Encoding.UTF8);
return (TenantRecord?)Serializer.Deserialize(reader);
}
/// <summary>Loads every *.xml record in the folder, logging and skipping any that fail to deserialize.</summary>
public IReadOnlyList<TenantRecord> GetAll()
{
var results = new List<TenantRecord>();
foreach (var file in Directory.EnumerateFiles(_folder, "*.xml"))
{
try
{
using var reader = new StreamReader(file, System.Text.Encoding.UTF8);
if (Serializer.Deserialize(reader) is TenantRecord record)
results.Add(record);
}
catch (Exception ex)
{
_logger.LogWarning(ex, "Skipping malformed tenant file: {File}", file);
}
}
return results;
}
/// <summary>True if a record file exists for the subdomain.</summary>
public bool Exists(string subdomain) => File.Exists(FilePath(subdomain));
// File names are lower-cased so lookups are case-insensitive on case-sensitive filesystems.
private string FilePath(string subdomain) =>
Path.Combine(_folder, $"{subdomain.ToLowerInvariant()}.xml");
}
@@ -0,0 +1,33 @@
<!-- ControlPlane.Worker: background service that consumes provisioning commands
     (MassTransit/RabbitMQ) and drives Docker, Postgres, and Keycloak.
     Package versions are centrally managed (no Version attributes here). -->
<Project Sdk="Microsoft.NET.Sdk.Worker">
<PropertyGroup>
<TargetFramework>net10.0</TargetFramework>
<ImplicitUsings>enable</ImplicitUsings>
<Nullable>enable</Nullable>
<UserSecretsId>controlplane-worker-secrets</UserSecretsId>
</PropertyGroup>
<ItemGroup>
<PackageReference Include="Docker.DotNet" />
<PackageReference Include="Microsoft.Extensions.Hosting" />
<PackageReference Include="Npgsql" />
<PackageReference Include="Keycloak.AuthServices.Sdk" />
<PackageReference Include="MassTransit" />
<PackageReference Include="MassTransit.RabbitMQ" />
<PackageReference Include="Aspire.RabbitMQ.Client" />
<PackageReference Include="OpenTelemetry.Exporter.OpenTelemetryProtocol" />
<PackageReference Include="OpenTelemetry.Extensions.Hosting" />
<PackageReference Include="OpenTelemetry.Instrumentation.Http" />
<PackageReference Include="OpenTelemetry.Instrumentation.Runtime" />
</ItemGroup>
<ItemGroup>
<ProjectReference Include="..\Clarity.ServiceDefaults\Clarity.ServiceDefaults.csproj" />
<ProjectReference Include="..\ControlPlane.Core\ControlPlane.Core.csproj" />
</ItemGroup>
<ItemGroup>
<Folder Include="Properties\" />
</ItemGroup>
</Project>
+30
View File
@@ -0,0 +1,30 @@
# ── Build stage ──────────────────────────────────────────────────────────────
FROM mcr.microsoft.com/dotnet/sdk:10.0 AS build
WORKDIR /src
# Copy project files first so the restore layer is cached until dependencies change.
COPY ["ControlPlane.Worker/ControlPlane.Worker.csproj", "ControlPlane.Worker/"]
COPY ["ControlPlane.Core/ControlPlane.Core.csproj", "ControlPlane.Core/"]
COPY ["Clarity.ServiceDefaults/Clarity.ServiceDefaults.csproj", "Clarity.ServiceDefaults/"]
COPY ["Directory.Packages.props", "./"]
RUN dotnet restore "ControlPlane.Worker/ControlPlane.Worker.csproj"
COPY . .
RUN dotnet publish "ControlPlane.Worker/ControlPlane.Worker.csproj" \
-c Release -o /app/publish --no-restore
# ── Runtime stage ─────────────────────────────────────────────────────────────
FROM mcr.microsoft.com/dotnet/aspnet:10.0 AS runtime
WORKDIR /app
# Install Pulumi CLI so the Automation API can shell out to it
# (curl is purged afterward to keep the runtime image small; ca-certificates stays for TLS).
RUN apt-get update && apt-get install -y --no-install-recommends curl ca-certificates \
&& curl -fsSL https://get.pulumi.com | sh \
&& apt-get purge -y curl \
&& rm -rf /var/lib/apt/lists/*
# Pulumi installs under /root/.pulumi; make its binaries resolvable at runtime.
ENV PATH="/root/.pulumi/bin:${PATH}"
COPY --from=build /app/publish .
ENTRYPOINT ["dotnet", "ControlPlane.Worker.dll"]
+64
View File
@@ -0,0 +1,64 @@
using ControlPlane.Core.Config;
using ControlPlane.Core.Interfaces;
using ControlPlane.Core.Services;
using ControlPlane.Worker;
using ControlPlane.Worker.Services;
using ControlPlane.Worker.Steps;
using Keycloak.AuthServices.Sdk;
using MassTransit;
var builder = Host.CreateApplicationBuilder(args);
builder.AddServiceDefaults();
// Centralized infrastructure options — domain, network, internal URLs, cert paths
builder.Services.Configure<ClarityInfraOptions>(
builder.Configuration.GetSection(ClarityInfraOptions.Section));
// Keycloak Admin SDK client — falls back to local defaults when config is absent
builder.Services.AddKeycloakAdminHttpClient(o =>
{
o.AuthServerUrl = builder.Configuration["Keycloak:AuthServerUrl"] ?? "http://localhost:8080";
o.Realm = builder.Configuration["Keycloak:Realm"] ?? "master";
o.Resource = builder.Configuration["Keycloak:Resource"] ?? "admin-cli";
});
// Custom admin client - handles realm creation, roles, role assignment (not in SDK)
builder.Services.AddSingleton<KeycloakAdminClient>();
// Docker container manager for per-tenant Clarity.Server instances
builder.Services.AddSingleton<ClarityContainerService>();
// Tenant registry - persists provisioned tenant XML files to ClientAssets folder
builder.Services.AddSingleton<TenantRegistryService>();
// Saga steps in execution order — container launches LAST once all context is populated.
// Registration order matters: ProvisioningConsumer resolves IEnumerable<ISagaStep>
// and executes them in this order.
builder.Services.AddSingleton<ISagaStep, KeycloakStep>();
builder.Services.AddSingleton<ISagaStep, VaultStep>();
builder.Services.AddSingleton<ISagaStep, MigrationStep>();
builder.Services.AddSingleton<ISagaStep, LaunchStep>();
builder.Services.AddSingleton<ISagaStep, HandoffStep>();
builder.Services.AddMassTransit(x =>
{
x.SetKebabCaseEndpointNameFormatter();
x.AddConsumer<ProvisioningConsumer>();
x.UsingRabbitMq((ctx, cfg) =>
{
cfg.Host(builder.Configuration.GetConnectionString("rabbitmq"));
cfg.ConfigureEndpoints(ctx);
});
});
// Surface fatal startup/run failures on stderr before the process dies,
// then rethrow so the host exits non-zero.
try
{
var host = builder.Build();
host.Run();
}
catch (Exception ex)
{
Console.Error.WriteLine($"FATAL WORKER CRASH: {ex}");
throw;
}
+167
View File
@@ -0,0 +1,167 @@
using ControlPlane.Core.Config;
using ControlPlane.Core.Interfaces;
using ControlPlane.Core.Messages;
using ControlPlane.Core.Models;
using ControlPlane.Core.Services;
using MassTransit;
using Microsoft.Extensions.Options;
namespace ControlPlane.Worker;
/// <summary>
/// MassTransit consumer. Triggered by ProvisionClientCommand off RabbitMQ.
/// Runs the saga and publishes ProvisioningProgressEvent for each step transition.
/// </summary>
public sealed class ProvisioningConsumer(
IEnumerable<ISagaStep> steps,
IPublishEndpoint bus,
IConfiguration config,
IOptions<ClarityInfraOptions> infraOptions,
TenantRegistryService registry,
ILogger<ProvisioningConsumer> logger) : IConsumer<ProvisionClientCommand>
{
public async Task Consume(ConsumeContext<ProvisionClientCommand> context)
{
var cmd = context.Message;
var job = new ProvisioningJob
{
Id = cmd.JobId,
ClientName = cmd.ClientName,
StateCode = cmd.StateCode,
Subdomain = cmd.Subdomain,
AdminEmail = cmd.AdminEmail,
SiteCode = cmd.SiteCode,
Environment = cmd.Environment,
Status = ProvisioningStatus.Running
};
logger.LogInformation("Starting provisioning saga for job {JobId} ({Client})", job.Id, job.ClientName);
await RunSagaAsync(job, context.CancellationToken);
}
private async Task RunSagaAsync(ProvisioningJob job, CancellationToken cancellationToken)
{
var sagaContext = new SagaContext { Job = job };
var executedSteps = new Stack<ISagaStep>();
foreach (var step in steps)
{
try
{
await Publish(job.Id, "step_started", step.StepName, $"Starting: {step.StepName}");
logger.LogInformation("[{JobId}] Executing: {Step}", job.Id, step.StepName);
await step.ExecuteAsync(sagaContext, cancellationToken);
executedSteps.Push(step);
await Publish(job.Id, "step_complete", step.StepName, $"Completed: {step.StepName}");
}
catch (Exception ex)
{
logger.LogError(ex, "[{JobId}] Step {Step} failed", job.Id, step.StepName);
await Publish(job.Id, "step_failed", step.StepName, $"Failed: {step.StepName} - {ex.Message}");
await PublishDiagnostic(job.Id, step.StepName, ex);
job.Status = ProvisioningStatus.Compensating;
job.FailureReason = $"{step.StepName}: {ex.Message}";
await CompensateAsync(sagaContext, executedSteps, cancellationToken);
job.Status = ProvisioningStatus.Failed;
await Publish(job.Id, "job_failed", null, job.FailureReason);
return;
}
}
job.Status = ProvisioningStatus.Completed;
job.CompletedAt = DateTimeOffset.UtcNow;
await Publish(job.Id, "job_complete", null, "All steps completed successfully.");
var infra = infraOptions.Value;
var apiBaseUrl = sagaContext.TenantApiBaseUrl
?? infra.TenantPublicUrl(job.Subdomain);
// Persist to ClientAssets/{subdomain}.xml
var nginxConfPath = config["Nginx:ConfDPath"] is { } p
? Path.Combine(p, $"{job.Subdomain}.conf")
: null;
var record = new TenantRecord
{
JobId = job.Id.ToString(),
Subdomain = job.Subdomain,
ClientName = job.ClientName,
StateCode = job.StateCode,
AdminEmail = job.AdminEmail,
SiteCode = job.SiteCode,
Environment = job.Environment,
Tier = job.Tier.ToString(),
ApiBaseUrl = apiBaseUrl,
Status = "Provisioned",
ProvisionedAt = job.CompletedAt!.Value.ToString("o"),
ContainerName = sagaContext.ContainerName,
ContainerPort = null,
ContainerImage = config["Docker:ClarityServerImage"] ?? "clarity-server:latest",
ContainerNetwork = infra.Network,
NginxConfPath = nginxConfPath,
PublicUrl = infra.TenantPublicUrl(job.Subdomain),
LastProvisioningStep = "LaunchStep",
ProvisioningNotes = $"Provisioned at {job.CompletedAt:o}. All {job.CompletedSteps} steps completed.",
};
// AppSettings — enriched by each step via SagaContext
record.SetAppSetting("Keycloak:Realm", $"clarity-{job.Subdomain.ToLowerInvariant()}");
record.SetAppSetting("Keycloak:BaseUrl", infra.KeycloakPublicUrl);
record.SetAppSetting("Keycloak:InternalUrl", infra.KeycloakInternalUrl);
if (!string.IsNullOrWhiteSpace(sagaContext.TenantStackName))
record.SetAppSetting("Pulumi:StackName", sagaContext.TenantStackName);
// ConnectionStrings — written by MigrationStep once DB is provisioned
if (!string.IsNullOrWhiteSpace(sagaContext.TenantConnectionString))
record.SetConnectionString("TenantDb", sagaContext.TenantConnectionString);
registry.Save(record);
logger.LogInformation("[{JobId}] Provisioning completed. Tenant record saved.", job.Id);
}
/// <summary>
/// Unwinds the saga: pops each executed step (most recent first) and invokes its
/// compensation. A failed compensation is logged and surfaced as a diagnostic
/// event but does not stop the remaining rollbacks.
/// </summary>
private async Task CompensateAsync(SagaContext sagaContext, Stack<ISagaStep> executedSteps, CancellationToken cancellationToken)
{
    var jobId = sagaContext.Job.Id;
    while (executedSteps.Count > 0)
    {
        var current = executedSteps.Pop();
        try
        {
            logger.LogInformation("[{JobId}] Compensating: {Step}", jobId, current.StepName);
            await Publish(jobId, "compensation_started", current.StepName, $"Rolling back: {current.StepName}");
            await current.CompensateAsync(sagaContext, cancellationToken);
            await Publish(jobId, "compensation_complete", current.StepName, $"Rolled back: {current.StepName}");
        }
        catch (Exception ex)
        {
            // Keep rolling back the remaining steps even when one compensation fails.
            logger.LogError(ex, "[{JobId}] Compensation failed for {Step} - manual intervention required", jobId, current.StepName);
            await PublishDiagnostic(jobId, $"{current.StepName} (compensation)", ex);
        }
    }
}
/// <summary>Publishes a provisioning progress event for the given job onto the bus.</summary>
private Task Publish(Guid jobId, string type, string? step, string? message)
{
    var progressEvent = new ProvisioningProgressEvent
    {
        JobId = jobId,
        Type = type,
        Step = step,
        Message = message
    };
    return bus.Publish(progressEvent);
}
/// <summary>
/// Publishes a "diagnostic" progress event carrying the exception message and
/// its full string form (stack trace included) in <c>Detail</c>.
/// </summary>
private Task PublishDiagnostic(Guid jobId, string? step, Exception ex)
{
    var diagnostic = new ProvisioningProgressEvent
    {
        JobId = jobId,
        Type = "diagnostic",
        Step = step,
        Message = ex.Message,
        Detail = ex.ToString()
    };
    return bus.Publish(diagnostic);
}
}
@@ -0,0 +1,358 @@
using ControlPlane.Core.Config;
using ControlPlane.Core.Messages;
using Docker.DotNet;
using Docker.DotNet.Models;
using MassTransit;
using Microsoft.Extensions.Options;
namespace ControlPlane.Worker.Services;
/// <summary>
/// Manages Clarity.Server Docker containers for provisioned tenants.
/// Container naming convention: {env}-app-clarity-{siteCode}
/// e.g. fdev-app-clarity-01000014
/// </summary>
public class ClarityContainerService(
    IConfiguration config,
    IOptions<ClarityInfraOptions> infraOptions,
    IPublishEndpoint bus,
    ILogger<ClarityContainerService> logger)
{
    // Shorthand for the current infra options snapshot.
    private ClarityInfraOptions Infra => infraOptions.Value;

    // The image to run - override via config for prod registries
    private string ImageName => config["Docker:ClarityServerImage"] ?? "clarity-server:latest";

    // Short-lived Docker client; each public operation creates and disposes its own.
    // Default socket is the Windows named pipe — override Docker:Socket on Linux hosts.
    private DockerClient CreateClient()
    {
        var uri = config["Docker:Socket"] ?? "npipe://./pipe/docker_engine";
        return new DockerClientConfiguration(new Uri(uri)).CreateClient();
    }

    /// <summary>
    /// Derives the container name from environment + siteCode.
    /// Convention: {env}-app-clarity-{siteCode}
    /// </summary>
    public static string ContainerName(string environment, string siteCode) =>
        $"{environment.ToLowerInvariant()}-app-clarity-{siteCode.ToLowerInvariant()}";

    /// <summary>
    /// Pulls the image (if not present locally), starts the container on the managed network,
    /// and writes an nginx conf.d snippet so traffic routes in.
    /// No host port binding — nginx reaches the container via Docker DNS on the shared network.
    /// Idempotent: any existing container with the same name is stopped and removed first.
    /// Returns the container name.
    /// </summary>
    public async Task<string> StartTenantContainerAsync(
        string environment,
        string siteCode,
        string subdomain,
        string keycloakRealm,
        string? postgresConnectionString,
        string? vaultToken,
        Guid jobId,
        CancellationToken cancellationToken)
    {
        using var docker = CreateClient();
        var name = ContainerName(environment, siteCode);

        // Stop and remove any existing container with this name (idempotent reprovision)
        await TryRemoveExistingAsync(docker, name, cancellationToken);

        // Pull image if not already local
        await EnsureImageAsync(docker, cancellationToken);

        // All service URLs use stable Docker DNS names on the managed network — no host ports involved.
        var container = await docker.Containers.CreateContainerAsync(new CreateContainerParameters
        {
            Name = name,
            Image = ImageName,
            Env =
            [
                "ASPNETCORE_ENVIRONMENT=Production",
                "ASPNETCORE_URLS=http://+:8080",
                $"TenantSubdomain={subdomain}",
                $"Keycloak__BaseUrl={Infra.KeycloakPublicUrl}",
                $"Keycloak__InternalUrl={Infra.KeycloakInternalUrl}",
                $"Keycloak__Realm={keycloakRealm}",
                $"Vault__Address={Infra.VaultInternalUrl}",
                // Optional entries are spliced in via collection-expression spreads.
                .. (vaultToken is not null
                    ? (string[])[$"Vault__Token={vaultToken}"]
                    : []),
                .. (postgresConnectionString is not null
                    ? (string[])[$"ConnectionStrings__postgresdb={postgresConnectionString}"]
                    : []),
            ],
            HostConfig = new HostConfig
            {
                NetworkMode = Infra.Network,
                RestartPolicy = new RestartPolicy { Name = RestartPolicyKind.UnlessStopped },
            },
            // Labels let tooling find/cleanup managed tenant containers later.
            Labels = new Dictionary<string, string>
            {
                ["clarity.managed"] = "true",
                ["clarity.subdomain"] = subdomain,
                ["clarity.siteCode"] = siteCode,
                ["clarity.env"] = environment,
            },
        }, cancellationToken);

        // Ensure Keycloak and Vault are reachable on the managed network via their Docker DNS aliases.
        // Aspire places them on its own bridge; tenant containers on clarity-net need them aliased here.
        await EnsureContainerOnNetworkAsync(docker, "keycloak", Infra.Network, "keycloak", cancellationToken);
        await EnsureContainerOnNetworkAsync(docker, "vault", Infra.Network, "vault", cancellationToken);

        var started = await docker.Containers.StartContainerAsync(container.ID, null, cancellationToken);
        if (!started)
            throw new InvalidOperationException($"Docker failed to start container {name} (id={container.ID}).");
        logger.LogInformation("Started container {Name} on {Network} (image: {Image})", name, Infra.Network, ImageName);

        // Route traffic in only after the container is up.
        await WriteNginxConfigAsync(subdomain, name, jobId, cancellationToken);
        return name;
    }

    /// <summary>
    /// Stops and removes a tenant container. Called from InfrastructureStep.CompensateAsync.
    /// </summary>
    public async Task StopAndRemoveAsync(string containerName, CancellationToken cancellationToken)
    {
        using var docker = CreateClient();
        await TryRemoveExistingAsync(docker, containerName, cancellationToken);
        logger.LogInformation("Removed container {Name}", containerName);
    }

    // -- helpers --

    // Ensures ImageName is available locally: no-op if present, hard failure with build
    // instructions for local-only images, registry pull otherwise.
    private async Task EnsureImageAsync(DockerClient docker, CancellationToken cancellationToken)
    {
        var images = await docker.Images.ListImagesAsync(new ImagesListParameters
        {
            Filters = new Dictionary<string, IDictionary<string, bool>>
            {
                ["reference"] = new Dictionary<string, bool> { [ImageName] = true }
            }
        }, cancellationToken);
        if (images.Count > 0)
        {
            logger.LogInformation("Image {Image} already present locally.", ImageName);
            return;
        }

        // Local image (no registry host) — pulling from Docker Hub will always fail.
        // The image must be built manually before provisioning.
        var isLocalOnly = !ImageName.Contains('/') || ImageName.StartsWith("localhost/");
        if (isLocalOnly)
        {
            throw new InvalidOperationException(
                $"Image '{ImageName}' was not found locally and cannot be pulled from a registry. " +
                $"Build it first from the repo root:{Environment.NewLine}" +
                $"  docker build -f Clarity.Server/Dockerfile -t {ImageName} ." +
                $"{Environment.NewLine}Then retry provisioning.");
        }

        // Registry image — attempt pull
        logger.LogInformation("Pulling image {Image} from registry...", ImageName);
        var (repo, tag) = SplitImageTag(ImageName);
        await docker.Images.CreateImageAsync(
            new ImagesCreateParameters { FromImage = repo, Tag = tag },
            null,
            new Progress<JSONMessage>(m =>
            {
                if (!string.IsNullOrWhiteSpace(m.Status))
                    logger.LogDebug("[docker pull] {Status} {Progress}", m.Status, m.ProgressMessage);
            }),
            cancellationToken);
    }

    // -- nginx conf.d helpers --

    /// <summary>
    /// Writes /NginxConfig/conf.d/{subdomain}.conf so nginx routes
    /// {subdomain}.clarity.test → the container.
    /// Then signals nginx to reload its config without dropping connections.
    /// Skipped (with a warning) when Nginx:ConfDPath is not configured.
    /// </summary>
    private async Task WriteNginxConfigAsync(string subdomain, string containerName, Guid jobId, CancellationToken ct)
    {
        var confDPath = config["Nginx:ConfDPath"];
        if (string.IsNullOrWhiteSpace(confDPath))
        {
            logger.LogWarning("Nginx:ConfDPath is not configured — skipping nginx conf write for {Subdomain}.", subdomain);
            return;
        }

        // Triple-$ raw string: {{{x}}} interpolates, single $ stays literal for nginx vars.
        var confContent = $$$"""
            # Auto-generated by ControlPlane.Worker — do not edit manually.
            # Tenant: {{{subdomain}}}
            server {
                listen 443 ssl;
                server_name {{{subdomain}}}.{{{Infra.Domain}}};
                ssl_certificate     {{{Infra.NginxCertPath}}};
                ssl_certificate_key {{{Infra.NginxCertKeyPath}}};
                location / {
                    # Docker DNS resolves the container name on the managed network
                    set $upstream http://{{{containerName}}}:8080;
                    proxy_pass $upstream;
                    proxy_set_header Host $host;
                    proxy_set_header X-Real-IP $remote_addr;
                    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
                    proxy_set_header X-Forwarded-Proto $scheme;
                }
            }
            """;
        var confFile = Path.Combine(confDPath, $"{subdomain}.conf");
        await File.WriteAllTextAsync(confFile, confContent, ct);
        logger.LogInformation("Wrote nginx config for {Subdomain} → {Container}", subdomain, containerName);
        await ReloadNginxAsync(jobId, subdomain, ct);
    }

    /// <summary>
    /// Deletes the tenant's nginx conf.d snippet (if present) and reloads nginx.
    /// Used during compensation; publishes no job-scoped event (jobId = Guid.Empty).
    /// </summary>
    public async Task RemoveNginxConfigAsync(string subdomain, CancellationToken ct)
    {
        var confDPath = config["Nginx:ConfDPath"];
        if (string.IsNullOrWhiteSpace(confDPath)) return;
        var confFile = Path.Combine(confDPath, $"{subdomain}.conf");
        if (File.Exists(confFile))
        {
            File.Delete(confFile);
            logger.LogInformation("Removed nginx config for {Subdomain}", subdomain);
            await ReloadNginxAsync(Guid.Empty, subdomain, ct);
        }
    }

    /// <summary>
    /// Sends SIGHUP to the nginx container which triggers a graceful config reload.
    /// Best-effort: failures are logged (and published as a diagnostic when jobId is set)
    /// but never thrown.
    /// </summary>
    private async Task ReloadNginxAsync(Guid jobId, string subdomain, CancellationToken ct)
    {
        try
        {
            using var docker = CreateClient();
            // Find the nginx container by image name — Aspire appends a random suffix to the name
            // so we can't rely on the static name "nginx".
            var containers = await docker.Containers.ListContainersAsync(
                new ContainersListParameters
                {
                    Filters = new Dictionary<string, IDictionary<string, bool>>
                    {
                        ["ancestor"] = new Dictionary<string, bool> { ["nginx"] = true }
                    }
                }, ct);
            var nginx = containers.FirstOrDefault();
            if (nginx is null)
            {
                logger.LogWarning("nginx container not found — skipping reload.");
                return;
            }
            await docker.Containers.KillContainerAsync(nginx.ID, new ContainerKillParameters { Signal = "HUP" }, ct);
            var containerName = nginx.Names.FirstOrDefault() ?? nginx.ID;
            logger.LogInformation("nginx reloaded (container: {Name}).", containerName);
            // Guid.Empty marks a non-job-scoped reload (e.g. compensation) — no progress event.
            if (jobId != Guid.Empty)
            {
                await bus.Publish(new ProvisioningProgressEvent
                {
                    JobId = jobId,
                    Type = "nginx_reloaded",
                    Step = "Container Launch",
                    Message = $"nginx reloaded — route for {subdomain}.{Infra.Domain} is live.",
                }, ct);
            }
        }
        catch (Exception ex)
        {
            logger.LogWarning(ex, "Failed to reload nginx — new tenant route may not be active until next nginx restart.");
            if (jobId != Guid.Empty)
            {
                await bus.Publish(new ProvisioningProgressEvent
                {
                    JobId = jobId,
                    Type = "diagnostic",
                    Step = "Container Launch",
                    Message = "nginx reload failed — route may not be active.",
                    Detail = ex.ToString(),
                }, ct);
            }
        }
    }

    // -- docker helpers --

    // Stops (5s grace) then force-removes the named container; a missing container is fine.
    private static async Task TryRemoveExistingAsync(DockerClient docker, string name, CancellationToken cancellationToken)
    {
        try
        {
            await docker.Containers.StopContainerAsync(name,
                new ContainerStopParameters { WaitBeforeKillSeconds = 5 }, cancellationToken);
            await docker.Containers.RemoveContainerAsync(name,
                new ContainerRemoveParameters { Force = true }, cancellationToken);
        }
        catch (DockerContainerNotFoundException) { /* already gone - fine */ }
        catch (DockerApiException ex) when (ex.StatusCode == System.Net.HttpStatusCode.NotFound) { /* same */ }
    }

    // Splits "repo:tag" on the LAST colon; defaults tag to "latest" when absent.
    // NOTE(review): a registry with a port and no tag (e.g. "host:5000/img") would be
    // mis-split here — assumed not to occur with current image naming; confirm if that changes.
    private static (string repo, string tag) SplitImageTag(string image)
    {
        var colon = image.LastIndexOf(':');
        return colon < 0 ? (image, "latest") : (image[..colon], image[(colon + 1)..]);
    }

    /// <summary>
    /// Connects <paramref name="containerName"/> to <paramref name="network"/> with the given
    /// <paramref name="alias"/> if it isn't already connected.
    /// Silently no-ops if the container isn't found (it may not be running in all environments).
    /// </summary>
    private async Task EnsureContainerOnNetworkAsync(
        DockerClient docker,
        string containerName,
        string network,
        string alias,
        CancellationToken cancellationToken)
    {
        try
        {
            var inspect = await docker.Containers.InspectContainerAsync(containerName, cancellationToken);
            if (inspect.NetworkSettings.Networks.TryGetValue(network, out var existing))
            {
                // Already connected — check whether our alias is present.
                var hasAlias = existing.Aliases?.Contains(alias, StringComparer.OrdinalIgnoreCase) == true;
                if (hasAlias) return;
                // Connected but without the alias — disconnect so we can reconnect with it.
                await docker.Networks.DisconnectNetworkAsync(network, new NetworkDisconnectParameters
                {
                    Container = inspect.ID,
                    Force = true,
                }, cancellationToken);
            }
            await docker.Networks.ConnectNetworkAsync(network, new NetworkConnectParameters
            {
                Container = inspect.ID,
                EndpointConfig = new EndpointSettings
                {
                    Aliases = [alias],
                },
            }, cancellationToken);
            logger.LogInformation("Connected container '{Container}' to network '{Network}' with alias '{Alias}'.", containerName, network, alias);
        }
        catch (DockerContainerNotFoundException)
        {
            logger.LogWarning("Container '{Container}' not found — skipping network connect.", containerName);
        }
        catch (DockerApiException ex) when (ex.StatusCode == System.Net.HttpStatusCode.NotFound)
        {
            logger.LogWarning("Container '{Container}' not found — skipping network connect.", containerName);
        }
        catch (Exception ex)
        {
            // Best-effort: the launch proceeds, but tokens issued by Keycloak may be unverifiable.
            logger.LogWarning(ex, "Could not connect '{Container}' to '{Network}' — tenant JWT validation may fail.", containerName, network);
        }
    }
}
@@ -0,0 +1,194 @@
using System.Net.Http.Headers;
using System.Text;
using System.Text.Json;
namespace ControlPlane.Worker.Services;
/// <summary>
/// Thin wrapper over the Keycloak Admin REST API (realms, roles, users, clients,
/// protocol mappers). Authenticates against the master realm with the admin-cli
/// password grant before each operation. Creation methods treat HTTP 409 Conflict
/// as "already exists" so provisioning is idempotent.
/// </summary>
public class KeycloakAdminClient
{
    private readonly HttpClient _http;
    private readonly string _baseUrl;       // admin API base, no trailing slash
    private readonly string _adminUser;
    private readonly string _adminPassword;
    private readonly ILogger<KeycloakAdminClient> _logger;
    private static readonly JsonSerializerOptions JsonOpts = new(JsonSerializerDefaults.Web);

    public KeycloakAdminClient(IConfiguration config, ILogger<KeycloakAdminClient> logger)
    {
        _logger = logger;
        // Keycloak:AuthServerUrl takes precedence; BaseUrl is the fallback key.
        _baseUrl = (config["Keycloak:AuthServerUrl"] ?? config["Keycloak:BaseUrl"])?.TrimEnd('/') ?? "http://localhost:8080";
        _adminUser = config["Keycloak:AdminUser"] ?? "admin";
        _adminPassword = config["Keycloak:AdminPassword"] ?? "admin";
        // Password is masked before logging (first/last char only).
        var maskedPw = _adminPassword.Length > 2 ? $"{_adminPassword[0]}***{_adminPassword[^1]}" : "***";
        _logger.LogInformation("KeycloakAdminClient base URL: {Url}, user: {User}, password: {Password}",
            _baseUrl, _adminUser, maskedPw);
        _http = new HttpClient { BaseAddress = new Uri(_baseUrl) };
    }

    // Obtains a fresh admin access token (password grant, admin-cli client on the
    // master realm) and installs it as the default Authorization header.
    // Called at the start of every public method — tokens are never cached.
    private async Task AuthorizeAsync(CancellationToken ct)
    {
        var form = new FormUrlEncodedContent(new Dictionary<string, string>
        {
            ["grant_type"] = "password",
            ["client_id"] = "admin-cli",
            ["username"] = _adminUser,
            ["password"] = _adminPassword,
        });
        var res = await _http.PostAsync("/realms/master/protocol/openid-connect/token", form, ct);
        res.EnsureSuccessStatusCode();
        using var doc = JsonDocument.Parse(await res.Content.ReadAsStringAsync(ct));
        var token = doc.RootElement.GetProperty("access_token").GetString()!;
        _http.DefaultRequestHeaders.Authorization = new AuthenticationHeaderValue("Bearer", token);
    }

    /// <summary>Creates an enabled realm with self-registration and email login; 409 → already exists, skipped.</summary>
    public async Task CreateRealmAsync(string realmId, string displayName, CancellationToken ct)
    {
        await AuthorizeAsync(ct);
        var res = await _http.PostAsync("/admin/realms", Json(new
        {
            realm = realmId,
            displayName = displayName,
            enabled = true,
            registrationAllowed = true,
            registrationEmailAsUsername = false,
            loginWithEmailAllowed = true,
            resetPasswordAllowed = true,
            verifyEmail = false,
            sslRequired = "external",
        }), ct);
        if (res.StatusCode == System.Net.HttpStatusCode.Conflict)
        {
            _logger.LogWarning("Realm {Realm} already exists - skipping.", realmId);
            return;
        }
        res.EnsureSuccessStatusCode();
        _logger.LogInformation("Realm {Realm} created.", realmId);
    }

    /// <summary>Deletes a realm; a 404 (already gone) is tolerated.</summary>
    public async Task DeleteRealmAsync(string realmId, CancellationToken ct)
    {
        await AuthorizeAsync(ct);
        var res = await _http.DeleteAsync($"/admin/realms/{realmId}", ct);
        if (res.StatusCode != System.Net.HttpStatusCode.NotFound)
            res.EnsureSuccessStatusCode();
        _logger.LogInformation("Realm {Realm} deleted.", realmId);
    }

    /// <summary>Creates a realm-level role; 409 (exists) is tolerated.</summary>
    public async Task CreateRealmRoleAsync(string realmId, string roleName, string description, CancellationToken ct)
    {
        await AuthorizeAsync(ct);
        var res = await _http.PostAsync($"/admin/realms/{realmId}/roles",
            Json(new { name = roleName, description }), ct);
        if (res.StatusCode != System.Net.HttpStatusCode.Conflict)
            res.EnsureSuccessStatusCode();
    }

    /// <summary>
    /// Creates an enabled, email-verified user (username = email); 409 is tolerated.
    /// Always resolves and returns the user's Keycloak id afterwards, so the method
    /// is idempotent across reprovisioning.
    /// </summary>
    public async Task<string> CreateUserAsync(string realmId, string email, string firstName, CancellationToken ct)
    {
        await AuthorizeAsync(ct);
        var res = await _http.PostAsync($"/admin/realms/{realmId}/users",
            Json(new { username = email, email, firstName, enabled = true, emailVerified = true }), ct);
        if (res.StatusCode != System.Net.HttpStatusCode.Conflict)
            res.EnsureSuccessStatusCode();
        return await GetUserIdAsync(realmId, email, ct);
    }

    /// <summary>Looks up a user id by exact email match; throws when not found.</summary>
    public async Task<string> GetUserIdAsync(string realmId, string email, CancellationToken ct)
    {
        await AuthorizeAsync(ct);
        var res = await _http.GetAsync(
            $"/admin/realms/{realmId}/users?email={Uri.EscapeDataString(email)}&exact=true", ct);
        res.EnsureSuccessStatusCode();
        using var doc = JsonDocument.Parse(await res.Content.ReadAsStringAsync(ct));
        var users = doc.RootElement.EnumerateArray().ToList();
        if (users.Count == 0)
            throw new InvalidOperationException($"User {email} not found in realm {realmId}.");
        return users[0].GetProperty("id").GetString()!;
    }

    /// <summary>Assigns an existing realm role to a user (role is fetched first to get its full representation).</summary>
    public async Task AssignRealmRoleAsync(string realmId, string userId, string roleName, CancellationToken ct)
    {
        await AuthorizeAsync(ct);
        var roleRes = await _http.GetAsync($"/admin/realms/{realmId}/roles/{roleName}", ct);
        roleRes.EnsureSuccessStatusCode();
        var roleJson = await roleRes.Content.ReadAsStringAsync(ct);
        // The role-mappings endpoint expects a JSON array of role representations.
        var res = await _http.PostAsync(
            $"/admin/realms/{realmId}/users/{userId}/role-mappings/realm",
            new StringContent($"[{roleJson}]", Encoding.UTF8, "application/json"), ct);
        res.EnsureSuccessStatusCode();
    }

    /// <summary>Creates a client from an arbitrary representation object; 409 is tolerated.</summary>
    public async Task CreateClientAsync(string realmId, object clientRepresentation, CancellationToken ct)
    {
        await AuthorizeAsync(ct);
        var res = await _http.PostAsync($"/admin/realms/{realmId}/clients",
            Json(clientRepresentation), ct);
        if (res.StatusCode != System.Net.HttpStatusCode.Conflict)
            res.EnsureSuccessStatusCode();
    }

    /// <summary>
    /// Returns the internal Keycloak UUID for a client by its clientId string.
    /// </summary>
    public async Task<string> GetClientUuidAsync(string realmId, string clientId, CancellationToken ct)
    {
        await AuthorizeAsync(ct);
        var res = await _http.GetAsync(
            $"/admin/realms/{realmId}/clients?clientId={Uri.EscapeDataString(clientId)}&search=false", ct);
        res.EnsureSuccessStatusCode();
        using var doc = JsonDocument.Parse(await res.Content.ReadAsStringAsync(ct));
        var clients = doc.RootElement.EnumerateArray().ToList();
        if (clients.Count == 0)
            throw new InvalidOperationException($"Client '{clientId}' not found in realm '{realmId}'.");
        return clients[0].GetProperty("id").GetString()!;
    }

    /// <summary>
    /// Adds an audience protocol mapper to a client so that the named audience is included in every
    /// access token issued by that client.
    /// The audience lands in access tokens only (not ID tokens); 409 is tolerated.
    /// </summary>
    public async Task AddAudienceMapperAsync(string realmId, string clientUuid, string audienceName, CancellationToken ct)
    {
        await AuthorizeAsync(ct);
        var res = await _http.PostAsync(
            $"/admin/realms/{realmId}/clients/{clientUuid}/protocol-mappers/models",
            Json(new
            {
                name = $"audience-{audienceName}",
                protocol = "openid-connect",
                protocolMapper = "oidc-audience-mapper",
                consentRequired = false,
                config = new Dictionary<string, string>
                {
                    ["included.client.audience"] = audienceName,
                    ["id.token.claim"] = "false",
                    ["access.token.claim"] = "true",
                },
            }), ct);
        if (res.StatusCode != System.Net.HttpStatusCode.Conflict)
            res.EnsureSuccessStatusCode();
        _logger.LogInformation("Added audience mapper '{Audience}' to client {ClientUuid} in realm {Realm}.",
            audienceName, clientUuid, realmId);
    }

    /// <summary>
    /// Triggers Keycloak's execute-actions email (e.g. UPDATE_PASSWORD) for a user;
    /// the action link is valid for 24h (lifespan=86400 seconds).
    /// </summary>
    public async Task SendRequiredActionsEmailAsync(
        string realmId, string userId, IEnumerable<string> actions, CancellationToken ct)
    {
        await AuthorizeAsync(ct);
        var res = await _http.PutAsync(
            $"/admin/realms/{realmId}/users/{userId}/execute-actions-email?lifespan=86400",
            new StringContent(JsonSerializer.Serialize(actions, JsonOpts), Encoding.UTF8, "application/json"),
            ct);
        res.EnsureSuccessStatusCode();
    }

    // Serializes a payload with web defaults (camelCase) as application/json content.
    private static StringContent Json(object payload) =>
        new(JsonSerializer.Serialize(payload, JsonOpts), Encoding.UTF8, "application/json");
}
+27
View File
@@ -0,0 +1,27 @@
using ControlPlane.Core.Interfaces;
using ControlPlane.Core.Models;
namespace ControlPlane.Worker.Steps;
/// <summary>
/// Stub saga step for the day-zero handoff email. No provider is wired yet;
/// it only logs and flags the step as completed on the job.
/// </summary>
public class HandoffStep(ILogger<HandoffStep> logger) : ISagaStep
{
    public string StepName => "Handoff (Email Magic Link)";

    public Task ExecuteAsync(SagaContext context, CancellationToken cancellationToken)
    {
        // TODO: SendGrid / AWS SES
        // 1. Send email to context.Job.AdminEmail
        // 2. Include context.MagicLink for password setup
        // 3. Include login URL: https://{context.Job.Subdomain}
        var job = context.Job;
        logger.LogInformation("[{JobId}] Handoff step is a stub - email provider not yet wired.", job.Id);
        job.CompletedSteps |= CompletedSteps.HandoffSent;
        return Task.CompletedTask;
    }

    public Task CompensateAsync(SagaContext context, CancellationToken cancellationToken)
    {
        // A delivered email cannot be recalled — compensation is log-only.
        logger.LogWarning("[{JobId}] Handoff step: email cannot be compensated if already sent.", context.Job.Id);
        return Task.CompletedTask;
    }
}
+90
View File
@@ -0,0 +1,90 @@
using ControlPlane.Core.Config;
using ControlPlane.Core.Interfaces;
using ControlPlane.Core.Models;
using ControlPlane.Worker.Services;
using Microsoft.Extensions.Options;
namespace ControlPlane.Worker.Steps;
/// <summary>
/// Saga step that bootstraps tenant identity in Keycloak: creates the realm,
/// the AgencyAdmin role, the REST-API and web-app clients (with an audience
/// mapper linking them), and the day-zero admin user with the AgencyAdmin role.
/// Compensation deletes the whole realm.
/// </summary>
public class KeycloakStep(
    KeycloakAdminClient adminClient,
    IConfiguration config,
    IOptions<ClarityInfraOptions> infraOptions,
    ILogger<KeycloakStep> logger) : ISagaStep
{
    public string StepName => "Identity Bootstrapping (Keycloak)";

    public async Task ExecuteAsync(SagaContext context, CancellationToken cancellationToken)
    {
        var realmId = RealmId(context);
        logger.LogInformation("[{JobId}] Creating Keycloak realm {Realm}.", context.Job.Id, realmId);
        await adminClient.CreateRealmAsync(realmId, context.Job.ClientName, cancellationToken);
        logger.LogInformation("[{JobId}] Creating AgencyAdmin role.", context.Job.Id);
        await adminClient.CreateRealmRoleAsync(realmId, "AgencyAdmin", "Day-zero administrator for this Clarity tenant.", cancellationToken);
        // Derive the tenant's public-facing origin from ClarityInfraOptions.
        var tenantOrigin = infraOptions.Value.TenantPublicUrl(context.Job.Subdomain);
        logger.LogInformation("[{JobId}] Creating Keycloak clients for realm {Realm} (origin: {Origin}).",
            context.Job.Id, realmId, tenantOrigin);
        // clarity-rest-api: bearer-only resource server — just registers the audience so JWT validation passes.
        await adminClient.CreateClientAsync(realmId, new
        {
            clientId = "clarity-rest-api",
            name = "Clarity REST API",
            enabled = true,
            bearerOnly = true,
            publicClient = false,
        }, cancellationToken);
        // clarity-web-app: public OIDC client used by the React frontend.
        // Auth-code flow only (standardFlowEnabled); password grant disabled.
        await adminClient.CreateClientAsync(realmId, new
        {
            clientId = "clarity-web-app",
            name = "Clarity Web App",
            enabled = true,
            publicClient = true,
            standardFlowEnabled = true,
            directAccessGrantsEnabled = false,
            rootUrl = tenantOrigin,
            baseUrl = "/",
            redirectUris = new[] { $"{tenantOrigin}/*" },
            webOrigins = new[] { tenantOrigin },
        }, cancellationToken);
        // Ensure tokens issued by clarity-web-app include "clarity-rest-api" in the `aud` claim
        // so that Clarity.Server JWT bearer validation (Audience = "clarity-rest-api") passes.
        logger.LogInformation("[{JobId}] Adding audience mapper for clarity-rest-api on clarity-web-app.", context.Job.Id);
        var webAppUuid = await adminClient.GetClientUuidAsync(realmId, "clarity-web-app", cancellationToken);
        await adminClient.AddAudienceMapperAsync(realmId, webAppUuid, "clarity-rest-api", cancellationToken);
        logger.LogInformation("[{JobId}] Creating day-zero user {Email}.", context.Job.Id, context.Job.AdminEmail);
        var userId = await adminClient.CreateUserAsync(realmId, context.Job.AdminEmail, context.Job.ClientName, cancellationToken);
        logger.LogInformation("[{JobId}] Assigning AgencyAdmin role.", context.Job.Id);
        await adminClient.AssignRealmRoleAsync(realmId, userId, "AgencyAdmin", cancellationToken);
        // TODO No SMTP right now
        //logger.LogInformation("[{JobId}] Sending required actions email to {Email}.", context.Job.Id, context.Job.AdminEmail);
        //await adminClient.SendRequiredActionsEmailAsync(realmId, userId, ["UPDATE_PASSWORD", "VERIFY_EMAIL"], cancellationToken);
        // Record the created user's subject id for downstream steps / handoff.
        context.DayZeroUserSubjectId = userId;
        //context.MagicLink = $"Action email sent to {context.Job.AdminEmail} for realm '{realmId}'.";
        context.Job.CompletedSteps |= CompletedSteps.KeycloakProvisioned;
        logger.LogInformation("[{JobId}] Keycloak provisioning complete for realm {Realm}.", context.Job.Id, realmId);
    }

    // Deletes the tenant realm (and everything in it) on rollback.
    public async Task CompensateAsync(SagaContext context, CancellationToken cancellationToken)
    {
        var realmId = RealmId(context);
        logger.LogWarning("[{JobId}] Compensating Keycloak - deleting realm {Realm}.", context.Job.Id, realmId);
        await adminClient.DeleteRealmAsync(realmId, cancellationToken);
    }

    // Deterministic realm id: clarity-{subdomain, lowercased}. Must match the realm
    // name used by LaunchStep and the orchestrator's TenantRecord app settings.
    private static string RealmId(SagaContext context) =>
        $"clarity-{context.Job.Subdomain.ToLowerInvariant()}";
}
+76
View File
@@ -0,0 +1,76 @@
using ControlPlane.Core.Config;
using ControlPlane.Core.Interfaces;
using ControlPlane.Core.Models;
using ControlPlane.Worker.Services;
using Microsoft.Extensions.Options;
namespace ControlPlane.Worker.Steps;
/// <summary>
/// Final saga step — launches the clarity-server Docker container with the fully
/// enriched SagaContext (connection strings, Keycloak realm, etc. all known).
/// Runs LAST so all env vars are available at container start.
/// </summary>
public class LaunchStep(
    ILogger<LaunchStep> logger,
    IConfiguration config,
    IOptions<ClarityInfraOptions> infraOptions,
    ClarityContainerService containers) : ISagaStep
{
    public string StepName => "Container Launch";

    public async Task ExecuteAsync(SagaContext context, CancellationToken cancellationToken)
    {
        var job = context.Job;
        logger.LogInformation("[{JobId}] Launching container {Env}-app-clarity-{Site}",
            job.Id, job.Environment, job.SiteCode);
        var containerName = await containers.StartTenantContainerAsync(
            environment: job.Environment,
            siteCode: job.SiteCode,
            subdomain: job.Subdomain,
            // Must match KeycloakStep.RealmId: clarity-{subdomain, lowercased}.
            keycloakRealm: $"clarity-{job.Subdomain.ToLowerInvariant()}",
            postgresConnectionString: context.TenantConnectionString,
            vaultToken: ReadVaultToken(config),
            jobId: job.Id,
            cancellationToken: cancellationToken);
        // Record launch outputs so the orchestrator can persist them in the tenant record.
        context.ContainerName = containerName;
        context.TenantApiBaseUrl = infraOptions.Value.TenantPublicUrl(job.Subdomain);
        logger.LogInformation("[{JobId}] Container {Name} live at {Url}",
            job.Id, containerName, context.TenantApiBaseUrl);
        context.Job.CompletedSteps |= CompletedSteps.InfrastructureProvisioned;
    }

    // Tears down the container and its nginx route; no-op if the container was never recorded.
    public async Task CompensateAsync(SagaContext context, CancellationToken cancellationToken)
    {
        if (string.IsNullOrWhiteSpace(context.ContainerName)) return;
        logger.LogWarning("[{JobId}] Compensating: removing container {Name}", context.Job.Id, context.ContainerName);
        await containers.StopAndRemoveAsync(context.ContainerName, cancellationToken);
        await containers.RemoveNginxConfigAsync(context.Job.Subdomain, cancellationToken);
    }

    // Reads the Vault root token from the persisted init.json on the Vault volume.
    // Falls back to config["Vault:Token"] then "root" for local dev.
    private static string? ReadVaultToken(IConfiguration config)
    {
        var keysFile = config["Vault:KeysFile"];
        if (!string.IsNullOrWhiteSpace(keysFile) && File.Exists(keysFile))
        {
            try
            {
                var json = File.ReadAllText(keysFile);
                using var doc = System.Text.Json.JsonDocument.Parse(json);
                if (doc.RootElement.TryGetProperty("root_token", out var tok))
                    return tok.GetString();
            }
            // Deliberate best-effort: an unreadable/malformed keys file falls back to config.
            catch { /* fall through */ }
        }
        return config["Vault:Token"] ?? "root";
    }
}
+119
View File
@@ -0,0 +1,119 @@
using ControlPlane.Core.Interfaces;
using ControlPlane.Core.Models;
using Npgsql;
namespace ControlPlane.Worker.Steps;
/// <summary>
/// Provisions a per-tenant Postgres database on the shared Postgres instance.
/// Writes TenantConnectionString to SagaContext for downstream steps (LaunchStep).
/// Compensation drops the database.
/// </summary>
public class MigrationStep(
    IConfiguration config,
    ILogger<MigrationStep> logger) : ISagaStep
{
    public string StepName => "Database Migration & Seeding (EF Core)";

    /// <summary>
    /// Creates the tenant database if absent and records its in-network connection
    /// string on the saga context. Throws when ConnectionStrings:postgres is missing.
    /// </summary>
    public async Task ExecuteAsync(SagaContext context, CancellationToken cancellationToken)
    {
        var job = context.Job;
        var dbName = TenantDbName(job.Subdomain);
        var adminConnStr = config.GetConnectionString("postgres")
            ?? throw new InvalidOperationException(
                "ConnectionStrings:postgres is missing. " +
                "Ensure ControlPlane.Worker has .WithReference(postgres) in AppHost.");
        logger.LogInformation("[{JobId}] Provisioning database '{Db}'.", job.Id, dbName);
        await CreateDatabaseIfNotExistsAsync(adminConnStr, dbName, cancellationToken);
        context.TenantConnectionString = BuildTenantConnectionString(adminConnStr, dbName);
        logger.LogInformation("[{JobId}] Database '{Db}' ready.", job.Id, dbName);
        // TODO: Run EF Core migrations once dynamic DbContext is wired:
        // var opts = new DbContextOptionsBuilder<ApplicationDbContext>().UseNpgsql(context.TenantConnectionString).Options;
        // await using var db = new ApplicationDbContext(opts);
        // await db.Database.MigrateAsync(cancellationToken);
        context.Job.CompletedSteps |= CompletedSteps.DatabaseMigrated;
    }

    /// <summary>
    /// Rolls back by terminating active sessions and dropping the tenant database.
    /// Failures are logged but swallowed so the remaining compensations can run.
    /// </summary>
    public async Task CompensateAsync(SagaContext context, CancellationToken cancellationToken)
    {
        // Nothing was provisioned if the connection string was never written.
        if (string.IsNullOrWhiteSpace(context.TenantConnectionString)) return;
        var dbName = TenantDbName(context.Job.Subdomain);
        var adminConnStr = config.GetConnectionString("postgres");
        if (string.IsNullOrWhiteSpace(adminConnStr)) return;
        logger.LogWarning("[{JobId}] Compensating: dropping database '{Db}'.", context.Job.Id, dbName);
        try
        {
            await using var conn = new NpgsqlConnection(adminConnStr);
            await conn.OpenAsync(cancellationToken);
            // DROP DATABASE fails while sessions are connected — kick them out first.
            // Pass the db name as a positional parameter (matches the $1 style used in
            // CreateDatabaseIfNotExistsAsync) instead of interpolating it into the SQL.
            await using var terminate = conn.CreateCommand();
            terminate.CommandText = """
                SELECT pg_terminate_backend(pid)
                FROM pg_stat_activity
                WHERE datname = $1 AND pid <> pg_backend_pid();
                """;
            terminate.Parameters.AddWithValue(dbName);
            await terminate.ExecuteNonQueryAsync(cancellationToken);
            await using var drop = conn.CreateCommand();
            // Identifiers cannot be parameterized; dbName is internally derived
            // (TenantDbName) and double-quoted, so interpolation is safe here.
            drop.CommandText = $"DROP DATABASE IF EXISTS \"{dbName}\";";
            await drop.ExecuteNonQueryAsync(cancellationToken);
            logger.LogInformation("[{JobId}] Dropped database '{Db}'.", context.Job.Id, dbName);
        }
        catch (Exception ex)
        {
            logger.LogError(ex, "[{JobId}] Failed to drop database '{Db}' during compensation.", context.Job.Id, dbName);
        }
    }

    // ── helpers ──────────────────────────────────────────────────────────────

    // Deterministic DB name from subdomain: fdev-app-clarity-01000014 → clarity_fdev_app_clarity_01000014
    internal static string TenantDbName(string subdomain) =>
        $"clarity_{subdomain.Replace('-', '_').ToLowerInvariant()}";

    // Creates the database unless pg_database already lists it (CREATE DATABASE
    // has no IF NOT EXISTS, hence the explicit existence check).
    private static async Task CreateDatabaseIfNotExistsAsync(
        string adminConnStr, string dbName, CancellationToken ct)
    {
        await using var conn = new NpgsqlConnection(adminConnStr);
        await conn.OpenAsync(ct);
        await using var check = conn.CreateCommand();
        check.CommandText = "SELECT 1 FROM pg_database WHERE datname = $1;";
        check.Parameters.AddWithValue(dbName);
        var exists = await check.ExecuteScalarAsync(ct) is not null;
        if (!exists)
        {
            await using var create = conn.CreateCommand();
            // DB name is internally derived, not user input — safe to interpolate
            create.CommandText = $"CREATE DATABASE \"{dbName}\";";
            await create.ExecuteNonQueryAsync(ct);
        }
    }

    // Rewrites the admin connection string for in-network use by tenant containers.
    private static string BuildTenantConnectionString(string adminConnStr, string dbName)
    {
        var b = new NpgsqlConnectionStringBuilder(adminConnStr) { Database = dbName };
        // Tenant containers reach Postgres via the Aspire shared network using the stable
        // DNS alias "postgres" (the Aspire resource name) at the standard port 5432.
        // The port in the admin connection string is Aspire's random host-side proxy port —
        // reset it to 5432 so the in-network address is correct.
        if (b.Host is "localhost" or "127.0.0.1")
        {
            b.Host = "postgres";
            b.Port = 5432;
        }
        return b.ConnectionString;
    }
}
+45
View File
@@ -0,0 +1,45 @@
using ControlPlane.Core.Interfaces;
using ControlPlane.Core.Models;
using System.Text.Json;
namespace ControlPlane.Worker.Steps;
/// <summary>
/// Saga step that will perform cryptographic pre-flight checks against Vault.
/// Currently a stub: it only marks the step complete; VaultSharp is not wired yet.
/// </summary>
public class VaultStep(ILogger<VaultStep> logger, IConfiguration config) : ISagaStep
{
    public string StepName => "Cryptographic Pre-Flight (Vault)";

    /// <summary>
    /// Stub execution: logs, flags <see cref="CompletedSteps.VaultVerified"/> on the
    /// job, and returns synchronously. See TODOs for the intended real behavior.
    /// </summary>
    public Task ExecuteAsync(SagaContext context, CancellationToken cancellationToken)
    {
        // TODO: VaultSharp
        // 1. Assert Transit engine is active and healthy
        // 2. Derive/validate TenantContextId (e.g. FL_COM_001)
        // 3. Register TenantContextId in a KV entry or TenantRegistry table
        //    so Clarity.Server can resolve the derivation path later
        //
        // Root token is read at runtime from the persisted init.json on the Vault volume:
        //    var token = ReadRootToken();
        logger.LogInformation("[{JobId}] Vault step is a stub - VaultSharp not yet wired.", context.Job.Id);
        context.Job.CompletedSteps |= CompletedSteps.VaultVerified;
        return Task.CompletedTask;
    }

    /// <summary>No forward work is done yet, so compensation is a no-op.</summary>
    public Task CompensateAsync(SagaContext context, CancellationToken cancellationToken)
    {
        logger.LogInformation("[{JobId}] Vault step: no compensation needed.", context.Job.Id);
        return Task.CompletedTask;
    }

    /// <summary>
    /// Reads the root token from the init.json written by the Vault entrypoint on first boot.
    /// Path is injected via Vault__KeysFile config.
    /// </summary>
    /// <exception cref="InvalidOperationException">
    /// Thrown when the path is not configured or init.json lacks "root_token".
    /// </exception>
    internal string ReadRootToken()
    {
        // The environment variable Vault__KeysFile is surfaced by the configuration
        // system under the hierarchical key "Vault:KeysFile" (double underscore maps
        // to the section separator). Read the canonical key first; keep the literal
        // flat key as a fallback for appsettings entries written verbatim.
        var path = config["Vault:KeysFile"]
            ?? config["Vault__KeysFile"]
            ?? throw new InvalidOperationException("Vault__KeysFile is not configured.");
        using var doc = JsonDocument.Parse(File.ReadAllText(path));
        return doc.RootElement.GetProperty("root_token").GetString()
            ?? throw new InvalidOperationException("root_token not found in Vault init.json.");
    }
}
+15
View File
@@ -0,0 +1,15 @@
<Solution>
<Folder Name="/ControlPlane (Factory)/">
<Project Path="ControlPlane.AppHost/ControlPlane.AppHost.csproj" />
<Project Path="ControlPlane.Core/ControlPlane.Core.csproj" />
<Project Path="ControlPlane.Api/ControlPlane.Api.csproj" />
<Project Path="ControlPlane.Worker/ControlPlane.Worker.csproj" />
<Project Path="clarity.controlplane/clarity.controlplane.esproj">
<Build />
<Deploy />
</Project>
</Folder>
<Folder Name="/Solution Items/">
<File Path="Directory.Packages.props" />
</Folder>
</Solution>
+53
View File
@@ -0,0 +1,53 @@
<Project>
<PropertyGroup>
<!-- Enable central package management, https://learn.microsoft.com/en-us/nuget/consume-packages/Central-Package-Management -->
<ManagePackageVersionsCentrally>true</ManagePackageVersionsCentrally>
</PropertyGroup>
<!-- Aspire Packages -->
<ItemGroup>
<PackageVersion Include="Aspire.Hosting.PostgreSQL" Version="13.2.2" />
<PackageVersion Include="Aspire.Keycloak.Authentication" Version="13.2.2-preview.1.26207.2" />
<PackageVersion Include="Aspire.Npgsql.EntityFrameworkCore.PostgreSQL" Version="13.2.2" />
<PackageVersion Include="Aspire.StackExchange.Redis.OutputCaching" Version="13.2.2" />
<PackageVersion Include="Aspire.Hosting.JavaScript" Version="13.2.2" />
<PackageVersion Include="Aspire.Hosting.Keycloak" Version="13.2.2-preview.1.26207.2" />
<PackageVersion Include="Aspire.Hosting.Redis" Version="13.2.2" />
<PackageVersion Include="CommunityToolkit.Aspire.Hosting.Minio" Version="13.2.1-beta.532" />
<PackageVersion Include="CommunityToolkit.Aspire.Minio.Client" Version="13.2.1-beta.532" />
<PackageVersion Include="Docker.DotNet" Version="3.125.15" />
<PackageVersion Include="Microsoft.EntityFrameworkCore" Version="10.0.6" />
<PackageVersion Include="Microsoft.EntityFrameworkCore.Design" Version="10.0.6" />
<PackageVersion Include="Microsoft.EntityFrameworkCore.Tools" Version="10.0.6" />
<PackageVersion Include="Microsoft.Extensions.Configuration.Abstractions" Version="10.0.7" />
<PackageVersion Include="Microsoft.Extensions.Logging.Abstractions" Version="10.0.7" />
<PackageVersion Include="Npgsql" Version="10.0.2" />
<PackageVersion Include="LibGit2Sharp" Version="0.31.0" />
<PackageVersion Include="Scalar.Aspire" Version="0.9.24" />
<PackageVersion Include="VaultSharp" Version="1.17.5.1" />
<PackageVersion Include="Yarp.ReverseProxy" Version="2.3.0" />
</ItemGroup>
<!-- Clarity.MigrationService -->
<ItemGroup>
<PackageVersion Include="Microsoft.Extensions.Hosting" Version="10.0.6" />
</ItemGroup>
<!-- ControlPlane -->
<ItemGroup>
<PackageVersion Include="Aspire.Hosting.Sdk" Version="13.2.2" />
<PackageVersion Include="Aspire.Hosting.RabbitMQ" Version="13.2.2" />
<PackageVersion Include="Aspire.RabbitMQ.Client" Version="13.2.2" />
<PackageVersion Include="Keycloak.AuthServices.Sdk" Version="2.9.0" />
<PackageVersion Include="MassTransit" Version="8.4.1" />
<PackageVersion Include="MassTransit.RabbitMQ" Version="8.4.1" />
</ItemGroup>
<!-- Clarity.Server -->
<ItemGroup>
<PackageVersion Include="Microsoft.AspNetCore.OpenApi" Version="10.0.6" />
<PackageVersion Include="Microsoft.Extensions.Http.Resilience" Version="10.5.0" />
<PackageVersion Include="Microsoft.Extensions.ServiceDiscovery" Version="10.5.0" />
<PackageVersion Include="OpenTelemetry.Exporter.OpenTelemetryProtocol" Version="1.15.2" />
<PackageVersion Include="OpenTelemetry.Extensions.Hosting" Version="1.15.2" />
<PackageVersion Include="OpenTelemetry.Instrumentation.AspNetCore" Version="1.15.1" />
<PackageVersion Include="OpenTelemetry.Instrumentation.Http" Version="1.15.0" />
<PackageVersion Include="OpenTelemetry.Instrumentation.Runtime" Version="1.15.0" />
</ItemGroup>
</Project>
+2
View File
@@ -0,0 +1,2 @@
VITE_CLARITY_DOMAIN=clarity.test
# SECURITY: a live OpenRouter API key was committed on this line. Revoke that key,
# purge it from git history, and supply the value via an untracked .env.local or a
# secret store instead of committing it.
OPEN_ROUTER_KEY=
+24
View File
@@ -0,0 +1,24 @@
# Logs
logs
*.log
npm-debug.log*
yarn-debug.log*
yarn-error.log*
pnpm-debug.log*
lerna-debug.log*
node_modules
dist
dist-ssr
*.local
# Editor directories and files
.vscode/*
!.vscode/extensions.json
.idea
.DS_Store
*.suo
*.ntvs*
*.njsproj
*.sln
*.sw?
+12
View File
@@ -0,0 +1,12 @@
This file explains how Visual Studio created the project.
The following tools were used to generate this project:
- create-vite
The following steps were used to generate this project:
- Create react project with create-vite: `npm init --yes vite@latest clarity.controlplane -- --template=react-ts --no-rolldown --no-immediate`.
- Update `vite.config.ts` with the dev-server port.
- Create project file (`clarity.controlplane.esproj`).
- Create `launch.json` to enable debugging.
- Add project to solution.
- Write this file.
+73
View File
@@ -0,0 +1,73 @@
# React + TypeScript + Vite
This template provides a minimal setup to get React working in Vite with HMR and some ESLint rules.
Currently, two official plugins are available:
- [@vitejs/plugin-react](https://github.com/vitejs/vite-plugin-react/blob/main/packages/plugin-react) uses [Oxc](https://oxc.rs)
- [@vitejs/plugin-react-swc](https://github.com/vitejs/vite-plugin-react/blob/main/packages/plugin-react-swc) uses [SWC](https://swc.rs/)
## React Compiler
The React Compiler is not enabled on this template because of its impact on dev and build performance. To add it, see [this documentation](https://react.dev/learn/react-compiler/installation).
## Expanding the ESLint configuration
If you are developing a production application, we recommend updating the configuration to enable type-aware lint rules:
```js
export default defineConfig([
globalIgnores(['dist']),
{
files: ['**/*.{ts,tsx}'],
extends: [
// Other configs...
// Remove tseslint.configs.recommended and replace with this
tseslint.configs.recommendedTypeChecked,
// Alternatively, use this for stricter rules
tseslint.configs.strictTypeChecked,
// Optionally, add this for stylistic rules
tseslint.configs.stylisticTypeChecked,
// Other configs...
],
languageOptions: {
parserOptions: {
project: ['./tsconfig.node.json', './tsconfig.app.json'],
tsconfigRootDir: import.meta.dirname,
},
// other options...
},
},
])
```
You can also install [eslint-plugin-react-x](https://github.com/Rel1cx/eslint-react/tree/main/packages/plugins/eslint-plugin-react-x) and [eslint-plugin-react-dom](https://github.com/Rel1cx/eslint-react/tree/main/packages/plugins/eslint-plugin-react-dom) for React-specific lint rules:
```js
// eslint.config.js
import reactX from 'eslint-plugin-react-x'
import reactDom from 'eslint-plugin-react-dom'
export default defineConfig([
globalIgnores(['dist']),
{
files: ['**/*.{ts,tsx}'],
extends: [
// Other configs...
// Enable lint rules for React
reactX.configs['recommended-typescript'],
// Enable lint rules for React DOM
reactDom.configs.recommended,
],
languageOptions: {
parserOptions: {
project: ['./tsconfig.node.json', './tsconfig.app.json'],
tsconfigRootDir: import.meta.dirname,
},
// other options...
},
},
])
```
@@ -0,0 +1,11 @@
<Project Sdk="Microsoft.VisualStudio.JavaScript.Sdk/1.0.4902498">
<PropertyGroup>
<StartupCommand>npm run dev</StartupCommand>
<JavaScriptTestRoot>src\</JavaScriptTestRoot>
<JavaScriptTestFramework>Vitest</JavaScriptTestFramework>
<!-- Allows the build (or compile) script located on package.json to run on Build -->
<ShouldRunBuildScript>false</ShouldRunBuildScript>
<!-- Folder where production build objects will be placed -->
<BuildOutputFolder>$(MSBuildProjectDirectory)\dist</BuildOutputFolder>
</PropertyGroup>
</Project>
+23
View File
@@ -0,0 +1,23 @@
import js from '@eslint/js'
import globals from 'globals'
import reactHooks from 'eslint-plugin-react-hooks'
import reactRefresh from 'eslint-plugin-react-refresh'
import tseslint from 'typescript-eslint'
import { defineConfig, globalIgnores } from 'eslint/config'
// Flat-config ESLint setup for the Vite + React + TypeScript client.
export default defineConfig([
  // Build output is generated — never lint it.
  globalIgnores(['dist']),
  {
    // Only TypeScript sources are linted; plain JS config files are left alone.
    files: ['**/*.{ts,tsx}'],
    // Order matters: later configs override earlier ones.
    extends: [
      js.configs.recommended,
      tseslint.configs.recommended,
      reactHooks.configs.flat.recommended,
      reactRefresh.configs.vite,
    ],
    languageOptions: {
      ecmaVersion: 2020,
      globals: globals.browser,
    },
  },
])
+13
View File
@@ -0,0 +1,13 @@
<!doctype html>
<html lang="en">
<head>
<meta charset="UTF-8" />
<link rel="icon" type="image/svg+xml" href="/favicon.svg" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<title>clarity.controlplane</title>
</head>
<body>
<div id="root"></div>
<script type="module" src="/src/main.tsx"></script>
</body>
</html>
File diff suppressed because it is too large Load Diff
+34
View File
@@ -0,0 +1,34 @@
{
"name": "clarity.controlplane",
"private": true,
"version": "0.0.0",
"type": "module",
"scripts": {
"dev": "vite",
"build": "tsc -b && vite build",
"lint": "eslint .",
"preview": "vite preview"
},
"dependencies": {
"@blueprintjs/core": "^6.12.0",
"diff2html": "^3.4.56",
"highlight.js": "^11.11.1",
"react": "^18.3.1",
"react-dom": "^18.3.1",
"react-multistep": "^7.0.0"
},
"devDependencies": {
"@eslint/js": "^9.39.4",
"@types/node": "^24.12.2",
"@types/react": "^18.3.23",
"@types/react-dom": "^18.3.7",
"@vitejs/plugin-react": "^6.0.1",
"eslint": "^9.39.4",
"eslint-plugin-react-hooks": "^7.1.1",
"eslint-plugin-react-refresh": "^0.5.2",
"globals": "^17.5.0",
"typescript": "~6.0.2",
"typescript-eslint": "^8.58.2",
"vite": "^8.0.9"
}
}
File diff suppressed because one or more lines are too long

After

Width:  |  Height:  |  Size: 9.3 KiB

+24
View File
@@ -0,0 +1,24 @@
<svg xmlns="http://www.w3.org/2000/svg">
<symbol id="bluesky-icon" viewBox="0 0 16 17">
<g clip-path="url(#bluesky-clip)"><path fill="#08060d" d="M7.75 7.735c-.693-1.348-2.58-3.86-4.334-5.097-1.68-1.187-2.32-.981-2.74-.79C.188 2.065.1 2.812.1 3.251s.241 3.602.398 4.13c.52 1.744 2.367 2.333 4.07 2.145-2.495.37-4.71 1.278-1.805 4.512 3.196 3.309 4.38-.71 4.987-2.746.608 2.036 1.307 5.91 4.93 2.746 2.72-2.746.747-4.143-1.747-4.512 1.702.189 3.55-.4 4.07-2.145.156-.528.397-3.691.397-4.13s-.088-1.186-.575-1.406c-.42-.19-1.06-.395-2.741.79-1.755 1.24-3.64 3.752-4.334 5.099"/></g>
<defs><clipPath id="bluesky-clip"><path fill="#fff" d="M.1.85h15.3v15.3H.1z"/></clipPath></defs>
</symbol>
<symbol id="discord-icon" viewBox="0 0 20 19">
<path fill="#08060d" d="M16.224 3.768a14.5 14.5 0 0 0-3.67-1.153c-.158.286-.343.67-.47.976a13.5 13.5 0 0 0-4.067 0c-.128-.306-.317-.69-.476-.976A14.4 14.4 0 0 0 3.868 3.77C1.546 7.28.916 10.703 1.231 14.077a14.7 14.7 0 0 0 4.5 2.306q.545-.748.965-1.587a9.5 9.5 0 0 1-1.518-.74q.191-.14.372-.293c2.927 1.369 6.107 1.369 8.999 0q.183.152.372.294-.723.437-1.52.74.418.838.963 1.588a14.6 14.6 0 0 0 4.504-2.308c.37-3.911-.63-7.302-2.644-10.309m-9.13 8.234c-.878 0-1.599-.82-1.599-1.82 0-.998.705-1.82 1.6-1.82.894 0 1.614.82 1.599 1.82.001 1-.705 1.82-1.6 1.82m5.91 0c-.878 0-1.599-.82-1.599-1.82 0-.998.705-1.82 1.6-1.82.893 0 1.614.82 1.599 1.82 0 1-.706 1.82-1.6 1.82"/>
</symbol>
<symbol id="documentation-icon" viewBox="0 0 21 20">
<path fill="none" stroke="#aa3bff" stroke-linecap="round" stroke-linejoin="round" stroke-width="1.35" d="m15.5 13.333 1.533 1.322c.645.555.967.833.967 1.178s-.322.623-.967 1.179L15.5 18.333m-3.333-5-1.534 1.322c-.644.555-.966.833-.966 1.178s.322.623.966 1.179l1.534 1.321"/>
<path fill="none" stroke="#aa3bff" stroke-linecap="round" stroke-linejoin="round" stroke-width="1.35" d="M17.167 10.836v-4.32c0-1.41 0-2.117-.224-2.68-.359-.906-1.118-1.621-2.08-1.96-.599-.21-1.349-.21-2.848-.21-2.623 0-3.935 0-4.983.369-1.684.591-3.013 1.842-3.641 3.428C3 6.449 3 7.684 3 10.154v2.122c0 2.558 0 3.838.706 4.726q.306.383.713.671c.76.536 1.79.64 3.581.66"/>
<path fill="none" stroke="#aa3bff" stroke-linecap="round" stroke-linejoin="round" stroke-width="1.35" d="M3 10a2.78 2.78 0 0 1 2.778-2.778c.555 0 1.209.097 1.748-.047.48-.129.854-.503.982-.982.145-.54.048-1.194.048-1.749a2.78 2.78 0 0 1 2.777-2.777"/>
</symbol>
<symbol id="github-icon" viewBox="0 0 19 19">
<path fill="#08060d" fill-rule="evenodd" d="M9.356 1.85C5.05 1.85 1.57 5.356 1.57 9.694a7.84 7.84 0 0 0 5.324 7.44c.387.079.528-.168.528-.376 0-.182-.013-.805-.013-1.454-2.165.467-2.616-.935-2.616-.935-.349-.91-.864-1.143-.864-1.143-.71-.48.051-.48.051-.48.787.051 1.2.805 1.2.805.695 1.194 1.817.857 2.268.649.064-.507.27-.857.49-1.052-1.728-.182-3.545-.857-3.545-3.87 0-.857.31-1.558.8-2.104-.078-.195-.349-1 .077-2.078 0 0 .657-.208 2.14.805a7.5 7.5 0 0 1 1.946-.26c.657 0 1.328.092 1.946.26 1.483-1.013 2.14-.805 2.14-.805.426 1.078.155 1.883.078 2.078.502.546.799 1.247.799 2.104 0 3.013-1.818 3.675-3.558 3.87.284.247.528.714.528 1.454 0 1.052-.012 1.896-.012 2.156 0 .208.142.455.528.377a7.84 7.84 0 0 0 5.324-7.441c.013-4.338-3.48-7.844-7.773-7.844" clip-rule="evenodd"/>
</symbol>
<symbol id="social-icon" viewBox="0 0 20 20">
<path fill="none" stroke="#aa3bff" stroke-linecap="round" stroke-linejoin="round" stroke-width="1.35" d="M12.5 6.667a4.167 4.167 0 1 0-8.334 0 4.167 4.167 0 0 0 8.334 0"/>
<path fill="none" stroke="#aa3bff" stroke-linecap="round" stroke-linejoin="round" stroke-width="1.35" d="M2.5 16.667a5.833 5.833 0 0 1 8.75-5.053m3.837.474.513 1.035c.07.144.257.282.414.309l.93.155c.596.1.736.536.307.965l-.723.73a.64.64 0 0 0-.152.531l.207.903c.164.715-.213.991-.84.618l-.872-.52a.63.63 0 0 0-.577 0l-.872.52c-.624.373-1.003.094-.84-.618l.207-.903a.64.64 0 0 0-.152-.532l-.723-.729c-.426-.43-.289-.864.306-.964l.93-.156a.64.64 0 0 0 .412-.31l.513-1.034c.28-.562.735-.562 1.012 0"/>
</symbol>
<symbol id="x-icon" viewBox="0 0 19 19">
<path fill="#08060d" fill-rule="evenodd" d="M1.893 1.98c.052.072 1.245 1.769 2.653 3.77l2.892 4.114c.183.261.333.48.333.486s-.068.089-.152.183l-.522.593-.765.867-3.597 4.087c-.375.426-.734.834-.798.905a1 1 0 0 0-.118.148c0 .01.236.017.664.017h.663l.729-.83c.4-.457.796-.906.879-.999a692 692 0 0 0 1.794-2.038c.034-.037.301-.34.594-.675l.551-.624.345-.392a7 7 0 0 1 .34-.374c.006 0 .93 1.306 2.052 2.903l2.084 2.965.045.063h2.275c1.87 0 2.273-.003 2.266-.021-.008-.02-1.098-1.572-3.894-5.547-2.013-2.862-2.28-3.246-2.273-3.266.008-.019.282-.332 2.085-2.38l2-2.274 1.567-1.782c.022-.028-.016-.03-.65-.03h-.674l-.3.342a871 871 0 0 1-1.782 2.025c-.067.075-.405.458-.75.852a100 100 0 0 1-.803.91c-.148.172-.299.344-.99 1.127-.304.343-.32.358-.345.327-.015-.019-.904-1.282-1.976-2.808L6.365 1.85H1.8zm1.782.91 8.078 11.294c.772 1.08 1.413 1.973 1.425 1.984.016.017.241.02 1.05.017l1.03-.004-2.694-3.766L7.796 5.75 5.722 2.852l-1.039-.004-1.039-.004z" clip-rule="evenodd"/>
</symbol>
</svg>

After

Width:  |  Height:  |  Size: 4.9 KiB

+1
View File
@@ -0,0 +1 @@
/* App-level overrides — component styles live in index.css */
+76
View File
@@ -0,0 +1,76 @@
import '@blueprintjs/core/lib/css/blueprint.css';
import './App.css';
import { useState } from 'react';
import { Menu, MenuItem, MenuDivider } from '@blueprintjs/core';
import DashboardPage from './pages/DashboardPage';
import PipelinesPage from './pages/PipelinesPage';
import BuildMonitorPage from './pages/BuildMonitorPage';
import ImageBuildPage from './pages/ImageBuildPage';
import BranchPage from './pages/BranchPage';
import OpcPage from './opc/OpcPage';
import InfraPage from './pages/InfraPage';
// Root application shell: a fixed sidebar (brand, nav menu, user footer) next to
// a main panel. Navigation is plain local state — no router; `activeNav` selects
// which page component renders.
function App() {
  // Currently selected nav key; 'opc' is the default landing section.
  const [activeNav, setActiveNav] = useState('opc');
  return (
    <div className="cp-shell">
      {/* ── Sidebar ── */}
      <aside className="cp-sidebar">
        <div className="cp-sidebar-brand">
          <span className="brand-mark">CP</span>
          <span className="brand-name">Control Plane</span>
        </div>
        <div className="cp-sidebar-nav">
          {/* Each item sets activeNav; `active` keys off the same string. */}
          <Menu className="cp-sidebar-menu">
            <MenuItem icon="cloud-upload" text="Deployments" active={activeNav === 'deployments'} onClick={() => setActiveNav('deployments')} />
            <MenuItem icon="git-branch" text="Pipelines" active={activeNav === 'pipelines'} onClick={() => setActiveNav('pipelines')} />
            <MenuItem icon="git-merge" text="Branch Ladder" active={activeNav === 'branches'} onClick={() => setActiveNav('branches')} />
            <MenuItem icon="build" text="Image Build" active={activeNav === 'image-build'} onClick={() => setActiveNav('image-build')} />
            <MenuItem icon="pulse" text="Build Monitor" active={activeNav === 'build-monitor'} onClick={() => setActiveNav('build-monitor')} />
            <MenuDivider />
            <MenuItem icon="heat-grid" text="Infrastructure" active={activeNav === 'infra'} onClick={() => setActiveNav('infra')} />
            <MenuItem icon="clipboard" text="OPC" active={activeNav === 'opc'} onClick={() => setActiveNav('opc')} />
            <MenuItem icon="people" text="Clients" active={activeNav === 'clients'} onClick={() => setActiveNav('clients')} />
            <MenuItem icon="cog" text="Settings" active={activeNav === 'settings'} onClick={() => setActiveNav('settings')} />
          </Menu>
        </div>
        {/* Static user card — not yet backed by real auth. */}
        <div className="cp-sidebar-footer">
          <div className="cp-sidebar-user">
            <div className="user-avatar">A</div>
            <div className="user-info">
              <span className="user-name">Platform Admin</span>
              <span className="user-role">Clarity Internal</span>
            </div>
          </div>
        </div>
      </aside>
      {/* ── Main content ── */}
      <main className="cp-main">
        {/* Exactly one branch matches; unimplemented sections render a stub. */}
        {activeNav === 'deployments' && <DashboardPage />}
        {activeNav === 'pipelines' && <PipelinesPage />}
        {activeNav === 'branches' && <BranchPage />}
        {activeNav === 'image-build' && <ImageBuildPage />}
        {activeNav === 'build-monitor' && <BuildMonitorPage />}
        {activeNav === 'infra' && <InfraPage />}
        {activeNav === 'opc' && <OpcPage />}
        {activeNav === 'clients' && <PlaceholderPage title="Clients" />}
        {activeNav === 'settings' && <PlaceholderPage title="Settings" />}
      </main>
    </div>
  );
}
/** Stub page shown for sidebar sections that are not implemented yet. */
function PlaceholderPage(props: { title: string }) {
  const { title } = props;
  return (
    <div className="page-header">
      <h1>{title}</h1>
      <p>Coming soon.</p>
    </div>
  );
}
export default App;
+44
View File
@@ -0,0 +1,44 @@
const BASE_URL = import.meta.env.VITE_API_URL ?? '';
// Lifecycle state reported for one managed container.
export type ServiceStatus = 'running' | 'stopped' | 'unhealthy' | 'unknown';
// One infrastructure service row as reported by the control-plane API.
export interface InfraService {
  name: string;
  container: string;       // Docker container name
  status: ServiceStatus;
  ports: string[];         // published port mappings as strings
  uptime?: string;         // presumably only set while running — TODO confirm against API
}
// Envelope for GET /api/infra/status.
export interface InfraStatusResponse {
  services: InfraService[];
  checkedAt: string;       // server-side timestamp of the status sweep
}
/** GET /api/infra/status — snapshot of every managed infra service. */
export async function getInfraStatus(): Promise<InfraStatusResponse> {
  const res = await fetch(`${BASE_URL}/api/infra/status`);
  if (res.ok) return res.json();
  throw new Error('Failed to fetch infra status');
}
/** POST a lifecycle action (start/stop/restart) for one infra service. */
export async function infraServiceAction(
  service: string,
  action: 'start' | 'stop' | 'restart'
): Promise<void> {
  const url = `${BASE_URL}/api/infra/${service}/${action}`;
  const res = await fetch(url, { method: 'POST' });
  if (!res.ok) {
    throw new Error(`Failed to ${action} ${service}`);
  }
}
/** Streams `compose up` output over SSE; closes itself when the stream errors/ends. */
export function streamComposeUp(onLine: (line: string) => void, onDone: () => void): EventSource {
  const src = new EventSource(`${BASE_URL}/api/infra/compose/up/stream`);
  src.onmessage = (e) => {
    onLine(e.data);
  };
  src.onerror = () => {
    onDone();
    src.close();
  };
  return src;
}
/** Streams `compose down` output over SSE; closes itself when the stream errors/ends. */
export function streamComposeDown(onLine: (line: string) => void, onDone: () => void): EventSource {
  const src = new EventSource(`${BASE_URL}/api/infra/compose/down/stream`);
  src.onmessage = (e) => {
    onLine(e.data);
  };
  src.onerror = () => {
    onDone();
    src.close();
  };
  return src;
}
+313
View File
@@ -0,0 +1,313 @@
import type { Opc, OpcNote, OpcArtifact, OpcType, OpcStatus, OpcPriority } from '../types/opc';
const BASE_URL = import.meta.env.VITE_API_URL ?? '';
// ── OPC CRUD ──────────────────────────────────────────────────────────────────
/** Asks the server for the next free OPC number. */
export async function getNextNumber(): Promise<string> {
  const res = await fetch(`${BASE_URL}/api/opc/next-number`);
  if (!res.ok) {
    throw new Error('Failed to fetch next OPC number');
  }
  const { number } = await res.json();
  return number as string;
}
/** Lists OPCs, optionally filtered by type and/or status ('all' disables a filter). */
export async function listOpcs(type?: string, status?: string): Promise<Opc[]> {
  const params = new URLSearchParams();
  if (type && type !== 'all') {
    params.set('type', type);
  }
  if (status && status !== 'all') {
    params.set('status', status);
  }
  const res = await fetch(`${BASE_URL}/api/opc?${params}`);
  if (!res.ok) {
    throw new Error(`Failed to load OPCs: ${res.statusText}`);
  }
  // API returns OpcRecord (camelCase from .NET JsonSerializerDefaults.Web)
  const records = await res.json();
  return records.map(mapRecord);
}
export async function getOpc(id: string): Promise<Opc> {
const res = await fetch(`${BASE_URL}/api/opc/${id}`);
if (!res.ok) throw new Error(`Failed to load OPC: ${res.statusText}`);
return mapRecord(await res.json());
}
/** Creates a new OPC and returns the server-assigned record. */
export async function createOpc(req: {
  title: string; type: OpcType; priority: OpcPriority;
  assignee: string; description: string;
}): Promise<Opc> {
  const init: RequestInit = {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify(req),
  };
  const res = await fetch(`${BASE_URL}/api/opc`, init);
  if (!res.ok) {
    throw new Error(`Failed to create OPC: ${res.statusText}`);
  }
  return mapRecord(await res.json());
}
/** Partially updates an OPC (only the provided fields change). */
export async function updateOpc(id: string, req: {
  title?: string; description?: string; type?: OpcType;
  status?: OpcStatus; priority?: OpcPriority; assignee?: string;
}): Promise<Opc> {
  const init: RequestInit = {
    method: 'PATCH',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify(req),
  };
  const res = await fetch(`${BASE_URL}/api/opc/${id}`, init);
  if (!res.ok) {
    throw new Error(`Failed to update OPC: ${res.statusText}`);
  }
  return mapRecord(await res.json());
}
// ── Notes ─────────────────────────────────────────────────────────────────────
/** Loads all notes attached to an OPC. */
export async function listNotes(opcId: string): Promise<OpcNote[]> {
  const res = await fetch(`${BASE_URL}/api/opc/${opcId}/notes`);
  if (!res.ok) {
    throw new Error(`Failed to load notes: ${res.statusText}`);
  }
  const notes = await res.json();
  return notes.map(mapNote);
}
/** Appends a note to an OPC and returns the stored note. */
export async function addNote(opcId: string, author: string, content: string): Promise<OpcNote> {
  const init: RequestInit = {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ author, content }),
  };
  const res = await fetch(`${BASE_URL}/api/opc/${opcId}/notes`, init);
  if (!res.ok) {
    throw new Error(`Failed to add note: ${res.statusText}`);
  }
  return mapNote(await res.json());
}
// ── Artifacts ─────────────────────────────────────────────────────────────────
/**
 * Lists artifacts for an OPC, optionally filtered by artifact type.
 * The filter value is encoded with URLSearchParams instead of being spliced
 * raw into the query string (raw splicing breaks on '&', '=', spaces, etc.).
 */
export async function listArtifacts(opcId: string, type?: string): Promise<OpcArtifact[]> {
  const params = type ? `?${new URLSearchParams({ type })}` : '';
  const res = await fetch(`${BASE_URL}/api/opc/${opcId}/artifacts${params}`);
  if (!res.ok) throw new Error(`Failed to load artifacts: ${res.statusText}`);
  return (await res.json()).map(mapArtifact);
}
/** Creates an artifact under an OPC and returns the stored record. */
export async function createArtifact(opcId: string, req: {
  artifactType: string; title: string; content: string;
}): Promise<OpcArtifact> {
  const res = await fetch(`${BASE_URL}/api/opc/${opcId}/artifacts`, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify(req),
  });
  if (!res.ok) throw new Error(`Failed to create artifact: ${res.statusText}`);
  return mapArtifact(await res.json());
}
/** Replaces an artifact's type/title/content. */
export async function updateArtifact(artifactId: string, req: {
  artifactType: string; title: string; content: string;
}): Promise<OpcArtifact> {
  const res = await fetch(`${BASE_URL}/api/opc/artifacts/${artifactId}`, {
    method: 'PATCH',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify(req),
  });
  if (!res.ok) throw new Error(`Failed to update artifact: ${res.statusText}`);
  return mapArtifact(await res.json());
}
/** Deletes an artifact; a 404 is treated as already deleted (idempotent). */
export async function deleteArtifact(artifactId: string): Promise<void> {
  const res = await fetch(`${BASE_URL}/api/opc/artifacts/${artifactId}`, { method: 'DELETE' });
  if (!res.ok && res.status !== 404) throw new Error(`Failed to delete artifact: ${res.statusText}`);
}
// ── Git commit linkage ────────────────────────────────────────────────────────
// Commits are linked by convention: developers include "OPC # XXXX" in their commit message.
// The git log endpoint supports ?grep=OPC+%230001 to filter.
export interface LinkedCommit {
  hash: string;
  shortHash: string;
  author: string;
  date: string;
  subject: string;
  files: string[];
}
/** Finds commits whose message mentions the given OPC number. */
export async function getLinkedCommits(opcNumber: string): Promise<LinkedCommit[]> {
  const query = `grep=${encodeURIComponent(opcNumber)}&limit=50`;
  const res = await fetch(`${BASE_URL}/api/git/log?${query}`);
  if (!res.ok) {
    throw new Error(`Failed to load commits: ${res.statusText}`);
  }
  return res.json();
}
// ── Pinned commits ────────────────────────────────────────────────────────────
export interface PinnedCommit {
opcId: string;
hash: string;
shortHash: string;
subject: string;
author: string;
pinnedAt: string;
pinnedBy: string;
}
function mapPinnedCommit(d: Record<string, unknown>): PinnedCommit {
return {
opcId: d.opcId as string,
hash: d.hash as string,
shortHash: d.shortHash as string,
subject: d.subject as string,
author: d.author as string,
pinnedAt: d.pinnedAt as string,
pinnedBy: d.pinnedBy as string,
};
}
export async function getPinnedCommits(opcId: string): Promise<PinnedCommit[]> {
const res = await fetch(`${BASE_URL}/api/opc/${opcId}/pinned-commits`);
if (!res.ok) throw new Error(`Failed to load pinned commits: ${res.statusText}`);
return (await res.json()).map(mapPinnedCommit);
}
export async function pinCommit(opcId: string, hash: string, pinnedBy: string): Promise<PinnedCommit> {
const res = await fetch(`${BASE_URL}/api/opc/${opcId}/pinned-commits`, {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({ hash, pinnedBy }),
});
if (!res.ok) {
const body = await res.text().catch(() => res.statusText);
throw new Error(body || res.statusText);
}
return mapPinnedCommit(await res.json());
}
export async function unpinCommit(opcId: string, hash: string): Promise<void> {
const res = await fetch(`${BASE_URL}/api/opc/${opcId}/pinned-commits/${hash}`, { method: 'DELETE' });
if (!res.ok && res.status !== 404) throw new Error(`Failed to unpin commit: ${res.statusText}`);
}
// ── Branch coverage ───────────────────────────────────────────────────────────
export interface BranchCoverage {
  branch: string;
  contains: boolean;
  tipHash: string;
  isHead: boolean;
}
/**
 * Asks which branches contain the given commits.
 * Returns [] immediately for an empty input instead of issuing a request.
 * The comma-joined hash list is encoded via URLSearchParams rather than being
 * concatenated raw into the URL.
 */
export async function getBranchCoverage(hashes: string[]): Promise<BranchCoverage[]> {
  if (hashes.length === 0) return [];
  const params = new URLSearchParams({ commits: hashes.join(',') });
  const res = await fetch(`${BASE_URL}/api/git/branch-coverage?${params}`);
  if (!res.ok) throw new Error(`Failed to get branch coverage: ${res.statusText}`);
  return res.json();
}
// ── Commit detail (full diff) ─────────────────────────────────────────────────
export interface CommitFile {
  path: string;
  oldPath: string;
  status: string;
  additions: number;
  deletions: number;
  patch: string;
}
export interface CommitDetail {
  hash: string;
  shortHash: string;
  author: string;
  email: string;
  date: string;
  subject: string;
  body: string;
  files: CommitFile[];
}
/** Loads one commit's full metadata and per-file patches. */
export async function getCommitDetail(hash: string): Promise<CommitDetail> {
  const res = await fetch(`${BASE_URL}/api/git/commits/${hash}`);
  if (res.ok) {
    return res.json();
  }
  throw new Error(`Failed to load commit ${hash}: ${res.statusText}`);
}
// ── AI assist ─────────────────────────────────────────────────────────────────
/** Sends a prompt (plus optional context) to the AI-assist endpoint; returns the generated text. */
export async function aiAssist(prompt: string, context?: string): Promise<string> {
  const init: RequestInit = {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ prompt, context }),
  };
  const res = await fetch(`${BASE_URL}/api/opc/ai-assist`, init);
  if (!res.ok) {
    const err = await res.text();
    throw new Error(`AI assist failed: ${err}`);
  }
  const data = await res.json();
  return data.text as string;
}
// ── Field mappers (snake_case/PascalCase → camelCase) ─────────────────────────
// .NET JsonSerializerDefaults.Web produces camelCase already, so these are
// lightweight guards in case of null/missing fields.
// eslint-disable-next-line @typescript-eslint/no-explicit-any
function mapRecord(r: any): Opc {
  const { id, number, title, createdAt, updatedAt } = r;
  return {
    id,
    number,
    title,
    description: r.description ?? '',
    type: r.type as OpcType,
    status: r.status as OpcStatus,
    priority: r.priority as OpcPriority,
    assignee: r.assignee ?? '',
    createdAt,
    updatedAt,
    // Both collections are loaded separately when the drawer opens.
    notes: [],
    commits: [],
  };
}
// eslint-disable-next-line @typescript-eslint/no-explicit-any
function mapNote(r: any): OpcNote {
  const { id, author, content } = r;
  // The API's createdAt becomes the UI-facing `timestamp`.
  return { id, author, timestamp: r.createdAt, content };
}
// eslint-disable-next-line @typescript-eslint/no-explicit-any
function mapArtifact(r: any): OpcArtifact {
  const { id, opcId, artifactType, title, content, createdAt, updatedAt } = r;
  return { id, opcId, artifactType, title, content, createdAt, updatedAt };
}
// ── Gitea branch integration ───────────────────────────────────────────────────
export interface GiteaBranch {
  name: string;
  commitSha: string;
  protected: boolean;
}
/** Lists all branches in the configured Gitea repository. */
export async function listGiteaBranches(): Promise<GiteaBranch[]> {
  const res = await fetch(`${BASE_URL}/api/gitea/branches`);
  if (res.ok) {
    return res.json();
  }
  throw new Error(`Failed to load Gitea branches: ${res.statusText}`);
}
/** Creates an OPC working branch in Gitea; surfaces the server's error body on failure. */
export async function createGiteaBranch(
  opcNumber: string,
  opcTitle: string,
  from = 'master',
): Promise<GiteaBranch> {
  const init: RequestInit = {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ opcNumber, opcTitle, from }),
  };
  const res = await fetch(`${BASE_URL}/api/gitea/branches`, init);
  if (res.ok) {
    return res.json();
  }
  const body = await res.text().catch(() => res.statusText);
  throw new Error(body || res.statusText);
}
@@ -0,0 +1,290 @@
import type { ProvisioningProgressEvent, ProvisioningRequest, TenantRecord } from '../types/provisioning';
const BASE_URL = import.meta.env.VITE_API_URL ?? '';
/** Queues a tenant provisioning job and returns its job id. */
export async function submitProvisioningJob(request: ProvisioningRequest): Promise<string> {
  const init: RequestInit = {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify(request),
  };
  const res = await fetch(`${BASE_URL}/api/provision`, init);
  if (!res.ok) {
    throw new Error(`Failed to queue job: ${res.statusText}`);
  }
  const data = await res.json();
  return data.id as string;
}
/** Loads all provisioned tenant records. */
export async function getTenants(): Promise<TenantRecord[]> {
  const res = await fetch(`${BASE_URL}/api/tenants`);
  if (res.ok) {
    return res.json();
  }
  throw new Error(`Failed to load tenants: ${res.statusText}`);
}
/** Opens an SSE stream of a tenant container's logs; the caller owns closing the source. */
export function subscribeToTenantLogs(
  subdomain: string,
  onLine: (line: string) => void,
  onError: (err: Event) => void
): EventSource {
  const source = new EventSource(`${BASE_URL}/api/tenants/${subdomain}/logs`);
  source.onmessage = (e) => {
    // Skip empty keep-alive messages.
    if (e.data) {
      onLine(e.data);
    }
  };
  source.onerror = onError;
  return source;
}
/** Subscribes to a provisioning job's progress events (JSON over SSE); caller closes the source. */
export function subscribeToJobStream(
  jobId: string,
  onEvent: (event: ProvisioningProgressEvent) => void,
  onError: (err: Event) => void
): EventSource {
  const source = new EventSource(`${BASE_URL}/api/provision/${jobId}/stream`);
  source.onmessage = (e) => {
    let parsed: ProvisioningProgressEvent | undefined;
    try {
      parsed = JSON.parse(e.data);
    } catch {
      /* ignore malformed frames */
    }
    if (parsed !== undefined) onEvent(parsed);
  };
  source.onerror = onError;
  return source;
}
/** Current state of the Docker image build, from /api/image/status. */
export interface ImageBuildStatus {
  imageName: string | null;  // null when no image has been built yet — presumably; verify against backend
  builtAt: string | null;    // timestamp of the last build, null if never built
  lastMessage: string;
  isBuilding: boolean;       // true while a build is in progress
}
/** Fetches the current image build status. @throws Error on a non-2xx response. */
export async function getImageStatus(): Promise<ImageBuildStatus> {
  const response = await fetch(`${BASE_URL}/api/image/status`);
  if (response.ok) return response.json();
  throw new Error(`Failed to get image status: ${response.statusText}`);
}
/**
 * Triggers a build and streams log lines. Calls onLine for each log chunk,
 * onDone exactly once when the stream finishes (success or failure).
 * @returns the EventSource so the caller can close it early if needed.
 */
export function triggerImageBuild(
  onLine: (line: string) => void,
  onDone: (success: boolean) => void,
  onError: (err: Event) => void
): EventSource {
  const source = new EventSource(`${BASE_URL}/api/image/build-stream`);
  // Guard so onDone fires at most once: EventSource auto-reconnects after an
  // error, so without closing here a dropped stream would silently re-open
  // (re-issuing the build-triggering GET) and report failure repeatedly.
  let finished = false;
  source.onmessage = (e) => {
    try {
      const msg = JSON.parse(e.data);
      if (msg.done) { finished = true; onDone(true); source.close(); }
      else if (msg.line) onLine(msg.line);
    } catch { /* ignore non-JSON payloads */ }
  };
  source.onerror = (e) => {
    if (finished) return;
    finished = true;
    source.close(); // prevent automatic reconnect from restarting the build
    onDone(false);
    onError(e);
  };
  return source;
}
/**
 * POST to kick off the build — returns immediately; use subscribeToJobStream
 * for progress. NOTE(review): ImageBuildPanel reads this same endpoint's
 * response body as an SSE stream — confirm which contract is correct.
 * @throws Error when the trigger request fails.
 */
export async function startImageBuild(): Promise<void> {
  const response = await fetch(`${BASE_URL}/api/image/build`, { method: 'POST' });
  if (response.ok) return;
  throw new Error(`Build trigger failed: ${response.statusText}`);
}
// ── Release API ──────────────────────────────────────────────────────────────
/** Outcome of releasing one tenant's container during a release run. */
export interface TenantReleaseResult {
  subdomain: string;
  containerName: string;
  success: boolean;
  error?: string;  // presumably set only when success is false — TODO confirm
}
/** One release run against an environment, covering all its tenants. */
export interface ReleaseRecord {
  id: string;
  environment: string;
  imageName: string;
  status: 'Running' | 'Succeeded' | 'PartialFailure' | 'Failed';
  startedAt: string;   // timestamp string; format set by the backend
  finishedAt?: string; // absent while the release is still running
  tenants: TenantReleaseResult[];
}
/** Fetches past release runs. @throws Error on a non-2xx response. */
export async function getReleaseHistory(): Promise<ReleaseRecord[]> {
  const response = await fetch(`${BASE_URL}/api/release/history`);
  if (response.ok) return response.json();
  throw new Error(`Failed to get release history: ${response.statusText}`);
}
/**
 * Triggers a release to the given environment and streams log lines as SSE.
 * Calls onDone with the final ReleaseRecord and closes the stream.
 * @returns the EventSource so the caller can close it early if needed.
 */
export function triggerRelease(
  env: string,
  onLine: (line: string) => void,
  onDone: (record: ReleaseRecord) => void,
  onError: (err: Event) => void
): EventSource {
  // Encode the environment segment so it cannot break the URL path.
  const source = new EventSource(`${BASE_URL}/api/release/${encodeURIComponent(env)}`);
  source.onmessage = (e) => {
    try {
      const msg = JSON.parse(e.data);
      if (msg.done && msg.release) { onDone(msg.release as ReleaseRecord); source.close(); }
      else if (typeof msg.line === 'string') onLine(msg.line);
    } catch { /* ignore non-JSON payloads */ }
  };
  source.onerror = (e) => {
    // Close on error: EventSource would otherwise auto-reconnect, re-issuing
    // the GET and potentially kicking off a second release.
    source.close();
    onError(e);
  };
  return source;
}
// ── Project Build API ────────────────────────────────────────────────────────
/** A buildable project known to the backend. */
export interface ProjectDefinition {
  name: string;
  kind: 'DotnetProject' | 'NpmProject';
  relativePath: string;  // presumably relative to the repository root — TODO confirm
}
/** One build run (Docker image or project build). */
export interface BuildRecord {
  id: string;
  kind: 'DockerImage' | 'DotnetProject' | 'NpmProject';
  target: string;       // what was built (project or image name) — TODO confirm
  status: 'Running' | 'Succeeded' | 'Failed';
  startedAt: string;
  finishedAt?: string;  // absent while the build is still running
  durationMs?: number;
  log: string[];        // captured build output lines
}
/** Fetches the list of buildable projects. @throws Error on a non-2xx response. */
export async function getProjects(): Promise<ProjectDefinition[]> {
  const response = await fetch(`${BASE_URL}/api/builds/projects`);
  if (response.ok) return response.json();
  throw new Error(`Failed to get projects: ${response.statusText}`);
}
/** Fetches past build runs. @throws Error on a non-2xx response. */
export async function getBuildHistory(): Promise<BuildRecord[]> {
  const response = await fetch(`${BASE_URL}/api/builds/history`);
  if (response.ok) return response.json();
  throw new Error(`Failed to get build history: ${response.statusText}`);
}
/**
 * Triggers a project build and streams log lines.
 * Calls onDone with the final BuildRecord and closes the stream.
 * @returns the EventSource so the caller can close it early if needed.
 */
export function triggerProjectBuild(
  projectName: string,
  onLine: (line: string) => void,
  onDone: (record: BuildRecord) => void,
  onError: (err: Event) => void
): EventSource {
  const source = new EventSource(`${BASE_URL}/api/builds/${encodeURIComponent(projectName)}`);
  source.onmessage = (e) => {
    try {
      const msg = JSON.parse(e.data);
      if (msg.done && msg.build) { onDone(msg.build as BuildRecord); source.close(); }
      else if (typeof msg.line === 'string') onLine(msg.line);
    } catch { /* ignore non-JSON payloads */ }
  };
  source.onerror = (e) => {
    // Close on error: EventSource would otherwise auto-reconnect, re-issuing
    // the GET and potentially kicking off a second build.
    source.close();
    onError(e);
  };
  return source;
}
// ── Git History API ──────────────────────────────────────────────────────────
/** A single commit returned by the backend git log endpoint. */
export interface GitCommit {
  hash: string;       // full commit hash
  shortHash: string;  // abbreviated hash for display
  author: string;
  date: string;
  subject: string;    // first line of the commit message
  files: string[];    // paths touched by the commit
}
/**
 * Loads git commit history, optionally scoped to a path.
 * @param path - optional path filter; omitted from the query when falsy
 * @param limit - maximum number of commits to return (default 20)
 * @throws Error on a non-2xx response.
 */
export async function getGitLog(path?: string, limit = 20): Promise<GitCommit[]> {
  const query = new URLSearchParams({ limit: String(limit) });
  if (path) query.set('path', path);
  const response = await fetch(`${BASE_URL}/api/git/log?${query}`);
  if (response.ok) return response.json();
  throw new Error(`Failed to get git log: ${response.statusText}`);
}
// ── Promotion / Branch Ladder API ────────────────────────────────────────────
/** Status of one rung of the promotion branch ladder. */
export interface BranchStatus {
  branch: string;
  exists: boolean;                  // false when the branch does not exist
  shortHash: string | null;         // head commit hash, null when the branch is missing
  lastCommitSummary: string | null;
  aheadOfNext: number;              // presumably commits not yet promoted to the next rung — TODO confirm
  behindNext: number;
  unreleasedLines: string[];
}
/** A promotion request between two ladder branches and its outcome. */
export interface PromotionRecord {
  id: string;
  fromBranch: string;
  toBranch: string;
  requestedBy: string;
  note: string | null;
  status: 'Pending' | 'Running' | 'Succeeded' | 'Failed';
  createdAt: string;
  completedAt: string | null;  // null until the promotion finishes
  commitCount: number;
  commitLines: string[];
  log: string[];               // streamed log output from the promotion run
}
/** Fetches the status of every branch-ladder rung. @throws Error on a non-2xx response. */
export async function getLadderStatus(): Promise<BranchStatus[]> {
  const response = await fetch(`${BASE_URL}/api/promotions/ladder`);
  if (response.ok) return response.json();
  throw new Error(`Failed to get ladder status: ${response.statusText}`);
}
/** Fetches past promotion runs. @throws Error on a non-2xx response. */
export async function getPromotionHistory(): Promise<PromotionRecord[]> {
  const response = await fetch(`${BASE_URL}/api/promotions/history`);
  if (response.ok) return response.json();
  throw new Error(`Failed to get promotion history: ${response.statusText}`);
}
/**
 * Triggers a promotion and streams SSE lines. Calls onDone with the final record.
 * Uses fetch + ReadableStream (not EventSource) because the trigger is a POST.
 * @returns a cancel function that aborts the in-flight request.
 */
export function triggerPromotion(
  from: string,
  to: string,
  requestedBy: string,
  note: string | undefined,
  onLine: (line: string) => void,
  onDone: (record: PromotionRecord) => void,
  onError: (err: string) => void,
): () => void {
  let cancelled = false;
  const controller = new AbortController();
  // Fire-and-forget async IIFE: the streaming read runs in the background
  // while the caller immediately receives the cancel function below.
  (async () => {
    try {
      const res = await fetch(`${BASE_URL}/api/promotions/promote`, {
        method: 'POST',
        headers: { 'Content-Type': 'application/json' },
        body: JSON.stringify({ from, to, requestedBy, note }),
        signal: controller.signal,
      });
      if (!res.ok || !res.body) { onError(res.statusText); return; }
      const reader = res.body.getReader();
      const decoder = new TextDecoder();
      let buffer = '';
      while (!cancelled) {
        const { done, value } = await reader.read();
        if (done) break;
        // SSE events are separated by a blank line; keep the trailing
        // (possibly incomplete) event in the buffer for the next chunk.
        buffer += decoder.decode(value, { stream: true });
        const parts = buffer.split('\n\n');
        buffer = parts.pop() ?? '';
        for (const chunk of parts) {
          // Strip the leading "data:" field name. NOTE(review): the non-global
          // regex removes only the first "data:" prefix — confirm the backend
          // always emits single-line data payloads.
          const dataLine = chunk.replace(/^data:\s*/m, '').trim();
          if (!dataLine) continue;
          try {
            const msg = JSON.parse(dataLine);
            if (msg.done && msg.promotion) onDone(msg.promotion as PromotionRecord);
            else if (typeof msg.line === 'string') onLine(msg.line);
          } catch { /* skip */ }
        }
      }
    } catch (e) {
      // An AbortError after cancellation is expected; only surface real failures.
      if (!cancelled) onError(e instanceof Error ? e.message : 'Unknown error');
    }
  })();
  return () => { cancelled = true; controller.abort(); };
}
Binary file not shown.

After

Width:  |  Height:  |  Size: 13 KiB

@@ -0,0 +1 @@
<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" class="iconify iconify--logos" width="35.93" height="32" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 228"><path fill="#00D8FF" d="M210.483 73.824a171.49 171.49 0 0 0-8.24-2.597c.465-1.9.893-3.777 1.273-5.621c6.238-30.281 2.16-54.676-11.769-62.708c-13.355-7.7-35.196.329-57.254 19.526a171.23 171.23 0 0 0-6.375 5.848a155.866 155.866 0 0 0-4.241-3.917C100.759 3.829 77.587-4.822 63.673 3.233C50.33 10.957 46.379 33.89 51.995 62.588a170.974 170.974 0 0 0 1.892 8.48c-3.28.932-6.445 1.924-9.474 2.98C17.309 83.498 0 98.307 0 113.668c0 15.865 18.582 31.778 46.812 41.427a145.52 145.52 0 0 0 6.921 2.165a167.467 167.467 0 0 0-2.01 9.138c-5.354 28.2-1.173 50.591 12.134 58.266c13.744 7.926 36.812-.22 59.273-19.855a145.567 145.567 0 0 0 5.342-4.923a168.064 168.064 0 0 0 6.92 6.314c21.758 18.722 43.246 26.282 56.54 18.586c13.731-7.949 18.194-32.003 12.4-61.268a145.016 145.016 0 0 0-1.535-6.842c1.62-.48 3.21-.974 4.76-1.488c29.348-9.723 48.443-25.443 48.443-41.52c0-15.417-17.868-30.326-45.517-39.844Zm-6.365 70.984c-1.4.463-2.836.91-4.3 1.345c-3.24-10.257-7.612-21.163-12.963-32.432c5.106-11 9.31-21.767 12.459-31.957c2.619.758 5.16 1.557 7.61 2.4c23.69 8.156 38.14 20.213 38.14 29.504c0 9.896-15.606 22.743-40.946 31.14Zm-10.514 20.834c2.562 12.94 2.927 24.64 1.23 33.787c-1.524 8.219-4.59 13.698-8.382 15.893c-8.067 4.67-25.32-1.4-43.927-17.412a156.726 156.726 0 0 1-6.437-5.87c7.214-7.889 14.423-17.06 21.459-27.246c12.376-1.098 24.068-2.894 34.671-5.345a134.17 134.17 0 0 1 1.386 6.193ZM87.276 214.515c-7.882 2.783-14.16 2.863-17.955.675c-8.075-4.657-11.432-22.636-6.853-46.752a156.923 156.923 0 0 1 1.869-8.499c10.486 2.32 22.093 3.988 34.498 4.994c7.084 9.967 14.501 19.128 21.976 27.15a134.668 134.668 0 0 1-4.877 4.492c-9.933 8.682-19.886 14.842-28.658 17.94ZM50.35 144.747c-12.483-4.267-22.792-9.812-29.858-15.863c-6.35-5.437-9.555-10.836-9.555-15.216c0-9.322 
13.897-21.212 37.076-29.293c2.813-.98 5.757-1.905 8.812-2.773c3.204 10.42 7.406 21.315 12.477 32.332c-5.137 11.18-9.399 22.249-12.634 32.792a134.718 134.718 0 0 1-6.318-1.979Zm12.378-84.26c-4.811-24.587-1.616-43.134 6.425-47.789c8.564-4.958 27.502 2.111 47.463 19.835a144.318 144.318 0 0 1 3.841 3.545c-7.438 7.987-14.787 17.08-21.808 26.988c-12.04 1.116-23.565 2.908-34.161 5.309a160.342 160.342 0 0 1-1.76-7.887Zm110.427 27.268a347.8 347.8 0 0 0-7.785-12.803c8.168 1.033 15.994 2.404 23.343 4.08c-2.206 7.072-4.956 14.465-8.193 22.045a381.151 381.151 0 0 0-7.365-13.322Zm-45.032-43.861c5.044 5.465 10.096 11.566 15.065 18.186a322.04 322.04 0 0 0-30.257-.006c4.974-6.559 10.069-12.652 15.192-18.18ZM82.802 87.83a323.167 323.167 0 0 0-7.227 13.238c-3.184-7.553-5.909-14.98-8.134-22.152c7.304-1.634 15.093-2.97 23.209-3.984a321.524 321.524 0 0 0-7.848 12.897Zm8.081 65.352c-8.385-.936-16.291-2.203-23.593-3.793c2.26-7.3 5.045-14.885 8.298-22.6a321.187 321.187 0 0 0 7.257 13.246c2.594 4.48 5.28 8.868 8.038 13.147Zm37.542 31.03c-5.184-5.592-10.354-11.779-15.403-18.433c4.902.192 9.899.29 14.978.29c5.218 0 10.376-.117 15.453-.343c-4.985 6.774-10.018 12.97-15.028 18.486Zm52.198-57.817c3.422 7.8 6.306 15.345 8.596 22.52c-7.422 1.694-15.436 3.058-23.88 4.071a382.417 382.417 0 0 0 7.859-13.026a347.403 347.403 0 0 0 7.425-13.565Zm-16.898 8.101a358.557 358.557 0 0 1-12.281 19.815a329.4 329.4 0 0 1-23.444.823c-7.967 0-15.716-.248-23.178-.732a310.202 310.202 0 0 1-12.513-19.846h.001a307.41 307.41 0 0 1-10.923-20.627a310.278 310.278 0 0 1 10.89-20.637l-.001.001a307.318 307.318 0 0 1 12.413-19.761c7.613-.576 15.42-.876 23.31-.876H128c7.926 0 15.743.303 23.354.883a329.357 329.357 0 0 1 12.335 19.695a358.489 358.489 0 0 1 11.036 20.54a329.472 329.472 0 0 1-11 20.722Zm22.56-122.124c8.572 4.944 11.906 24.881 6.52 51.026c-.344 1.668-.73 3.367-1.15 5.09c-10.622-2.452-22.155-4.275-34.23-5.408c-7.034-10.017-14.323-19.124-21.64-27.008a160.789 160.789 0 0 1 5.888-5.4c18.9-16.447 36.564-22.941 
44.612-18.3ZM128 90.808c12.625 0 22.86 10.235 22.86 22.86s-10.235 22.86-22.86 22.86s-22.86-10.235-22.86-22.86s10.235-22.86 22.86-22.86Z"></path></svg>

After

Width:  |  Height:  |  Size: 4.0 KiB

File diff suppressed because one or more lines are too long

After

Width:  |  Height:  |  Size: 8.5 KiB

@@ -0,0 +1,117 @@
import { useEffect, useState, useRef } from 'react';
import { Button, Drawer, Intent, NonIdealState, Spinner, Tag, Tooltip } from '@blueprintjs/core';
import { html as diff2htmlHtml } from 'diff2html';
import 'diff2html/bundles/css/diff2html.min.css';
import hljs from 'highlight.js';
import 'highlight.js/styles/github.css';
import { getCommitDetail, type CommitDetail } from '../api/opcApi';
/** Props for GitCommitDrawer. */
interface Props {
  hash: string | null;  // commit hash to display; null keeps the drawer closed
  onClose: () => void;  // invoked when the user dismisses the drawer
}
/**
 * Right-hand drawer showing a single commit: metadata bar, commit message
 * body, and a diff2html-rendered unified diff colored by highlight.js.
 * Opens whenever `hash` is non-null and fetches detail via getCommitDetail.
 */
export function GitCommitDrawer({ hash, onClose }: Props) {
  const [detail, setDetail] = useState<CommitDetail | null>(null);
  const [loading, setLoading] = useState(false);
  const [error, setError] = useState<string | null>(null);
  const diffRef = useRef<HTMLDivElement>(null);
  // Load commit detail whenever the selected hash changes; reset state on close.
  useEffect(() => {
    if (!hash) { setDetail(null); setError(null); return; }
    setLoading(true); setDetail(null); setError(null);
    getCommitDetail(hash)
      .then(setDetail)
      .catch(e => setError(String(e)))
      .finally(() => setLoading(false));
  }, [hash]);
  // After diff HTML is injected, run highlight.js over code blocks
  useEffect(() => {
    if (detail && diffRef.current) {
      diffRef.current.querySelectorAll<HTMLElement>('code[class]').forEach(el => {
        hljs.highlightElement(el);
      });
    }
  }, [detail]);
  // Concatenate all per-file patches into one diff string for diff2html.
  const combinedPatch = detail?.files.map(f => f.patch).join('\n') ?? '';
  const diffHtml = combinedPatch
    ? diff2htmlHtml(combinedPatch, {
        drawFileList: true,
        matching: 'lines',
        outputFormat: 'line-by-line',
        renderNothingWhenEmpty: false,
      })
    : '';
  return (
    <Drawer
      isOpen={!!hash}
      onClose={onClose}
      title={detail ? (
        <span className="git-drawer-title">
          <code className="git-drawer-hash">{detail.shortHash}</code>
          <span className="git-drawer-subject">{detail.subject}</span>
        </span>
      ) : 'Commit Diff'}
      size="70%"
      position="right"
      className="git-commit-drawer"
    >
      <div className="git-drawer-body">
        {loading && <NonIdealState icon={<Spinner size={24} />} title="Loading diff…" />}
        {error && <NonIdealState icon="error" intent={Intent.DANGER} title="Failed to load commit" description={error} />}
        {detail && (
          <>
            {/* Metadata bar: clickable short hash, author, date, +/- totals */}
            <div className="git-commit-meta-bar">
              <div className="git-commit-meta-left">
                <Tooltip content="Copy full hash">
                  <code
                    className="git-commit-hash-chip"
                    onClick={() => navigator.clipboard.writeText(detail.hash)}
                    style={{ cursor: 'pointer' }}
                  >
                    {detail.shortHash}
                  </code>
                </Tooltip>
                <span className="git-commit-author">{detail.author}</span>
                <span className="git-commit-date">{detail.date}</span>
              </div>
              <div className="git-commit-meta-right">
                <Tag intent={Intent.SUCCESS} minimal round icon="add">
                  +{detail.files.reduce((a, f) => a + f.additions, 0)}
                </Tag>
                <Tag intent={Intent.DANGER} minimal round icon="remove">
                  -{detail.files.reduce((a, f) => a + f.deletions, 0)}
                </Tag>
                <Tag minimal round>{detail.files.length} file{detail.files.length !== 1 ? 's' : ''}</Tag>
              </div>
            </div>
            {/* Commit body if multiline */}
            {detail.body.trim() !== detail.subject.trim() && (
              <pre className="git-commit-body">{detail.body.trim()}</pre>
            )}
            {/* Diff */}
            {diffHtml
              ? <div ref={diffRef} className="git-diff-container" dangerouslySetInnerHTML={{ __html: diffHtml }} />
              : <NonIdealState icon="git-commit" title="No diff" description="This commit has no file changes." />
            }
          </>
        )}
        {!loading && !error && !detail && hash && (
          <NonIdealState icon={<Spinner size={20} />} title="Loading…" />
        )}
      </div>
      <div className="git-drawer-footer">
        <Button text="Close" onClick={onClose} />
      </div>
    </Drawer>
  );
}
@@ -0,0 +1,132 @@
import { useEffect, useRef, useState } from 'react';
import { Button, Callout, Intent, Tag } from '@blueprintjs/core';
import { getImageStatus, type ImageBuildStatus } from '../api/provisioningApi';
const BASE_URL = import.meta.env.VITE_API_URL ?? '';
/**
 * Panel that triggers a Docker image build and renders the streamed build log.
 * Reads the POST /api/image/build response body as an SSE stream directly,
 * rather than using EventSource (which cannot POST).
 */
export default function ImageBuildPanel() {
  const [status, setStatus] = useState<ImageBuildStatus | null>(null);
  const [building, setBuilding] = useState(false);
  const [logs, setLogs] = useState<string[]>([]);
  const [open, setOpen] = useState(false);
  const [error, setError] = useState<string | null>(null);
  const logRef = useRef<HTMLDivElement>(null);
  // Initial status load; failures are deliberately ignored (best-effort display).
  useEffect(() => {
    getImageStatus().then(setStatus).catch(() => {});
  }, []);
  // Auto-scroll log panel
  useEffect(() => {
    if (logRef.current) logRef.current.scrollTop = logRef.current.scrollHeight;
  }, [logs]);
  const handleBuild = async () => {
    if (building) return; // ignore clicks while a build is already streaming
    setBuilding(true);
    setOpen(true);
    setLogs([]);
    setError(null);
    try {
      // POST /api/image/build — the response body IS the SSE stream
      const res = await fetch(`${BASE_URL}/api/image/build`, { method: 'POST' });
      if (!res.ok || !res.body) {
        setError(`Build failed to start: ${res.statusText}`);
        setBuilding(false);
        return;
      }
      const reader = res.body.getReader();
      const decoder = new TextDecoder();
      let buffer = '';
      while (true) {
        const { done, value } = await reader.read();
        if (done) break;
        // SSE events are separated by a blank line; keep the trailing
        // (possibly incomplete) event in the buffer for the next chunk.
        buffer += decoder.decode(value, { stream: true });
        const parts = buffer.split('\n\n');
        buffer = parts.pop() ?? '';
        for (const chunk of parts) {
          const dataLine = chunk.replace(/^data:\s*/m, '').trim();
          if (!dataLine) continue;
          try {
            const msg = JSON.parse(dataLine);
            if (msg.done) {
              // Build finished — refresh status
              getImageStatus().then(setStatus).catch(() => {});
            } else if (typeof msg.line === 'string') {
              // Cap retained log lines at ~1000 to bound memory/render cost.
              setLogs((prev) => [...prev.slice(-1000), msg.line]);
            }
          } catch { /* ignore non-JSON */ }
        }
      }
    } catch (e) {
      setError(e instanceof Error ? e.message : 'Unknown error during build');
    } finally {
      setBuilding(false);
    }
  };
  const lastBuilt = status?.builtAt
    ? new Date(status.builtAt).toLocaleString()
    : 'Never';
  return (
    <div style={{ display: 'flex', flexDirection: 'column', gap: '0.5rem' }}>
      <div style={{ display: 'flex', gap: '0.5rem', alignItems: 'center' }}>
        <Button
          icon="build"
          intent={Intent.WARNING}
          loading={building}
          onClick={handleBuild}
          text="Build Image"
        />
        {!building && (
          <Tag minimal intent={Intent.NONE} style={{ fontFamily: 'monospace', fontSize: '0.7rem' }}>
            {status?.imageName ?? 'clarity-server:latest'} · last built {lastBuilt}
          </Tag>
        )}
        {building && (
          <Tag minimal intent={Intent.WARNING}>Building</Tag>
        )}
        {logs.length > 0 && !building && (
          <Button
            icon={open ? 'chevron-up' : 'chevron-down'}
            minimal
            small
            onClick={() => setOpen((o) => !o)}
            text={open ? 'Hide log' : 'Show log'}
          />
        )}
      </div>
      {error && (
        <Callout intent={Intent.DANGER} compact>{error}</Callout>
      )}
      {open && logs.length > 0 && (
        <div
          ref={logRef}
          style={{
            fontFamily: 'monospace',
            fontSize: '0.72rem',
            background: '#111',
            color: '#d4d4d4',
            padding: '0.6rem 0.8rem',
            borderRadius: '4px',
            height: '220px',
            overflowY: 'auto',
            whiteSpace: 'pre-wrap',
            wordBreak: 'break-all',
          }}
        >
          {logs.map((l, i) => <div key={i}>{l}</div>)}
        </div>
      )}
    </div>
  );
}

Some files were not shown because too many files have changed in this diff Show More