OPC # 0001: Extract OPC into standalone repo

This commit is contained in:
amadzarak
2026-04-25 17:26:42 -04:00
commit 42383bdc03
170 changed files with 21365 additions and 0 deletions
@@ -0,0 +1,14 @@
using ControlPlane.Api.Services;
using ControlPlane.Core.Messages;
using MassTransit;
namespace ControlPlane.Api.Consumers;
public sealed class ProvisioningProgressConsumer(SseEventBus bus) : IConsumer<ProvisioningProgressEvent>
{
    /// <summary>
    /// Bridges MassTransit provisioning-progress messages onto the in-process
    /// SSE event bus so subscribed clients receive them as they arrive.
    /// </summary>
    public Task Consume(ConsumeContext<ProvisioningProgressEvent> context)
    {
        var progress = context.Message;
        bus.Publish(progress);
        return Task.CompletedTask;
    }
}
+30
View File
@@ -0,0 +1,30 @@
<Project Sdk="Microsoft.NET.Sdk.Web">
  <PropertyGroup>
    <TargetFramework>net10.0</TargetFramework>
    <ImplicitUsings>enable</ImplicitUsings>
    <Nullable>enable</Nullable>
  </PropertyGroup>
  <ItemGroup>
    <!-- Package versions are centrally managed (no Version attributes here);
         presumably via Directory.Packages.props — confirm at the repo root. -->
    <PackageReference Include="Microsoft.AspNetCore.OpenApi" />
    <PackageReference Include="Microsoft.Extensions.ServiceDiscovery" />
    <!-- Messaging: MassTransit over RabbitMQ, wired through Aspire. -->
    <PackageReference Include="MassTransit" />
    <PackageReference Include="MassTransit.RabbitMQ" />
    <PackageReference Include="Aspire.RabbitMQ.Client" />
    <!-- Container management, database access, and local git inspection. -->
    <PackageReference Include="Docker.DotNet" />
    <PackageReference Include="Npgsql" />
    <PackageReference Include="LibGit2Sharp" />
    <!-- OpenTelemetry: OTLP export plus ASP.NET Core / HTTP / runtime instrumentation. -->
    <PackageReference Include="OpenTelemetry.Exporter.OpenTelemetryProtocol" />
    <PackageReference Include="OpenTelemetry.Extensions.Hosting" />
    <PackageReference Include="OpenTelemetry.Instrumentation.AspNetCore" />
    <PackageReference Include="OpenTelemetry.Instrumentation.Http" />
    <PackageReference Include="OpenTelemetry.Instrumentation.Runtime" />
  </ItemGroup>
  <ItemGroup>
    <ProjectReference Include="..\Clarity.ServiceDefaults\Clarity.ServiceDefaults.csproj" />
    <ProjectReference Include="..\ControlPlane.Core\ControlPlane.Core.csproj" />
  </ItemGroup>
</Project>
+108
View File
@@ -0,0 +1,108 @@
-- =============================================================================
-- OPC Seed Script — seeded from the TODO.md backlog.
-- Run against the ControlPlane database.
-- NOTE: gen_random_uuid() requires PostgreSQL 13+ (or the pgcrypto extension).
-- OPC # 0001 is already live; this starts at 0002.
-- =============================================================================
INSERT INTO opc (id, number, title, description, type, status, priority, assignee, created_at, updated_at)
VALUES
-- ── Keycloak / Auth ───────────────────────────────────────────────────────────
(
  gen_random_uuid(),
  'OPC # 0002',
  'Fix KeycloakStep 401 on realm provisioning',
  'KeycloakStep is the current blocker in the provisioning saga. The step returns 401 when attempting to create the tenant realm. Investigate the admin-client credentials, token scope, and the endpoint URL used inside the Docker network.',
  'Bug',
  'In Progress',
  'High',
  'amadzarak',
  NOW(), NOW()
),
(
  gen_random_uuid(),
  'OPC # 0003',
  'KeycloakStep: full realm + user provisioning flow',
  'After the 401 is resolved, implement the full flow: create realm {subdomain}.clarity.io, create the admin role, create the day-zero admin user from AdminEmail, assign the admin role, and trigger execute-actions-email (verify email + set password).',
  'Feature',
  'New',
  'High',
  'amadzarak',
  NOW(), NOW()
),
(
  gen_random_uuid(),
  'OPC # 0004',
  'Keycloak JWT backchannel issuer cleanup',
  'Keycloak advertises its issuer based on the incoming request URL. When the backchannel hits http://keycloak:8080 directly it returns http://keycloak.clarity.test:8080 as the issuer, forcing layered workarounds in ValidIssuers and the rewrite handler. Clean fix: boot Keycloak with KC_HOSTNAME_URL=https://keycloak.clarity.test, verify via /.well-known/openid-configuration, then simplify ValidIssuers back to two entries. Deferred until next planned maintenance window (requires nuke to apply env var).',
  'Tech Debt',
  'New',
  'Medium',
  'amadzarak',
  NOW(), NOW()
),
-- ── VaultStep ─────────────────────────────────────────────────────────────────
(
  gen_random_uuid(),
  'OPC # 0005',
  'VaultStep: read root token and write initial secrets',
  'Read the root token from /vault/file/init.json, enable KV-v2 secrets engine at {subdomain}/, then write the initial secrets: DB connection string and Keycloak client secret.',
  'Feature',
  'New',
  'High',
  'amadzarak',
  NOW(), NOW()
),
-- ── MigrationStep ─────────────────────────────────────────────────────────────
(
  gen_random_uuid(),
  'OPC # 0006',
  'MigrationStep: run EF Core migrations per provisioning mode',
  'Wire up EF Core migrations inside MigrationStep for all three provisioning modes. Shared: run against the shared DB scoped to the tenant schema. Isolated: run against the dedicated Postgres container registered in SagaContext. Dedicated: run against the full dedicated Postgres instance.',
  'Feature',
  'New',
  'Medium',
  'amadzarak',
  NOW(), NOW()
),
-- ── HandoffStep ───────────────────────────────────────────────────────────────
(
  gen_random_uuid(),
  'OPC # 0007',
  'HandoffStep: send magic-link email and mark saga complete',
  'Send a magic-link / welcome email to AdminEmail via SMTP or SendGrid, then mark CompletedSteps.HandoffSent on the provisioning job. Blocked until SMTP is wired (currently SendRequiredActionsEmailAsync is commented out in KeycloakStep.cs).',
  'Feature',
  'New',
  'Medium',
  'amadzarak',
  NOW(), NOW()
),
-- ── Observability ─────────────────────────────────────────────────────────────
(
  gen_random_uuid(),
  'OPC # 0008',
  'Stream tenant container logs into Aspire dashboard',
  'Use the Docker SDK to tail fdev-app-clarity-* container logs and forward them to Aspire''s structured log stream. Currently these logs are only visible via docker logs on the host.',
  'Feature',
  'New',
  'Low',
  'amadzarak',
  NOW(), NOW()
),
-- ── Kubernetes (backburner) ───────────────────────────────────────────────────
(
  gen_random_uuid(),
  'OPC # 0009',
  'Kubernetes migration path evaluation',
  'Currently managing containers directly via Docker.DotNet. Evaluate k8s when: scheduling across multiple nodes is needed, rolling deploys are required, or client count exceeds single-host capacity. Options: k3s (self-hosted), AKS/EKS (cloud), or keep Docker Compose per host for mid-scale. ClarityContainerService abstraction is intentional swap Docker.DotNet for a k8s client without changing the saga.',
  'General',
  'New',
  'Low',
  'amadzarak',
  NOW(), NOW()
);
+119
View File
@@ -0,0 +1,119 @@
-- =============================================================================
-- OPC Seed Script 2 — completed work from TODO.md.
-- Run against the ControlPlane database.
-- NOTE: gen_random_uuid() requires PostgreSQL 13+ (or the pgcrypto extension).
-- Picks up numbering at 0010 (0001–0009 covered in seed_opc.sql).
-- =============================================================================
INSERT INTO opc (id, number, title, description, type, status, priority, assignee, created_at, updated_at)
VALUES
(
  gen_random_uuid(),
  'OPC # 0010',
  'Aspire AppHost wired: Vault, MinIO, RabbitMQ, Postgres, Keycloak, Worker, API, UI',
  'Full Aspire AppHost configuration completed. All infrastructure services (Vault, MinIO, RabbitMQ, Postgres, Keycloak) and application services (Worker, API, UI) are registered and wired in the AppHost project.',
  'Feature',
  'Done',
  'High',
  'amadzarak',
  NOW(), NOW()
),
(
  gen_random_uuid(),
  'OPC # 0011',
  'Fix CRLF → LF on entrypoint.sh (was breaking Vault container)',
  'entrypoint.sh had Windows-style CRLF line endings which caused the Vault container to fail on startup. Fixed by enforcing LF via .gitattributes.',
  'Bug',
  'Done',
  'High',
  'amadzarak',
  NOW(), NOW()
),
(
  gen_random_uuid(),
  'OPC # 0012',
  'Vault initialises and unseals on first run',
  'Vault container now correctly initialises (generates root token + unseal keys) and auto-unseals on first run. Init output is written to /vault/file/init.json.',
  'Feature',
  'Done',
  'High',
  'amadzarak',
  NOW(), NOW()
),
(
  gen_random_uuid(),
  'OPC # 0013',
  'Pin Keycloak bootstrap password (fix persistent container password drift)',
  'Keycloak was experiencing password drift between container restarts due to the bootstrap admin credentials not being pinned. Fixed by explicitly setting the admin password so it persists across restarts.',
  'Bug',
  'Done',
  'High',
  'amadzarak',
  NOW(), NOW()
),
(
  gen_random_uuid(),
  'OPC # 0014',
  'Fix Keycloak endpoint name: tcp → http',
  'Keycloak Aspire resource was registered with a tcp endpoint name instead of http, causing service discovery failures. Renamed to http to align with the rest of the stack.',
  'Bug',
  'Done',
  'Medium',
  'amadzarak',
  NOW(), NOW()
),
(
  gen_random_uuid(),
  'OPC # 0015',
  'Worker starts and correctly waits for all dependencies',
  'The Worker service was starting before infrastructure dependencies were healthy. Implemented proper wait/health-check logic so the Worker blocks until Postgres, Keycloak, Vault, RabbitMQ, and MinIO are all ready.',
  'Bug',
  'Done',
  'High',
  'amadzarak',
  NOW(), NOW()
),
(
  gen_random_uuid(),
  'OPC # 0016',
  'MassTransit saga pipeline with compensation',
  'Implemented the full MassTransit-based provisioning saga with forward steps and compensating transactions. Each step registers its rollback so a mid-saga failure cleanly tears down any already-provisioned resources.',
  'Feature',
  'Done',
  'High',
  'amadzarak',
  NOW(), NOW()
),
(
  gen_random_uuid(),
  'OPC # 0017',
  'SSE progress stream: Worker → RabbitMQ → API → browser',
  'Implemented a real-time Server-Sent Events pipeline. The Worker publishes step progress to RabbitMQ, the API consumes and streams events via SSE, and the browser receives live updates without polling.',
  'Feature',
  'Done',
  'High',
  'amadzarak',
  NOW(), NOW()
),
(
  gen_random_uuid(),
  'OPC # 0018',
  'Frontend Diagnostics tab with full stack traces from worker',
  'Added a Diagnostics tab to the frontend that displays structured error messages and full stack traces forwarded from the Worker service, making provisioning failures debuggable directly in the UI.',
  'Feature',
  'Done',
  'Medium',
  'amadzarak',
  NOW(), NOW()
),
(
  gen_random_uuid(),
  'OPC # 0019',
  'Enforce LF line endings for *.sh and *.hcl via .gitattributes',
  'Added .gitattributes rules to enforce LF line endings for all *.sh and *.hcl files. Prevents CRLF issues from reappearing when contributors commit from Windows machines.',
  'Tech Debt',
  'Done',
  'Low',
  'amadzarak',
  NOW(), NOW()
);
+203
View File
@@ -0,0 +1,203 @@
using ControlPlane.Core.Models;
using LibGit2Sharp;
namespace ControlPlane.Api.Endpoints;
public static class GitEndpoints
{
    /// <summary>Maps read-only git inspection endpoints under <c>/api/git</c>.</summary>
    public static IEndpointRouteBuilder MapGitEndpoints(this IEndpointRouteBuilder app)
    {
        app.MapGet("/api/git/log", GetLog);
        app.MapGet("/api/git/commits/{hash}", GetCommit);
        app.MapGet("/api/git/branches", GetBranches);
        app.MapGet("/api/git/branch-coverage", GetBranchCoverage);
        return app;
    }

    // GET /api/git/log?grep=OPC+%23+0001&limit=50
    /// <summary>
    /// Lists commits reachable from every local branch tip (falling back to HEAD
    /// when no branch has a tip), newest-first with topological ordering,
    /// optionally filtered by a case-insensitive message substring.
    /// </summary>
    private static IResult GetLog(
        IConfiguration config,
        string? grep = null,
        int limit = 50)
    {
        var repoPath = ResolveRepo(config);
        if (repoPath is null)
            return Results.Problem("Could not locate a git repository. Set Git:RepoRoot in appsettings.");
        using var repo = new Repository(repoPath);
        // Start the walk from every branch tip so commits that only exist on
        // non-checked-out branches still show up in the log.
        var tips = repo.Branches
            .Where(b => b.Tip != null)
            .Select(b => (GitObject)b.Tip)
            .ToList();
        var filter = new CommitFilter
        {
            SortBy = CommitSortStrategies.Topological | CommitSortStrategies.Time,
            // IncludeReachableFrom accepts several shapes; boxing Head keeps the
            // ternary's two arms type-compatible.
            IncludeReachableFrom = tips.Count > 0 ? tips : (object)repo.Head,
        };
        IEnumerable<Commit> query = repo.Commits.QueryBy(filter);
        if (!string.IsNullOrWhiteSpace(grep))
            query = query.Where(c => c.Message.Contains(grep, StringComparison.OrdinalIgnoreCase));
        // Materialize before `repo` is disposed — the query is lazy.
        var commits = query
            .Take(limit)
            .Select(c => ToGitCommit(repo, c))
            .ToList();
        return Results.Ok(commits);
    }

    // GET /api/git/commits/{hash}
    /// <summary>
    /// Returns one commit with its per-file diff (status, line counts, and
    /// unified patch text) computed against its first parent; for a root
    /// commit the diff is taken against an empty tree (null parent tree).
    /// </summary>
    private static IResult GetCommit(string hash, IConfiguration config)
    {
        var repoPath = ResolveRepo(config);
        if (repoPath is null)
            return Results.Problem("Could not locate a git repository.");
        using var repo = new Repository(repoPath);
        var commit = repo.Lookup<Commit>(hash);
        if (commit is null) return Results.NotFound();
        // Diff against the first parent only; merge commits therefore show the
        // changes relative to their first parent.
        var parentTree = commit.Parents.FirstOrDefault()?.Tree;
        var changes = repo.Diff.Compare<TreeChanges>(parentTree, commit.Tree);
        var patch = repo.Diff.Compare<Patch>(parentTree, commit.Tree);
        var files = changes.Select(c => new
        {
            path = c.Path,
            oldPath = c.OldPath,
            status = c.Status.ToString(),
            // Indexing the patch by path can miss (e.g. unusual rename cases);
            // fall back to zeros / empty rather than throwing.
            additions = patch[c.Path]?.LinesAdded ?? 0,
            deletions = patch[c.Path]?.LinesDeleted ?? 0,
            patch = patch[c.Path]?.Patch ?? string.Empty,
        }).ToList();
        return Results.Ok(new
        {
            hash = commit.Sha,
            shortHash = commit.Sha[..7],
            author = commit.Author.Name,
            email = commit.Author.Email,
            date = commit.Author.When.ToString("yyyy-MM-dd HH:mm:ss zzz"),
            subject = commit.MessageShort,
            body = commit.Message,
            files,
        });
    }

    // GET /api/git/branches
    /// <summary>
    /// Lists local branches (remotes excluded) with their tip commit summary,
    /// sorted by branch name.
    /// </summary>
    private static IResult GetBranches(IConfiguration config)
    {
        var repoPath = ResolveRepo(config);
        if (repoPath is null)
            return Results.Problem("Could not locate a git repository.");
        using var repo = new Repository(repoPath);
        var branches = repo.Branches
            .Where(b => !b.IsRemote && b.Tip != null)
            .Select(b => new
            {
                name = b.FriendlyName,
                hash = b.Tip.Sha,
                shortHash = b.Tip.Sha[..7],
                subject = b.Tip.MessageShort,
                author = b.Tip.Author.Name,
                date = b.Tip.Author.When.ToString("yyyy-MM-dd HH:mm:ss zzz"),
                isHead = b.IsCurrentRepositoryHead,
            })
            .OrderBy(b => b.name)
            .ToList();
        return Results.Ok(branches);
    }

    // GET /api/git/branch-coverage?commits=hash1,hash2,hash3
    // Returns each local branch and whether it contains ALL of the given commits.
    private static IResult GetBranchCoverage(IConfiguration config, string? commits = null)
    {
        if (string.IsNullOrWhiteSpace(commits)) return Results.Ok(Array.Empty<object>());
        var hashes = commits.Split(',', StringSplitOptions.RemoveEmptyEntries | StringSplitOptions.TrimEntries);
        if (hashes.Length == 0) return Results.Ok(Array.Empty<object>());
        var repoPath = ResolveRepo(config);
        if (repoPath is null)
            return Results.Problem("Could not locate a git repository.");
        using var repo = new Repository(repoPath);
        // Unknown hashes are silently dropped; only resolvable commits count.
        var targetCommits = hashes
            .Select(h => repo.Lookup<Commit>(h))
            .Where(c => c is not null)
            .ToList();
        if (targetCommits.Count == 0) return Results.Ok(Array.Empty<object>());
        var result = repo.Branches
            .Where(b => !b.IsRemote && b.Tip != null)
            .Select(b =>
            {
                var contains = targetCommits.All(tc =>
                {
                    // If merge base of branch tip and target == target, then target is an ancestor
                    var mergeBase = repo.ObjectDatabase.FindMergeBase(b.Tip, tc!);
                    return mergeBase?.Sha == tc!.Sha;
                });
                return new
                {
                    branch = b.FriendlyName,
                    contains,
                    tipHash = b.Tip.Sha[..7],
                    isHead = b.IsCurrentRepositoryHead,
                };
            })
            .OrderBy(b => b.branch)
            .ToList();
        return Results.Ok(result);
    }

    // ── Helpers ───────────────────────────────────────────────────────────────

    /// <summary>
    /// Resolves the repo root: explicit config overrides (Git:RepoRoot, then
    /// Docker:RepoRoot), otherwise auto-discover from the running assembly
    /// directory upward via LibGit2Sharp. Returns null when nothing is found.
    /// </summary>
    private static string? ResolveRepo(IConfiguration config)
    {
        var configured = config["Git:RepoRoot"] ?? config["Docker:RepoRoot"];
        if (!string.IsNullOrWhiteSpace(configured) && Directory.Exists(configured))
            return configured;
        // Auto-discover: walk up from the app's own directory
        var startPath = AppContext.BaseDirectory;
        var discovered = Repository.Discover(startPath);
        if (discovered is null) return null;
        // Repository.Discover returns the .git directory path; get the working dir
        using var probe = new Repository(discovered);
        return probe.Info.WorkingDirectory;
    }

    /// <summary>
    /// Projects a LibGit2Sharp commit into the shared <see cref="GitCommit"/>
    /// model, including the list of touched file paths (first-parent diff).
    /// Diff computation failures degrade to an empty file list rather than
    /// failing the whole log request.
    /// </summary>
    private static GitCommit ToGitCommit(Repository repo, Commit c)
    {
        string[] files;
        try
        {
            var parentTree = c.Parents.FirstOrDefault()?.Tree;
            var changes = repo.Diff.Compare<TreeChanges>(parentTree, c.Tree);
            files = changes.Select(ch => ch.Path).ToArray();
        }
        catch { files = []; }
        return new GitCommit(
            Hash: c.Sha,
            ShortHash: c.Sha[..7],
            Author: c.Author.Name,
            Date: c.Author.When.ToString("yyyy-MM-dd HH:mm:ss zzz"),
            Subject: c.MessageShort,
            Files: files
        );
    }
}
@@ -0,0 +1,79 @@
using ControlPlane.Api.Services;
using ControlPlane.Core.Models;
namespace ControlPlane.Api.Endpoints;
public static class GiteaEndpoints
{
    /// <summary>Maps the Gitea proxy endpoints under <c>/api/gitea</c>.</summary>
    public static IEndpointRouteBuilder MapGiteaEndpoints(this IEndpointRouteBuilder app)
    {
        var group = app.MapGroup("/api/gitea").WithTags("Gitea");
        group.MapGet("/repo", GetRepo);
        group.MapGet("/branches", ListBranches);
        group.MapPost("/branches", CreateBranch);
        group.MapGet("/pulls", ListPulls);
        group.MapGet("/pulls/{number:long}", GetPull);
        group.MapPost("/pulls", CreatePull);
        group.MapGet("/tags", ListTags);
        group.MapPost("/tags", CreateTag);
        group.MapGet("/webhooks", ListWebhooks);
        group.MapPost("/webhooks", RegisterWebhook);
        return app;
    }

    // 503 when the Gitea instance cannot be reached / returns no repo.
    private static async Task<IResult> GetRepo(GiteaService svc, CancellationToken ct)
    {
        var repo = await svc.GetRepoAsync(ct);
        if (repo is null) return Results.StatusCode(503);
        return Results.Ok(repo);
    }

    private static async Task<IResult> ListBranches(GiteaService svc, CancellationToken ct)
    {
        var branches = await svc.ListBranchesAsync(ct);
        return Results.Ok(branches);
    }

    private static async Task<IResult> CreateBranch(
        CreateBranchRequest req, GiteaService svc, CancellationToken ct)
    {
        var created = await svc.CreateBranchAsync(req, ct);
        if (created is null) return Results.BadRequest("Failed to create branch in Gitea.");
        return Results.Ok(created);
    }

    // `state` binds from the query string; defaults to open PRs.
    private static async Task<IResult> ListPulls(
        GiteaService svc, string state = "open", CancellationToken ct = default)
    {
        var pulls = await svc.ListPullRequestsAsync(state, ct);
        return Results.Ok(pulls);
    }

    private static async Task<IResult> GetPull(
        long number, GiteaService svc, CancellationToken ct)
    {
        var pull = await svc.GetPullRequestAsync(number, ct);
        if (pull is null) return Results.NotFound();
        return Results.Ok(pull);
    }

    private static async Task<IResult> CreatePull(
        CreatePullRequestRequest req, GiteaService svc, CancellationToken ct)
    {
        var created = await svc.CreatePullRequestAsync(req, ct);
        if (created is null) return Results.BadRequest("Failed to create PR in Gitea.");
        return Results.Ok(created);
    }

    private static async Task<IResult> ListTags(GiteaService svc, CancellationToken ct)
    {
        var tags = await svc.ListTagsAsync(ct);
        return Results.Ok(tags);
    }

    private static async Task<IResult> CreateTag(
        CreateTagRequest req, GiteaService svc, CancellationToken ct)
    {
        var created = await svc.CreateTagAsync(req, ct);
        if (created is null) return Results.BadRequest("Failed to create tag in Gitea.");
        return Results.Ok(created);
    }

    private static async Task<IResult> ListWebhooks(GiteaService svc, CancellationToken ct)
    {
        var hooks = await svc.ListWebhooksAsync(ct);
        return Results.Ok(hooks);
    }

    private static async Task<IResult> RegisterWebhook(
        CreateWebhookRequest req, GiteaService svc, CancellationToken ct)
    {
        var registered = await svc.RegisterWebhookAsync(req, ct);
        if (registered is null) return Results.BadRequest("Failed to register webhook in Gitea.");
        return Results.Ok(registered);
    }
}
@@ -0,0 +1,75 @@
using ControlPlane.Api.Services;
using System.Text.Json;
namespace ControlPlane.Api.Endpoints;
public static class ImageBuildEndpoints
{
    private static readonly JsonSerializerOptions JsonOpts = new(JsonSerializerDefaults.Web);

    /// <summary>Maps image build endpoints under <c>/api/image</c>.</summary>
    public static IEndpointRouteBuilder MapImageBuildEndpoints(this IEndpointRouteBuilder app)
    {
        var group = app.MapGroup("/api/image").WithTags("Image");
        group.MapGet("/status", GetStatus);
        group.MapPost("/build", TriggerBuild);
        return app;
    }

    /// <summary>Returns the last known build status without triggering a new build.</summary>
    private static async Task<IResult> GetStatus(ImageBuildService svc) =>
        Results.Ok(await svc.GetStatusAsync());

    /// <summary>
    /// Triggers a docker build and streams the output line-by-line as SSE.
    /// The build context is the repo root, which must be configured via
    /// Docker:RepoRoot in appsettings / environment. Build failures are
    /// reported to the client as an SSE error payload before the final
    /// <c>{"done":true}</c> event.
    /// </summary>
    private static async Task TriggerBuild(
        HttpContext ctx,
        ImageBuildService svc,
        IConfiguration config,
        CancellationToken ct)
    {
        var repoRoot = config["Docker:RepoRoot"];
        if (string.IsNullOrWhiteSpace(repoRoot) || !Directory.Exists(repoRoot))
        {
            ctx.Response.StatusCode = 400;
            await ctx.Response.WriteAsJsonAsync(new
            {
                error = "Docker:RepoRoot is not configured or does not exist.",
                hint = "Add Docker__RepoRoot to the worker environment pointing at the repo root directory.",
            }, ct);
            return;
        }
        ctx.Response.Headers.ContentType = "text/event-stream";
        ctx.Response.Headers.CacheControl = "no-cache";
        ctx.Response.Headers.Connection = "keep-alive";
        // Use a Channel so the synchronous line callback can safely hand lines
        // to the async SSE writer without blocking the Docker build thread.
        var channel = System.Threading.Channels.Channel.CreateUnbounded<string?>(
            new System.Threading.Channels.UnboundedChannelOptions { SingleWriter = true, SingleReader = true });
        void OnLine(string line) => channel.Writer.TryWrite(line);
        // Run the build on a background thread so we can drain the channel here.
        // BUGFIX: keep a handle on the *build* task itself. Previously we kept
        // the ContinueWith continuation, and since a continuation that ignores
        // its antecedent does not propagate its fault, `await buildTask`
        // silently swallowed any build exception.
        var buildTask = Task.Run(() => svc.BuildAsync(repoRoot, OnLine, ct), ct);
        _ = buildTask.ContinueWith(_ => channel.Writer.TryComplete(), TaskScheduler.Default);
        await foreach (var line in channel.Reader.ReadAllAsync(ct))
        {
            var json = JsonSerializer.Serialize(new { line }, JsonOpts);
            await ctx.Response.WriteAsync($"data: {json}\n\n", ct);
            await ctx.Response.Body.FlushAsync(ct);
        }
        // The channel only completes after buildTask finishes, so its state is
        // final here. Surface failures to the client instead of dropping them.
        if (buildTask.IsFaulted)
        {
            var message = buildTask.Exception?.GetBaseException().Message ?? "Build failed.";
            var errJson = JsonSerializer.Serialize(new { error = message }, JsonOpts);
            await ctx.Response.WriteAsync($"data: {errJson}\n\n", ct);
            await ctx.Response.Body.FlushAsync(ct);
        }
        // Signal stream end
        await ctx.Response.WriteAsync("data: {\"done\":true}\n\n", ct);
        await ctx.Response.Body.FlushAsync(ct);
    }
}
@@ -0,0 +1,232 @@
using System.Diagnostics;
using System.Text.Json;
using System.Text.Json.Serialization;
namespace ControlPlane.Api.Endpoints;
public static class InfraEndpoints
{
    /// <summary>Maps platform-infrastructure control endpoints under <c>/api/infra</c>.</summary>
    public static IEndpointRouteBuilder MapInfraEndpoints(this IEndpointRouteBuilder app)
    {
        var g = app.MapGroup("/api/infra").WithTags("Infrastructure");
        g.MapGet ("/status", GetStatus);
        g.MapPost("/{container}/start", (string container) => ServiceAction(container, "start"));
        g.MapPost("/{container}/stop", (string container) => ServiceAction(container, "stop"));
        g.MapPost("/{container}/restart",(string container) => ServiceAction(container, "restart"));
        g.MapGet ("/compose/up/stream", ComposeUpStream);
        g.MapGet ("/compose/down/stream", ComposeDownStream);
        return app;
    }

    // ── Known platform services ───────────────────────────────────────────────
    // Whitelist of containers the start/stop/restart endpoints may touch.
    private static readonly string[] PlatformContainers =
    [
        "clarity-postgres",
        "clarity-keycloak",
        "clarity-vault",
        "clarity-minio",
        "clarity-gitea",
        "clarity-nginx",
        "clarity-dnsmasq",
    ];

    // ── Handlers ─────────────────────────────────────────────────────────────

    /// <summary>
    /// Inspects every known platform container via `docker inspect` and returns
    /// a normalized status (running / unhealthy / stopped / unknown), exposed
    /// ports, and a human-readable uptime. Containers that fail to inspect are
    /// reported as stopped rather than failing the whole request.
    /// </summary>
    private static async Task<IResult> GetStatus()
    {
        var services = new List<InfraService>();
        foreach (var container in PlatformContainers)
        {
            var (code, output) = await DockerAsync(
                $"inspect --format={{{{json .}}}} {container}");
            if (code != 0 || string.IsNullOrWhiteSpace(output))
            {
                services.Add(new InfraService(container, container, "stopped", [], null));
                continue;
            }
            try
            {
                using var doc = JsonDocument.Parse(output.Trim());
                var root = doc.RootElement;
                var state = root.GetProperty("State").GetProperty("Status").GetString() ?? "unknown";
                // Health is only present for containers with a HEALTHCHECK.
                var health = root.GetProperty("State").TryGetProperty("Health", out var h)
                    ? h.GetProperty("Status").GetString()
                    : null;
                var status = (state, health) switch
                {
                    ("running", "unhealthy") => "unhealthy",
                    ("running", _) => "running",
                    ("exited", _) => "stopped",
                    _ => state
                };
                // Ports: only host-published ports (non-null bindings) are listed.
                var ports = new List<string>();
                if (root.TryGetProperty("NetworkSettings", out var ns) &&
                    ns.TryGetProperty("Ports", out var portsEl))
                {
                    foreach (var port in portsEl.EnumerateObject())
                    {
                        if (port.Value.ValueKind != JsonValueKind.Null)
                            ports.Add(port.Name.Split('/')[0]);
                    }
                }
                // Uptime, coarse-grained: days+hours, hours+minutes, or minutes.
                string? uptime = null;
                if (root.GetProperty("State").TryGetProperty("StartedAt", out var startedAt))
                {
                    if (DateTime.TryParse(startedAt.GetString(), out var started) && state == "running")
                    {
                        var elapsed = DateTime.UtcNow - started.ToUniversalTime();
                        uptime = elapsed.TotalDays >= 1
                            ? $"{(int)elapsed.TotalDays}d {elapsed.Hours}h"
                            : elapsed.TotalHours >= 1
                                ? $"{(int)elapsed.TotalHours}h {elapsed.Minutes}m"
                                : $"{elapsed.Minutes}m";
                    }
                }
                // Friendly name: docker prefixes container names with '/'.
                var name = root.TryGetProperty("Name", out var n)
                    ? n.GetString()?.TrimStart('/') ?? container
                    : container;
                services.Add(new InfraService(name, container, status, ports, uptime));
            }
            catch
            {
                // Malformed/unexpected inspect output — report but keep going.
                services.Add(new InfraService(container, container, "unknown", [], null));
            }
        }
        return Results.Ok(new InfraStatusResponse(services, DateTimeOffset.UtcNow));
    }

    /// <summary>
    /// Runs `docker start|stop|restart` on a whitelisted platform container.
    /// Rejects any container name not in <see cref="PlatformContainers"/>.
    /// </summary>
    private static async Task<IResult> ServiceAction(string container, string action)
    {
        if (!PlatformContainers.Contains(container))
            return Results.BadRequest($"Unknown platform container: {container}");
        var (code, output) = await DockerAsync($"{action} {container}");
        return code == 0
            ? Results.Ok()
            : Results.Problem(output ?? "Docker command failed", statusCode: 500);
    }

    private static Task ComposeUpStream(HttpContext ctx, IConfiguration config, CancellationToken ct) =>
        StreamComposeOutput(ctx, config, "up --pull missing", ct);

    private static Task ComposeDownStream(HttpContext ctx, IConfiguration config, CancellationToken ct) =>
        StreamComposeOutput(ctx, config, "down", ct);

    /// <summary>
    /// Runs `docker compose {composeArgs}` against the infra compose file and
    /// streams its combined stdout/stderr to the client as SSE, followed by a
    /// final line reporting the exit code.
    /// </summary>
    private static async Task StreamComposeOutput(
        HttpContext ctx, IConfiguration config, string composeArgs, CancellationToken ct)
    {
        var infraDir = ResolveInfraPath(config);
        ctx.Response.Headers.ContentType = "text/event-stream";
        ctx.Response.Headers.CacheControl = "no-cache";
        ctx.Response.Headers.Connection = "keep-alive";
        var channel = System.Threading.Channels.Channel.CreateUnbounded<string?>(
            new System.Threading.Channels.UnboundedChannelOptions { SingleWriter = false, SingleReader = true });
        var psi = new ProcessStartInfo("docker",
            $"compose -f \"{Path.Combine(infraDir, "docker-compose.yml")}\" {composeArgs}")
        {
            RedirectStandardOutput = true,
            RedirectStandardError = true,
            UseShellExecute = false,
            CreateNoWindow = true,
            WorkingDirectory = infraDir,
        };
        // Guard against Process.Start returning null instead of NRE-ing mid-stream.
        var proc = Process.Start(psi);
        if (proc is null)
        {
            await ctx.Response.WriteAsync("data: ✖ Failed to start docker compose.\n\n", ct);
            await ctx.Response.Body.FlushAsync(ct);
            return;
        }
        // Read stdout + stderr concurrently into the channel
        var stdoutTask = Task.Run(async () =>
        {
            while (await proc.StandardOutput.ReadLineAsync(ct) is { } line)
                channel.Writer.TryWrite(line);
        }, ct);
        var stderrTask = Task.Run(async () =>
        {
            while (await proc.StandardError.ReadLineAsync(ct) is { } line)
                channel.Writer.TryWrite(line);
        }, ct);
        _ = Task.WhenAll(stdoutTask, stderrTask)
            .ContinueWith(_ => channel.Writer.TryComplete(), TaskScheduler.Default);
        // Stream lines to client as SSE
        await foreach (var line in channel.Reader.ReadAllAsync(ct))
        {
            if (line is null) continue;
            await ctx.Response.WriteAsync($"data: {line}\n\n", ct);
            await ctx.Response.Body.FlushAsync(ct);
        }
        await proc.WaitForExitAsync(ct);
        var exitLine = proc.ExitCode == 0 ? "data: ✔ Done." : $"data: ✖ Exited with code {proc.ExitCode}";
        await ctx.Response.WriteAsync($"{exitLine}\n\n", ct);
        await ctx.Response.Body.FlushAsync(ct);
        proc.Dispose();
    }

    // ── Helpers ───────────────────────────────────────────────────────────────

    /// <summary>
    /// Resolves the infra/ directory: Docker:RepoRoot when configured,
    /// otherwise four levels up from the app's base directory (bin layout).
    /// </summary>
    private static string ResolveInfraPath(IConfiguration config)
    {
        var repoRoot = config["Docker:RepoRoot"]
            ?? Path.GetFullPath(Path.Combine(AppContext.BaseDirectory, "..", "..", "..", ".."));
        return Path.GetFullPath(Path.Combine(repoRoot, "infra"));
    }

    private static Task<(int Code, string? Output)> DockerAsync(string args) =>
        RunAsync("docker", args, null);

    // NOTE(review): currently unused within this class; kept for callers elsewhere.
    private static async Task<(int Code, string? Output)> ComposeAsync(string args, string infraDir)=>
        await RunAsync("docker", $"compose -f \"{Path.Combine(infraDir, "docker-compose.yml")}\" {args}", infraDir);

    /// <summary>
    /// Runs a process to completion and returns (exit code, captured output).
    /// Returns stderr as the output when stdout is blank (docker writes errors
    /// to stderr).
    /// </summary>
    private static async Task<(int Code, string? Output)> RunAsync(
        string exe, string args, string? workingDir)
    {
        var psi = new ProcessStartInfo(exe, args)
        {
            RedirectStandardOutput = true,
            RedirectStandardError = true,
            UseShellExecute = false,
            CreateNoWindow = true,
        };
        if (workingDir is not null) psi.WorkingDirectory = workingDir;
        using var proc = Process.Start(psi);
        if (proc is null) return (-1, null);
        // BUGFIX: drain stdout and stderr concurrently. Reading stdout to end
        // *before* touching stderr can deadlock: if the child fills the un-read
        // stderr pipe buffer it blocks writing, and we block reading stdout.
        var outputTask = proc.StandardOutput.ReadToEndAsync();
        var errTask = proc.StandardError.ReadToEndAsync();
        await Task.WhenAll(outputTask, errTask);
        var output = await outputTask;
        var err = await errTask;
        await proc.WaitForExitAsync();
        return (proc.ExitCode, string.IsNullOrWhiteSpace(output) ? err : output);
    }

    // ── Response models ───────────────────────────────────────────────────────

    /// <summary>One platform container's normalized status snapshot.</summary>
    public record InfraService(
        string Name,
        string Container,
        string Status,
        List<string> Ports,
        string? Uptime);

    /// <summary>Status of all platform containers plus the snapshot timestamp.</summary>
    public record InfraStatusResponse(
        List<InfraService> Services,
        DateTimeOffset CheckedAt);
}
+244
View File
@@ -0,0 +1,244 @@
using ControlPlane.Api.Services;
using ControlPlane.Core.Models;
using LibGit2Sharp;
using System.Net.Http.Headers;
using System.Text;
using System.Text.Json;
namespace ControlPlane.Api.Endpoints;
public static class OpcEndpoints
{
private static readonly JsonSerializerOptions JsonOpts =
new(JsonSerializerDefaults.Web) { WriteIndented = false };
public static IEndpointRouteBuilder MapOpcEndpoints(this IEndpointRouteBuilder app)
{
var g = app.MapGroup("/api/opc").WithTags("OPC");
// ── OPC CRUD ──────────────────────────────────────────────────────────
g.MapGet ("", ListOpcs);
g.MapGet ("/next-number", GetNextNumber);
g.MapPost ("", CreateOpc);
g.MapGet ("/{id:guid}", GetOpc);
g.MapPatch ("/{id:guid}", UpdateOpc);
g.MapDelete("/{id:guid}", DeleteOpc);
// ── Notes ─────────────────────────────────────────────────────────────
g.MapGet ("/{id:guid}/notes", ListNotes);
g.MapPost ("/{id:guid}/notes", AddNote);
// ── Artifacts ─────────────────────────────────────────────────────────
g.MapGet ("/{id:guid}/artifacts", ListArtifacts);
g.MapPost ("/{id:guid}/artifacts", CreateArtifact);
g.MapPatch ("/artifacts/{artifactId:guid}", UpdateArtifact);
g.MapDelete("/artifacts/{artifactId:guid}", DeleteArtifact);
// ── Pinned commits ────────────────────────────────────────────────────
g.MapGet ("/{id:guid}/pinned-commits", ListPinnedCommits);
g.MapPost ("/{id:guid}/pinned-commits", PinCommit);
g.MapDelete("/{id:guid}/pinned-commits/{hash}", UnpinCommit);
// ── AI assist (proxies to OpenRouter, key stays on server) ────────────
g.MapPost ("/ai-assist", AiAssist);
return app;
}
// ── OPC handlers ──────────────────────────────────────────────────────────
private static async Task<IResult> ListOpcs(
OpcService svc,
string? type = null, string? status = null,
CancellationToken ct = default)
{
var list = await svc.ListAsync(type, status, ct);
return Results.Ok(list);
}
private static async Task<IResult> GetNextNumber(
OpcService svc, CancellationToken ct)
{
var number = await svc.NextNumberAsync(ct);
return Results.Ok(new { number });
}
private static async Task<IResult> CreateOpc(
CreateOpcRequest req, OpcService svc, CancellationToken ct)
{
var opc = await svc.CreateAsync(req, ct);
return Results.Created($"/api/opc/{opc.Id}", opc);
}
private static async Task<IResult> GetOpc(
Guid id, OpcService svc, CancellationToken ct)
{
var opc = await svc.GetAsync(id, ct);
return opc is null ? Results.NotFound() : Results.Ok(opc);
}
private static async Task<IResult> UpdateOpc(
Guid id, UpdateOpcRequest req, OpcService svc, CancellationToken ct)
{
var opc = await svc.UpdateAsync(id, req, ct);
return opc is null ? Results.NotFound() : Results.Ok(opc);
}
private static async Task<IResult> DeleteOpc(
Guid id, OpcService svc, CancellationToken ct)
{
return await svc.DeleteAsync(id, ct) ? Results.NoContent() : Results.NotFound();
}
// ── Note handlers ─────────────────────────────────────────────────────────
private static async Task<IResult> ListNotes(
Guid id, OpcService svc, CancellationToken ct)
{
var notes = await svc.ListNotesAsync(id, ct);
return Results.Ok(notes);
}
private static async Task<IResult> AddNote(
Guid id, AddNoteRequest req, OpcService svc, CancellationToken ct)
{
var note = await svc.AddNoteAsync(id, req, ct);
return Results.Created($"/api/opc/{id}/notes/{note.Id}", note);
}
// ── Artifact handlers ─────────────────────────────────────────────────────
private static async Task<IResult> ListArtifacts(
Guid id, OpcService svc,
string? type = null, CancellationToken ct = default)
{
var artifacts = await svc.ListArtifactsAsync(id, type, ct);
return Results.Ok(artifacts);
}
private static async Task<IResult> CreateArtifact(
Guid id, UpsertArtifactRequest req, OpcService svc, CancellationToken ct)
{
var artifact = await svc.UpsertArtifactAsync(id, req, ct);
return Results.Created($"/api/opc/{id}/artifacts/{artifact.Id}", artifact);
}
private static async Task<IResult> UpdateArtifact(
Guid artifactId, UpsertArtifactRequest req, OpcService svc, CancellationToken ct)
{
var artifact = await svc.UpdateArtifactAsync(artifactId, req, ct);
return artifact is null ? Results.NotFound() : Results.Ok(artifact);
}
/// <summary>Deletes an artifact; 204 on success, 404 when the id is unknown.</summary>
private static async Task<IResult> DeleteArtifact(
    Guid artifactId, OpcService svc, CancellationToken ct)
{
    var deleted = await svc.DeleteArtifactAsync(artifactId, ct);
    return deleted ? Results.NoContent() : Results.NotFound();
}
// ── Pinned commit handlers ────────────────────────────────────────────────
/// <summary>Lists the commits pinned to the given OPC.</summary>
private static async Task<IResult> ListPinnedCommits(
    Guid id, OpcService svc, CancellationToken ct) =>
    Results.Ok(await svc.ListPinnedCommitsAsync(id, ct));
/// <summary>
/// Pins a commit to an OPC. When Docker:RepoRoot points at an existing local
/// clone, the hash is resolved through LibGit2Sharp to normalize it to the
/// full SHA and capture subject/author; otherwise the caller-supplied hash is
/// stored as-is with empty metadata.
/// </summary>
private static async Task<IResult> PinCommit(
    Guid id, PinCommitRequest req, OpcService svc, IConfiguration config, CancellationToken ct)
{
    var repoPath = config["Docker:RepoRoot"];

    // Defaults used when no local repository is available for lookup.
    var fullHash = req.Hash;
    var shortHash = req.Hash.Length >= 7 ? req.Hash[..7] : req.Hash;
    var subject = string.Empty;
    var author = string.Empty;

    if (!string.IsNullOrWhiteSpace(repoPath) && Directory.Exists(repoPath))
    {
        using var repo = new Repository(repoPath);
        if (repo.Lookup<Commit>(req.Hash) is not { } commit)
            return Results.NotFound("Commit not found in repository.");
        fullHash = commit.Sha;
        shortHash = commit.Sha[..7];
        subject = commit.MessageShort;
        author = commit.Author.Name;
    }

    var pinned = await svc.PinCommitAsync(id, fullHash, shortHash, subject, author, req.PinnedBy, ct);
    if (pinned is null) return Results.NotFound();
    return Results.Created($"/api/opc/{id}/pinned-commits/{fullHash}", pinned);
}
/// <summary>Removes a pinned commit; 204 on success, 404 when no such pin exists.</summary>
private static async Task<IResult> UnpinCommit(
    Guid id, string hash, OpcService svc, CancellationToken ct)
{
    var removed = await svc.UnpinCommitAsync(id, hash, ct);
    return removed ? Results.NoContent() : Results.NotFound();
}
// ── AI assist ─────────────────────────────────────────────────────────────
/// <summary>
/// Proxies a text-generation request to OpenRouter (claude-3.5-haiku) for OPC
/// authoring assistance. Optional req.Context is sent as a separate user
/// message ahead of the prompt. Returns { text } on success, or a Problem
/// result when the key is missing, OpenRouter errors, or the response is malformed.
/// </summary>
private static async Task<IResult> AiAssist(
    AiAssistRequest req,
    IConfiguration config,
    IHttpClientFactory http,
    CancellationToken ct)
{
    var apiKey = config["OpenRouter:ApiKey"];
    if (string.IsNullOrWhiteSpace(apiKey))
        return Results.Problem("OpenRouter API key not configured. Add OpenRouter:ApiKey to appsettings.");

    var systemPrompt =
        "You are an assistant helping a software engineering team write clear, concise " +
        "OPC (Online Project Communication) content — requirements, change descriptions, " +
        "QA test paths, and specifications. Be direct, structured, and professional. " +
        "Respond with plain text only (no markdown wrapping).";

    var messages = new List<object>
    {
        new { role = "system", content = systemPrompt },
    };
    if (!string.IsNullOrWhiteSpace(req.Context))
        messages.Add(new { role = "user", content = $"Context:\n{req.Context}" });
    messages.Add(new { role = "user", content = req.Prompt });

    var body = new
    {
        model = "anthropic/claude-3.5-haiku",
        messages,
        max_tokens = 1024,
    };

    // Per-request headers instead of mutating DefaultRequestHeaders on the
    // factory-created client — the idiomatic pattern for IHttpClientFactory.
    using var request = new HttpRequestMessage(
        HttpMethod.Post, "https://openrouter.ai/api/v1/chat/completions")
    {
        Content = new StringContent(JsonSerializer.Serialize(body), Encoding.UTF8, "application/json"),
    };
    request.Headers.Authorization = new AuthenticationHeaderValue("Bearer", apiKey);
    request.Headers.Add("HTTP-Referer", "https://controlplane.clarity.internal");
    request.Headers.Add("X-Title", "Clarity ControlPlane OPC");

    var client = http.CreateClient("openrouter");
    var response = await client.SendAsync(request, ct);
    if (!response.IsSuccessStatusCode)
    {
        var err = await response.Content.ReadAsStringAsync(ct);
        return Results.Problem($"OpenRouter error {response.StatusCode}: {err}");
    }

    var json = await response.Content.ReadAsStringAsync(ct);
    using var doc = JsonDocument.Parse(json);
    // Guard against a malformed/empty payload: the original GetProperty chain
    // threw (HTTP 500) when "choices" was missing or empty.
    if (!doc.RootElement.TryGetProperty("choices", out var choices) || choices.GetArrayLength() == 0)
        return Results.Problem("OpenRouter returned no choices.");
    var text = choices[0]
        .GetProperty("message")
        .GetProperty("content")
        .GetString() ?? string.Empty;
    return Results.Ok(new { text });
}
}
@@ -0,0 +1,65 @@
using ControlPlane.Api.Services;
using ControlPlane.Core.Services;
using System.Text.Json;
namespace ControlPlane.Api.Endpoints;
public static class ProjectBuildEndpoints
{
    // Web defaults + enum-as-string so SSE payloads match what the UI expects.
    private static readonly JsonSerializerOptions JsonOpts = new(JsonSerializerDefaults.Web)
    {
        Converters = { new System.Text.Json.Serialization.JsonStringEnumConverter() },
    };

    /// <summary>Maps the /api/builds group: project list, history, and SSE build trigger.</summary>
    public static IEndpointRouteBuilder MapProjectBuildEndpoints(this IEndpointRouteBuilder app)
    {
        var group = app.MapGroup("/api/builds").WithTags("Builds");
        group.MapGet("/projects", GetProjects);
        group.MapGet("/history", GetHistory);
        group.MapPost("/{projectName}", TriggerProjectBuild);
        return app;
    }

    /// <summary>Returns the list of known projects the build monitor can track.</summary>
    private static IResult GetProjects(ProjectBuildService svc) =>
        Results.Ok(svc.GetProjects());

    /// <summary>Returns persisted build history records.</summary>
    private static async Task<IResult> GetHistory(BuildHistoryService history) =>
        Results.Ok(await history.GetBuildsAsync());

    /// <summary>
    /// Triggers a build for a named project and streams SSE output.
    /// projectName must match one of the names returned by GET /api/builds/projects.
    /// Each log line is sent as {"line": ...}; a final {"done": true, "build": ...}
    /// event carries the completed build record.
    /// </summary>
    private static async Task TriggerProjectBuild(
        string projectName,
        HttpContext ctx,
        ProjectBuildService svc,
        CancellationToken ct)
    {
        if (string.IsNullOrWhiteSpace(svc.RepoRoot))
        {
            ctx.Response.StatusCode = 503;
            await ctx.Response.WriteAsJsonAsync(
                new { error = "Docker:RepoRoot is not configured on the server." }, ct);
            return;
        }
        ctx.Response.Headers.ContentType = "text/event-stream";
        ctx.Response.Headers.CacheControl = "no-cache";
        ctx.Response.Headers.Connection = "keep-alive";

        // BUG FIX: the original OnLine callback called Send(...).GetAwaiter().GetResult(),
        // blocking sync-over-async on the response stream and allowing interleaved
        // writes from the build's logging thread. Funnel lines through a channel so a
        // single reader owns the response — the same pattern PromotionEndpoints uses.
        var channel = System.Threading.Channels.Channel.CreateUnbounded<string>(
            new System.Threading.Channels.UnboundedChannelOptions { SingleWriter = true, SingleReader = true });
        void OnLine(string line) => channel.Writer.TryWrite(line);

        var buildTask = svc.BuildProjectAsync(projectName, OnLine, ct);
        // Close the channel (propagating any fault) once the build finishes.
        _ = buildTask.ContinueWith(t => channel.Writer.TryComplete(t.Exception), TaskScheduler.Default);

        await foreach (var line in channel.Reader.ReadAllAsync(ct))
        {
            var json = JsonSerializer.Serialize(new { line }, JsonOpts);
            await ctx.Response.WriteAsync($"data: {json}\n\n", ct);
            await ctx.Response.Body.FlushAsync(ct);
        }

        var record = await buildTask;
        var doneJson = JsonSerializer.Serialize(new { done = true, build = record }, JsonOpts);
        await ctx.Response.WriteAsync($"data: {doneJson}\n\n", ct);
        await ctx.Response.Body.FlushAsync(ct);
    }
}
@@ -0,0 +1,73 @@
using ControlPlane.Api.Services;
using ControlPlane.Core.Models;
using System.Text.Json;
namespace ControlPlane.Api.Endpoints;
public static class PromotionEndpoints
{
    // Web-default (camelCase) options for hand-serialized SSE payloads.
    private static readonly JsonSerializerOptions JsonOpts = new(JsonSerializerDefaults.Web);

    /// <summary>Maps the /api/promotions group: ladder status, history, and SSE promote.</summary>
    public static IEndpointRouteBuilder MapPromotionEndpoints(this IEndpointRouteBuilder app)
    {
        var g = app.MapGroup("/api/promotions").WithTags("Promotions");

        // GET /api/promotions/ladder — branch status for all 4 ladder branches
        g.MapGet("/ladder", async (PromotionService svc, CancellationToken ct) =>
            Results.Ok(await svc.GetLadderStatusAsync(ct)));

        // GET /api/promotions/history
        g.MapGet("/history", async (PromotionService svc) =>
            Results.Ok(await svc.GetHistoryAsync()));

        // POST /api/promotions/promote — body: { from, to, requestedBy, note }
        // Streams SSE log lines then sends {done, promotion} when complete
        g.MapPost("/promote", async (
            HttpContext ctx,
            PromotionService svc,
            PromoteRequest req,
            CancellationToken ct) =>
        {
            // Validate ladder step: both branches must exist and be adjacent.
            var ladder = PromotionService.Ladder;
            var fi = Array.IndexOf(ladder, req.From);
            var ti = Array.IndexOf(ladder, req.To);
            if (fi < 0 || ti < 0 || ti != fi + 1)
            {
                ctx.Response.StatusCode = 400;
                await ctx.Response.WriteAsJsonAsync(
                    new { error = $"Invalid promotion step: {req.From} → {req.To}. Must be adjacent in ladder." }, ct);
                return;
            }
            ctx.Response.Headers.ContentType = "text/event-stream";
            ctx.Response.Headers.CacheControl = "no-cache";
            ctx.Response.Headers.Connection = "keep-alive";

            var channel = System.Threading.Channels.Channel.CreateUnbounded<string?>(
                new System.Threading.Channels.UnboundedChannelOptions { SingleWriter = true, SingleReader = true });
            void OnLine(string line) => channel.Writer.TryWrite(line);

            // BUG FIX: the original assigned Task.Run(...).ContinueWith(...) to
            // promoteTask and awaited THAT, so `promotion` was the bool returned by
            // TryComplete — not the promotion record — and the final SSE event sent
            // {done: true, promotion: true}. Keep the inner task for the result; the
            // continuation only closes the channel (propagating any fault).
            var promoteTask = Task.Run(() =>
                svc.PromoteAsync(req.From, req.To, req.RequestedBy ?? "system", req.Note, OnLine, ct), ct);
            _ = promoteTask.ContinueWith(t => channel.Writer.TryComplete(t.Exception), TaskScheduler.Default);

            await foreach (var line in channel.Reader.ReadAllAsync(ct))
            {
                var json = JsonSerializer.Serialize(new { line }, JsonOpts);
                await ctx.Response.WriteAsync($"data: {json}\n\n", ct);
                await ctx.Response.Body.FlushAsync(ct);
            }

            var promotion = await promoteTask;
            var doneJson = JsonSerializer.Serialize(new { done = true, promotion }, JsonOpts);
            await ctx.Response.WriteAsync($"data: {doneJson}\n\n", ct);
            await ctx.Response.Body.FlushAsync(ct);
        });
        return app;
    }
}
public record PromoteRequest(string From, string To, string? RequestedBy, string? Note);
@@ -0,0 +1,106 @@
using ControlPlane.Api.Services;
using ControlPlane.Core.Messages;
using ControlPlane.Core.Models;
using ControlPlane.Core.Services;
using MassTransit;
using System.Text.Json;
namespace ControlPlane.Api.Endpoints;
public static class ProvisioningEndpoints
{
    // Web-default (camelCase) serializer options for hand-built SSE payloads.
    private static readonly JsonSerializerOptions JsonOpts = new(JsonSerializerDefaults.Web);

    /// <summary>Maps the /api/provision group plus the /api/tenants listing endpoint.</summary>
    public static IEndpointRouteBuilder MapProvisioningEndpoints(this IEndpointRouteBuilder app)
    {
        var group = app.MapGroup("/api/provision").WithTags("Provisioning");
        group.MapPost("/", QueueProvisioningJob);
        group.MapGet("/{id:guid}", GetJobStatus);
        group.MapGet("/{id:guid}/stream", StreamJobEvents);
        app.MapGet("/api/tenants", GetTenants).WithTags("Tenants");
        return app;
    }

    /// <summary>
    /// Records a new provisioning job in the in-memory store and publishes a
    /// ProvisionClientCommand for the worker. Returns 202 with the job id and status.
    /// NOTE(review): the job store is a plain Dictionary registered as a singleton
    /// (see Program.cs) and is mutated per-request without locking — consider
    /// ConcurrentDictionary; changing it requires touching the registration too.
    /// </summary>
    private static async Task<IResult> QueueProvisioningJob(
        ProvisioningRequest request,
        Dictionary<Guid, ProvisioningJob> jobs,
        IPublishEndpoint bus)
    {
        var job = new ProvisioningJob
        {
            ClientName = request.ClientName,
            // Normalize state codes so lookups/storage are case-insensitive at the edge.
            StateCode = request.StateCode.ToUpperInvariant(),
            Subdomain = request.Subdomain,
            AdminEmail = request.AdminEmail,
            SiteCode = request.SiteCode,
            Environment = request.Environment,
            Tier = request.Tier,
            Status = ProvisioningStatus.Pending
        };
        jobs[job.Id] = job;
        await bus.Publish(new ProvisionClientCommand
        {
            JobId = job.Id,
            ClientName = job.ClientName,
            StateCode = job.StateCode,
            Subdomain = job.Subdomain,
            AdminEmail = job.AdminEmail,
            SiteCode = job.SiteCode,
            Environment = job.Environment,
            Tier = job.Tier
        });
        return Results.Accepted($"/api/provision/{job.Id}", new { job.Id, job.Status });
    }

    /// <summary>Returns the stored job, or 404 if the id was never queued.</summary>
    private static IResult GetJobStatus(Guid id, Dictionary<Guid, ProvisioningJob> jobs) =>
        jobs.TryGetValue(id, out var job) ? Results.Ok(job) : Results.NotFound();

    /// <summary>Returns every tenant known to the registry.</summary>
    private static IResult GetTenants(TenantRegistryService registry) =>
        Results.Ok(registry.GetAll());

    /// <summary>
    /// Streams a job's progress events as SSE until a terminal event arrives.
    /// Subscribes to the SseEventBus channel for the job id; the subscription is
    /// always released in the finally block, including on client disconnect.
    /// </summary>
    private static async Task StreamJobEvents(
        Guid id,
        SseEventBus bus,
        Dictionary<Guid, ProvisioningJob> jobs,
        HttpContext ctx,
        CancellationToken cancellationToken)
    {
        if (!jobs.ContainsKey(id))
        {
            ctx.Response.StatusCode = 404;
            return;
        }
        ctx.Response.Headers.ContentType = "text/event-stream";
        ctx.Response.Headers.CacheControl = "no-cache";
        ctx.Response.Headers.Connection = "keep-alive";
        var channel = bus.Subscribe(id);
        try
        {
            await foreach (var evt in channel.Reader.ReadAllAsync(cancellationToken))
            {
                var json = JsonSerializer.Serialize(evt, JsonOpts);
                await ctx.Response.WriteAsync($"data: {json}\n\n", cancellationToken);
                await ctx.Response.Body.FlushAsync(cancellationToken);
                // Terminal events end the stream; the client closes the EventSource.
                if (evt.Type is "job_complete" or "job_failed") break;
            }
        }
        catch (OperationCanceledException)
        {
            // Client disconnected (e.g. browser refresh) — not an error.
        }
        finally
        {
            bus.Unsubscribe(id, channel);
        }
    }
}
@@ -0,0 +1,62 @@
using ControlPlane.Api.Services;
using ControlPlane.Core.Services;
using System.Text.Json;
namespace ControlPlane.Api.Endpoints;
public static class ReleaseEndpoints
{
    // Web defaults + enum-as-string so SSE payloads match what the UI expects.
    private static readonly JsonSerializerOptions JsonOpts = new(JsonSerializerDefaults.Web)
    {
        Converters = { new System.Text.Json.Serialization.JsonStringEnumConverter() },
    };

    /// <summary>Maps the /api/release group: history and the SSE release trigger.</summary>
    public static IEndpointRouteBuilder MapReleaseEndpoints(this IEndpointRouteBuilder app)
    {
        var group = app.MapGroup("/api/release").WithTags("Release");
        group.MapGet("/history", GetHistory);
        group.MapPost("/{env}", TriggerRelease);
        return app;
    }

    /// <summary>Returns persisted release history records.</summary>
    private static async Task<IResult> GetHistory(BuildHistoryService history) =>
        Results.Ok(await history.GetReleasesAsync());

    /// <summary>
    /// Triggers a rolling redeploy of all managed containers in the target env.
    /// Streams SSE lines until release is complete.
    /// Valid env values: fdev | uat | prod | all
    /// </summary>
    private static async Task TriggerRelease(
        string env,
        HttpContext ctx,
        ReleaseService releases,
        CancellationToken ct)
    {
        var valid = new[] { "fdev", "uat", "prod", "all" };
        if (!valid.Contains(env, StringComparer.OrdinalIgnoreCase))
        {
            ctx.Response.StatusCode = 400;
            await ctx.Response.WriteAsJsonAsync(
                new { error = $"Invalid environment '{env}'. Valid: fdev, uat, prod, all." }, ct);
            return;
        }
        ctx.Response.Headers.ContentType = "text/event-stream";
        ctx.Response.Headers.CacheControl = "no-cache";
        ctx.Response.Headers.Connection = "keep-alive";

        // BUG FIX: the original OnLine callback called Send(...).GetAwaiter().GetResult(),
        // blocking sync-over-async on the response stream from the release's logging
        // thread. Funnel log lines through a channel so one reader owns the response —
        // the same pattern PromotionEndpoints uses.
        var channel = System.Threading.Channels.Channel.CreateUnbounded<string>(
            new System.Threading.Channels.UnboundedChannelOptions { SingleWriter = true, SingleReader = true });
        void OnLine(string line) => channel.Writer.TryWrite(line);

        var releaseTask = releases.ReleaseAsync(env, OnLine, ct);
        // Close the channel (propagating any fault) once the release finishes.
        _ = releaseTask.ContinueWith(t => channel.Writer.TryComplete(t.Exception), TaskScheduler.Default);

        await foreach (var line in channel.Reader.ReadAllAsync(ct))
        {
            var json = JsonSerializer.Serialize(new { line }, JsonOpts);
            await ctx.Response.WriteAsync($"data: {json}\n\n", ct);
            await ctx.Response.Body.FlushAsync(ct);
        }

        var record = await releaseTask;
        var doneJson = JsonSerializer.Serialize(new { done = true, release = record }, JsonOpts);
        await ctx.Response.WriteAsync($"data: {doneJson}\n\n", ct);
        await ctx.Response.Body.FlushAsync(ct);
    }
}
@@ -0,0 +1,108 @@
using Docker.DotNet;
using Docker.DotNet.Models;
using ControlPlane.Core.Services;
namespace ControlPlane.Api.Endpoints;
public static class TenantLogEndpoints
{
    /// <summary>Maps the per-tenant Docker log SSE endpoint.</summary>
    public static IEndpointRouteBuilder MapTenantLogEndpoints(this IEndpointRouteBuilder app)
    {
        app.MapGet("/api/tenants/{subdomain}/logs", StreamTenantLogs).WithTags("Tenants");
        return app;
    }

    /// <summary>
    /// Streams a tenant container's stdout+stderr as SSE (last 200 lines, then
    /// follow). 404 when the tenant or its container name is unknown.
    /// </summary>
    private static async Task StreamTenantLogs(
        string subdomain,
        IConfiguration config,
        TenantRegistryService registry,
        HttpContext ctx,
        CancellationToken cancellationToken)
    {
        var tenant = registry.GetAll().FirstOrDefault(t => t.Subdomain == subdomain);
        if (tenant is null)
        {
            ctx.Response.StatusCode = 404;
            return;
        }
        var containerName = tenant.ContainerName;
        if (string.IsNullOrWhiteSpace(containerName))
        {
            ctx.Response.StatusCode = 404;
            return;
        }
        ctx.Response.Headers.ContentType = "text/event-stream";
        ctx.Response.Headers.CacheControl = "no-cache";
        ctx.Response.Headers.Connection = "keep-alive";

        var socketUri = config["Docker:Socket"] ?? "npipe://./pipe/docker_engine";
        using var docker = new DockerClientConfiguration(new Uri(socketUri)).CreateClient();
        var logParams = new ContainerLogsParameters
        {
            ShowStdout = true,
            ShowStderr = true,
            Follow = true,
            Tail = "200",
            Timestamps = true,
        };
        try
        {
            using var stream = await docker.Containers.GetContainerLogsAsync(
                containerName, tty: false, logParams, cancellationToken);
            // MultiplexedStream exposes CopyOutputToAsync which separates stdout/stderr.
            // Follow=true means the copy won't complete until cancelled — pump it
            // through pipes in the background instead of buffering.
            var stdoutPipe = new System.IO.Pipelines.Pipe();
            var stderrPipe = new System.IO.Pipelines.Pipe();
            _ = Task.Run(async () =>
            {
                try
                {
                    await stream.CopyOutputToAsync(
                        System.IO.Stream.Null,
                        stdoutPipe.Writer.AsStream(),
                        stderrPipe.Writer.AsStream(),
                        cancellationToken);
                }
                finally
                {
                    stdoutPipe.Writer.Complete();
                    stderrPipe.Writer.Complete();
                }
            }, cancellationToken);

            // BUG FIX: the original ran two tasks that both wrote directly to
            // ctx.Response — HttpResponse is not safe for concurrent writes. Merge
            // stdout and stderr lines into one channel so a single loop owns the
            // response. (Also removes two dead MemoryStream locals.)
            var lines = System.Threading.Channels.Channel.CreateUnbounded<string>();
            var pumps = Task.WhenAll(
                PumpLinesAsync(stdoutPipe.Reader, lines.Writer, cancellationToken),
                PumpLinesAsync(stderrPipe.Reader, lines.Writer, cancellationToken));
            _ = pumps.ContinueWith(_ => lines.Writer.TryComplete(), TaskScheduler.Default);

            await foreach (var line in lines.Reader.ReadAllAsync(cancellationToken))
            {
                await ctx.Response.WriteAsync($"data: {line}\n\n", cancellationToken);
                await ctx.Response.Body.FlushAsync(cancellationToken);
            }
        }
        catch (OperationCanceledException) { /* client disconnected — normal */ }
    }

    /// <summary>Reads a pipe line-by-line and forwards non-blank lines to the channel.</summary>
    private static async Task PumpLinesAsync(
        System.IO.Pipelines.PipeReader pipe,
        System.Threading.Channels.ChannelWriter<string> sink,
        CancellationToken ct)
    {
        using var reader = new System.IO.StreamReader(pipe.AsStream());
        while (!ct.IsCancellationRequested)
        {
            var line = await reader.ReadLineAsync(ct);
            if (line is null) break;
            if (string.IsNullOrWhiteSpace(line)) continue;
            sink.TryWrite(line);
        }
    }
}
+131
View File
@@ -0,0 +1,131 @@
using ControlPlane.Api.Consumers;
using ControlPlane.Api.Endpoints;
using ControlPlane.Api.Services;
using ControlPlane.Core.Models;
using ControlPlane.Core.Services;
using MassTransit;
using Npgsql;
// ControlPlane.Api composition root: DI registrations, MassTransit wiring,
// endpoint mapping, and idempotent OPC schema bootstrap.
var builder = WebApplication.CreateBuilder(args);
builder.AddServiceDefaults();
builder.Services.AddOpenApi();
// Serialize enums as strings in all minimal-API JSON responses.
builder.Services.ConfigureHttpJsonOptions(o =>
    o.SerializerOptions.Converters.Add(new System.Text.Json.Serialization.JsonStringEnumConverter()));
// In-memory job store - swap for EF Core post-MVP
// NOTE(review): a plain Dictionary is not thread-safe under concurrent requests —
// consider ConcurrentDictionary (handlers in ProvisioningEndpoints mutate it).
builder.Services.AddSingleton<Dictionary<Guid, ProvisioningJob>>();
// Tenant registry - reads ClientAssets/{subdomain}.xml files
builder.Services.AddSingleton<TenantRegistryService>();
// SSE event bus - ProgressConsumer writes here, SSE endpoint reads
builder.Services.AddSingleton<SseEventBus>();
// Build + release pipeline services
builder.Services.AddSingleton<BuildHistoryService>();
builder.Services.AddSingleton<ImageBuildService>();
builder.Services.AddSingleton<ReleaseService>();
builder.Services.AddSingleton<ProjectBuildService>();
builder.Services.AddSingleton<PromotionService>();
// OPC persistence (raw Npgsql)
var opcConnStr = builder.Configuration.GetConnectionString("opcdb");
if (!string.IsNullOrWhiteSpace(opcConnStr))
    builder.Services.AddSingleton(NpgsqlDataSource.Create(opcConnStr));
else
    // NOTE(review): hard-coded local-dev credentials fallback — confirm this can
    // never be the active path in a shared/production environment.
    builder.Services.AddSingleton(NpgsqlDataSource.Create("Host=localhost;Database=opcdb;Username=postgres;Password=controlplane-dev"));
builder.Services.AddScoped<OpcService>();
// Named HttpClient for OpenRouter AI assist proxy
builder.Services.AddHttpClient("openrouter");
// Gitea integration
// NOTE(review): certificate validation is disabled for the Gitea client —
// acceptable only for the internal self-signed endpoint; verify before reuse.
builder.Services.AddHttpClient("gitea").ConfigurePrimaryHttpMessageHandler(() =>
    new HttpClientHandler { ServerCertificateCustomValidationCallback = HttpClientHandler.DangerousAcceptAnyServerCertificateValidator });
builder.Services.AddScoped<GiteaService>();
builder.Services.AddMassTransit(x =>
{
    x.SetKebabCaseEndpointNameFormatter();
    // Receives ProvisioningProgressEvent from Worker and pushes to SSE
    x.AddConsumer<ProvisioningProgressConsumer>();
    x.UsingRabbitMq((ctx, cfg) =>
    {
        var connStr = builder.Configuration.GetConnectionString("rabbitmq");
        if (!string.IsNullOrWhiteSpace(connStr))
            cfg.Host(new Uri(connStr));
        cfg.ConfigureEndpoints(ctx);
    });
});
var app = builder.Build();
app.MapDefaultEndpoints();
if (app.Environment.IsDevelopment())
    app.MapOpenApi();
// Feature endpoint groups (each extension lives in ControlPlane.Api.Endpoints).
app.MapProvisioningEndpoints();
app.MapTenantLogEndpoints();
app.MapImageBuildEndpoints();
app.MapReleaseEndpoints();
app.MapProjectBuildEndpoints();
app.MapGitEndpoints();
app.MapPromotionEndpoints();
app.MapOpcEndpoints();
app.MapGiteaEndpoints();
app.MapInfraEndpoints();
// Ensure OPC tables exist (idempotent — IF NOT EXISTS)
var ds = app.Services.GetRequiredService<NpgsqlDataSource>();
await using (var cmd = ds.CreateCommand("""
    CREATE TABLE IF NOT EXISTS opc (
        id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
        number VARCHAR(20) NOT NULL UNIQUE,
        title VARCHAR(500) NOT NULL,
        description TEXT NOT NULL DEFAULT '',
        type VARCHAR(50) NOT NULL DEFAULT 'General',
        status VARCHAR(50) NOT NULL DEFAULT 'New',
        priority VARCHAR(20) NOT NULL DEFAULT 'Medium',
        assignee VARCHAR(200) NOT NULL DEFAULT '',
        created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
        updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
    );
    CREATE TABLE IF NOT EXISTS opc_note (
        id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
        opc_id UUID NOT NULL REFERENCES opc(id) ON DELETE CASCADE,
        author VARCHAR(200) NOT NULL,
        content TEXT NOT NULL,
        created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
    );
    CREATE TABLE IF NOT EXISTS opc_artifact (
        id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
        opc_id UUID NOT NULL REFERENCES opc(id) ON DELETE CASCADE,
        artifact_type VARCHAR(50) NOT NULL,
        title VARCHAR(500) NOT NULL DEFAULT '',
        content TEXT NOT NULL DEFAULT '',
        created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
        updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
    );
    CREATE TABLE IF NOT EXISTS opc_pinned_commit (
        opc_id UUID NOT NULL REFERENCES opc(id) ON DELETE CASCADE,
        hash VARCHAR(40) NOT NULL,
        short_hash VARCHAR(10) NOT NULL DEFAULT '',
        subject VARCHAR(1000) NOT NULL DEFAULT '',
        author VARCHAR(200) NOT NULL DEFAULT '',
        pinned_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
        pinned_by VARCHAR(200) NOT NULL DEFAULT '',
        PRIMARY KEY (opc_id, hash)
    );
    CREATE INDEX IF NOT EXISTS ix_opc_number ON opc(number);
    CREATE INDEX IF NOT EXISTS ix_opc_note_opc_id ON opc_note(opc_id);
    CREATE INDEX IF NOT EXISTS ix_opc_artifact_opc_id ON opc_artifact(opc_id);
    CREATE INDEX IF NOT EXISTS ix_opc_artifact_type ON opc_artifact(opc_id, artifact_type);
    CREATE INDEX IF NOT EXISTS ix_opc_pinned_commit_opc_id ON opc_pinned_commit(opc_id);
    """))
    await cmd.ExecuteNonQueryAsync();
app.Run();
@@ -0,0 +1,12 @@
{
"profiles": {
"ControlPlane.Api": {
"commandName": "Project",
"launchBrowser": true,
"environmentVariables": {
"ASPNETCORE_ENVIRONMENT": "Development"
},
"applicationUrl": "https://localhost:7280;http://localhost:5280"
}
}
}
+216
View File
@@ -0,0 +1,216 @@
using System.Net.Http.Headers;
using System.Net.Http.Json;
using System.Text;
using System.Text.Json;
using System.Text.Json.Serialization;
using ControlPlane.Core.Models;
namespace ControlPlane.Api.Services;
/// <summary>
/// Thin wrapper around the Gitea REST API v1.
/// Configured via Gitea__BaseUrl, Gitea__Owner, and Gitea__Token in appsettings.
/// </summary>
/// <summary>
/// Thin wrapper around the Gitea REST API v1.
/// Configured via Gitea__BaseUrl, Gitea__Owner, and Gitea__Token in appsettings.
/// Read methods degrade gracefully (warn + empty/null) so a Gitea outage never
/// breaks the control plane UI; write methods log the failure and return null.
/// </summary>
public class GiteaService
{
    private readonly HttpClient _http;
    private readonly string _owner;
    private readonly string _repo;
    private readonly ILogger<GiteaService> _log;
    private static readonly JsonSerializerOptions JsonOpts = new(JsonSerializerDefaults.Web);

    public GiteaService(IHttpClientFactory factory, IConfiguration cfg, ILogger<GiteaService> log)
    {
        _log = log;
        _owner = cfg["Gitea:Owner"] ?? "Clarity";
        _repo = cfg["Gitea:Repo"] ?? "Clarity";
        var baseUrl = cfg["Gitea:BaseUrl"] ?? "https://opc.clarity.test";
        var token = cfg["Gitea:Token"] ?? string.Empty;
        _http = factory.CreateClient("gitea");
        // Trailing slash matters: relative request URIs are resolved against it.
        _http.BaseAddress = new Uri(baseUrl.TrimEnd('/') + "/api/v1/");
        _http.DefaultRequestHeaders.Accept.Add(new MediaTypeWithQualityHeaderValue("application/json"));
        if (!string.IsNullOrWhiteSpace(token))
            // Gitea uses the "token" scheme rather than "Bearer".
            _http.DefaultRequestHeaders.Authorization = new AuthenticationHeaderValue("token", token);
    }

    // ── Repos ─────────────────────────────────────────────────────────────────

    /// <summary>Fetches repo metadata; null when Gitea is unreachable or errors.</summary>
    public async Task<GiteaRepo?> GetRepoAsync(CancellationToken ct = default)
    {
        try
        {
            return await _http.GetFromJsonAsync<GiteaRepo>($"repos/{_owner}/{_repo}", JsonOpts, ct);
        }
        catch (Exception ex) { _log.LogWarning(ex, "Gitea GetRepo failed"); return null; }
    }

    // ── Branches ──────────────────────────────────────────────────────────────

    /// <summary>Lists up to 50 branches; empty list on failure.</summary>
    public async Task<List<GiteaBranch>> ListBranchesAsync(CancellationToken ct = default)
    {
        try
        {
            return await _http.GetFromJsonAsync<List<GiteaBranch>>(
                $"repos/{_owner}/{_repo}/branches?limit=50", JsonOpts, ct) ?? [];
        }
        catch (Exception ex) { _log.LogWarning(ex, "Gitea ListBranches failed"); return []; }
    }

    /// <summary>
    /// Creates a feature branch named from the OPC number and title, branched
    /// off req.From. Returns null (with a warning log) on failure.
    /// </summary>
    public async Task<GiteaBranch?> CreateBranchAsync(CreateBranchRequest req, CancellationToken ct = default)
    {
        // Slugify: "OPC # 0032" + title → "feature/OPC-0032-git-workflow-integration"
        var slug = SlugifyTitle(req.OpcTitle);
        var num = req.OpcNumber.Replace("OPC # ", "OPC-").Replace(" ", "");
        var branchName = $"feature/{num}-{slug}";
        var body = JsonSerializer.Serialize(new
        {
            new_branch_name = branchName,
            old_branch_name = req.From,
        }, JsonOpts);
        var res = await _http.PostAsync(
            $"repos/{_owner}/{_repo}/branches",
            new StringContent(body, Encoding.UTF8, "application/json"), ct);
        if (!res.IsSuccessStatusCode)
        {
            var err = await res.Content.ReadAsStringAsync(ct);
            _log.LogWarning("Gitea CreateBranch failed {Status}: {Error}", res.StatusCode, err);
            return null;
        }
        return await res.Content.ReadFromJsonAsync<GiteaBranch>(JsonOpts, ct);
    }

    // ── Pull Requests ─────────────────────────────────────────────────────────

    /// <summary>Lists up to 50 PRs in the given state ("open"/"closed"/"all"); empty on failure.</summary>
    public async Task<List<GiteaPullRequest>> ListPullRequestsAsync(
        string state = "open", CancellationToken ct = default)
    {
        try
        {
            return await _http.GetFromJsonAsync<List<GiteaPullRequest>>(
                $"repos/{_owner}/{_repo}/pulls?state={state}&limit=50", JsonOpts, ct) ?? [];
        }
        catch (Exception ex) { _log.LogWarning(ex, "Gitea ListPRs failed"); return []; }
    }

    /// <summary>Fetches a single PR by number; null on failure.</summary>
    public async Task<GiteaPullRequest?> GetPullRequestAsync(long number, CancellationToken ct = default)
    {
        try
        {
            return await _http.GetFromJsonAsync<GiteaPullRequest>(
                $"repos/{_owner}/{_repo}/pulls/{number}", JsonOpts, ct);
        }
        catch (Exception ex) { _log.LogWarning(ex, "Gitea GetPR failed"); return null; }
    }

    /// <summary>Opens a PR from req.Head into req.Base; null (with a warning log) on failure.</summary>
    public async Task<GiteaPullRequest?> CreatePullRequestAsync(
        CreatePullRequestRequest req, CancellationToken ct = default)
    {
        var body = JsonSerializer.Serialize(new
        {
            title = req.Title,
            head = req.Head,
            @base = req.Base,   // "base" is a C# keyword, hence the verbatim identifier
            body = req.Body,
        }, JsonOpts);
        var res = await _http.PostAsync(
            $"repos/{_owner}/{_repo}/pulls",
            new StringContent(body, Encoding.UTF8, "application/json"), ct);
        if (!res.IsSuccessStatusCode)
        {
            var err = await res.Content.ReadAsStringAsync(ct);
            _log.LogWarning("Gitea CreatePR failed {Status}: {Error}", res.StatusCode, err);
            return null;
        }
        return await res.Content.ReadFromJsonAsync<GiteaPullRequest>(JsonOpts, ct);
    }

    // ── Tags ──────────────────────────────────────────────────────────────────

    /// <summary>Lists up to 20 tags; empty list on failure.</summary>
    public async Task<List<GiteaTag>> ListTagsAsync(CancellationToken ct = default)
    {
        try
        {
            return await _http.GetFromJsonAsync<List<GiteaTag>>(
                $"repos/{_owner}/{_repo}/tags?limit=20", JsonOpts, ct) ?? [];
        }
        catch (Exception ex) { _log.LogWarning(ex, "Gitea ListTags failed"); return []; }
    }

    /// <summary>Creates an annotated tag at req.CommitSha; null (with a warning log) on failure.</summary>
    public async Task<GiteaTag?> CreateTagAsync(CreateTagRequest req, CancellationToken ct = default)
    {
        var body = JsonSerializer.Serialize(new
        {
            tag_name = req.TagName,
            message = req.Message,
            target = req.CommitSha,
        }, JsonOpts);
        var res = await _http.PostAsync(
            $"repos/{_owner}/{_repo}/tags",
            new StringContent(body, Encoding.UTF8, "application/json"), ct);
        if (!res.IsSuccessStatusCode)
        {
            var err = await res.Content.ReadAsStringAsync(ct);
            _log.LogWarning("Gitea CreateTag failed {Status}: {Error}", res.StatusCode, err);
            return null;
        }
        return await res.Content.ReadFromJsonAsync<GiteaTag>(JsonOpts, ct);
    }

    // ── Webhooks ──────────────────────────────────────────────────────────────

    /// <summary>Lists the repo's webhooks; empty list on failure.</summary>
    public async Task<List<GiteaWebhook>> ListWebhooksAsync(CancellationToken ct = default)
    {
        try
        {
            return await _http.GetFromJsonAsync<List<GiteaWebhook>>(
                $"repos/{_owner}/{_repo}/hooks", JsonOpts, ct) ?? [];
        }
        catch (Exception ex) { _log.LogWarning(ex, "Gitea ListWebhooks failed"); return []; }
    }

    /// <summary>Registers an active JSON webhook for req.Events; null (with a warning log) on failure.</summary>
    public async Task<GiteaWebhook?> RegisterWebhookAsync(
        CreateWebhookRequest req, CancellationToken ct = default)
    {
        var body = JsonSerializer.Serialize(new
        {
            type = "gitea",
            active = true,
            config = new { url = req.TargetUrl, content_type = "json" },
            events = req.Events,
        }, JsonOpts);
        var res = await _http.PostAsync(
            $"repos/{_owner}/{_repo}/hooks",
            new StringContent(body, Encoding.UTF8, "application/json"), ct);
        if (!res.IsSuccessStatusCode)
        {
            var err = await res.Content.ReadAsStringAsync(ct);
            _log.LogWarning("Gitea RegisterWebhook failed {Status}: {Error}", res.StatusCode, err);
            return null;
        }
        return await res.Content.ReadFromJsonAsync<GiteaWebhook>(JsonOpts, ct);
    }

    // ── Helpers ───────────────────────────────────────────────────────────────

    /// <summary>
    /// Lowercases a title, collapses every non-alphanumeric run into a single
    /// hyphen, trims edge hyphens, and caps the result at 40 characters.
    /// </summary>
    private static string SlugifyTitle(string title)
    {
        var slug = System.Text.RegularExpressions.Regex
            .Replace(title.ToLowerInvariant(), @"[^a-z0-9]+", "-")
            .Trim('-');
        // BUG FIX: cap by the SLUG's length, not the title's. Replacement + Trim
        // can shorten the string, so slicing with Math.Min(40, title.Length)
        // threw ArgumentOutOfRangeException for titles with punctuation runs
        // (e.g. "a b!!" → slug "a-b", but the old code sliced [..5]).
        return slug[..Math.Min(40, slug.Length)];
    }
}
@@ -0,0 +1,144 @@
using ControlPlane.Core.Models;
using ControlPlane.Core.Services;
using Docker.DotNet;
using Docker.DotNet.Models;
namespace ControlPlane.Api.Services;
/// <summary>
/// Drives `docker build` for the clarity-server image via the Docker SDK.
/// Streams each build log line to the provided callback so the API endpoint
/// can forward it as SSE to the control plane UI in real time.
/// Persists build history via BuildHistoryService.
/// </summary>
/// <summary>
/// Drives `docker build` for the clarity-server image via the Docker SDK.
/// Streams each build log line to the provided callback so the API endpoint
/// can forward it as SSE to the control plane UI in real time.
/// Persists build history via BuildHistoryService.
/// </summary>
public class ImageBuildService(
    IConfiguration config,
    BuildHistoryService history,
    ILogger<ImageBuildService> logger)
{
    // Static so the one-build-at-a-time guard holds process-wide even if DI
    // ever produced more than one instance. CurrentCount == 0 ⇒ slot taken.
    private static readonly SemaphoreSlim _lock = new(1, 1);
    public bool IsBuilding => _lock.CurrentCount == 0;
    // Image tag to build; configurable via Docker:ClarityServerImage.
    public string ImageName => config["Docker:ClarityServerImage"] ?? "clarity-server:latest";

    /// <summary>
    /// Summarizes the most recent DockerImage build (target, finish time,
    /// status) plus whether a build is currently running.
    /// </summary>
    public async Task<ImageBuildStatus> GetStatusAsync()
    {
        var builds = await history.GetBuildsAsync();
        var last = builds.FirstOrDefault(b => b.Kind == BuildKind.DockerImage);
        return new ImageBuildStatus(
            last?.Target,
            last?.FinishedAt,
            last?.Status.ToString() ?? "Never built",
            IsBuilding);
    }

    /// <summary>
    /// Runs docker build and streams each log line to <paramref name="onLine"/>.
    /// Returns true on success, false if the build failed or was already running.
    /// </summary>
    public async Task<bool> BuildAsync(
        string repoRoot,
        Action<string> onLine,
        CancellationToken ct)
    {
        // Non-blocking acquire: a second concurrent build is rejected, not queued.
        if (!await _lock.WaitAsync(TimeSpan.Zero, ct))
        {
            onLine("⚠️ A build is already in progress.");
            return false;
        }
        var record = await history.CreateBuildAsync(BuildKind.DockerImage, ImageName);
        try
        {
            var socketUri = config["Docker:Socket"] ?? "npipe://./pipe/docker_engine";
            using var docker = new DockerClientConfiguration(new Uri(socketUri)).CreateClient();
            var (repo, tag) = SplitImageTag(ImageName);
            var dockerfilePath = "Clarity.Server/Dockerfile";
            // Every line goes both to the live callback and the persisted record.
            void Log(string line) { onLine(line); record.Log.Add(line); }
            Log($"▶ Building {ImageName} from {repoRoot}");
            Log($"  Dockerfile: {dockerfilePath}");
            Log("──────────────────────────────────────");
            var buildParams = new ImageBuildParameters
            {
                Dockerfile = dockerfilePath,
                Tags = [$"{repo}:{tag}"],
                Remove = true,
                ForceRemove = true,
            };
            bool success = true;
            string? errorDetail = null;
            // The daemon reports progress/errors via JSONMessage callbacks; an
            // Error message marks the build failed even though the call returns.
            await docker.Images.BuildImageFromDockerfileAsync(
                buildParams,
                await CreateTarballAsync(repoRoot, ct),
                authConfigs: null,
                headers: null,
                new Progress<JSONMessage>(msg =>
                {
                    if (!string.IsNullOrWhiteSpace(msg.Stream))
                        Log(msg.Stream.TrimEnd('\n'));
                    if (msg.Error is not null)
                    {
                        success = false;
                        errorDetail = msg.Error.Message;
                        Log($"✖ {msg.Error.Message}");
                    }
                }),
                ct);
            Log("──────────────────────────────────────");
            if (success) Log($"✔ {ImageName} built successfully at {DateTimeOffset.UtcNow:u}");
            else Log($"✖ Build failed: {errorDetail}");
            await history.CompleteBuildAsync(record, success ? BuildStatus.Succeeded : BuildStatus.Failed);
            logger.LogInformation("Image build {Result} for {Image}", success ? "succeeded" : "failed", ImageName);
            return success;
        }
        catch (Exception ex)
        {
            // Record the failure before surfacing it to the caller's stream.
            record.Log.Add($"Exception: {ex.Message}");
            await history.CompleteBuildAsync(record, BuildStatus.Failed);
            onLine($"✖ Exception during build: {ex.Message}");
            logger.LogError(ex, "Image build threw an exception.");
            return false;
        }
        finally
        {
            _lock.Release();
        }
    }

    /// <summary>
    /// Packs the entire repo root into a tar stream for the Docker build context.
    /// Respects .dockerignore if present.
    /// NOTE(review): buffers the whole context in a MemoryStream — fine for small
    /// repos, but memory use scales with repo size; confirm acceptable.
    /// </summary>
    private static async Task<Stream> CreateTarballAsync(string repoRoot, CancellationToken ct)
    {
        // Use docker's own CLI to create the tarball via stdin — avoids reimplementing
        // .dockerignore parsing. Fall back to a pure managed tar if CLI isn't available.
        // For simplicity we use a managed approach: stream the directory as a tar.
        var ms = new MemoryStream();
        await Task.Run(() => TarHelper.Pack(repoRoot, ms), ct);
        ms.Position = 0;
        return ms;
    }

    // Splits "repo:tag" on the LAST colon; defaults the tag to "latest".
    private static (string repo, string tag) SplitImageTag(string image)
    {
        var colon = image.LastIndexOf(':');
        return colon < 0 ? (image, "latest") : (image[..colon], image[(colon + 1)..]);
    }
}
/// <summary>
/// Snapshot of the most recent image build: the image name, when it finished,
/// a human-readable status message, and whether a build is running right now.
/// </summary>
public record ImageBuildStatus(
    string? ImageName,
    DateTimeOffset? BuiltAt,
    string LastMessage,
    bool IsBuilding);
+297
View File
@@ -0,0 +1,297 @@
using ControlPlane.Core.Models;
using Npgsql;
namespace ControlPlane.Api.Services;
public class OpcService(NpgsqlDataSource db)
{
// ── Helpers ──────────────────────────────────────────────────────────────
// Maps one row of the 10-column opc projection into an OpcRecord. Ordinals
// must match the column order used by every opc SELECT in this class:
// id, number, title, description, type, status, priority, assignee,
// created_at, updated_at.
private static OpcRecord ReadOpc(NpgsqlDataReader r) => new(
    r.GetGuid(0),      // id
    r.GetString(1),    // number
    r.GetString(2),    // title
    r.GetString(3),    // description
    r.GetString(4),    // type
    r.GetString(5),    // status
    r.GetString(6),    // priority
    r.GetString(7),    // assignee
    r.GetDateTime(8),  // created_at
    r.GetDateTime(9)   // updated_at
);
// Maps one row of the opc_note projection: id, opc_id, author, content, created_at.
private static OpcNote ReadNote(NpgsqlDataReader r) => new(
    r.GetGuid(0),      // id
    r.GetGuid(1),      // opc_id
    r.GetString(2),    // author
    r.GetString(3),    // content
    r.GetDateTime(4)   // created_at
);
// Maps one row of the opc_artifact projection: id, opc_id, artifact_type,
// title, content, created_at, updated_at.
private static OpcArtifact ReadArtifact(NpgsqlDataReader r) => new(
    r.GetGuid(0),      // id
    r.GetGuid(1),      // opc_id
    r.GetString(2),    // artifact_type
    r.GetString(3),    // title
    r.GetString(4),    // content
    r.GetDateTime(5),  // created_at
    r.GetDateTime(6)   // updated_at
);
// ── Next OPC number ───────────────────────────────────────────────────────
public async Task<string> NextNumberAsync(CancellationToken ct = default)
{
await using var cmd = db.CreateCommand(
"SELECT number FROM opc ORDER BY CAST(TRIM(SUBSTRING(number FROM 7)) AS INTEGER) DESC LIMIT 1");
var last = await cmd.ExecuteScalarAsync(ct) as string;
if (last is null) return "OPC # 0001";
if (int.TryParse(last[6..], out var n))
return $"OPC # {n + 1:D4}";
return "OPC # 0001";
}
// ── OPC CRUD ──────────────────────────────────────────────────────────────
public async Task<List<OpcRecord>> ListAsync(
string? typeFilter = null, string? statusFilter = null,
CancellationToken ct = default)
{
var sql = """
SELECT id, number, title, description, type, status, priority, assignee,
created_at, updated_at
FROM opc
WHERE ($1::text IS NULL OR type = $1)
AND ($2::text IS NULL OR status = $2)
ORDER BY created_at DESC
""";
await using var cmd = db.CreateCommand(sql);
cmd.Parameters.AddWithValue(typeFilter ?? (object)DBNull.Value);
cmd.Parameters.AddWithValue(statusFilter ?? (object)DBNull.Value);
await using var r = await cmd.ExecuteReaderAsync(ct);
var list = new List<OpcRecord>();
while (await r.ReadAsync(ct)) list.Add(ReadOpc(r));
return list;
}
public async Task<OpcRecord?> GetAsync(Guid id, CancellationToken ct = default)
{
await using var cmd = db.CreateCommand(
"SELECT id, number, title, description, type, status, priority, assignee, created_at, updated_at FROM opc WHERE id = $1");
cmd.Parameters.AddWithValue(id);
await using var r = await cmd.ExecuteReaderAsync(ct);
return await r.ReadAsync(ct) ? ReadOpc(r) : null;
}
public async Task<OpcRecord> CreateAsync(CreateOpcRequest req, CancellationToken ct = default)
{
var number = await NextNumberAsync(ct);
var sql = """
INSERT INTO opc (number, title, description, type, status, priority, assignee)
VALUES ($1, $2, $3, $4, 'New', $5, $6)
RETURNING id, number, title, description, type, status, priority, assignee,
created_at, updated_at
""";
await using var cmd = db.CreateCommand(sql);
cmd.Parameters.AddWithValue(number);
cmd.Parameters.AddWithValue(req.Title);
cmd.Parameters.AddWithValue(req.Description);
cmd.Parameters.AddWithValue(req.Type);
cmd.Parameters.AddWithValue(req.Priority);
cmd.Parameters.AddWithValue(req.Assignee);
await using var r = await cmd.ExecuteReaderAsync(ct);
await r.ReadAsync(ct);
return ReadOpc(r);
}
public async Task<OpcRecord?> UpdateAsync(Guid id, UpdateOpcRequest req, CancellationToken ct = default)
{
var sql = """
UPDATE opc SET
title = COALESCE($2, title),
description = COALESCE($3, description),
type = COALESCE($4, type),
status = COALESCE($5, status),
priority = COALESCE($6, priority),
assignee = COALESCE($7, assignee),
updated_at = NOW()
WHERE id = $1
RETURNING id, number, title, description, type, status, priority, assignee,
created_at, updated_at
""";
await using var cmd = db.CreateCommand(sql);
cmd.Parameters.AddWithValue(id);
cmd.Parameters.AddWithValue(req.Title ?? (object)DBNull.Value);
cmd.Parameters.AddWithValue(req.Description ?? (object)DBNull.Value);
cmd.Parameters.AddWithValue(req.Type ?? (object)DBNull.Value);
cmd.Parameters.AddWithValue(req.Status ?? (object)DBNull.Value);
cmd.Parameters.AddWithValue(req.Priority ?? (object)DBNull.Value);
cmd.Parameters.AddWithValue(req.Assignee ?? (object)DBNull.Value);
await using var r = await cmd.ExecuteReaderAsync(ct);
return await r.ReadAsync(ct) ? ReadOpc(r) : null;
}
public async Task<bool> DeleteAsync(Guid id, CancellationToken ct = default)
{
await using var cmd = db.CreateCommand("DELETE FROM opc WHERE id = $1");
cmd.Parameters.AddWithValue(id);
return await cmd.ExecuteNonQueryAsync(ct) > 0;
}
// ── Notes ──────────────────────────────────────────────────────────────────
public async Task<List<OpcNote>> ListNotesAsync(Guid opcId, CancellationToken ct = default)
{
await using var cmd = db.CreateCommand(
"SELECT id, opc_id, author, content, created_at FROM opc_note WHERE opc_id = $1 ORDER BY created_at ASC");
cmd.Parameters.AddWithValue(opcId);
await using var r = await cmd.ExecuteReaderAsync(ct);
var list = new List<OpcNote>();
while (await r.ReadAsync(ct)) list.Add(ReadNote(r));
return list;
}
public async Task<OpcNote> AddNoteAsync(Guid opcId, AddNoteRequest req, CancellationToken ct = default)
{
var sql = """
INSERT INTO opc_note (opc_id, author, content)
VALUES ($1, $2, $3)
RETURNING id, opc_id, author, content, created_at
""";
await using var cmd = db.CreateCommand(sql);
cmd.Parameters.AddWithValue(opcId);
cmd.Parameters.AddWithValue(req.Author);
cmd.Parameters.AddWithValue(req.Content);
await using var r = await cmd.ExecuteReaderAsync(ct);
await r.ReadAsync(ct);
return ReadNote(r);
}
// ── Artifacts ─────────────────────────────────────────────────────────────
public async Task<List<OpcArtifact>> ListArtifactsAsync(Guid opcId, string? artifactType = null, CancellationToken ct = default)
{
var sql = """
SELECT id, opc_id, artifact_type, title, content, created_at, updated_at
FROM opc_artifact
WHERE opc_id = $1
AND ($2::text IS NULL OR artifact_type = $2)
ORDER BY created_at ASC
""";
await using var cmd = db.CreateCommand(sql);
cmd.Parameters.AddWithValue(opcId);
cmd.Parameters.AddWithValue(artifactType ?? (object)DBNull.Value);
await using var r = await cmd.ExecuteReaderAsync(ct);
var list = new List<OpcArtifact>();
while (await r.ReadAsync(ct)) list.Add(ReadArtifact(r));
return list;
}
public async Task<OpcArtifact> UpsertArtifactAsync(Guid opcId, UpsertArtifactRequest req, CancellationToken ct = default)
{
var sql = """
INSERT INTO opc_artifact (opc_id, artifact_type, title, content)
VALUES ($1, $2, $3, $4)
ON CONFLICT DO NOTHING
RETURNING id, opc_id, artifact_type, title, content, created_at, updated_at
""";
// Simple insert; for updates use artifact id endpoint
await using var cmd = db.CreateCommand(sql);
cmd.Parameters.AddWithValue(opcId);
cmd.Parameters.AddWithValue(req.ArtifactType);
cmd.Parameters.AddWithValue(req.Title);
cmd.Parameters.AddWithValue(req.Content);
await using var r = await cmd.ExecuteReaderAsync(ct);
await r.ReadAsync(ct);
return ReadArtifact(r);
}
public async Task<OpcArtifact?> UpdateArtifactAsync(Guid artifactId, UpsertArtifactRequest req, CancellationToken ct = default)
{
var sql = """
UPDATE opc_artifact SET
title = $2,
content = $3,
updated_at = NOW()
WHERE id = $1
RETURNING id, opc_id, artifact_type, title, content, created_at, updated_at
""";
await using var cmd = db.CreateCommand(sql);
cmd.Parameters.AddWithValue(artifactId);
cmd.Parameters.AddWithValue(req.Title);
cmd.Parameters.AddWithValue(req.Content);
await using var r = await cmd.ExecuteReaderAsync(ct);
return await r.ReadAsync(ct) ? ReadArtifact(r) : null;
}
public async Task<bool> DeleteArtifactAsync(Guid artifactId, CancellationToken ct = default)
{
await using var cmd = db.CreateCommand("DELETE FROM opc_artifact WHERE id = $1");
cmd.Parameters.AddWithValue(artifactId);
return await cmd.ExecuteNonQueryAsync(ct) > 0;
}
// ── Pinned commits ────────────────────────────────────────────────────────
private static OpcPinnedCommit ReadPinnedCommit(NpgsqlDataReader r) => new(
r.GetGuid(0),
r.GetString(1),
r.GetString(2),
r.GetString(3),
r.GetString(4),
r.GetDateTime(5),
r.GetString(6)
);
public async Task<List<OpcPinnedCommit>> ListPinnedCommitsAsync(Guid opcId, CancellationToken ct = default)
{
await using var cmd = db.CreateCommand(
"SELECT opc_id, hash, short_hash, subject, author, pinned_at, pinned_by FROM opc_pinned_commit WHERE opc_id = $1 ORDER BY pinned_at DESC");
cmd.Parameters.AddWithValue(opcId);
await using var r = await cmd.ExecuteReaderAsync(ct);
var list = new List<OpcPinnedCommit>();
while (await r.ReadAsync(ct)) list.Add(ReadPinnedCommit(r));
return list;
}
public async Task<OpcPinnedCommit?> PinCommitAsync(
Guid opcId, string hash, string shortHash, string subject, string author, string pinnedBy,
CancellationToken ct = default)
{
// Verify the OPC exists
await using var existsCmd = db.CreateCommand("SELECT 1 FROM opc WHERE id = $1");
existsCmd.Parameters.AddWithValue(opcId);
var exists = await existsCmd.ExecuteScalarAsync(ct);
if (exists is null) return null;
var sql = """
INSERT INTO opc_pinned_commit (opc_id, hash, short_hash, subject, author, pinned_by)
VALUES ($1, $2, $3, $4, $5, $6)
ON CONFLICT (opc_id, hash) DO UPDATE SET
short_hash = EXCLUDED.short_hash,
subject = EXCLUDED.subject,
author = EXCLUDED.author,
pinned_by = EXCLUDED.pinned_by,
pinned_at = NOW()
RETURNING opc_id, hash, short_hash, subject, author, pinned_at, pinned_by
""";
await using var cmd = db.CreateCommand(sql);
cmd.Parameters.AddWithValue(opcId);
cmd.Parameters.AddWithValue(hash);
cmd.Parameters.AddWithValue(shortHash);
cmd.Parameters.AddWithValue(subject);
cmd.Parameters.AddWithValue(author);
cmd.Parameters.AddWithValue(pinnedBy);
await using var r = await cmd.ExecuteReaderAsync(ct);
return await r.ReadAsync(ct) ? ReadPinnedCommit(r) : null;
}
public async Task<bool> UnpinCommitAsync(Guid opcId, string hash, CancellationToken ct = default)
{
await using var cmd = db.CreateCommand(
"DELETE FROM opc_pinned_commit WHERE opc_id = $1 AND hash = $2");
cmd.Parameters.AddWithValue(opcId);
cmd.Parameters.AddWithValue(hash);
return await cmd.ExecuteNonQueryAsync(ct) > 0;
}
}
@@ -0,0 +1,127 @@
using ControlPlane.Core.Models;
using ControlPlane.Core.Services;
using Microsoft.Extensions.Configuration;
using Microsoft.Extensions.Logging;
using System.Diagnostics;
namespace ControlPlane.Api.Services;
/// <summary>
/// Runs dotnet build or npm run build for individual projects in the repo.
/// Used by the Build Monitor tab in the control plane UI.
/// </summary>
public class ProjectBuildService(
IConfiguration config,
BuildHistoryService history,
ILogger<ProjectBuildService> logger)
{
public string RepoRoot => config["Docker:RepoRoot"] ?? string.Empty;
/// <summary>Known projects in the solution, returned to the UI for the build monitor grid.</summary>
public IReadOnlyList<ProjectDefinition> GetProjects()
{
if (string.IsNullOrWhiteSpace(RepoRoot)) return [];
return
[
new("Clarity.Server", BuildKind.DotnetProject, "Clarity.Server/Clarity.Server.csproj"),
new("Clarity.ServiceDefaults", BuildKind.DotnetProject, "Clarity.ServiceDefaults/Clarity.ServiceDefaults.csproj"),
new("frontend (Clarity.Server)", BuildKind.NpmProject, "frontend"),
];
}
/// <summary>
/// Builds a single project and streams output to <paramref name="onLine"/>.
/// </summary>
public async Task<BuildRecord> BuildProjectAsync(
string projectName,
Action<string> onLine,
CancellationToken ct)
{
var projects = GetProjects();
var def = projects.FirstOrDefault(p => p.Name == projectName);
if (def is null)
{
var err = new BuildRecord { Kind = BuildKind.DotnetProject, Target = projectName, Status = BuildStatus.Failed };
err.Log.Add($"Unknown project: {projectName}");
return err;
}
var record = await history.CreateBuildAsync(def.Kind, def.RelativePath);
record.Log.Add($"▶ Building {def.Name} [{def.Kind}]");
record.Log.Add($" Path: {def.RelativePath}");
record.Log.Add("──────────────────────────────────────");
onLine($"▶ Building {def.Name}");
try
{
var (exe, args, workDir) = def.Kind == BuildKind.NpmProject
? BuildNpmCommand(def.RelativePath)
: BuildDotnetCommand(def.RelativePath);
var psi = new ProcessStartInfo(exe, args)
{
WorkingDirectory = workDir,
RedirectStandardOutput = true,
RedirectStandardError = true,
UseShellExecute = false,
CreateNoWindow = true,
};
using var proc = new Process { StartInfo = psi, EnableRaisingEvents = true };
void HandleLine(string? line)
{
if (line is null) return;
record.Log.Add(line);
onLine(line);
// Non-blocking fire-and-forget flush
_ = history.AppendBuildLogAsync(record, line);
}
proc.OutputDataReceived += (_, e) => HandleLine(e.Data);
proc.ErrorDataReceived += (_, e) => HandleLine(e.Data);
proc.Start();
proc.BeginOutputReadLine();
proc.BeginErrorReadLine();
await proc.WaitForExitAsync(ct);
var status = proc.ExitCode == 0 ? BuildStatus.Succeeded : BuildStatus.Failed;
var summary = proc.ExitCode == 0 ? "✔ Build succeeded." : $"✖ Build failed (exit {proc.ExitCode}).";
onLine("──────────────────────────────────────");
onLine(summary);
record.Log.Add(summary);
await history.CompleteBuildAsync(record, status);
logger.LogInformation("Project build [{Name}] {Status}", def.Name, status);
return record;
}
catch (Exception ex)
{
onLine($"✖ Exception: {ex.Message}");
record.Log.Add($"Exception: {ex.Message}");
await history.CompleteBuildAsync(record, BuildStatus.Failed);
logger.LogError(ex, "Project build [{Name}] threw.", def.Name);
return record;
}
}
private (string exe, string args, string workDir) BuildDotnetCommand(string relativePath)
{
var fullPath = Path.Combine(RepoRoot, relativePath.Replace('/', Path.DirectorySeparatorChar));
return ("dotnet", $"build \"{fullPath}\" --configuration Release --nologo", RepoRoot);
}
private (string exe, string args, string workDir) BuildNpmCommand(string relativePath)
{
var workDir = Path.Combine(RepoRoot, relativePath.Replace('/', Path.DirectorySeparatorChar));
// npm on Windows needs cmd /c
return (OperatingSystem.IsWindows() ? "cmd" : "sh",
OperatingSystem.IsWindows() ? "/c npm run build" : "-c \"npm run build\"",
workDir);
}
}
public record ProjectDefinition(string Name, BuildKind Kind, string RelativePath);
@@ -0,0 +1,283 @@
using ControlPlane.Core.Models;
using Microsoft.Extensions.Configuration;
using Microsoft.Extensions.Logging;
using System.Diagnostics;
using System.Text.Json;
namespace ControlPlane.Api.Services;
/// <summary>
/// Handles all git operations for the promotion workflow:
/// branch status, diff summaries, merge + push, and promotion history persistence.
/// All git commands run against the repo root configured in Docker:RepoRoot.
/// </summary>
public class PromotionService(IConfiguration config, ILogger<PromotionService> logger)
{
// The ordered promotion ladder — each step is a valid promotion.
public static readonly string[] Ladder = ["develop", "staging", "uat", "master"];
private string RepoRoot => config["Docker:RepoRoot"] ?? string.Empty;
private static readonly SemaphoreSlim _lock = new(1, 1);
private static readonly JsonSerializerOptions JsonOpts = new()
{
WriteIndented = true,
PropertyNamingPolicy = JsonNamingPolicy.CamelCase,
Converters = { new System.Text.Json.Serialization.JsonStringEnumConverter() },
};
// ── Branch status ────────────────────────────────────────────────────────
/// <summary>
/// Returns status for all ladder branches: last commit info + ahead/behind counts vs next branch.
/// </summary>
public async Task<List<BranchStatus>> GetLadderStatusAsync(CancellationToken ct = default)
{
var result = new List<BranchStatus>();
// Fetch to get up-to-date remote state, but don't fail if we're offline
await RunGitAsync("fetch --all --quiet", ct, swallowErrors: true);
foreach (var branch in Ladder)
{
var exists = await BranchExistsAsync(branch, ct);
if (!exists)
{
result.Add(new BranchStatus(branch, false, null, null, 0, 0, []));
continue;
}
// Last commit on this branch
var lastCommit = await GitOutputAsync($"log {branch} -1 --format=%h|%an|%ad|%s --date=short", ct);
string? shortHash = null, author = null, date = null, subject = null;
if (!string.IsNullOrWhiteSpace(lastCommit))
{
var p = lastCommit.Trim().Split('|', 4);
if (p.Length == 4) (shortHash, author, date, subject) = (p[0], p[1], p[2], p[3]);
}
// Ahead/behind vs the NEXT branch in the ladder
int ahead = 0, behind = 0;
var nextIdx = Array.IndexOf(Ladder, branch) + 1;
if (nextIdx < Ladder.Length)
{
var next = Ladder[nextIdx];
if (await BranchExistsAsync(next, ct))
{
var counts = await GitOutputAsync($"rev-list --left-right --count {next}...{branch}", ct);
if (!string.IsNullOrWhiteSpace(counts))
{
var parts = counts.Trim().Split('\t');
if (parts.Length == 2)
{
int.TryParse(parts[0], out behind);
int.TryParse(parts[1], out ahead);
}
}
}
}
// Unreleased commit summaries (commits in this branch not yet in next)
string[] unreleasedLines = [];
if (ahead > 0 && nextIdx < Ladder.Length && await BranchExistsAsync(Ladder[nextIdx], ct))
{
var log = await GitOutputAsync($"log {Ladder[nextIdx]}..{branch} --oneline --no-decorate", ct);
unreleasedLines = log.Split('\n', StringSplitOptions.RemoveEmptyEntries);
}
result.Add(new BranchStatus(branch, true, shortHash, $"{author} · {date} · {subject}",
ahead, behind, unreleasedLines));
}
return result;
}
// ── Promotion ────────────────────────────────────────────────────────────
/// <summary>
/// Merges <paramref name="from"/> into <paramref name="to"/> with a no-fast-forward merge commit,
/// then pushes. Streams progress lines to <paramref name="onLine"/>.
/// </summary>
public async Task<PromotionRequest> PromoteAsync(
string from,
string to,
string requestedBy,
string? note,
Action<string> onLine,
CancellationToken ct)
{
if (!await _lock.WaitAsync(TimeSpan.Zero, ct))
{
var busy = new PromotionRequest { FromBranch = from, ToBranch = to, Status = PromotionStatus.Failed };
busy.Log.Add("⚠️ Another promotion is already in progress.");
return busy;
}
var req = new PromotionRequest
{
FromBranch = from,
ToBranch = to,
RequestedBy = requestedBy,
Note = note,
Status = PromotionStatus.Running,
};
void Log(string line) { req.Log.Add(line); onLine(line); }
try
{
Log($"▶ Promoting {from} → {to}");
if (!string.IsNullOrWhiteSpace(note)) Log($" Note: {note}");
Log("──────────────────────────────────────");
// 1. Fetch latest
Log(" git fetch --all");
await RunGitAsync("fetch --all --quiet", ct);
// 2. Checkout target branch
Log($" git checkout {to}");
await RunGitAsync($"checkout {to}", ct);
// 3. Pull target to latest
Log($" git pull origin {to}");
await RunGitAsync($"pull origin {to} --quiet", ct);
// 4. Count commits being promoted
var logOutput = await GitOutputAsync($"log {to}..{from} --oneline --no-decorate", ct);
var commitLines = logOutput.Split('\n', StringSplitOptions.RemoveEmptyEntries);
req.CommitCount = commitLines.Length;
req.CommitLines = commitLines;
Log($" Merging {commitLines.Length} commit(s) from {from}:");
foreach (var cl in commitLines) Log($" {cl}");
// 5. Merge with --no-ff for a clean promotion commit
var mergeMsg = $"chore: promote {from} → {to}" + (note != null ? $" — {note}" : "");
Log($" git merge --no-ff {from}");
await RunGitAsync($"merge --no-ff {from} -m \"{mergeMsg}\"", ct);
// 6. Push
Log($" git push origin {to}");
await RunGitAsync($"push origin {to}", ct);
// 7. Return to develop so the working tree stays clean
await RunGitAsync("checkout develop", ct, swallowErrors: true);
Log("──────────────────────────────────────");
Log($"✔ {from} → {to} promoted successfully at {DateTimeOffset.UtcNow:u}");
req.Status = PromotionStatus.Succeeded;
req.CompletedAt = DateTimeOffset.UtcNow;
}
catch (Exception ex)
{
Log($"✖ Promotion failed: {ex.Message}");
req.Status = PromotionStatus.Failed;
req.CompletedAt = DateTimeOffset.UtcNow;
// Try to abort any broken merge state
await RunGitAsync("merge --abort", ct, swallowErrors: true);
await RunGitAsync("checkout develop", ct, swallowErrors: true);
logger.LogError(ex, "Promotion {From}→{To} failed", from, to);
}
finally
{
await SaveAsync(req);
_lock.Release();
}
return req;
}
// ── History persistence ──────────────────────────────────────────────────
private string HistoryPath
{
get
{
var folder = config["ClientAssets__Folder"] ?? config["ClientAssets:Folder"]
?? Path.GetFullPath(Path.Combine(AppContext.BaseDirectory, "..", "ClientAssets"));
Directory.CreateDirectory(folder);
return Path.Combine(folder, "promotions.json");
}
}
private static readonly SemaphoreSlim _fileLock = new(1, 1);
private async Task SaveAsync(PromotionRequest req)
{
await _fileLock.WaitAsync();
try
{
var all = LoadHistory();
var idx = all.FindIndex(r => r.Id == req.Id);
if (idx >= 0) all[idx] = req; else all.Insert(0, req);
if (all.Count > 100) all = all[..100];
await File.WriteAllTextAsync(HistoryPath, JsonSerializer.Serialize(all, JsonOpts));
}
finally { _fileLock.Release(); }
}
public async Task<List<PromotionRequest>> GetHistoryAsync()
{
await _fileLock.WaitAsync();
try { return LoadHistory(); }
finally { _fileLock.Release(); }
}
private List<PromotionRequest> LoadHistory()
{
if (!File.Exists(HistoryPath)) return [];
try { return JsonSerializer.Deserialize<List<PromotionRequest>>(File.ReadAllText(HistoryPath), JsonOpts) ?? []; }
catch { return []; }
}
// ── Git helpers ──────────────────────────────────────────────────────────
private async Task<bool> BranchExistsAsync(string branch, CancellationToken ct)
{
var output = await GitOutputAsync($"branch --list {branch}", ct);
return !string.IsNullOrWhiteSpace(output);
}
private async Task<string> GitOutputAsync(string args, CancellationToken ct)
{
var psi = MakePsi(args);
using var proc = Process.Start(psi) ?? throw new InvalidOperationException("Failed to start git");
var output = await proc.StandardOutput.ReadToEndAsync(ct);
await proc.WaitForExitAsync(ct);
return output;
}
private async Task RunGitAsync(string args, CancellationToken ct, bool swallowErrors = false)
{
var psi = MakePsi(args);
using var proc = Process.Start(psi) ?? throw new InvalidOperationException("Failed to start git");
var stderr = await proc.StandardError.ReadToEndAsync(ct);
await proc.WaitForExitAsync(ct);
if (!swallowErrors && proc.ExitCode != 0)
throw new InvalidOperationException($"git {args} exited {proc.ExitCode}: {stderr.Trim()}");
logger.LogDebug("git {Args} → exit {Code}", args, proc.ExitCode);
}
private ProcessStartInfo MakePsi(string args) => new("git", args)
{
WorkingDirectory = RepoRoot,
RedirectStandardOutput = true,
RedirectStandardError = true,
UseShellExecute = false,
CreateNoWindow = true,
};
}
/// <summary>Current status of a single branch in the promotion ladder.</summary>
public record BranchStatus(
string Branch,
bool Exists,
string? ShortHash,
string? LastCommitSummary,
int AheadOfNext, // commits this branch has that the next doesn't
int BehindNext, // commits next has that this branch doesn't (shouldn't happen in clean flow)
string[] UnreleasedLines // oneline log of the ahead commits
);
+191
View File
@@ -0,0 +1,191 @@
using ControlPlane.Core.Models;
using ControlPlane.Core.Services;
using Docker.DotNet;
using Docker.DotNet.Models;
using Microsoft.Extensions.Configuration;
using Microsoft.Extensions.Logging;
namespace ControlPlane.Api.Services;
/// <summary>
/// Orchestrates a release: finds all managed tenant containers matching the target
/// environment, removes each one, and restarts it from the latest clarity-server image.
/// Does NOT re-run Keycloak/Vault/DB steps — the container env vars are preserved from
/// the original provisioning and re-injected from the XML registry.
/// </summary>
public class ReleaseService(
IConfiguration config,
TenantRegistryService registry,
BuildHistoryService history,
ILogger<ReleaseService> logger)
{
private static readonly SemaphoreSlim _lock = new(1, 1);
public bool IsReleasing => _lock.CurrentCount == 0;
public string ImageName => config["Docker:ClarityServerImage"] ?? "clarity-server:latest";
/// <summary>
/// Runs a release for the given environment and streams status lines to <paramref name="onLine"/>.
/// </summary>
public async Task<ReleaseRecord> ReleaseAsync(
string targetEnv,
Action<string> onLine,
CancellationToken ct)
{
if (!await _lock.WaitAsync(TimeSpan.Zero, ct))
{
onLine("⚠️ A release is already in progress.");
var blocked = new ReleaseRecord
{
Environment = targetEnv,
ImageName = ImageName,
Status = ReleaseStatus.Failed,
FinishedAt = DateTimeOffset.UtcNow,
};
blocked.Tenants.Add(new TenantReleaseResult
{
Subdomain = "*", ContainerName = "*",
Success = false, Error = "Release already in progress.",
});
return blocked;
}
var record = await history.CreateReleaseAsync(targetEnv, ImageName);
try
{
onLine($"▶ Release to [{targetEnv}] using {ImageName}");
onLine("──────────────────────────────────────");
var socketUri = config["Docker:Socket"] ?? "npipe://./pipe/docker_engine";
using var docker = new DockerClientConfiguration(new Uri(socketUri)).CreateClient();
// Find all managed tenant containers for this environment
var filterEnv = targetEnv == "all"
? new Dictionary<string, IDictionary<string, bool>>
{
["label"] = new Dictionary<string, bool> { ["clarity.managed=true"] = true },
}
: new Dictionary<string, IDictionary<string, bool>>
{
["label"] = new Dictionary<string, bool>
{
["clarity.managed=true"] = true,
[$"clarity.env={targetEnv}"] = true,
},
};
var containers = await docker.Containers.ListContainersAsync(
new ContainersListParameters { All = true, Filters = filterEnv }, ct);
if (containers.Count == 0)
{
onLine($" No managed containers found for environment [{targetEnv}].");
record.Status = ReleaseStatus.Succeeded;
record.FinishedAt = DateTimeOffset.UtcNow;
await history.UpdateReleaseAsync(record);
return record;
}
onLine($" Found {containers.Count} container(s) to redeploy.");
onLine("");
int succeeded = 0, failed = 0;
foreach (var container in containers)
{
var name = container.Names.FirstOrDefault()?.TrimStart('/') ?? container.ID[..12];
var tenantResult = new TenantReleaseResult
{
ContainerName = name,
Subdomain = container.Labels.TryGetValue("clarity.subdomain", out var sub) ? sub : name,
};
record.Tenants.Add(tenantResult);
try
{
onLine($" → {name}");
// Read env vars from existing container — preserve Keycloak/Vault/DB config
var inspect = await docker.Containers.InspectContainerAsync(container.ID, ct);
var env = inspect.Config.Env;
var labels = inspect.Config.Labels;
var network = inspect.HostConfig.NetworkMode;
// Stop and remove old container
onLine($" Stopping...");
try
{
await docker.Containers.StopContainerAsync(
container.ID, new ContainerStopParameters { WaitBeforeKillSeconds = 8 }, ct);
await docker.Containers.RemoveContainerAsync(
container.ID, new ContainerRemoveParameters { Force = true }, ct);
}
catch (Exception ex)
{
logger.LogWarning(ex, "Stop/remove failed for {Name}, forcing removal.", name);
await docker.Containers.RemoveContainerAsync(
container.ID, new ContainerRemoveParameters { Force = true }, ct);
}
// Create fresh container from latest image, preserving all env vars and labels
onLine($" Creating from {ImageName}...");
var created = await docker.Containers.CreateContainerAsync(
new CreateContainerParameters
{
Name = name,
Image = ImageName,
Env = env,
Labels = labels,
HostConfig = new HostConfig
{
NetworkMode = network,
RestartPolicy = new RestartPolicy { Name = RestartPolicyKind.UnlessStopped },
},
}, ct);
// Start it
var started = await docker.Containers.StartContainerAsync(created.ID, null, ct);
if (!started) throw new InvalidOperationException("Docker returned false for start.");
onLine($" ✔ {name} redeployed.");
tenantResult.Success = true;
succeeded++;
}
catch (Exception ex)
{
logger.LogError(ex, "Failed to redeploy {Name}.", name);
onLine($" ✖ {name} failed: {ex.Message}");
tenantResult.Success = false;
tenantResult.Error = ex.Message;
failed++;
}
await history.UpdateReleaseAsync(record);
}
record.Status = failed == 0 ? ReleaseStatus.Succeeded
: succeeded == 0 ? ReleaseStatus.Failed
: ReleaseStatus.PartialFailure;
record.FinishedAt = DateTimeOffset.UtcNow;
onLine("");
onLine("──────────────────────────────────────");
onLine($"{(record.Status == ReleaseStatus.Succeeded ? "" : "")} Release complete — {succeeded} succeeded, {failed} failed.");
}
catch (Exception ex)
{
logger.LogError(ex, "Release to [{Env}] threw an unhandled exception.", targetEnv);
record.Status = ReleaseStatus.Failed;
record.FinishedAt = DateTimeOffset.UtcNow;
onLine($"✖ Release aborted: {ex.Message}");
}
finally
{
await history.UpdateReleaseAsync(record);
_lock.Release();
}
return record;
}
}
+38
View File
@@ -0,0 +1,38 @@
using ControlPlane.Core.Messages;
using System.Collections.Concurrent;
using System.Threading.Channels;
namespace ControlPlane.Api.Services;
/// <summary>
/// Thin in-process pub/sub for SSE. MassTransit consumer writes here;
/// the SSE endpoint reads and streams to the browser.
/// </summary>
public sealed class SseEventBus
{
private readonly ConcurrentDictionary<Guid, List<Channel<ProvisioningProgressEvent>>> _subs = new();
public void Publish(ProvisioningProgressEvent evt)
{
if (!_subs.TryGetValue(evt.JobId, out var channels)) return;
lock (channels)
foreach (var ch in channels)
ch.Writer.TryWrite(evt);
}
public Channel<ProvisioningProgressEvent> Subscribe(Guid jobId)
{
var ch = Channel.CreateUnbounded<ProvisioningProgressEvent>();
_subs.GetOrAdd(jobId, _ => []).Add(ch);
return ch;
}
public void Unsubscribe(Guid jobId, Channel<ProvisioningProgressEvent> channel)
{
if (_subs.TryGetValue(jobId, out var channels))
{
lock (channels) channels.Remove(channel);
channel.Writer.TryComplete();
}
}
}
+84
View File
@@ -0,0 +1,84 @@
using System.Formats.Tar;
using System.IO.Compression;
namespace ControlPlane.Api.Services;
/// <summary>
/// Creates a gzipped tar stream from a directory, respecting .dockerignore rules.
/// Used to supply the Docker build context to the Docker SDK.
/// </summary>
internal static class TarHelper
{
private static readonly string[] DefaultIgnore =
[
".git", ".vs", ".vscode", "node_modules", "bin", "obj",
"VaultData", "*.user", "*.suo",
];
public static void Pack(string root, Stream destination)
{
var ignorePatterns = LoadDockerIgnore(root);
using var gz = new GZipStream(destination, CompressionLevel.Fastest, leaveOpen: true);
using var tar = new TarWriter(gz, TarEntryFormat.Gnu, leaveOpen: false);
foreach (var file in Directory.EnumerateFiles(root, "*", SearchOption.AllDirectories))
{
var relative = Path.GetRelativePath(root, file).Replace('\\', '/');
if (ShouldIgnore(relative, ignorePatterns))
continue;
var entry = new GnuTarEntry(TarEntryType.RegularFile, relative)
{
DataStream = File.OpenRead(file),
};
tar.WriteEntry(entry);
}
}
private static List<string> LoadDockerIgnore(string root)
{
var path = Path.Combine(root, ".dockerignore");
var patterns = new List<string>(DefaultIgnore);
if (!File.Exists(path)) return patterns;
foreach (var line in File.ReadAllLines(path))
{
var trimmed = line.Trim();
if (!string.IsNullOrEmpty(trimmed) && !trimmed.StartsWith('#'))
patterns.Add(trimmed);
}
return patterns;
}
private static bool ShouldIgnore(string relativePath, List<string> patterns)
{
var segments = relativePath.Split('/');
foreach (var pattern in patterns)
{
var p = pattern.TrimStart('/').TrimEnd('/');
// Glob suffix match (e.g. *.user)
if (p.StartsWith('*'))
{
if (relativePath.EndsWith(p[1..], StringComparison.OrdinalIgnoreCase))
return true;
continue;
}
// Exact full-path match or root-anchored prefix (e.g. .git, .vs)
if (relativePath.Equals(p, StringComparison.OrdinalIgnoreCase))
return true;
if (relativePath.StartsWith(p + "/", StringComparison.OrdinalIgnoreCase))
return true;
// Match any path segment so that nested bin/, obj/, node_modules/ etc. are caught
// regardless of which project subdirectory they live in.
if (segments.Any(seg => seg.Equals(p, StringComparison.OrdinalIgnoreCase)))
return true;
}
return false;
}
}
+21
View File
@@ -0,0 +1,21 @@
{
"Logging": {
"LogLevel": {
"Default": "Information",
"Microsoft.AspNetCore": "Warning"
}
},
"AllowedHosts": "*",
"OpenRouter": {
    "ApiKey": "<REDACTED — leaked key must be rotated; load via user-secrets or env OpenRouter__ApiKey>"
},
"Git": {
"RepoRoot": "C:\\Users\\amadzarak\\source\\repos\\Clarity"
},
"Gitea": {
"BaseUrl": "https://opc.clarity.test",
"Owner": "Clarity",
"Repo": "Clarity",
    "Token": "<REDACTED — leaked token must be rotated; load via user-secrets or env Gitea__Token>"
}
}