22 Commits

Author SHA1 Message Date
amadzarak 2badb5264b OPC # 0006: OPC Git Trunk-Based management 2026-04-26 14:40:05 -04:00
amadzarak bb0c6e08c7 OPC # 0006: OPC Git Trunk-Based management
Co-authored-by: Copilot <copilot@github.com>
2026-04-26 14:30:10 -04:00
amadzarak 5e969a2b3e OPC # 0006: OPC Git Trunk-Based management
Co-authored-by: Copilot <copilot@github.com>
2026-04-26 13:45:05 -04:00
amadzarak 571f0bf2a4 OPC # 0006: OPC Git Trunk-Based management 2026-04-26 13:14:06 -04:00
amadzarak b9f0f6dd5f OPC # 0007: Patch FDEV provisioning for local aspire development
Co-authored-by: Copilot <copilot@github.com>
2026-04-26 12:48:07 -04:00
amadzarak e8ac7b017c OPC # 0001: Diff viewer improvements 2026-04-26 11:54:50 -04:00
amadzarak 79c69e1363 OPC # 0006: OPC Git Trunk-Based management
Co-authored-by: Copilot <copilot@github.com>
2026-04-26 11:54:24 -04:00
amadzarak 553ea59d39 OPC # 0006: OPC Git Trunk-Based management 2026-04-26 11:32:23 -04:00
amadzarak 9ff1488bb5 OPC # 0006: OPC Git Trunk-Based management
Co-authored-by: Copilot <copilot@github.com>
2026-04-26 11:25:38 -04:00
amadzarak 6396fc8cc5 OPC # 0006: OPC Git Trunk-Based management 2026-04-26 11:09:15 -04:00
amadzarak b26cc1c0b6 OPC # 0006: OPC Git Trunk-Based management
Co-authored-by: Copilot <copilot@github.com>
2026-04-26 00:38:10 -04:00
amadzarak db025cce01 OPC # 0006: OPC Git Trunk-Based management
Co-authored-by: Copilot <copilot@github.com>
2026-04-26 00:26:56 -04:00
amadzarak 885ad47abe OPC # 0002: Improvements to Client provisioning workflows
Co-authored-by: Copilot <copilot@github.com>
2026-04-25 22:59:50 -04:00
amadzarak 7e360749b9 OPC # 0003: Move Gitea out of OPC Aspire Orchestration 2026-04-25 22:51:03 -04:00
amadzarak 5009f6e688 OPC # 0002: Fix issue from docker root directory change
Co-authored-by: Copilot <copilot@github.com>
2026-04-25 22:36:53 -04:00
amadzarak a27febdd55 OPC # 0002: Fix BOM issue in nginx.conf and dnsmasq.conf 2026-04-25 22:27:20 -04:00
amadzarak 80050cbedb OPC # 0002: Improvements to Client provisioning workflows 2026-04-25 22:24:40 -04:00
amadzarak deb160cdbc Update .gitignore 2026-04-25 22:23:48 -04:00
amadzarak e340b42223 OPC # 0002: Improvements to Client provisioning workflows 2026-04-25 21:58:44 -04:00
amadzarak 378daf98d6 OPC # 0002: Improvements to Client provisioning workflows
Co-authored-by: Copilot <copilot@github.com>
2026-04-25 21:57:42 -04:00
amadzarak 35fe82d225 OPC # 0002: Improvements to Client provisioning workflows 2026-04-25 21:33:28 -04:00
amadzarak 98049f3c50 OPC # 0002: Improvements to Client provisioning workflows 2026-04-25 20:56:32 -04:00
103 changed files with 4869 additions and 1123 deletions
+4
View File
@@ -369,3 +369,7 @@ ClientAssets/
infra/vault/data/
infra/vault/data/init.json
opc_export.sql
# TLS certs & keys generated locally — never commit private keys
infra/nginx/*.key
infra/nginx/*.crt
@@ -1,5 +1,8 @@
using ControlPlane.Api.Services;
using System.Diagnostics;
using System.Text.Json;
using System.Text.RegularExpressions;
using ControlPlane.Api.Services;
using ControlPlane.Core.Services;
namespace ControlPlane.Api.Endpoints;
@@ -12,8 +15,14 @@ public static class ImageBuildEndpoints
var group = app.MapGroup("/api/image").WithTags("Image");
group.MapGet("/status", GetStatus);
group.MapGet("/history", GetHistory);
group.MapPost("/build", TriggerBuild);
// Post-provisioning verification helpers
group.MapGet ("/verify/extra-hosts/{containerName}", GetExtraHosts);
group.MapPost("/verify/dns-test", DnsTest);
group.MapGet ("/artifact/{subdomain}", GetArtifact);
return app;
}
@@ -21,6 +30,26 @@ public static class ImageBuildEndpoints
private static async Task<IResult> GetStatus(ImageBuildService svc) =>
Results.Ok(await svc.GetStatusAsync());
/// <summary>Returns recent DockerImage build records for the sparkline chart.</summary>
private static async Task<IResult> GetHistory(BuildHistoryService history, int limit = 30)
{
var all = await history.GetBuildsAsync();
var records = all
.Where(b => b.Kind == ControlPlane.Core.Models.BuildKind.DockerImage)
.Take(Math.Clamp(limit, 1, 100))
.Select(b => new
{
b.Id,
b.Status,
b.StartedAt,
b.DurationMs,
b.CommitSha,
b.ImageDigest,
})
.ToList();
return Results.Ok(records);
}
/// <summary>
/// Triggers a docker build and streams the output line-by-line as SSE.
/// The build context is the repo root, which must be configured via
@@ -72,4 +101,118 @@ public static class ImageBuildEndpoints
await ctx.Response.WriteAsync("data: {\"done\":true}\n\n", ct);
await ctx.Response.Body.FlushAsync(ct);
}
// ── Post-provisioning verification endpoints ──────────────────────────────
private static readonly Regex SafeContainerName = new(@"^[a-zA-Z0-9_.\-]+$", RegexOptions.Compiled);
/// <summary>
/// Returns the ExtraHosts list for a running tenant container.
/// e.g. GET /api/image/verify/extra-hosts/fdev-app-clarity-01000001
/// </summary>
private static async Task<IResult> GetExtraHosts(string containerName)
{
if (!SafeContainerName.IsMatch(containerName))
return Results.BadRequest(new { error = "Invalid container name." });
var (code, output) = await DockerRunAsync($"inspect --format {{{{json .HostConfig.ExtraHosts}}}} {containerName}");
if (code != 0 || string.IsNullOrWhiteSpace(output))
return Results.NotFound(new { error = $"Container '{containerName}' not found or not running.", detail = output });
try
{
var hosts = JsonDocument.Parse(output.Trim()).RootElement;
return Results.Ok(new { containerName, extraHosts = hosts });
}
catch
{
return Results.Ok(new { containerName, extraHosts = (object?)null, raw = output.Trim() });
}
}
/// <summary>
/// Runs <c>curl</c> from inside the container to verify *.clarity.test DNS resolves through nginx.
/// POST /api/image/verify/dns-test body: { container, url }
/// </summary>
private static async Task<IResult> DnsTest(DnsTestRequest body)
{
if (!SafeContainerName.IsMatch(body.Container))
return Results.BadRequest(new { error = "Invalid container name." });
// Only allow http/https URLs — prevents command injection via url field
if (!body.Url.StartsWith("http://", StringComparison.OrdinalIgnoreCase) &&
!body.Url.StartsWith("https://", StringComparison.OrdinalIgnoreCase))
return Results.BadRequest(new { error = "URL must start with http:// or https://." });
var psi = new ProcessStartInfo("docker",
$"exec {body.Container} curl -sf --max-time 10 --write-out \"\\nHTTP %{{http_code}}\" {body.Url}")
{
RedirectStandardOutput = true,
RedirectStandardError = true,
UseShellExecute = false,
CreateNoWindow = true,
};
using var proc = Process.Start(psi);
if (proc is null) return Results.Problem("Failed to start docker process.");
var stdout = await proc.StandardOutput.ReadToEndAsync();
var stderr = await proc.StandardError.ReadToEndAsync();
await proc.WaitForExitAsync();
return Results.Ok(new
{
success = proc.ExitCode == 0,
exitCode = proc.ExitCode,
output = stdout.Trim(),
error = stderr.Trim(),
});
}
/// <summary>
/// Reads the generated docker-compose.yml from ClientAssets/{subdomain}/.
/// GET /api/image/artifact/{subdomain}
/// </summary>
private static async Task<IResult> GetArtifact(string subdomain, IConfiguration config)
{
// Restrict subdomain to safe characters — prevents path traversal
if (!SafeContainerName.IsMatch(subdomain))
return Results.BadRequest(new { error = "Invalid subdomain." });
var root = config["ClientAssets__Folder"] ?? config["ClientAssets:Folder"]
?? Path.GetFullPath(Path.Combine(AppContext.BaseDirectory, "..", "ClientAssets"));
// Use Path.GetFileName to ensure no directory traversal
var safeSubdomain = Path.GetFileName(subdomain);
var composePath = Path.GetFullPath(Path.Combine(root, safeSubdomain, "docker-compose.yml"));
// Verify the final path is still inside the ClientAssets root
if (!composePath.StartsWith(Path.GetFullPath(root), StringComparison.OrdinalIgnoreCase))
return Results.BadRequest(new { error = "Invalid subdomain path." });
if (!File.Exists(composePath))
return Results.NotFound(new { error = $"No compose artifact found for '{subdomain}'." });
var content = await File.ReadAllTextAsync(composePath);
return Results.Ok(new { subdomain, path = composePath, content });
}
private static async Task<(int code, string? output)> DockerRunAsync(string args)
{
var psi = new ProcessStartInfo("docker", args)
{
RedirectStandardOutput = true,
RedirectStandardError = true,
UseShellExecute = false,
CreateNoWindow = true,
};
using var proc = Process.Start(psi);
if (proc is null) return (-1, null);
var output = await proc.StandardOutput.ReadToEndAsync();
var err = await proc.StandardError.ReadToEndAsync();
await proc.WaitForExitAsync();
return (proc.ExitCode, string.IsNullOrWhiteSpace(output) ? err : output);
}
private record DnsTestRequest(string Container, string Url);
}
+48 -3
View File
@@ -15,6 +15,8 @@ public static class InfraEndpoints
g.MapPost("/{container}/stop", (string container) => ServiceAction(container, "stop"));
g.MapPost("/{container}/restart",(string container) => ServiceAction(container, "restart"));
g.MapGet ("/compose/up/stream", ComposeUpStream);
g.MapGet ("/compose/up-force/stream", ComposeUpForceStream);
g.MapGet ("/compose/nuke/stream", ComposeNukeStream);
g.MapGet ("/compose/down/stream", ComposeDownStream);
return app;
@@ -121,20 +123,58 @@ public static class InfraEndpoints
: Results.Problem(output ?? "Docker command failed", statusCode: 500);
}
// Starts all platform services; --remove-orphans cleans up containers with stale names
// (e.g. a leftover clarity-dnsmasq that causes the "name already in use" conflict).
private static Task ComposeUpStream(HttpContext ctx, IConfiguration config, CancellationToken ct) =>
StreamComposeOutput(ctx, config, "up --pull missing", ct);
StreamComposeOutput(ctx, config, "up -d --remove-orphans", ct);
// Force-recreates every container regardless of config drift — use after image or compose changes.
private static Task ComposeUpForceStream(HttpContext ctx, IConfiguration config, CancellationToken ct) =>
StreamComposeOutput(ctx, config, "up -d --force-recreate --remove-orphans", ct);
// Nuke: force-removes every known platform container by name first (kills orphans that
// --remove-orphans won't touch because they belong to a different compose project),
// then runs a fresh compose up.
private static async Task ComposeNukeStream(HttpContext ctx, IConfiguration config, CancellationToken ct)
{
ctx.Response.Headers.ContentType = "text/event-stream";
ctx.Response.Headers.CacheControl = "no-cache";
ctx.Response.Headers.Connection = "keep-alive";
async Task Send(string line)
{
await ctx.Response.WriteAsync($"data: {line}\n\n", ct);
await ctx.Response.Body.FlushAsync(ct);
}
await Send("▶ Removing all known platform containers…");
foreach (var container in PlatformContainers)
{
var (code, _) = await DockerAsync($"rm -f {container}");
await Send(code == 0
? $" ✔ removed {container}"
: $" · {container} not found (skipped)");
}
await Send("▶ Running compose up…");
await StreamComposeOutput(ctx, config, "up -d", ct, skipHeaders: true);
}
private static Task ComposeDownStream(HttpContext ctx, IConfiguration config, CancellationToken ct) =>
StreamComposeOutput(ctx, config, "down", ct);
private static async Task StreamComposeOutput(
HttpContext ctx, IConfiguration config, string composeArgs, CancellationToken ct)
HttpContext ctx, IConfiguration config, string composeArgs, CancellationToken ct,
bool skipHeaders = false)
{
var infraDir = ResolveInfraPath(config);
if (!skipHeaders)
{
ctx.Response.Headers.ContentType = "text/event-stream";
ctx.Response.Headers.CacheControl = "no-cache";
ctx.Response.Headers.Connection = "keep-alive";
}
var channel = System.Threading.Channels.Channel.CreateUnbounded<string?>(
new System.Threading.Channels.UnboundedChannelOptions { SingleWriter = false, SingleReader = true });
@@ -186,9 +226,14 @@ public static class InfraEndpoints
private static string ResolveInfraPath(IConfiguration config)
{
var configured = config["Infra:Path"];
if (!string.IsNullOrWhiteSpace(configured))
return Path.GetFullPath(configured);
// Docker:RepoRoot is ClarityStack/ root — infra lives under OPC/
var repoRoot = config["Docker:RepoRoot"]
?? Path.GetFullPath(Path.Combine(AppContext.BaseDirectory, "..", "..", "..", ".."));
return Path.GetFullPath(Path.Combine(repoRoot, "infra"));
return Path.GetFullPath(Path.Combine(repoRoot, "OPC", "infra"));
}
private static Task<(int Code, string? Output)> DockerAsync(string args) =>
@@ -1,5 +1,6 @@
using ControlPlane.Api.Services;
using ControlPlane.Core.Models;
using ControlPlane.Core.Services;
using System.Text.Json;
namespace ControlPlane.Api.Endpoints;
@@ -12,9 +13,9 @@ public static class PromotionEndpoints
{
var g = app.MapGroup("/api/promotions").WithTags("Promotions");
// GET /api/promotions/ladder — branch status for all 4 ladder branches
g.MapGet("/ladder", async (PromotionService svc, CancellationToken ct) =>
Results.Ok(await svc.GetLadderStatusAsync(ct)));
// GET /api/promotions/ladder?repo=Clarity — branch status for all 4 ladder branches
g.MapGet("/ladder", async (PromotionService svc, CancellationToken ct, string repo = "Clarity") =>
Results.Ok(await svc.GetLadderStatusAsync(repo, ct)));
// GET /api/promotions/history
g.MapGet("/history", async (PromotionService svc) =>
@@ -50,7 +51,7 @@ public static class PromotionEndpoints
void OnLine(string line) => channel.Writer.TryWrite(line);
var promoteTask = Task.Run(() =>
svc.PromoteAsync(req.From, req.To, req.RequestedBy ?? "system", req.Note, OnLine, ct), ct)
svc.PromoteAsync(req.From, req.To, req.RequestedBy ?? "system", req.Note, OnLine, ct, req.Repo ?? "Clarity"), ct)
.ContinueWith(t => channel.Writer.TryComplete(t.Exception), TaskScheduler.Default);
await foreach (var line in channel.Reader.ReadAllAsync(ct))
@@ -66,8 +67,144 @@ public static class PromotionEndpoints
await ctx.Response.Body.FlushAsync(ct);
});
// POST /api/promotions/reset — body: { branch, toSha, repo }
// Force-resets a downstream branch to a specific SHA (e.g. to recover from a GitFlow merge commit).
// Only allowed for staging/uat — never develop or main.
g.MapPost("/reset", async (PromotionService svc, ResetBranchRequest req, CancellationToken ct) =>
{
var allowed = new[] { "staging", "uat" };
if (!allowed.Contains(req.Branch))
return Results.BadRequest(new { error = $"Reset is only allowed for: {string.Join(", ", allowed)}." });
try
{
await svc.ResetBranchAsync(req.Branch, req.ToSha, req.Repo ?? "Clarity", ct);
return Results.Ok(new { reset = req.Branch, toSha = req.ToSha });
}
catch (Exception ex)
{
return Results.BadRequest(new { error = ex.Message });
}
});
// POST /api/promotions/cherry-pick — body: { shas, from, to, requestedBy, note, repo }
// Streams SSE log lines then sends {done, promotion} when complete.
// Unlike a full promote, cherry-pick applies selected commits as copies — branches will diverge.
g.MapPost("/cherry-pick", async (
HttpContext ctx,
PromotionService svc,
CherryPickRequest req,
CancellationToken ct) =>
{
var ladder = PromotionService.Ladder;
var fi = Array.IndexOf(ladder, req.From);
var ti = Array.IndexOf(ladder, req.To);
if (fi < 0 || ti < 0 || ti != fi + 1)
{
ctx.Response.StatusCode = 400;
await ctx.Response.WriteAsJsonAsync(
new { error = $"Invalid cherry-pick target: {req.From} → {req.To}. Must be adjacent in ladder." }, ct);
return;
}
if (req.Shas is null || req.Shas.Length == 0)
{
ctx.Response.StatusCode = 400;
await ctx.Response.WriteAsJsonAsync(
new { error = "No commits specified for cherry-pick." }, ct);
return;
}
ctx.Response.Headers.ContentType = "text/event-stream";
ctx.Response.Headers.CacheControl = "no-cache";
ctx.Response.Headers.Connection = "keep-alive";
var channel = System.Threading.Channels.Channel.CreateUnbounded<string?>(
new System.Threading.Channels.UnboundedChannelOptions { SingleWriter = true, SingleReader = true });
void OnLine(string line) => channel.Writer.TryWrite(line);
var cpTask = Task.Run(() =>
svc.CherryPickAsync(req.Shas, req.From, req.To, req.RequestedBy ?? "system", req.Note, OnLine, ct, req.Repo ?? "Clarity"), ct)
.ContinueWith(t => channel.Writer.TryComplete(t.Exception), TaskScheduler.Default);
await foreach (var line in channel.Reader.ReadAllAsync(ct))
{
var json = JsonSerializer.Serialize(new { line }, JsonOpts);
await ctx.Response.WriteAsync($"data: {json}\n\n", ct);
await ctx.Response.Body.FlushAsync(ct);
}
var promotion = await cpTask;
var doneJson = JsonSerializer.Serialize(new { done = true, promotion }, JsonOpts);
await ctx.Response.WriteAsync($"data: {doneJson}\n\n", ct);
await ctx.Response.Body.FlushAsync(ct);
});
// GET /api/promotions/conformance?repo=Clarity
// Returns a full TBD conformance report: which branches are diverged, missing, or stale.
g.MapGet("/conformance", async (PromotionService svc, CancellationToken ct, string repo = "Clarity") =>
Results.Ok(await svc.GetConformanceAsync(repo, ct)));
// GET /api/promotions/conformance/all
// Returns conformance reports for all configured repos (Clarity, OPC, Gateway).
g.MapGet("/conformance/all", async (PromotionService svc, IConfiguration config, CancellationToken ct) =>
{
var allRepos = new[] { "Clarity", "OPC", "Gateway" };
var configured = allRepos
.Where(r => !string.IsNullOrWhiteSpace(config[$"Git:Repos:{r}"]))
.ToArray();
var tasks = configured.Select(r => svc.GetConformanceAsync(r, ct));
var results = await Task.WhenAll(tasks);
return Results.Ok(results);
});
// POST /api/promotions/create-branch — body: { branch, fromSha, repo }
// Creates a missing ladder branch at the given SHA and pushes to origin.
g.MapPost("/create-branch", async (PromotionService svc, CreateLadderBranchRequest req, CancellationToken ct) =>
{
var allowed = new[] { "staging", "uat", "main" };
if (!allowed.Contains(req.Branch))
return Results.BadRequest(new { error = $"Create-branch is only allowed for: {string.Join(", ", allowed)}." });
try
{
await svc.CreateBranchAsync(req.Branch, req.FromSha, req.Repo ?? "Clarity", ct);
return Results.Ok(new { created = req.Branch, fromSha = req.FromSha });
}
catch (Exception ex)
{
return Results.BadRequest(new { error = ex.Message });
}
});
// GET /api/promotions/build-gate?sha={sha}
// Returns the build-gate status for the given commit SHA.
// If status is "Red", the promote button in the UI should be disabled.
g.MapGet("/build-gate", async (string sha, BuildHistoryService history, CancellationToken ct) =>
{
var builds = await history.GetBuildsByShaAsync(sha);
var latest = builds.MaxBy(b => b.StartedAt);
if (latest is null)
return Results.Ok(new { status = "Unknown", sha, buildId = (string?)null, buildStatus = (string?)null });
var gateStatus = latest.Status switch
{
BuildStatus.Succeeded => "Green",
BuildStatus.Failed => "Red",
BuildStatus.Running => "Running",
_ => "Unknown",
};
return Results.Ok(new { status = gateStatus, sha, buildId = latest.Id, buildStatus = latest.Status.ToString() });
});
return app;
}
}
public record PromoteRequest(string From, string To, string? RequestedBy, string? Note);
public record PromoteRequest(string From, string To, string? RequestedBy, string? Note, string? Repo);
public record ResetBranchRequest(string Branch, string ToSha, string? Repo);
public record CherryPickRequest(string[] Shas, string From, string To, string? RequestedBy, string? Note, string? Repo);
public record CreateLadderBranchRequest(string Branch, string FromSha, string? Repo);
+44 -2
View File
@@ -32,9 +32,10 @@ builder.Services.AddSingleton<PromotionService>();
// OPC persistence (raw Npgsql)
var opcConnStr = builder.Configuration.GetConnectionString("opcdb");
if (!string.IsNullOrWhiteSpace(opcConnStr))
builder.Services.AddSingleton(NpgsqlDataSource.Create(opcConnStr));
// Replace 'localhost' with '127.0.0.1' to avoid Npgsql trying [::1] first on Windows
builder.Services.AddSingleton(NpgsqlDataSource.Create(opcConnStr.Replace("localhost", "127.0.0.1")));
else
builder.Services.AddSingleton(NpgsqlDataSource.Create("Host=localhost;Database=opcdb;Username=postgres;Password=controlplane-dev"));
builder.Services.AddSingleton(NpgsqlDataSource.Create("Host=127.0.0.1;Port=5433;Database=opcdb;Username=postgres;Password=controlplane-dev"));
builder.Services.AddScoped<OpcService>();
// Named HttpClient for OpenRouter AI assist proxy
@@ -125,7 +126,48 @@ await using (var cmd = ds.CreateCommand("""
CREATE INDEX IF NOT EXISTS ix_opc_artifact_opc_id ON opc_artifact(opc_id);
CREATE INDEX IF NOT EXISTS ix_opc_artifact_type ON opc_artifact(opc_id, artifact_type);
CREATE INDEX IF NOT EXISTS ix_opc_pinned_commit_opc_id ON opc_pinned_commit(opc_id);
-- Build + Release history
CREATE TABLE IF NOT EXISTS build_record (
id VARCHAR(8) PRIMARY KEY,
kind VARCHAR(20) NOT NULL,
target VARCHAR(500) NOT NULL,
status VARCHAR(20) NOT NULL DEFAULT 'Running',
started_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
finished_at TIMESTAMPTZ,
duration_ms INTEGER,
image_digest VARCHAR(200),
commit_sha VARCHAR(40),
log TEXT NOT NULL DEFAULT ''
);
CREATE TABLE IF NOT EXISTS release_record (
id VARCHAR(8) PRIMARY KEY,
environment VARCHAR(50) NOT NULL,
image_name VARCHAR(200) NOT NULL,
status VARCHAR(20) NOT NULL DEFAULT 'Running',
started_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
finished_at TIMESTAMPTZ
);
CREATE TABLE IF NOT EXISTS release_tenant_result (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
release_id VARCHAR(8) NOT NULL REFERENCES release_record(id) ON DELETE CASCADE,
subdomain VARCHAR(200) NOT NULL,
container_name VARCHAR(200) NOT NULL,
success BOOLEAN NOT NULL DEFAULT FALSE,
error TEXT
);
CREATE INDEX IF NOT EXISTS ix_build_record_started_at ON build_record(started_at DESC);
CREATE INDEX IF NOT EXISTS ix_build_record_kind ON build_record(kind);
CREATE INDEX IF NOT EXISTS ix_release_record_started_at ON release_record(started_at DESC);
CREATE INDEX IF NOT EXISTS ix_release_tenant_release_id ON release_tenant_result(release_id);
"""))
await cmd.ExecuteNonQueryAsync();
// Idempotent column additions for schema migrations
await using (var migCmd = ds.CreateCommand("""
ALTER TABLE release_record ADD COLUMN IF NOT EXISTS opc_numbers TEXT[] NOT NULL DEFAULT '{}';
ALTER TABLE release_record ADD COLUMN IF NOT EXISTS commit_sha VARCHAR(40);
"""))
await migCmd.ExecuteNonQueryAsync();
app.Run();
+10 -1
View File
@@ -2,6 +2,7 @@ using ControlPlane.Core.Models;
using ControlPlane.Core.Services;
using Docker.DotNet;
using Docker.DotNet.Models;
using LibGit2Sharp;
namespace ControlPlane.Api.Services;
@@ -49,13 +50,21 @@ public class ImageBuildService(
var record = await history.CreateBuildAsync(BuildKind.DockerImage, ImageName);
// Capture HEAD SHA so the build is traceable back to a specific commit
try
{
using var repo = new Repository(repoRoot);
record.CommitSha = repo.Head.Tip?.Sha;
}
catch { /* not a git repo or no commits yet — CommitSha stays null */ }
try
{
var socketUri = config["Docker:Socket"] ?? "npipe://./pipe/docker_engine";
using var docker = new DockerClientConfiguration(new Uri(socketUri)).CreateClient();
var (repo, tag) = SplitImageTag(ImageName);
var dockerfilePath = "Clarity.Server/Dockerfile";
var dockerfilePath = "Clarity/Clarity.Server/Dockerfile";
void Log(string line) { onLine(line); record.Log.Add(line); }
@@ -1,5 +1,6 @@
using ControlPlane.Core.Models;
using ControlPlane.Core.Services;
using LibGit2Sharp;
using Microsoft.Extensions.Configuration;
using Microsoft.Extensions.Logging;
using System.Diagnostics;
@@ -24,9 +25,14 @@ public class ProjectBuildService(
return
[
new("Clarity.Server", BuildKind.DotnetProject, "Clarity.Server/Clarity.Server.csproj"),
new("Clarity.ServiceDefaults", BuildKind.DotnetProject, "Clarity.ServiceDefaults/Clarity.ServiceDefaults.csproj"),
new("frontend (Clarity.Server)", BuildKind.NpmProject, "frontend"),
// ── Solution-level builds (primary targets) ──────────────────────
new("Clarity Solution", BuildKind.SolutionBuild, "Clarity/Clarity.slnx"),
new("ControlPlane Solution", BuildKind.SolutionBuild, "OPC/ControlPlane.slnx"),
// ── Individual Clarity projects ───────────────────────────────────
new("Clarity.Server", BuildKind.DotnetProject, "Clarity/Clarity.Server/Clarity.Server.csproj"),
new("Clarity.ServiceDefaults", BuildKind.DotnetProject, "Clarity/Clarity.ServiceDefaults/Clarity.ServiceDefaults.csproj"),
new("frontend (Clarity.Server)", BuildKind.NpmProject, "Clarity/frontend"),
];
}
@@ -53,6 +59,16 @@ public class ProjectBuildService(
record.Log.Add("──────────────────────────────────────");
onLine($"▶ Building {def.Name}");
// Capture HEAD SHA so the build is traceable to a specific commit
try
{
using var gitRepo = new Repository(RepoRoot);
record.CommitSha = gitRepo.Head.Tip?.Sha;
if (record.CommitSha is not null)
record.Log.Add($" Commit: {record.CommitSha[..8]}");
}
catch { /* not a git repo or no commits yet */ }
try
{
var (exe, args, workDir) = def.Kind == BuildKind.NpmProject
+704 -124
View File
@@ -1,22 +1,25 @@
using ControlPlane.Core.Models;
using LibGit2Sharp;
using Microsoft.Extensions.Configuration;
using Microsoft.Extensions.Logging;
using System.Diagnostics;
using System.Text.Json;
namespace ControlPlane.Api.Services;
/// <summary>
/// Handles all git operations for the promotion workflow:
/// branch status, diff summaries, merge + push, and promotion history persistence.
/// All git commands run against the repo root configured in Docker:RepoRoot.
/// Handles all git operations for the promotion workflow using LibGit2Sharp.
/// No git.exe subprocess is ever spawned — all operations run through the managed
/// LibGit2Sharp API against the server's authoritative repository clone.
/// HEAD is never mutated; merges are performed directly on the object database
/// so the working tree always reflects the develop branch.
/// </summary>
public class PromotionService(IConfiguration config, ILogger<PromotionService> logger)
{
// The ordered promotion ladder — each step is a valid promotion.
public static readonly string[] Ladder = ["develop", "staging", "uat", "master"];
// The ordered promotion ladder — develop is trunk, main is production.
public static readonly string[] Ladder = ["develop", "staging", "uat", "main"];
private string RepoRoot => config["Docker:RepoRoot"] ?? string.Empty;
private string GetRepoPath(string repoName) =>
config[$"Git:Repos:{repoName}"] ?? string.Empty;
private static readonly SemaphoreSlim _lock = new(1, 1);
private static readonly JsonSerializerOptions JsonOpts = new()
@@ -26,67 +29,147 @@ public class PromotionService(IConfiguration config, ILogger<PromotionService> l
Converters = { new System.Text.Json.Serialization.JsonStringEnumConverter() },
};
// ── Credentials ──────────────────────────────────────────────────────────
private FetchOptions MakeFetchOptions() => new()
{
CredentialsProvider = (_, _, _) => new UsernamePasswordCredentials
{
Username = config["Gitea:Owner"] ?? "git",
Password = config["Gitea:Token"] ?? string.Empty,
},
};
private PushOptions MakePushOptions() => new()
{
CredentialsProvider = (_, _, _) => new UsernamePasswordCredentials
{
Username = config["Gitea:Owner"] ?? "git",
Password = config["Gitea:Token"] ?? string.Empty,
},
};
private static Signature MakeSig() =>
new("OPC Control Plane", "opc@clarity.internal", DateTimeOffset.UtcNow);
// ── Remote URL (config-driven, never reads .git/config URL) ──────────────
/// <summary>
/// Builds the HTTPS remote URL for a named repo entirely from Gitea config.
/// The local clone's .git/config remote URL is irrelevant — this is the authority.
/// </summary>
private string GetRemoteUrl(string repoName)
{
var baseUrl = (config["Gitea:BaseUrl"]
?? throw new InvalidOperationException("Gitea:BaseUrl is not configured.")).TrimEnd('/');
var owner = config[$"Gitea:Repos:{repoName}:Owner"] ?? config["Gitea:Owner"]
?? throw new InvalidOperationException($"Gitea owner not configured for '{repoName}'.");
var repoSlug = config[$"Gitea:Repos:{repoName}:Repo"] ?? repoName;
return $"{baseUrl}/{owner}/{repoSlug}.git";
}
/// <summary>
/// Returns the 'origin' remote after normalising its URL to the config-driven HTTPS URL.
/// If the clone was checked out with SSH (e.g. on a dev machine), this corrects it silently
/// so that LibGit2Sharp — which has no SSH support — always uses HTTPS.
/// </summary>
private Remote EnsureRemote(Repository repo, string repoName)
{
var url = GetRemoteUrl(repoName);
var remote = repo.Network.Remotes["origin"];
if (remote is null)
return repo.Network.Remotes.Add("origin", url);
if (remote.Url != url)
repo.Network.Remotes.Update("origin", r => r.Url = url);
return repo.Network.Remotes["origin"]!;
}
// ── Branch status ────────────────────────────────────────────────────────
/// <summary>
/// Returns status for all ladder branches: last commit info + ahead/behind counts vs next branch.
/// Runs on a thread-pool thread because LibGit2Sharp network operations are synchronous.
/// </summary>
public async Task<List<BranchStatus>> GetLadderStatusAsync(CancellationToken ct = default)
public Task<List<BranchStatus>> GetLadderStatusAsync(string repoName = "Clarity", CancellationToken ct = default) =>
Task.Run(() => GetLadderStatusCore(repoName, ct), ct);
private List<BranchStatus> GetLadderStatusCore(string repoName, CancellationToken ct)
{
var repoPath = GetRepoPath(repoName);
if (string.IsNullOrWhiteSpace(repoPath) || !Directory.Exists(repoPath))
return Ladder.Select(b => new BranchStatus(b, false, null, null, 0, 0, [])).ToList();
using var repo = new Repository(repoPath);
// Fetch to get up-to-date remote refs; swallow network errors so status still works offline.
try
{
var remote = EnsureRemote(repo, repoName);
var refSpecs = remote.FetchRefSpecs.Select(r => r.Specification).ToList();
repo.Network.Fetch(remote.Name, refSpecs, MakeFetchOptions());
}
catch (Exception ex)
{
logger.LogWarning(ex, "Fetch during ladder status failed — continuing with cached refs");
}
var result = new List<BranchStatus>();
// Fetch to get up-to-date remote state, but don't fail if we're offline
await RunGitAsync("fetch --all --quiet", ct, swallowErrors: true);
for (var i = 0; i < Ladder.Length; i++)
{
ct.ThrowIfCancellationRequested();
foreach (var branch in Ladder)
var branchName = Ladder[i];
// Always read from the remote tracking ref so the status reflects what is on origin,
// not the server's potentially-stale local branch pointer.
var branch = repo.Branches[$"origin/{branchName}"];
if (branch?.Tip is null)
{
var exists = await BranchExistsAsync(branch, ct);
if (!exists)
{
result.Add(new BranchStatus(branch, false, null, null, 0, 0, []));
result.Add(new BranchStatus(branchName, false, null, null, 0, 0, []));
continue;
}
// Last commit on this branch
var lastCommit = await GitOutputAsync($"log {branch} -1 --format=%h|%an|%ad|%s --date=short", ct);
string? shortHash = null, author = null, date = null, subject = null;
if (!string.IsNullOrWhiteSpace(lastCommit))
{
var p = lastCommit.Trim().Split('|', 4);
if (p.Length == 4) (shortHash, author, date, subject) = (p[0], p[1], p[2], p[3]);
}
var tip = branch.Tip;
var when = tip.Author.When;
var summary = $"{tip.Author.Name} · {when:yyyy-MM-dd} · {tip.MessageShort}";
// Ahead/behind vs the NEXT branch in the ladder
int ahead = 0, behind = 0;
var nextIdx = Array.IndexOf(Ladder, branch) + 1;
if (nextIdx < Ladder.Length)
// Ahead/behind vs the next branch in the ladder
int ahead = 0;
int behind = 0;
CommitInfo[] unreleasedCommits = [];
if (i + 1 < Ladder.Length)
{
var next = Ladder[nextIdx];
if (await BranchExistsAsync(next, ct))
var nextBranch = repo.Branches[$"origin/{Ladder[i + 1]}"];
if (nextBranch?.Tip is not null)
{
var counts = await GitOutputAsync($"rev-list --left-right --count {next}...{branch}", ct);
if (!string.IsNullOrWhiteSpace(counts))
var div = repo.ObjectDatabase.CalculateHistoryDivergence(tip, nextBranch.Tip);
ahead = div.AheadBy ?? 0;
behind = div.BehindBy ?? 0;
if (ahead > 0)
{
var parts = counts.Trim().Split('\t');
if (parts.Length == 2)
unreleasedCommits = repo.Commits
.QueryBy(new CommitFilter
{
int.TryParse(parts[0], out behind);
int.TryParse(parts[1], out ahead);
}
IncludeReachableFrom = tip,
ExcludeReachableFrom = nextBranch.Tip,
SortBy = CommitSortStrategies.Topological | CommitSortStrategies.Time,
})
.Select(c => new CommitInfo(
c.Sha,
c.Sha[..7],
c.MessageShort,
c.Author.Name,
c.Author.When.ToString("yyyy-MM-dd")))
.ToArray();
}
}
}
// Unreleased commit summaries (commits in this branch not yet in next)
string[] unreleasedLines = [];
if (ahead > 0 && nextIdx < Ladder.Length && await BranchExistsAsync(Ladder[nextIdx], ct))
{
var log = await GitOutputAsync($"log {Ladder[nextIdx]}..{branch} --oneline --no-decorate", ct);
unreleasedLines = log.Split('\n', StringSplitOptions.RemoveEmptyEntries);
}
result.Add(new BranchStatus(branch, true, shortHash, $"{author} · {date} · {subject}",
ahead, behind, unreleasedLines));
result.Add(new BranchStatus(branchName, true, tip.Sha[..7], summary,
ahead, behind, unreleasedCommits, tip.Sha));
}
return result;
@@ -96,7 +179,8 @@ public class PromotionService(IConfiguration config, ILogger<PromotionService> l
/// <summary>
/// Merges <paramref name="from"/> into <paramref name="to"/> with a no-fast-forward merge commit,
/// then pushes. Streams progress lines to <paramref name="onLine"/>.
/// then pushes. HEAD is never mutated — the working tree stays on develop throughout.
/// Streams progress lines to <paramref name="onLine"/>.
/// </summary>
public async Task<PromotionRequest> PromoteAsync(
string from,
@@ -104,7 +188,8 @@ public class PromotionService(IConfiguration config, ILogger<PromotionService> l
string requestedBy,
string? note,
Action<string> onLine,
CancellationToken ct)
CancellationToken ct,
string repoName = "Clarity")
{
if (!await _lock.WaitAsync(TimeSpan.Zero, ct))
{
@@ -126,57 +211,13 @@ public class PromotionService(IConfiguration config, ILogger<PromotionService> l
try
{
Log($"▶ Promoting {from} → {to}");
if (!string.IsNullOrWhiteSpace(note)) Log($" Note: {note}");
Log("──────────────────────────────────────");
// 1. Fetch latest
Log(" git fetch --all");
await RunGitAsync("fetch --all --quiet", ct);
// 2. Checkout target branch
Log($" git checkout {to}");
await RunGitAsync($"checkout {to}", ct);
// 3. Pull target to latest
Log($" git pull origin {to}");
await RunGitAsync($"pull origin {to} --quiet", ct);
// 4. Count commits being promoted
var logOutput = await GitOutputAsync($"log {to}..{from} --oneline --no-decorate", ct);
var commitLines = logOutput.Split('\n', StringSplitOptions.RemoveEmptyEntries);
req.CommitCount = commitLines.Length;
req.CommitLines = commitLines;
Log($" Merging {commitLines.Length} commit(s) from {from}:");
foreach (var cl in commitLines) Log($" {cl}");
// 5. Merge with --no-ff for a clean promotion commit
var mergeMsg = $"chore: promote {from} → {to}" + (note != null ? $" — {note}" : "");
Log($" git merge --no-ff {from}");
await RunGitAsync($"merge --no-ff {from} -m \"{mergeMsg}\"", ct);
// 6. Push
Log($" git push origin {to}");
await RunGitAsync($"push origin {to}", ct);
// 7. Return to develop so the working tree stays clean
await RunGitAsync("checkout develop", ct, swallowErrors: true);
Log("──────────────────────────────────────");
Log($"✔ {from} → {to} promoted successfully at {DateTimeOffset.UtcNow:u}");
req.Status = PromotionStatus.Succeeded;
req.CompletedAt = DateTimeOffset.UtcNow;
await Task.Run(() => PromoteCore(from, to, note, repoName, req, Log, ct), ct);
}
catch (Exception ex)
{
Log($"✖ Promotion failed: {ex.Message}");
req.Status = PromotionStatus.Failed;
req.CompletedAt = DateTimeOffset.UtcNow;
// Try to abort any broken merge state
await RunGitAsync("merge --abort", ct, swallowErrors: true);
await RunGitAsync("checkout develop", ct, swallowErrors: true);
logger.LogError(ex, "Promotion {From}→{To} failed", from, to);
}
finally
@@ -188,6 +229,479 @@ public class PromotionService(IConfiguration config, ILogger<PromotionService> l
return req;
}
/// <summary>
/// Synchronous core of a promotion: fast-forwards the local <paramref name="to"/> ref to
/// <paramref name="from"/>'s remote tip and pushes it to origin. Runs on a thread-pool
/// thread (invoked via Task.Run), so all libgit2 calls here are blocking by design.
/// HEAD and the working tree are never touched. Results and progress are written into
/// <paramref name="req"/> in place; failures propagate as exceptions to the async caller.
/// </summary>
/// <param name="from">Source branch name (read from origin/{from}).</param>
/// <param name="to">Target branch name (local ref is mutated, then pushed).</param>
/// <param name="note">Optional operator note, echoed into the log.</param>
/// <param name="repoName">Configured repository key resolved via GetRepoPath.</param>
/// <param name="req">Mutable request record that accumulates status, counts and log lines.</param>
/// <param name="Log">Sink for human-readable progress lines.</param>
/// <param name="ct">Checked between the fetch, enumerate and push phases.</param>
private void PromoteCore(
    string from,
    string to,
    string? note,
    string repoName,
    PromotionRequest req,
    Action<string> Log,
    CancellationToken ct)
{
    Log($"▶ Promoting {from} → {to} [{repoName}]");
    if (!string.IsNullOrWhiteSpace(note)) Log($" Note: {note}");
    Log("──────────────────────────────────────");
    using var repo = new Repository(GetRepoPath(repoName));
    // 1. Fetch latest remote state for all branches
    Log(" Fetching origin...");
    var remote = EnsureRemote(repo, repoName);
    var refSpecs = remote.FetchRefSpecs.Select(r => r.Specification).ToList();
    repo.Network.Fetch(remote.Name, refSpecs, MakeFetchOptions());
    // 2. Resolve branches — always read from origin/ so we reflect what is actually on the remote,
    //    never the server's potentially-stale local branch pointers.
    var fromBranch = repo.Branches[$"origin/{from}"]
        ?? throw new InvalidOperationException($"Remote branch 'origin/{from}' not found.");
    // `to` is read locally because we need to mutate its ref and push — it is immediately
    // fast-forwarded to origin/{to} in the next step so it is never stale when used.
    var toBranch = repo.Branches[to]
        ?? throw new InvalidOperationException($"Branch '{to}' not found.");
    // 3. Fast-forward local `to` to its remote tracking branch (equivalent to git pull --ff-only)
    var remoteTracking = repo.Branches[$"origin/{to}"];
    if (remoteTracking?.Tip is not null && toBranch.Tip.Sha != remoteTracking.Tip.Sha)
    {
        Log($" Fast-forwarding {to} to origin/{to}...");
        repo.Refs.UpdateTarget(toBranch.Reference.CanonicalName, remoteTracking.Tip.Sha);
        toBranch = repo.Branches[to]!; // refresh after update
    }
    ct.ThrowIfCancellationRequested();
    var fromTip = fromBranch.Tip;
    var toTip = toBranch.Tip;
    // 4. Enumerate commits being promoted (reachable from source tip but not target tip)
    var pendingCommits = repo.Commits.QueryBy(new CommitFilter
    {
        IncludeReachableFrom = fromTip,
        ExcludeReachableFrom = toTip,
        SortBy = CommitSortStrategies.Topological | CommitSortStrategies.Time,
    }).ToList();
    if (pendingCommits.Count == 0)
    {
        // Nothing to do — report success with an empty commit set and exit early.
        Log($" {to} is already up-to-date with {from}. Nothing to promote.");
        req.Status = PromotionStatus.Succeeded;
        req.CommitCount = 0;
        req.CommitLines = [];
        req.CompletedAt = DateTimeOffset.UtcNow;
        return;
    }
    req.CommitCount = pendingCommits.Count;
    req.CommitLines = pendingCommits.Select(c => $"{c.Sha[..7]} {c.MessageShort}").ToArray();
    Log($" {pendingCommits.Count} commit(s) to promote:");
    foreach (var cl in req.CommitLines) Log($" {cl}");
    ct.ThrowIfCancellationRequested();
    // 5. Safety check: `from` must be a descendant of `to` (fast-forward is only possible
    //    when the target branch has no commits that aren't already reachable from source).
    //    This is the TBD invariant — staging/uat/main are always subsets of develop's linear history.
    //    If the merge base of the two tips IS the target tip, the target is an ancestor of source.
    var isAncestor = repo.ObjectDatabase.FindMergeBase(fromTip, toTip)?.Sha == toTip.Sha;
    if (!isAncestor)
    {
        throw new InvalidOperationException(
            $"'{to}' has commits not in '{from}' — fast-forward is not possible. " +
            $"This means '{to}' diverged from trunk. " +
            $"Check whether a hotfix was committed directly to '{to}' without being backported to '{from}'.");
    }
    // 6. Fast-forward: advance the local `to` ref to `from`'s tip — no merge commit, linear history.
    //    Equivalent to: git push origin {from}:{to}
    //    HEAD is never mutated, working tree is untouched.
    var oldToSha = toTip.Sha;
    repo.Refs.UpdateTarget(toBranch.Reference.CanonicalName, fromTip.Sha);
    Log($" Fast-forward: refs/heads/{to} {oldToSha[..7]} → {fromTip.Sha[..7]}");
    ct.ThrowIfCancellationRequested();
    // 7. Push to origin; roll back the local ref if push fails so nothing is left half-done
    Log($" Pushing {to} to origin...");
    try
    {
        repo.Network.Push(remote, $"refs/heads/{to}:refs/heads/{to}", MakePushOptions());
    }
    catch
    {
        // Restore the pre-promotion SHA so local and remote stay consistent, then rethrow.
        repo.Refs.UpdateTarget(toBranch.Reference.CanonicalName, oldToSha);
        throw;
    }
    Log("──────────────────────────────────────");
    Log($"✔ {from} → {to} promoted successfully ({pendingCommits.Count} commit(s)) at {DateTimeOffset.UtcNow:u}");
    req.Status = PromotionStatus.Succeeded;
    req.CompletedAt = DateTimeOffset.UtcNow;
}
// ── Branch reset (recovery) ────────────────────────────────────────────────
/// <summary>
/// Force-resets <paramref name="branchName"/> to <paramref name="toSha"/> and force-pushes to origin.
/// Used to recover a downstream branch that has drifted from trunk (e.g. after an accidental merge commit).
/// </summary>
public Task ResetBranchAsync(string branchName, string toSha, string repoName, CancellationToken ct) =>
    Task.Run(() =>
    {
        using var repo = new Repository(GetRepoPath(repoName));

        // Both the target commit and the branch must already exist locally.
        var targetCommit = repo.Lookup<Commit>(toSha)
            ?? throw new InvalidOperationException($"SHA '{toSha}' not found in {repoName}.");
        var targetBranch = repo.Branches[branchName]
            ?? throw new InvalidOperationException($"Branch '{branchName}' not found in {repoName}.");

        // Remember where the branch pointed so a failed push can be rolled back.
        // NOTE(review): only the ref is moved — if this branch happened to be checked out,
        // the working tree is not reset here; confirm this clone never has it checked out.
        var previousSha = targetBranch.Tip.Sha;
        repo.Refs.UpdateTarget(targetBranch.Reference.CanonicalName, targetCommit.Sha);

        try
        {
            // The "+" prefix in the refspec requests a forced (non-fast-forward) update on the remote.
            var remote = EnsureRemote(repo, repoName);
            repo.Network.Push(remote, $"+refs/heads/{branchName}:refs/heads/{branchName}", MakePushOptions());
        }
        catch
        {
            // Push failed — restore the local ref so nothing is left half-done.
            repo.Refs.UpdateTarget(targetBranch.Reference.CanonicalName, previousSha);
            throw;
        }

        logger.LogInformation("Reset {Branch} from {Old} to {New} in {Repo}", branchName, previousSha[..7], targetCommit.Sha[..7], repoName);
    }, ct);
// ── Cherry-pick (partial promotion) ──────────────────────────────────────
/// <summary>
/// Cherry-picks the specified commits from <paramref name="from"/> onto <paramref name="to"/>
/// and pushes. Unlike a full fast-forward promotion, cherry-pick copies individual commits
/// as new commits — useful for promoting a subset of changes to a downstream environment.
/// Note: cherry-pick will cause the target branch to diverge from trunk.
/// </summary>
public async Task<PromotionRequest> CherryPickAsync(
    string[] shas,
    string from,
    string to,
    string requestedBy,
    string? note,
    Action<string> onLine,
    CancellationToken ct,
    string repoName = "Clarity")
{
    // Zero-timeout wait: reject immediately when another promotion/cherry-pick holds the lock.
    if (!await _lock.WaitAsync(TimeSpan.Zero, ct))
    {
        var rejected = new PromotionRequest { FromBranch = from, ToBranch = to, Status = PromotionStatus.Failed };
        rejected.Log.Add("⚠️ Another promotion or cherry-pick is already in progress.");
        return rejected;
    }

    var request = new PromotionRequest
    {
        FromBranch = from,
        ToBranch = to,
        RequestedBy = requestedBy,
        Note = note,
        Status = PromotionStatus.Running,
    };

    // Mirror every progress line into both the persisted request log and the live stream.
    void Emit(string line)
    {
        request.Log.Add(line);
        onLine(line);
    }

    try
    {
        // Offload to the thread pool — the core does synchronous libgit2 work.
        await Task.Run(() => CherryPickCore(shas, from, to, repoName, request, Emit, ct), ct);
    }
    catch (Exception ex)
    {
        Emit($"✖ Cherry-pick failed: {ex.Message}");
        request.Status = PromotionStatus.Failed;
        request.CompletedAt = DateTimeOffset.UtcNow;
        logger.LogError(ex, "Cherry-pick {From}→{To} failed", from, to);
    }
    finally
    {
        // Always persist the outcome and free the lock, success or failure.
        await SaveAsync(request);
        _lock.Release();
    }

    return request;
}
/// <summary>
/// Synchronous core of a cherry-pick promotion: checks out <paramref name="to"/> (force),
/// applies the given commits oldest-first, pushes, and always restores HEAD to whatever
/// branch it was on before. Runs on a thread-pool thread (invoked via Task.Run).
/// Unlike PromoteCore, this DOES mutate HEAD and the working tree, so it requires a
/// non-bare clone. Progress and results are written into <paramref name="req"/> in place;
/// failures propagate as exceptions to the async caller.
/// </summary>
/// <param name="shas">Commit SHAs to apply — expected newest-first from the UI; reversed here.</param>
/// <param name="from">Source branch name (used only for log messages here).</param>
/// <param name="to">Target branch that receives the cherry-picked commits.</param>
/// <param name="repoName">Configured repository key resolved via GetRepoPath.</param>
/// <param name="req">Mutable request record that accumulates counts and commit lines.</param>
/// <param name="Log">Sink for human-readable progress lines.</param>
/// <param name="ct">Checked before checkout and before each commit is applied.</param>
private void CherryPickCore(
    string[] shas,
    string from,
    string to,
    string repoName,
    PromotionRequest req,
    Action<string> Log,
    CancellationToken ct)
{
    Log($"▶ Cherry-pick {shas.Length} commit(s): {from} → {to} [{repoName}]");
    if (!string.IsNullOrWhiteSpace(req.Note)) Log($" Note: {req.Note}");
    Log("──────────────────────────────────────");
    using var repo = new Repository(GetRepoPath(repoName));
    // Cherry-pick needs a working tree to apply changes into — a bare repo has none.
    if (repo.Info.IsBare)
        throw new InvalidOperationException(
            "Cherry-pick requires a non-bare repository clone. " +
            "Ensure Git:Repos:{name} points to a standard (non-bare) clone.");
    // 1. Fetch latest remote refs
    Log(" Fetching origin...");
    var remote = EnsureRemote(repo, repoName);
    var refSpecs = remote.FetchRefSpecs.Select(r => r.Specification).ToList();
    repo.Network.Fetch(remote.Name, refSpecs, MakeFetchOptions());
    // 2. Resolve target branch (local — we need to mutate it)
    var toBranch = repo.Branches[to]
        ?? throw new InvalidOperationException($"Branch '{to}' not found.");
    // 3. Fast-forward `to` to its remote tracking branch (sync with origin)
    var remoteTracking = repo.Branches[$"origin/{to}"];
    if (remoteTracking?.Tip is not null && toBranch.Tip.Sha != remoteTracking.Tip.Sha)
    {
        Log($" Fast-forwarding {to} to origin/{to}...");
        repo.Refs.UpdateTarget(toBranch.Reference.CanonicalName, remoteTracking.Tip.Sha);
        toBranch = repo.Branches[to]!;
    }
    // Remember the pre-pick tip (rollback point) and the branch HEAD is currently on
    // (restored in the finally block regardless of outcome).
    var savedToSha = toBranch.Tip.Sha;
    var originalHeadBranchName = repo.Head.FriendlyName;
    // 4. Resolve commits — shas arrive newest-first from UI (topological order);
    //    reverse so we apply oldest → newest (preserves logical order in history).
    var commitsOrdered = shas
        .Select(sha => repo.Lookup<Commit>(sha)
            ?? throw new InvalidOperationException($"Commit '{sha}' not found in {repoName}."))
        .Reverse()
        .ToList();
    req.CommitCount = commitsOrdered.Count;
    req.CommitLines = commitsOrdered.Select(c => $"{c.Sha[..7]} {c.MessageShort}").ToArray();
    Log($" {commitsOrdered.Count} commit(s) to apply (oldest → newest):");
    foreach (var c in commitsOrdered) Log($" {c.Sha[..7]} {c.MessageShort}");
    ct.ThrowIfCancellationRequested();
    // 5. Force-checkout target branch — discards any dirty working tree state left by a
    //    previous failed cherry-pick or interrupted operation. This is a server-only clone
    //    managed exclusively by the control plane, so force is always safe here.
    Log($" Checking out {to} (force)...");
    var forceCheckout = new CheckoutOptions { CheckoutModifiers = CheckoutModifiers.Force };
    Commands.Checkout(repo, toBranch, forceCheckout);
    try
    {
        var sig = MakeSig();
        foreach (var commit in commitsOrdered)
        {
            ct.ThrowIfCancellationRequested();
            Log($" Applying {commit.Sha[..7]} {commit.MessageShort}...");
            var result = repo.CherryPick(commit, sig);
            switch (result.Status)
            {
                case CherryPickStatus.CherryPicked:
                    // Applied cleanly — log the SHA of the newly-created commit.
                    Log($" \u2714 \u2192 {result.Commit!.Sha[..7]}");
                    break;
                case CherryPickStatus.Conflicts:
                    // Conflict — hard-reset the branch back to its pre-pick tip and abort the run.
                    Log($" \u2716 Conflict \u2014 aborting and rolling back");
                    repo.Reset(ResetMode.Hard, repo.Lookup<Commit>(savedToSha));
                    throw new InvalidOperationException(
                        $"Cherry-pick conflict on {commit.Sha[..7]}: {commit.MessageShort}. " +
                        "Resolve conflicts manually or promote a different set of commits.");
                default:
                    // Empty result (change already present) — nothing committed, continue.
                    Log($" \u2261 Already present or no changes \u2014 skipped");
                    break;
            }
        }
        // 6. Push; on failure roll the branch back so local and remote stay consistent
        Log($" Pushing {to} to origin...");
        try
        {
            repo.Network.Push(remote, $"refs/heads/{to}:refs/heads/{to}", MakePushOptions());
        }
        catch
        {
            repo.Reset(ResetMode.Hard, repo.Lookup<Commit>(savedToSha));
            throw;
        }
        Log("──────────────────────────────────────");
        Log($"✔ Cherry-picked {commitsOrdered.Count} commit(s) to {to} at {DateTimeOffset.UtcNow:u}");
        req.Status = PromotionStatus.Succeeded;
        req.CompletedAt = DateTimeOffset.UtcNow;
    }
    finally
    {
        // Always restore HEAD to the original branch regardless of outcome
        try
        {
            var headBranch = repo.Branches[originalHeadBranchName];
            if (headBranch is not null)
                Commands.Checkout(repo, headBranch, new CheckoutOptions { CheckoutModifiers = CheckoutModifiers.Force });
        }
        catch (Exception ex)
        {
            // Best-effort: a failed restore is logged but must not mask the real outcome.
            logger.LogWarning(ex, "Could not restore HEAD to '{Branch}' after cherry-pick", originalHeadBranchName);
        }
    }
}
// ── Conformance check ────────────────────────────────────────────────────
/// <summary>
/// Evaluates whether all branches in the TBD ladder are in conformance:
/// develop → staging → uat → main must form a strict linear ancestry chain with no divergence.
/// </summary>
public Task<ConformanceReport> GetConformanceAsync(string repoName = "Clarity", CancellationToken ct = default)
{
    // Offload to the thread pool — the core walks the repository with synchronous libgit2 calls.
    return Task.Run(() => GetConformanceCore(repoName, ct), ct);
}
/// <summary>
/// Synchronous core of the conformance check. Walks the ladder in order, classifying each
/// branch against its predecessor as OK / Stale / Diverged / Missing, and returns a report
/// whose IsConformant flag is false when any Diverged or Missing violation exists.
/// Works offline: a failed fetch is logged and cached refs are used instead.
/// </summary>
/// <param name="repoName">Configured repository key resolved via GetRepoPath.</param>
/// <param name="ct">Checked at the top of each ladder iteration.</param>
private ConformanceReport GetConformanceCore(string repoName, CancellationToken ct)
{
    var repoPath = GetRepoPath(repoName);
    var checks = new List<BranchConformanceCheck>();
    // Unconfigured / missing repo: report every ladder branch as Missing/Critical rather than throwing.
    if (string.IsNullOrWhiteSpace(repoPath) || !Directory.Exists(repoPath))
    {
        foreach (var b in Ladder)
            checks.Add(new BranchConformanceCheck(b, null, ConformanceViolation.Missing, ConformanceSeverity.Critical,
                $"Repository '{repoName}' is not configured or the path does not exist.", 0, 0, null));
        return new ConformanceReport(repoName, false, checks.ToArray());
    }
    using var repo = new Repository(repoPath);
    // Fetch latest remote refs — swallow network errors so status still works offline.
    try
    {
        var remote = EnsureRemote(repo, repoName);
        var refSpecs = remote.FetchRefSpecs.Select(r => r.Specification).ToList();
        repo.Network.Fetch(remote.Name, refSpecs, MakeFetchOptions());
    }
    catch (Exception ex)
    {
        logger.LogWarning(ex, "Fetch during conformance check failed — continuing with cached refs");
    }
    for (var i = 0; i < Ladder.Length; i++)
    {
        ct.ThrowIfCancellationRequested();
        var branchName = Ladder[i];
        var srcName = i > 0 ? Ladder[i - 1] : null; // predecessor branch (e.g. develop for staging)
        // Always read from origin/ tracking refs — never local branch pointers.
        var branch = repo.Branches[$"origin/{branchName}"];
        // ── Branch missing ──────────────────────────────────────────────
        if (branch?.Tip is null)
        {
            // FixSha carries the source tip so the UI can offer "create from source".
            var srcTip = srcName is not null ? repo.Branches[$"origin/{srcName}"]?.Tip?.Sha : null;
            checks.Add(new BranchConformanceCheck(
                branchName, srcName,
                ConformanceViolation.Missing,
                // A missing trunk is critical; a missing downstream branch is merely informational.
                srcName is null ? ConformanceSeverity.Critical : ConformanceSeverity.Info,
                srcName is not null
                    ? $"Branch '{branchName}' does not exist. It should be created from '{srcName}'."
                    : $"Trunk branch '{branchName}' does not exist — the repository may be empty.",
                0, 0, srcTip));
            continue;
        }
        // ── Trunk (develop) — just needs to exist ───────────────────────
        if (srcName is null)
        {
            checks.Add(new BranchConformanceCheck(
                branchName, null, ConformanceViolation.OK, ConformanceSeverity.OK,
                $"Trunk branch '{branchName}' exists.", 0, 0, null));
            continue;
        }
        var srcBranch = repo.Branches[$"origin/{srcName}"];
        if (srcBranch?.Tip is null)
        {
            // Source branch is itself missing — skip, it will be reported separately.
            checks.Add(new BranchConformanceCheck(
                branchName, srcName, ConformanceViolation.OK, ConformanceSeverity.OK,
                $"Source branch '{srcName}' is missing — check skipped.", 0, 0, null));
            continue;
        }
        // CalculateHistoryDivergence(srcTip, branchTip):
        //   AheadBy  = commits srcBranch has that branch doesn't → branch is pending promotion (stale)
        //   BehindBy = commits branch has that srcBranch doesn't → branch is DIVERGED (violation)
        var div = repo.ObjectDatabase.CalculateHistoryDivergence(srcBranch.Tip, branch.Tip);
        var ahead = div.AheadBy ?? 0;
        var behind = div.BehindBy ?? 0;
        if (behind > 0)
        {
            // Downstream has commits the upstream doesn't — TBD violation (broken linear history).
            checks.Add(new BranchConformanceCheck(
                branchName, srcName,
                ConformanceViolation.Diverged, ConformanceSeverity.Critical,
                $"'{branchName}' has {behind} commit(s) not reachable from '{srcName}'. " +
                $"This breaks TBD linear history. Likely caused by a commit made directly to '{branchName}' " +
                $"without backporting to trunk. Fix: reset '{branchName}' to '{srcName}' tip.",
                behind, ahead,
                srcBranch.Tip.Sha));
        }
        else if (ahead > 0)
        {
            // Upstream has unreleased commits — normal TBD state, but flag if count is high.
            var sev = ahead > 10 ? ConformanceSeverity.Warning : ConformanceSeverity.Info;
            checks.Add(new BranchConformanceCheck(
                branchName, srcName,
                ConformanceViolation.Stale, sev,
                $"'{branchName}' is {ahead} commit(s) behind '{srcName}'. " +
                (ahead > 10 ? "Large backlog — consider promoting soon." : "Pending promotion."),
                0, ahead, null));
        }
        else
        {
            checks.Add(new BranchConformanceCheck(
                branchName, srcName, ConformanceViolation.OK, ConformanceSeverity.OK,
                $"'{branchName}' is fully in sync with '{srcName}'.", 0, 0, null));
        }
    }
    // Stale is tolerated (normal pending work); only Diverged/Missing break conformance.
    var isConformant = !checks.Any(c =>
        c.Violation is ConformanceViolation.Diverged or ConformanceViolation.Missing);
    return new ConformanceReport(repoName, isConformant, checks.ToArray());
}
// ── Create branch ─────────────────────────────────────────────────────────
/// <summary>
/// Creates a new branch at the given commit SHA and pushes it to origin.
/// Used to create missing ladder branches (e.g. staging, uat) from their source branch tip.
/// </summary>
public Task CreateBranchAsync(string branchName, string fromSha, string repoName, CancellationToken ct) =>
    Task.Run(() =>
    {
        using var repo = new Repository(GetRepoPath(repoName));

        // Guard against clobbering an existing branch.
        if (repo.Branches[branchName] is not null)
            throw new InvalidOperationException($"Branch '{branchName}' already exists in {repoName}.");

        var startPoint = repo.Lookup<Commit>(fromSha)
            ?? throw new InvalidOperationException($"SHA '{fromSha}' not found in {repoName}.");

        // Add the ref directly — no checkout needed, HEAD and the working tree stay untouched.
        repo.Refs.Add($"refs/heads/{branchName}", startPoint.Sha);
        var remote = EnsureRemote(repo, repoName);
        repo.Network.Push(remote, $"refs/heads/{branchName}:refs/heads/{branchName}", MakePushOptions());

        logger.LogInformation("Created branch {Branch} at {Sha} in {Repo}", branchName, startPoint.Sha[..7], repoName);
    }, ct);
// ── History persistence ──────────────────────────────────────────────────
private string HistoryPath
@@ -231,46 +745,111 @@ public class PromotionService(IConfiguration config, ILogger<PromotionService> l
catch { return []; }
}
// ── Git helpers ──────────────────────────────────────────────────────────
// ── OPC number extraction ─────────────────────────────────────────────
private async Task<bool> BranchExistsAsync(string branch, CancellationToken ct)
private static readonly System.Text.RegularExpressions.Regex OpcTagPattern =
new(@"OPC\s*#\s*(\d+)", System.Text.RegularExpressions.RegexOptions.IgnoreCase
| System.Text.RegularExpressions.RegexOptions.Compiled);
/// <summary>
/// Scans the most recent <paramref name="limit"/> commits on <paramref name="branch"/> and
/// returns a distinct, sorted list of OPC numbers referenced in commit messages (e.g. "OPC # 0042").
/// Safe to call when git is not configured — returns an empty list on any error.
/// </summary>
public Task<List<string>> ExtractOpcNumbersAsync(
string repoName = "Clarity",
string branch = "main",
int limit = 50,
CancellationToken ct = default) =>
Task.Run(() => ExtractOpcNumbersCore(repoName, branch, limit), ct);
private List<string> ExtractOpcNumbersCore(string repoName, string branch, int limit)
{
var output = await GitOutputAsync($"branch --list {branch}", ct);
return !string.IsNullOrWhiteSpace(output);
var repoPath = GetRepoPath(repoName);
if (string.IsNullOrWhiteSpace(repoPath) || !Directory.Exists(repoPath))
return [];
try
{
using var repo = new Repository(repoPath);
var b = repo.Branches[branch] ?? repo.Branches[$"origin/{branch}"];
if (b is null) return [];
var set = new HashSet<string>(StringComparer.Ordinal);
foreach (var commit in b.Commits.Take(limit))
foreach (System.Text.RegularExpressions.Match m in OpcTagPattern.Matches(commit.Message))
set.Add($"OPC # {m.Groups[1].Value.PadLeft(4, '0')}");
return [.. set.OrderBy(x => x)];
}
catch (Exception ex)
{
logger.LogWarning(ex, "ExtractOpcNumbers failed for {Repo}/{Branch}", repoName, branch);
return [];
}
}
private async Task<string> GitOutputAsync(string args, CancellationToken ct)
/// <summary>
/// Returns distinct, sorted OPC numbers for commits reachable from <paramref name="toSha"/>
/// that are NOT reachable from <paramref name="fromSha"/> — i.e. the exact delta for this release.
/// Falls back to <see cref="ExtractOpcNumbersAsync"/> (last 50 commits) when <paramref name="fromSha"/>
/// is null (first-ever release for this environment).
/// </summary>
public Task<List<string>> ExtractOpcNumbersDeltaAsync(
string repoName,
string toSha,
string? fromSha,
CancellationToken ct = default) =>
fromSha is null
? ExtractOpcNumbersAsync(repoName, ct: ct)
: Task.Run(() => ExtractOpcNumbersDeltaCore(repoName, toSha, fromSha), ct);
private List<string> ExtractOpcNumbersDeltaCore(string repoName, string toSha, string fromSha)
{
var psi = MakePsi(args);
using var proc = Process.Start(psi) ?? throw new InvalidOperationException("Failed to start git");
var output = await proc.StandardOutput.ReadToEndAsync(ct);
await proc.WaitForExitAsync(ct);
return output;
var repoPath = GetRepoPath(repoName);
if (string.IsNullOrWhiteSpace(repoPath) || !Directory.Exists(repoPath))
return [];
try
{
using var repo = new Repository(repoPath);
var toCommit = repo.Lookup<Commit>(toSha);
var fromCommit = repo.Lookup<Commit>(fromSha);
if (toCommit is null) return [];
var filter = fromCommit is null
? new CommitFilter { IncludeReachableFrom = toCommit }
: new CommitFilter { IncludeReachableFrom = toCommit, ExcludeReachableFrom = fromCommit };
var set = new HashSet<string>(StringComparer.Ordinal);
foreach (var commit in repo.Commits.QueryBy(filter))
foreach (System.Text.RegularExpressions.Match m in OpcTagPattern.Matches(commit.Message))
set.Add($"OPC # {m.Groups[1].Value.PadLeft(4, '0')}");
return [.. set.OrderBy(x => x)];
}
catch (Exception ex)
{
logger.LogWarning(ex, "ExtractOpcNumbersDelta failed for {Repo} {From}..{To}", repoName, fromSha[..7], toSha[..7]);
return [];
}
}
private async Task RunGitAsync(string args, CancellationToken ct, bool swallowErrors = false)
/// <summary>Returns the full HEAD SHA of <paramref name="branch"/> in <paramref name="repoName"/>, or null.</summary>
public string? GetBranchTipSha(string repoName, string branch)
{
var psi = MakePsi(args);
using var proc = Process.Start(psi) ?? throw new InvalidOperationException("Failed to start git");
var stderr = await proc.StandardError.ReadToEndAsync(ct);
await proc.WaitForExitAsync(ct);
if (!swallowErrors && proc.ExitCode != 0)
throw new InvalidOperationException($"git {args} exited {proc.ExitCode}: {stderr.Trim()}");
logger.LogDebug("git {Args} → exit {Code}", args, proc.ExitCode);
var repoPath = GetRepoPath(repoName);
if (string.IsNullOrWhiteSpace(repoPath) || !Directory.Exists(repoPath)) return null;
try
{
using var repo = new Repository(repoPath);
return (repo.Branches[$"origin/{branch}"] ?? repo.Branches[branch])?.Tip?.Sha;
}
catch { return null; }
}
// Builds a ProcessStartInfo that runs git non-interactively in the configured repo root,
// capturing stdout and stderr for the caller to read.
private ProcessStartInfo MakePsi(string args)
{
    var startInfo = new ProcessStartInfo("git", args)
    {
        WorkingDirectory = RepoRoot,
        UseShellExecute = false,
        CreateNoWindow = true,
        RedirectStandardOutput = true,
        RedirectStandardError = true,
    };
    return startInfo;
}
}
/// <summary>A single unreleased commit — carries full SHA for cherry-pick operations.</summary>
/// <param name="Sha">Full commit SHA, as required by cherry-pick lookups.</param>
/// <param name="ShortSha">Abbreviated SHA for display.</param>
/// <param name="Message">Short (first-line) commit message.</param>
/// <param name="Author">Commit author name.</param>
/// <param name="Date">Author date, formatted for display.</param>
public record CommitInfo(string Sha, string ShortSha, string Message, string Author, string Date);
/// <summary>Current status of a single branch in the promotion ladder.</summary>
public record BranchStatus(
string Branch,
@@ -278,6 +857,7 @@ public record BranchStatus(
string? ShortHash,
string? LastCommitSummary,
int AheadOfNext, // commits this branch has that the next doesn't
int BehindNext, // commits next has that this branch doesn't (shouldn't happen in clean flow)
string[] UnreleasedLines // oneline log of the ahead commits
int BehindNext, // commits next has that this branch doesn't (diverged)
CommitInfo[] UnreleasedCommits, // rich commit objects for cherry-pick UI
string? TipSha = null // full 40-char SHA for build-gate checks
);
+19 -1
View File
@@ -17,6 +17,7 @@ public class ReleaseService(
IConfiguration config,
TenantRegistryService registry,
BuildHistoryService history,
PromotionService promotions,
ILogger<ReleaseService> logger)
{
private static readonly SemaphoreSlim _lock = new(1, 1);
@@ -50,7 +51,12 @@ public class ReleaseService(
return blocked;
}
var record = await history.CreateReleaseAsync(targetEnv, ImageName);
// Resolve the Clarity branch for this environment and stamp the HEAD SHA
// before creating the record so we capture "what was deployed" accurately.
var branch = targetEnv switch { "fdev" => "develop", "staging" => "staging", "uat" => "uat", _ => "main" };
var currentSha = promotions.GetBranchTipSha("Clarity", branch);
var record = await history.CreateReleaseAsync(targetEnv, ImageName, currentSha);
try
{
@@ -182,6 +188,18 @@ public class ReleaseService(
}
finally
{
// Stamp the exact OPC ticket numbers introduced by this release:
// diff from previous release's SHA to this release's SHA on the Clarity branch.
try
{
var prev = await history.GetLastSuccessfulReleaseForEnvAsync(targetEnv);
// Exclude the current (in-flight) record — it's not succeeded yet
var prevSha = prev?.Id == record.Id ? null : prev?.CommitSha;
if (currentSha is not null)
record.OpcNumbers = await promotions.ExtractOpcNumbersDeltaAsync("Clarity", currentSha, prevSha, ct);
}
catch { /* git not configured — continue without OPC stamp */ }
await history.UpdateReleaseAsync(record);
_lock.Release();
}
+2
View File
@@ -13,6 +13,8 @@ internal static class TarHelper
[
".git", ".vs", ".vscode", "node_modules", "bin", "obj",
"VaultData", "*.user", "*.suo",
// Exclude sibling repos — build context is ClarityStack/ root but only Clarity/ is needed
"OPC", "gateway", "ClientAssets",
];
public static void Pack(string root, Stream destination)
+1 -23
View File
@@ -31,27 +31,6 @@ var cpPostgres = builder.AddPostgres("opc-postgres", password: cpPostgresPass
.WithPgAdmin();
var controlPlaneDb = cpPostgres.AddDatabase("opcdb");
var giteaDb = cpPostgres.AddDatabase("giteadb");
#endregion
#region GITEA
// Gitea is ControlPlane's code management component — owns its own DB on opc-postgres.
var gitea = builder.AddContainer("gitea", "gitea/gitea", "latest")
.WithHttpEndpoint(port: 3000, targetPort: 3000, name: "http")
.WithEndpoint(port: 2222, targetPort: 22, name: "ssh")
.WithVolume("clarity-gitea-data", "/data")
.WithEnvironment("GITEA__database__DB_TYPE", "postgres")
.WithEnvironment("GITEA__database__HOST", "host.docker.internal:5433")
.WithEnvironment("GITEA__database__NAME", "giteadb")
.WithEnvironment("GITEA__database__USER", "postgres")
.WithEnvironment("GITEA__database__PASSWD", "controlplane-dev")
.WithEnvironment("GITEA__server__DOMAIN", "opc.clarity.test")
.WithEnvironment("GITEA__server__ROOT_URL", "http://opc.clarity.test")
.WithEnvironment("GITEA__server__SSH_DOMAIN", "opc.clarity.test")
.WithEnvironment("GITEA__server__SSH_PORT", "2222")
.WithEnvironment("GITEA__service__DISABLE_REGISTRATION", "true")
.WaitFor(giteaDb)
.WithLifetime(ContainerLifetime.Persistent);
#endregion
#region RABBITMQ
@@ -67,9 +46,8 @@ var api = builder.AddProject<Projects.ControlPlane_Api>("controlplane-api")
.WaitFor(rabbit)
.WithReference(controlPlaneDb)
.WaitFor(controlPlaneDb)
.WithEnvironment("Gitea__BaseUrl", gitea.GetEndpoint("http"))
.WithEnvironment("ClientAssets__Folder", clientAssetsPath)
.WithEnvironment("Docker__RepoRoot", builder.AppHostDirectory.Replace("ControlPlane.AppHost", "").TrimEnd('\\', '/'))
.WithEnvironment("Docker__RepoRoot", Path.GetFullPath(Path.Combine(builder.AppHostDirectory, "..", ".."))) // ClarityStack/ root — needed for Directory.*.props
.WithExternalHttpEndpoints();
#endregion
@@ -5,6 +5,7 @@
<ItemGroup>
<PackageReference Include="Microsoft.Extensions.Configuration.Abstractions" />
<PackageReference Include="Microsoft.Extensions.Logging.Abstractions" />
<PackageReference Include="Npgsql" />
</ItemGroup>
</Project>
@@ -24,7 +24,27 @@ public class SagaContext
// Written by LaunchStep — primary app container name
public string? ContainerName { get; set; }
// Written by VaultStep — scoped periodic token for the tenant (not the root token)
// and its accessor used for compensation/revocation
public string? VaultToken { get; set; }
public string? VaultTokenAccessor { get; set; }
// Written by PulumiStep (DedicatedVM/Enterprise tier) — target host details for subsequent steps
public string? VmIpAddress { get; set; }
public string? VmSshKeyPath { get; set; }
/// <summary>
/// Per-component resolved endpoints for this provisioning job.
/// Keyed by component name: "Keycloak", "Vault", "Postgres", "Minio".
/// Built by ProvisioningWorker before the saga starts; OwnContainer host ports
/// are resolved and written back by InfrastructureProvisioningStep.
/// </summary>
public Dictionary<string, ResolvedEndpoint> ResolvedTopology { get; init; } =
new(StringComparer.OrdinalIgnoreCase);
/// <summary>
/// Absolute path to the generated docker-compose.yml for this tenant.
/// Non-null only for OwnContainer tenants.
/// </summary>
public string? ComposeFilePath { get; set; }
}
+2 -1
View File
@@ -4,7 +4,7 @@ using System.Text.Json.Serialization;
namespace ControlPlane.Core.Models;
public enum BuildStatus { Running, Succeeded, Failed }
public enum BuildKind { DockerImage, DotnetProject, NpmProject }
public enum BuildKind { DockerImage, DotnetProject, NpmProject, SolutionBuild }
/// <summary>
/// Persisted record of a single build run — image build, dotnet build, or npm build.
@@ -20,5 +20,6 @@ public class BuildRecord
public DateTimeOffset? FinishedAt { get; set; }
public int? DurationMs { get; set; }
public string? ImageDigest { get; set; } // populated for DockerImage builds
public string? CommitSha { get; set; } // HEAD SHA at build time
public List<string> Log { get; set; } = [];
}
+4 -1
View File
@@ -22,5 +22,8 @@ public enum ComponentMode
VpsDocker,
/// <summary>Own VM with the component running as a native OS process (no Docker).</summary>
VpsBareMetal
VpsBareMetal,
/// <summary>Component is not provisioned for this tenant (feature not elected).</summary>
Disabled
}
@@ -0,0 +1,28 @@
namespace ControlPlane.Core.Models;

/// <summary>
/// Kind of deviation a ladder branch shows relative to its upstream source.
/// Diverged = the branch carries commits its source does not (AheadOfSource &gt; 0).
/// Stale — presumably behind its source with promotions pending — confirm against the checker.
/// Missing — presumably the branch does not exist in the repo — confirm against the checker.
/// </summary>
public enum ConformanceViolation { OK, Missing, Diverged, Stale }

/// <summary>Operator-facing severity bucket assigned to a conformance violation.</summary>
public enum ConformanceSeverity { OK, Info, Warning, Critical }

/// <summary>
/// The conformance state of one branch in the TBD ladder relative to its upstream source.
/// </summary>
public record BranchConformanceCheck(
    string Branch,
    string? SourceBranch,          // the upstream branch this is derived from (null for trunk)
    ConformanceViolation Violation,
    ConformanceSeverity Severity,
    string Detail,
    int AheadOfSource,             // commits this branch has that source doesn't — diverged
    int BehindSource,              // commits source has that this branch doesn't — pending promotion
    string? FixSha                 // source tip SHA — used when resetting to fix divergence
);

/// <summary>
/// Full TBD conformance report for a single repository.
/// IsConformant = no Diverged or Missing violations exist.
/// </summary>
public record ConformanceReport(
    string Repo,
    bool IsConformant,
    BranchConformanceCheck[] Checks
);
@@ -16,6 +16,8 @@ public class ReleaseRecord
public DateTimeOffset StartedAt { get; set; } = DateTimeOffset.UtcNow;
public DateTimeOffset? FinishedAt { get; set; }
public List<TenantReleaseResult> Tenants { get; set; } = [];
public List<string> OpcNumbers { get; set; } = [];
public string? CommitSha { get; set; } // Clarity branch HEAD SHA at release time
}
public class TenantReleaseResult
@@ -0,0 +1,53 @@
namespace ControlPlane.Core.Models;

/// <summary>
/// The fully-resolved network addresses for one infrastructure component for a specific tenant.
/// Built by ProvisioningWorker at job start from StackConfig + ClarityInfraOptions.
/// Carried through SagaContext and persisted in TenantRecord at saga completion.
///
/// Design principle: Clarity.Server always talks to PublicUrl (goes through nginx/dnsmasq).
/// The Worker uses AdminUrl (direct host-accessible URL) for admin API calls during provisioning.
/// InternalUrl is injected into container env vars for container-to-container communication.
///
/// Immutable init-only record — updates are made via non-destructive <c>with</c> expressions
/// (e.g. rewriting AdminUrl once an ephemeral host port is known).
/// </summary>
public sealed record ResolvedEndpoint
{
    /// <summary>Mode elected for this component.</summary>
    public ComponentMode Mode { get; init; }

    /// <summary>
    /// URL the Worker process uses to call this component's admin API.
    /// Worker runs on the host machine:
    ///   SharedPlatform → http://localhost:{exposedPort} (docker-compose exposes to host)
    ///   OwnContainer   → http://localhost:{ephemeralPort} (resolved by InfrastructureProvisioningStep)
    ///   VPS            → operator-supplied external URL
    /// </summary>
    public string AdminUrl { get; init; } = string.Empty;

    /// <summary>
    /// Public DNS URL injected into Clarity.Server and surfaced in the TenantRecord.
    /// Always routes through nginx/dnsmasq — no direct Docker DNS leaks to app code.
    ///   SharedPlatform → https://keycloak.clarity.test
    ///   OwnContainer   → https://kc.{subdomain}.clarity.test
    /// </summary>
    public string PublicUrl { get; init; } = string.Empty;

    /// <summary>
    /// Docker-internal URL for container-to-container communication on the managed network.
    ///   SharedPlatform → http://keycloak:8080
    ///   OwnContainer   → http://kc-{subdomain}:8080
    /// </summary>
    public string InternalUrl { get; init; } = string.Empty;

    /// <summary>Docker container name, if the Worker manages this component; otherwise null.</summary>
    public string? ContainerName { get; init; }

    /// <summary>
    /// Admin username for this component instance.
    /// Null for SharedPlatform (read from Keycloak:AdminUser config at call time).
    /// Explicitly set for OwnContainer sidecars.
    /// </summary>
    public string? AdminUser { get; init; }

    /// <summary>Admin password for this component instance. Same null semantics as AdminUser.</summary>
    public string? AdminPassword { get; init; }
}
+231 -79
View File
@@ -1,46 +1,36 @@
using System.Text.Json;
using ControlPlane.Core.Models;
using Microsoft.Extensions.Configuration;
using Microsoft.Extensions.Logging;
using Npgsql;
namespace ControlPlane.Core.Services;
/// <summary>
/// Persists build and release history to JSON files in the ClientAssets folder.
/// Thread-safe — all writes go through a single lock per file.
/// Persists build and release history to opcdb (Postgres).
/// Replaces the previous JSON-file implementation.
/// NpgsqlDataSource is singleton and manages the connection pool; this service is safe to register as singleton.
/// </summary>
public class BuildHistoryService
public class BuildHistoryService(NpgsqlDataSource db, ILogger<BuildHistoryService> logger)
{
private readonly string _buildsPath;
private readonly string _releasesPath;
private readonly ILogger<BuildHistoryService> _logger;
private static readonly SemaphoreSlim _buildLock = new(1, 1);
private static readonly SemaphoreSlim _releaseLock = new(1, 1);
private static readonly JsonSerializerOptions JsonOpts = new()
{
WriteIndented = true,
PropertyNamingPolicy = JsonNamingPolicy.CamelCase,
Converters = { new System.Text.Json.Serialization.JsonStringEnumConverter() },
};
public BuildHistoryService(IConfiguration config, ILogger<BuildHistoryService> logger)
{
var folder = config["ClientAssets__Folder"] ?? config["ClientAssets:Folder"]
?? Path.GetFullPath(Path.Combine(AppContext.BaseDirectory, "..", "ClientAssets"));
Directory.CreateDirectory(folder);
_buildsPath = Path.Combine(folder, "builds.json");
_releasesPath = Path.Combine(folder, "releases.json");
_logger = logger;
}
// ── Builds ──────────────────────────────────────────────────────────────
public async Task<BuildRecord> CreateBuildAsync(BuildKind kind, string target)
{
var record = new BuildRecord { Kind = kind, Target = target };
await SaveBuildAsync(record);
await using var cmd = db.CreateCommand("""
INSERT INTO build_record (id, kind, target, status, started_at, commit_sha, log)
VALUES ($1, $2, $3, $4, $5, $6, $7)
""");
cmd.Parameters.AddWithValue(record.Id);
cmd.Parameters.AddWithValue(record.Kind.ToString());
cmd.Parameters.AddWithValue(record.Target);
cmd.Parameters.AddWithValue(record.Status.ToString());
cmd.Parameters.AddWithValue(record.StartedAt);
cmd.Parameters.AddWithValue((object?)record.CommitSha ?? DBNull.Value);
cmd.Parameters.AddWithValue(string.Empty);
await cmd.ExecuteNonQueryAsync();
return record;
}
@@ -50,89 +40,251 @@ public class BuildHistoryService
record.FinishedAt = DateTimeOffset.UtcNow;
record.DurationMs = (int)(record.FinishedAt.Value - record.StartedAt).TotalMilliseconds;
record.ImageDigest = digest;
await SaveBuildAsync(record);
await using var cmd = db.CreateCommand("""
UPDATE build_record
SET status = $2, finished_at = $3, duration_ms = $4, image_digest = $5, commit_sha = $6, log = $7
WHERE id = $1
""");
cmd.Parameters.AddWithValue(record.Id);
cmd.Parameters.AddWithValue(record.Status.ToString());
cmd.Parameters.AddWithValue(record.FinishedAt!.Value);
cmd.Parameters.AddWithValue((object?)record.DurationMs ?? DBNull.Value);
cmd.Parameters.AddWithValue((object?)record.ImageDigest ?? DBNull.Value);
cmd.Parameters.AddWithValue((object?)record.CommitSha ?? DBNull.Value);
cmd.Parameters.AddWithValue(string.Join('\n', record.Log));
await cmd.ExecuteNonQueryAsync();
}
public async Task AppendBuildLogAsync(BuildRecord record, string line)
{
record.Log.Add(line);
// Flush to disk every 20 lines to avoid excessive I/O but keep reasonable freshness
// Flush to Postgres every 20 lines — keeps the live log queryable without hammering the DB
if (record.Log.Count % 20 == 0)
await SaveBuildAsync(record);
await FlushLogAsync(record);
}
// Persists the build's entire accumulated log (newline-joined) into its row.
// Note: each flush rewrites the full log text, not a delta.
private async Task FlushLogAsync(BuildRecord record)
{
    var joined = string.Join('\n', record.Log);
    await using var update = db.CreateCommand("UPDATE build_record SET log = $2 WHERE id = $1");
    update.Parameters.AddWithValue(record.Id);
    update.Parameters.AddWithValue(joined);
    await update.ExecuteNonQueryAsync();
}
public async Task<List<BuildRecord>> GetBuildsAsync()
{
await _buildLock.WaitAsync();
try { return LoadJson<BuildRecord>(_buildsPath); }
finally { _buildLock.Release(); }
var result = new List<BuildRecord>();
await using var cmd = db.CreateCommand("""
SELECT id, kind, target, status, started_at, finished_at, duration_ms, image_digest, commit_sha, log
FROM build_record
ORDER BY started_at DESC
LIMIT 100
""");
await using var reader = await cmd.ExecuteReaderAsync();
while (await reader.ReadAsync())
{
var logText = reader.IsDBNull(9) ? "" : reader.GetString(9);
result.Add(new BuildRecord
{
Id = reader.GetString(0),
Kind = Enum.Parse<BuildKind>(reader.GetString(1)),
Target = reader.GetString(2),
Status = Enum.Parse<BuildStatus>(reader.GetString(3)),
StartedAt = reader.GetFieldValue<DateTimeOffset>(4),
FinishedAt = reader.IsDBNull(5) ? null : reader.GetFieldValue<DateTimeOffset>(5),
DurationMs = reader.IsDBNull(6) ? null : reader.GetInt32(6),
ImageDigest = reader.IsDBNull(7) ? null : reader.GetString(7),
CommitSha = reader.IsDBNull(8) ? null : reader.GetString(8),
Log = logText.Length == 0 ? [] : [.. logText.Split('\n')],
});
}
private async Task SaveBuildAsync(BuildRecord record)
{
await _buildLock.WaitAsync();
try
{
var all = LoadJson<BuildRecord>(_buildsPath);
var idx = all.FindIndex(b => b.Id == record.Id);
if (idx >= 0) all[idx] = record;
else all.Insert(0, record);
// Keep last 100 builds
if (all.Count > 100) all = all[..100];
await File.WriteAllTextAsync(_buildsPath, JsonSerializer.Serialize(all, JsonOpts));
return result;
}
finally { _buildLock.Release(); }
// ── Builds by SHA ────────────────────────────────────────────────────────
/// <summary>Returns all build records whose <c>commit_sha</c> exactly matches <paramref name="sha"/>.</summary>
public async Task<List<BuildRecord>> GetBuildsByShaAsync(string sha)
{
    await using var cmd = db.CreateCommand("""
        SELECT id, kind, target, status, started_at, finished_at, duration_ms, image_digest, commit_sha, log
        FROM build_record
        WHERE commit_sha = $1
        ORDER BY started_at DESC
        """);
    cmd.Parameters.AddWithValue(sha);

    var builds = new List<BuildRecord>();
    await using var reader = await cmd.ExecuteReaderAsync();
    while (await reader.ReadAsync())
    {
        // Log is stored as a single newline-joined text column; split back into lines.
        var rawLog = reader.IsDBNull(9) ? "" : reader.GetString(9);
        builds.Add(new BuildRecord
        {
            Id = reader.GetString(0),
            Kind = Enum.Parse<BuildKind>(reader.GetString(1)),
            Target = reader.GetString(2),
            Status = Enum.Parse<BuildStatus>(reader.GetString(3)),
            StartedAt = reader.GetFieldValue<DateTimeOffset>(4),
            FinishedAt = reader.IsDBNull(5) ? null : reader.GetFieldValue<DateTimeOffset>(5),
            DurationMs = reader.IsDBNull(6) ? null : reader.GetInt32(6),
            ImageDigest = reader.IsDBNull(7) ? null : reader.GetString(7),
            CommitSha = reader.IsDBNull(8) ? null : reader.GetString(8),
            Log = rawLog.Length == 0 ? [] : [.. rawLog.Split('\n')],
        });
    }
    return builds;
}
// ── Releases ────────────────────────────────────────────────────────────
public async Task<ReleaseRecord> CreateReleaseAsync(string environment, string imageName)
public async Task<ReleaseRecord> CreateReleaseAsync(string environment, string imageName, string? commitSha = null)
{
var record = new ReleaseRecord { Environment = environment, ImageName = imageName };
await SaveReleaseAsync(record);
var record = new ReleaseRecord { Environment = environment, ImageName = imageName, CommitSha = commitSha };
await using var cmd = db.CreateCommand("""
INSERT INTO release_record (id, environment, image_name, status, started_at, opc_numbers, commit_sha)
VALUES ($1, $2, $3, $4, $5, $6, $7)
""");
cmd.Parameters.AddWithValue(record.Id);
cmd.Parameters.AddWithValue(record.Environment);
cmd.Parameters.AddWithValue(record.ImageName);
cmd.Parameters.AddWithValue(record.Status.ToString());
cmd.Parameters.AddWithValue(record.StartedAt);
cmd.Parameters.Add(new NpgsqlParameter<string[]> { TypedValue = [.. record.OpcNumbers] });
cmd.Parameters.AddWithValue((object?)record.CommitSha ?? DBNull.Value);
await cmd.ExecuteNonQueryAsync();
return record;
}
public async Task UpdateReleaseAsync(ReleaseRecord record)
{
record.FinishedAt = DateTimeOffset.UtcNow;
await SaveReleaseAsync(record);
await using var conn = await db.OpenConnectionAsync();
await using var tx = await conn.BeginTransactionAsync();
await using var upd = new NpgsqlCommand("""
UPDATE release_record SET status = $2, finished_at = $3, opc_numbers = $4, commit_sha = $5 WHERE id = $1
""", conn, tx);
upd.Parameters.AddWithValue(record.Id);
upd.Parameters.AddWithValue(record.Status.ToString());
upd.Parameters.AddWithValue(record.FinishedAt!.Value);
upd.Parameters.Add(new NpgsqlParameter<string[]> { TypedValue = [.. record.OpcNumbers] });
upd.Parameters.AddWithValue((object?)record.CommitSha ?? DBNull.Value);
await upd.ExecuteNonQueryAsync();
// Replace tenant results wholesale on each update
await using var del = new NpgsqlCommand(
"DELETE FROM release_tenant_result WHERE release_id = $1", conn, tx);
del.Parameters.AddWithValue(record.Id);
await del.ExecuteNonQueryAsync();
foreach (var t in record.Tenants)
{
await using var ins = new NpgsqlCommand("""
INSERT INTO release_tenant_result (release_id, subdomain, container_name, success, error)
VALUES ($1, $2, $3, $4, $5)
""", conn, tx);
ins.Parameters.AddWithValue(record.Id);
ins.Parameters.AddWithValue(t.Subdomain);
ins.Parameters.AddWithValue(t.ContainerName);
ins.Parameters.AddWithValue(t.Success);
ins.Parameters.AddWithValue((object?)t.Error ?? DBNull.Value);
await ins.ExecuteNonQueryAsync();
}
await tx.CommitAsync();
}
public async Task<List<ReleaseRecord>> GetReleasesAsync()
{
await _releaseLock.WaitAsync();
try { return LoadJson<ReleaseRecord>(_releasesPath); }
finally { _releaseLock.Release(); }
var ordered = new List<ReleaseRecord>();
var lookup = new Dictionary<string, ReleaseRecord>();
await using var cmd = db.CreateCommand("""
SELECT id, environment, image_name, status, started_at, finished_at, opc_numbers, commit_sha
FROM release_record
ORDER BY started_at DESC
LIMIT 50
""");
await using (var reader = await cmd.ExecuteReaderAsync())
{
while (await reader.ReadAsync())
{
var r = new ReleaseRecord
{
Id = reader.GetString(0),
Environment = reader.GetString(1),
ImageName = reader.GetString(2),
Status = Enum.Parse<ReleaseStatus>(reader.GetString(3)),
StartedAt = reader.GetFieldValue<DateTimeOffset>(4),
FinishedAt = reader.IsDBNull(5) ? null : reader.GetFieldValue<DateTimeOffset>(5),
OpcNumbers = reader.IsDBNull(6) ? [] : [.. reader.GetFieldValue<string[]>(6)],
CommitSha = reader.IsDBNull(7) ? null : reader.GetString(7),
};
ordered.Add(r);
lookup[r.Id] = r;
}
}
private async Task SaveReleaseAsync(ReleaseRecord record)
{
await _releaseLock.WaitAsync();
try
{
var all = LoadJson<ReleaseRecord>(_releasesPath);
var idx = all.FindIndex(r => r.Id == record.Id);
if (idx >= 0) all[idx] = record;
else all.Insert(0, record);
if (lookup.Count == 0) return [];
if (all.Count > 50) all = all[..50];
await File.WriteAllTextAsync(_releasesPath, JsonSerializer.Serialize(all, JsonOpts));
}
finally { _releaseLock.Release(); }
// Load all tenant results for the fetched release IDs in one query
await using var cmd2 = db.CreateCommand("""
SELECT release_id, subdomain, container_name, success, error
FROM release_tenant_result
WHERE release_id = ANY($1)
""");
cmd2.Parameters.Add(new NpgsqlParameter<string[]> { TypedValue = [.. lookup.Keys] });
await using var reader2 = await cmd2.ExecuteReaderAsync();
while (await reader2.ReadAsync())
{
if (lookup.TryGetValue(reader2.GetString(0), out var r))
r.Tenants.Add(new TenantReleaseResult
{
Subdomain = reader2.GetString(1),
ContainerName = reader2.GetString(2),
Success = reader2.GetBoolean(3),
Error = reader2.IsDBNull(4) ? null : reader2.GetString(4),
});
}
// ── Helpers ─────────────────────────────────────────────────────────────
private static List<T> LoadJson<T>(string path)
{
if (!File.Exists(path)) return [];
try
{
var json = File.ReadAllText(path);
return JsonSerializer.Deserialize<List<T>>(json, JsonOpts) ?? [];
return ordered;
}
catch { return []; }
/// <summary>
/// Returns the most recent succeeded release for <paramref name="environment"/>, or null if none exists.
/// Used to calculate the OPC ticket delta between releases (previousSha..currentSha).
/// </summary>
public async Task<ReleaseRecord?> GetLastSuccessfulReleaseForEnvAsync(string environment)
{
    await using var query = db.CreateCommand("""
        SELECT id, environment, image_name, status, started_at, finished_at, opc_numbers, commit_sha
        FROM release_record
        WHERE environment = $1 AND status = 'Succeeded'
        ORDER BY started_at DESC
        LIMIT 1
        """);
    query.Parameters.AddWithValue(environment);

    await using var row = await query.ExecuteReaderAsync();
    if (!await row.ReadAsync())
        return null;

    // Tenant results are intentionally not loaded here — only the release header
    // fields are needed for the SHA-delta calculation.
    return new ReleaseRecord
    {
        Id = row.GetString(0),
        Environment = row.GetString(1),
        ImageName = row.GetString(2),
        Status = Enum.Parse<ReleaseStatus>(row.GetString(3)),
        StartedAt = row.GetFieldValue<DateTimeOffset>(4),
        FinishedAt = row.IsDBNull(5) ? null : row.GetFieldValue<DateTimeOffset>(5),
        OpcNumbers = row.IsDBNull(6) ? [] : [.. row.GetFieldValue<string[]>(6)],
        CommitSha = row.IsDBNull(7) ? null : row.GetString(7),
    };
}
}
@@ -1,5 +1,8 @@
using System.Diagnostics;
using System.Text;
using ControlPlane.Core.Config;
using ControlPlane.Core.Messages;
using ControlPlane.Core.Models;
using Docker.DotNet;
using Docker.DotNet.Models;
using MassTransit;
@@ -85,6 +88,16 @@ public class ClarityContainerService(
{
NetworkMode = Infra.Network,
RestartPolicy = new RestartPolicy { Name = RestartPolicyKind.UnlessStopped },
// Map *.clarity.test domains to the Docker host gateway so that Clarity.Server,
// running inside a container, can reach nginx (which routes *.clarity.test).
// This is required for Keycloak OIDC discovery and JWT iss-claim validation —
// Keycloak issues tokens with iss=https://keycloak.clarity.test/realms/...
// and Clarity.Server must be able to reach that URL for OIDC metadata.
ExtraHosts =
[
$"keycloak.{Infra.Domain}:host-gateway",
$"{subdomain}.{Infra.Domain}:host-gateway",
],
},
Labels = new Dictionary<string, string>
{
@@ -92,13 +105,16 @@ public class ClarityContainerService(
["clarity.subdomain"] = subdomain,
["clarity.siteCode"] = siteCode,
["clarity.env"] = environment,
// Groups containers in Docker Desktop by environment tier (fdev / uat / prod).
["com.docker.compose.project"] = $"clarity-{environment.ToLowerInvariant()}",
["com.docker.compose.service"] = name,
},
}, cancellationToken);
// Ensure Keycloak and Vault are reachable on the managed network via their Docker DNS aliases.
// Aspire places them on its own bridge; tenant containers on clarity-net need them aliased here.
await EnsureContainerOnNetworkAsync(docker, "keycloak", Infra.Network, "keycloak", cancellationToken);
await EnsureContainerOnNetworkAsync(docker, "vault", Infra.Network, "vault", cancellationToken);
await EnsureContainerOnNetworkAsync(docker, "clarity-keycloak", Infra.Network, "keycloak", cancellationToken);
await EnsureContainerOnNetworkAsync(docker, "clarity-vault", Infra.Network, "vault", cancellationToken);
var started = await docker.Containers.StartContainerAsync(container.ID, null, cancellationToken);
if (!started)
@@ -107,6 +123,7 @@ public class ClarityContainerService(
logger.LogInformation("Started container {Name} on {Network} (image: {Image})", name, Infra.Network, ImageName);
await WriteNginxConfigAsync(subdomain, name, jobId, cancellationToken);
await WriteComposeArtifactAsync(environment, subdomain, keycloakRealm, name, cancellationToken);
return name;
}
@@ -230,14 +247,13 @@ public class ClarityContainerService(
{
using var docker = CreateClient();
// Find the nginx container by image name — Aspire appends a random suffix to the name
// so we can't rely on the static name "nginx".
// Find the nginx container by name — platform infra always uses "clarity-nginx".
var containers = await docker.Containers.ListContainersAsync(
new ContainersListParameters
{
Filters = new Dictionary<string, IDictionary<string, bool>>
{
["ancestor"] = new Dictionary<string, bool> { ["nginx"] = true }
["name"] = new Dictionary<string, bool> { ["clarity-nginx"] = true }
}
}, ct);
@@ -355,4 +371,353 @@ public class ClarityContainerService(
logger.LogWarning(ex, "Could not connect '{Container}' to '{Network}' — tenant JWT validation may fail.", containerName, network);
}
}
// ── ClientAssets / compose artifact helpers ──────────────────────────────
// Resolves the per-tenant ClientAssets directory: {root}/{subdomain}.
// Root comes from ClientAssets__Folder / ClientAssets:Folder configuration, falling
// back to a ../ClientAssets sibling of the application base directory.
private string ClientAssetsFolder(string subdomain)
{
    var configured = config["ClientAssets__Folder"] ?? config["ClientAssets:Folder"];
    var root = configured
        ?? Path.GetFullPath(Path.Combine(AppContext.BaseDirectory, "..", "ClientAssets"));
    return Path.Combine(root, subdomain);
}
/// <summary>
/// Writes a docker-compose.yml to ClientAssets/{subdomain}/ documenting the SharedPlatform
/// clarity-server deployment. The file is an audit artifact — it is NOT executed by the Worker.
/// </summary>
/// <param name="environment">Environment tier label written into the container labels.</param>
/// <param name="subdomain">Tenant subdomain; selects the ClientAssets folder and names the service.</param>
/// <param name="keycloakRealm">Realm name written into the Keycloak__Realm env var.</param>
/// <param name="containerName">Name of the already-running app container being documented.</param>
/// <param name="ct">Cancellation token for the file write.</param>
private async Task WriteComposeArtifactAsync(
    string environment,
    string subdomain,
    string keycloakRealm,
    string containerName,
    CancellationToken ct)
{
    var folder = ClientAssetsFolder(subdomain);
    try
    {
        Directory.CreateDirectory(folder);
        // $$$-interpolated raw string: only {{{...}}} interpolates, so any single- or
        // double-brace text in the YAML passes through literally.
        var content = $$$"""
            # Auto-generated by ControlPlane.Worker — do not edit manually.
            # Tenant: {{{subdomain}}}
            # Tier: SharedPlatform
            # Generated: {{{DateTimeOffset.UtcNow:O}}}
            name: clarity-{{{subdomain}}}
            services:
              app-{{{subdomain}}}:
                image: {{{ImageName}}}
                restart: unless-stopped
                container_name: {{{containerName}}}
                environment:
                  ASPNETCORE_ENVIRONMENT: Production
                  ASPNETCORE_URLS: http://+:8080
                  TenantSubdomain: {{{subdomain}}}
                  Keycloak__BaseUrl: {{{Infra.KeycloakPublicUrl}}}
                  Keycloak__InternalUrl: {{{Infra.KeycloakInternalUrl}}}
                  Keycloak__Realm: {{{keycloakRealm}}}
                  Vault__Address: {{{Infra.VaultInternalUrl}}}
                  # ConnectionStrings__postgresdb: (persisted in TenantRecord)
                networks:
                  - clarity-net
                extra_hosts:
                  - "keycloak.{{{Infra.Domain}}}:host-gateway"
                  - "{{{subdomain}}}.{{{Infra.Domain}}}:host-gateway"
                labels:
                  clarity.managed: "true"
                  clarity.subdomain: {{{subdomain}}}
                  clarity.env: {{{environment}}}
            networks:
              clarity-net:
                external: true
            """;
        var composePath = Path.Combine(folder, "docker-compose.yml");
        await File.WriteAllTextAsync(composePath, content, ct);
        logger.LogInformation("Wrote compose artifact for {Subdomain} → {Path}", subdomain, composePath);
    }
    catch (Exception ex)
    {
        // Non-fatal — the container is already running; the artifact is an audit record.
        logger.LogWarning(ex, "Could not write compose artifact for {Subdomain}.", subdomain);
    }
}
// ── OwnContainer — sidecar lifecycle ─────────────────────────────────────
/// <summary>
/// OwnContainer tier — renders the per-tenant docker-compose.yml for the elected sidecar
/// services (Keycloak, Vault, Postgres, MinIO per StackConfig), writes it to
/// ClientAssets/{subdomain}/docker-compose.yml, then brings the stack up with
/// <c>docker compose up -d</c>.
/// </summary>
/// <param name="job">Provisioning job carrying the subdomain and stack election.</param>
/// <param name="topology">Resolved endpoints for the job (currently not read by this method).</param>
/// <param name="ct">Cancellation token flowed to the file write and compose invocation.</param>
/// <returns>The absolute path to the generated compose file.</returns>
public async Task<string> GenerateAndRunSidecarsAsync(
    ProvisioningJob job,
    Dictionary<string, ResolvedEndpoint> topology,
    CancellationToken ct)
{
    var tenantFolder = ClientAssetsFolder(job.Subdomain);
    Directory.CreateDirectory(tenantFolder);

    var composeFile = Path.Combine(tenantFolder, "docker-compose.yml");
    var yaml = BuildSidecarCompose(job);
    await File.WriteAllTextAsync(composeFile, yaml, ct);
    logger.LogInformation("[{JobId}] Wrote sidecar compose → {Path}", job.Id, composeFile);

    await RunDockerComposeAsync(composeFile, "up -d", job.Id, ct);
    logger.LogInformation("[{JobId}] Sidecar containers started.", job.Id);
    return composeFile;
}
/// <summary>
/// After sidecars are started, inspects each OwnContainer component's Docker container,
/// finds its first published host port, and rewrites that component's topology entry so
/// AdminUrl points at <c>http://localhost:{hostPort}</c> for downstream saga steps.
/// Failures are logged per component and never abort the loop.
/// </summary>
public async Task UpdateTopologyWithHostPortsAsync(
    Dictionary<string, ResolvedEndpoint> topology,
    CancellationToken ct)
{
    using var docker = CreateClient();

    // Snapshot the entries so the dictionary can be rewritten while iterating.
    foreach (var (component, endpoint) in topology.ToList())
    {
        if (endpoint.Mode != ComponentMode.OwnContainer ||
            string.IsNullOrWhiteSpace(endpoint.ContainerName))
        {
            continue;
        }

        try
        {
            var inspect = await docker.Containers.InspectContainerAsync(endpoint.ContainerName, ct);
            var binding = inspect.NetworkSettings.Ports
                .SelectMany(p => p.Value ?? [])
                .FirstOrDefault(b => !string.IsNullOrWhiteSpace(b.HostPort));

            if (binding is null)
            {
                logger.LogWarning("No host port binding found for {Component} container {Name}.", component, endpoint.ContainerName);
                continue;
            }

            topology[component] = endpoint with { AdminUrl = $"http://localhost:{binding.HostPort}" };
            logger.LogInformation("Resolved {Component} host port → {Url}", component, topology[component].AdminUrl);
        }
        catch (Exception ex)
        {
            logger.LogWarning(ex, "Could not resolve host port for {Component} container {Name}.", component, endpoint.ContainerName);
        }
    }
}
/// <summary>
/// Tears down all sidecar containers (and their volumes) for a tenant by running
/// <c>docker compose down -v</c> against the stored compose file.
/// Called from InfrastructureProvisioningStep.CompensateAsync. A missing compose
/// file is treated as "nothing to do" and only logged.
/// </summary>
public async Task TearDownComposeProjectAsync(string subdomain, CancellationToken ct)
{
    var composePath = Path.Combine(ClientAssetsFolder(subdomain), "docker-compose.yml");
    if (File.Exists(composePath))
    {
        await RunDockerComposeAsync(composePath, "down -v", Guid.Empty, ct);
        logger.LogInformation("Tore down sidecar containers for {Subdomain}.", subdomain);
    }
    else
    {
        logger.LogWarning("No compose file found for {Subdomain} — nothing to tear down.", subdomain);
    }
}
/// <summary>
/// Builds the docker-compose YAML content for OwnContainer sidecar services.
/// Services are included conditionally based on StackConfig. clarity-net is
/// declared as an external network so all sidecars join the shared platform network.
///
/// All services include <c>extra_hosts: host-gateway</c> entries for *.clarity.test so that
/// intra-container calls that go through nginx (e.g. OIDC discovery) route correctly.
/// </summary>
/// <param name="job">Provisioning job carrying the subdomain, tier, and stack election.</param>
/// <returns>The complete compose-file text (not written to disk by this method).</returns>
private string BuildSidecarCompose(ProvisioningJob job)
{
    var s = job.Subdomain;
    var stack = job.StackConfig;
    var sb = new StringBuilder();

    // Header — single-$ interpolated raw string: {s}, {job.Tier} and the timestamp interpolate.
    sb.AppendLine($"""
        # Auto-generated by ControlPlane.Worker — do not edit manually.
        # Tenant: {s} | Tier: {job.Tier}
        # Generated: {DateTimeOffset.UtcNow:O}
        name: clarity-{s}
        services:
        """);

    // ── Postgres ──────────────────────────────────────────────────────────
    if (stack.Postgres == ComponentMode.OwnContainer)
    {
        // $$-interpolation: {{s}} interpolates while single-brace ${VAR:-default}
        // passes through literally for docker compose env-var substitution.
        // "127.0.0.1::5432" publishes an ephemeral host port bound to loopback only;
        // UpdateTopologyWithHostPortsAsync later resolves the assigned port.
        sb.AppendLine($$"""
              pg-{{s}}:
                image: postgres:16
                restart: unless-stopped
                environment:
                  POSTGRES_USER: clarity
                  POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-clarity-dev}
                  POSTGRES_DB: clarity
                expose:
                  - "5432"
                ports:
                  - "127.0.0.1::5432"
                healthcheck:
                  test: ["CMD-SHELL", "pg_isready -U clarity"]
                  interval: 10s
                  timeout: 5s
                  retries: 5
                networks:
                  - clarity-net
                labels:
                  clarity.managed: "true"
                  clarity.subdomain: {{s}}
                  clarity.component: postgres
              """);
    }

    // ── Keycloak ──────────────────────────────────────────────────────────
    if (stack.Keycloak == ComponentMode.OwnContainer)
    {
        var kcHostname = $"kc.{s}.{Infra.Domain}";
        // depends_on is only wired when the tenant also runs its own Postgres sidecar.
        // NOTE(review): multi-line interpolation of {{dependsBlock}} does not re-indent
        // continuation lines — confirm the generated YAML nests correctly under the service.
        var dependsBlock = stack.Postgres == ComponentMode.OwnContainer
            ? $"""
               depends_on:
                 pg-{s}:
                   condition: service_healthy
               """
            : string.Empty;
        // NOTE(review): KC_DB_URL_DATABASE is "keycloak" but the pg sidecar above only
        // creates POSTGRES_DB "clarity" — confirm the keycloak database is created elsewhere.
        sb.AppendLine($$"""
              kc-{{s}}:
                image: quay.io/keycloak/keycloak:latest
                restart: unless-stopped
                command: start-dev
                environment:
                  KEYCLOAK_ADMIN: admin
                  KEYCLOAK_ADMIN_PASSWORD: ${KEYCLOAK_ADMIN_PASSWORD:-admin}
                  KC_DB: postgres
                  KC_DB_URL_HOST: pg-{{s}}
                  KC_DB_URL_DATABASE: keycloak
                  KC_DB_USERNAME: clarity
                  KC_DB_PASSWORD: ${POSTGRES_PASSWORD:-clarity-dev}
                  KC_HOSTNAME: {{kcHostname}}
                  KC_HOSTNAME_STRICT: "false"
                  KC_HTTP_ENABLED: "true"
                expose:
                  - "8080"
                ports:
                  - "127.0.0.1::8080"
                networks:
                  - clarity-net
                extra_hosts:
                  - "{{kcHostname}}:host-gateway"
                {{dependsBlock}}
                labels:
                  clarity.managed: "true"
                  clarity.subdomain: {{s}}
                  clarity.component: keycloak
              """);
    }

    // ── Vault ─────────────────────────────────────────────────────────────
    if (stack.Vault == ComponentMode.OwnContainer)
    {
        // NOTE(review): VAULT_DEV_* env vars run Vault in dev mode (in-memory, auto-unsealed) —
        // confirm this is intended for tenant sidecars beyond local development.
        sb.AppendLine($$"""
              vault-{{s}}:
                image: hashicorp/vault:latest
                restart: unless-stopped
                cap_add:
                  - IPC_LOCK
                environment:
                  VAULT_DEV_ROOT_TOKEN_ID: ${VAULT_TOKEN:-vault-dev-root}
                  VAULT_DEV_LISTEN_ADDRESS: "0.0.0.0:8200"
                expose:
                  - "8200"
                ports:
                  - "127.0.0.1::8200"
                networks:
                  - clarity-net
                labels:
                  clarity.managed: "true"
                  clarity.subdomain: {{s}}
                  clarity.component: vault
              """);
    }

    // ── MinIO ─────────────────────────────────────────────────────────────
    if (stack.Minio == ComponentMode.OwnContainer)
    {
        // 9000 = S3 API, 9001 = web console; both published on ephemeral loopback ports.
        sb.AppendLine($$"""
              minio-{{s}}:
                image: minio/minio:latest
                restart: unless-stopped
                command: server /data --console-address ":9001"
                environment:
                  MINIO_ROOT_USER: ${MINIO_ROOT_USER:-minio}
                  MINIO_ROOT_PASSWORD: ${MINIO_ROOT_PASSWORD:-minio-dev}
                expose:
                  - "9000"
                  - "9001"
                ports:
                  - "127.0.0.1::9000"
                  - "127.0.0.1::9001"
                networks:
                  - clarity-net
                labels:
                  clarity.managed: "true"
                  clarity.subdomain: {{s}}
                  clarity.component: minio
              """);
    }

    // Footer — join the pre-existing shared platform network rather than creating one.
    sb.AppendLine("""
        networks:
          clarity-net:
            external: true
        """);
    return sb.ToString();
}
/// <summary>
/// Runs <c>docker compose -f {composePath} {args}</c> as a child process.
/// Streams stdout/stderr to the logger and throws on non-zero exit.
/// </summary>
/// <param name="composePath">Compose file; its directory becomes the working directory.</param>
/// <param name="args">Compose subcommand and flags, e.g. "up -d" or "down -v".</param>
/// <param name="jobId">Job correlation id (currently unused inside this method).</param>
/// <param name="ct">Cancellation token for stream reads and the exit wait.</param>
private async Task RunDockerComposeAsync(string composePath, string args, Guid jobId, CancellationToken ct)
{
    var startInfo = new ProcessStartInfo("docker")
    {
        Arguments = $"compose -f \"{composePath}\" {args}",
        WorkingDirectory = Path.GetDirectoryName(composePath)!,
        RedirectStandardOutput = true,
        RedirectStandardError = true,
        UseShellExecute = false,
    };

    using var process = Process.Start(startInfo)
        ?? throw new InvalidOperationException("Failed to start docker compose process.");

    // Begin draining both streams before waiting for exit so large output
    // cannot fill the pipe buffers and deadlock the child process.
    var outputTask = process.StandardOutput.ReadToEndAsync(ct);
    var errorTask = process.StandardError.ReadToEndAsync(ct);
    await process.WaitForExitAsync(ct);
    var stdout = await outputTask;
    var stderr = await errorTask;

    // docker compose writes progress to stderr, so both streams log at Information.
    if (!string.IsNullOrWhiteSpace(stdout))
        logger.LogInformation("[docker compose] {Output}", stdout.Trim());
    if (!string.IsNullOrWhiteSpace(stderr))
        logger.LogInformation("[docker compose stderr] {Output}", stderr.Trim());

    if (process.ExitCode != 0)
        throw new InvalidOperationException(
            $"'docker compose {args}' exited with code {process.ExitCode}. See logs for details.");
}
}
@@ -28,6 +28,28 @@ public class KeycloakAdminClient
_http = new HttpClient { BaseAddress = new Uri(_baseUrl) };
}
/// <summary>
/// Creates a KeycloakAdminClient for a specific base URL and credentials.
/// Used by KeycloakStep to target SharedPlatform or OwnContainer Keycloak instances
/// using the resolved topology rather than static DI configuration.
/// </summary>
/// <param name="adminUrl">Admin-API base URL; a trailing slash is trimmed before use.</param>
/// <param name="adminUser">Admin username stored for subsequent admin-API authorization.</param>
/// <param name="adminPassword">Admin password stored alongside the username.</param>
/// <param name="logger">Logger for this client instance.</param>
public static KeycloakAdminClient ForUrl(
    string adminUrl, string adminUser, string adminPassword,
    ILogger<KeycloakAdminClient> logger)
    => new(adminUrl, adminUser, adminPassword, logger);

// Private constructor backing ForUrl — bypasses the configuration-driven public path.
private KeycloakAdminClient(
    string adminUrl, string adminUser, string adminPassword,
    ILogger<KeycloakAdminClient> logger)
{
    _logger = logger;
    _baseUrl = adminUrl.TrimEnd('/');
    _adminUser = adminUser;
    _adminPassword = adminPassword;
    // Logs the base URL and admin username (never the password) for provisioning diagnostics.
    _logger.LogInformation("KeycloakAdminClient base URL: {Url}, user: {User}", _baseUrl, _adminUser);
    // NOTE(review): each client instance creates its own HttpClient — acceptable for
    // per-provisioning-job lifetimes, but confirm instances are not created per request.
    _http = new HttpClient { BaseAddress = new Uri(_baseUrl) };
}
private async Task AuthorizeAsync(CancellationToken ct)
{
var form = new FormUrlEncodedContent(new Dictionary<string, string>
@@ -0,0 +1,108 @@
using ControlPlane.Core.Config;
using ControlPlane.Core.Interfaces;
using ControlPlane.Core.Models;
using ControlPlane.Worker.Services;
using Microsoft.Extensions.Options;
namespace ControlPlane.Worker.Steps;
/// <summary>
/// First saga step — surfaces as "Infrastructure Provisioning" in the frontend step tracker.
///
/// Fully-SharedPlatform tenants: only health-probes the shared Keycloak and Vault so
/// the saga fails fast with an actionable message when infra/docker-compose.yml is not
/// running, instead of timing out later in KeycloakStep with an opaque connection error.
///
/// OwnContainer tenants (Dedicated / Enterprise tiers): renders a per-tenant
/// docker-compose.yml under ClientAssets/{subdomain}/, brings up the elected sidecars
/// (Keycloak, Vault, Postgres, MinIO), then writes the ephemeral host ports into
/// SagaContext.ResolvedTopology so downstream steps can reach sidecar admin APIs.
/// </summary>
public class InfrastructureProvisioningStep(
    ClarityContainerService containers,
    IConfiguration config,
    IOptions<ClarityInfraOptions> infraOptions,
    ILogger<InfrastructureProvisioningStep> logger) : ISagaStep
{
    public string StepName => "Infrastructure Provisioning";

    /// <summary>
    /// Executes the provisioning work and records the
    /// <c>CompletedSteps.InfrastructureProvisioned</c> flag on success.
    /// </summary>
    public async Task ExecuteAsync(SagaContext context, CancellationToken ct)
    {
        var job = context.Job;
        var stack = job.StackConfig;

        // The fast (probe-only) path applies only when every component is shared.
        var isFullyShared =
            stack.Keycloak == ComponentMode.SharedPlatform &&
            stack.Vault == ComponentMode.SharedPlatform &&
            stack.Postgres == ComponentMode.SharedPlatform &&
            stack.Minio == ComponentMode.SharedPlatform;

        if (!isFullyShared)
        {
            logger.LogInformation("[{JobId}] OwnContainer tier — generating compose manifest and starting sidecars.", job.Id);

            // Render + start sidecars, remember the manifest path for compensation,
            // then resolve ephemeral host ports for downstream steps.
            var manifestPath = await containers.GenerateAndRunSidecarsAsync(job, context.ResolvedTopology, ct);
            context.ComposeFilePath = manifestPath;
            await containers.UpdateTopologyWithHostPortsAsync(context.ResolvedTopology, ct);

            logger.LogInformation("[{JobId}] Sidecars started. Compose file: {File}", job.Id, manifestPath);
        }
        else
        {
            logger.LogInformation("[{JobId}] SharedPlatform tier — verifying platform services are reachable.", job.Id);
            await VerifySharedPlatformAsync(context, ct);
        }

        job.CompletedSteps |= CompletedSteps.InfrastructureProvisioned;
    }

    /// <summary>
    /// Rollback: tears down any sidecar containers this step started. A run that
    /// never set <c>ComposeFilePath</c> (the SharedPlatform path) has nothing to undo.
    /// </summary>
    public async Task CompensateAsync(SagaContext context, CancellationToken ct)
    {
        if (string.IsNullOrWhiteSpace(context.ComposeFilePath))
            return;

        logger.LogWarning("[{JobId}] Compensating: tearing down sidecar containers.", context.Job.Id);
        await containers.TearDownComposeProjectAsync(context.Job.Subdomain, ct);
    }

    // ── SharedPlatform health probes ─────────────────────────────────────────

    /// <summary>
    /// Probes the shared Keycloak and Vault endpoints, preferring URLs from the
    /// resolved topology and falling back to the default local ports.
    /// </summary>
    private async Task VerifySharedPlatformAsync(SagaContext context, CancellationToken ct)
    {
        // Fall back to the well-known local ports when the topology has no entry.
        string AdminUrlOrDefault(string component, string fallback) =>
            context.ResolvedTopology.TryGetValue(component, out var entry) ? entry.AdminUrl : fallback;

        using var http = new HttpClient { Timeout = TimeSpan.FromSeconds(10) };

        var kcUrl = AdminUrlOrDefault("Keycloak", "http://localhost:8080");
        var vaultUrl = AdminUrlOrDefault("Vault", "http://localhost:8200");

        await ProbeAsync(http, $"{kcUrl}/health/ready", "Keycloak", ct);

        // Vault returns non-200 on sealed/standby — any HTTP response means it's running
        await ProbeAsync(http, $"{vaultUrl}/v1/sys/health", "Vault", ct, acceptAnyHttpResponse: true);
    }

    /// <summary>
    /// Issues a GET against <paramref name="url"/> and converts every failure mode
    /// (bad status, connection refused, timeout) into an InvalidOperationException
    /// carrying a remediation hint, so the saga surfaces a clear error.
    /// </summary>
    private async Task ProbeAsync(
        HttpClient http, string url, string serviceName, CancellationToken ct,
        bool acceptAnyHttpResponse = false)
    {
        try
        {
            var resp = await http.GetAsync(url, ct);
            var healthy = acceptAnyHttpResponse || resp.IsSuccessStatusCode;
            if (!healthy)
                throw new InvalidOperationException(
                    $"{serviceName} at {url} returned HTTP {(int)resp.StatusCode}. Is it healthy?");
            logger.LogInformation("{Service} is reachable at {Url} ({Status}).",
                serviceName, url, (int)resp.StatusCode);
        }
        catch (HttpRequestException ex)
        {
            // No TCP connection at all — the compose stack is almost certainly down.
            throw new InvalidOperationException(
                $"{serviceName} is not reachable at {url}. " +
                $"Run `docker compose up -d` from OPC/infra/ before provisioning. ({ex.Message})", ex);
        }
        catch (TaskCanceledException ex) when (!ct.IsCancellationRequested)
        {
            // HttpClient reports its own timeout as TaskCanceledException; the filter
            // lets a genuine caller cancellation propagate untouched.
            throw new InvalidOperationException(
                $"{serviceName} health check at {url} timed out. Is the service running?", ex);
        }
    }
}
+13 -2
View File
@@ -41,6 +41,17 @@ public class KeycloakStep(
}, cancellationToken);
// clarity-web-app: public OIDC client used by the React frontend.
// fdev is a developer dogfood environment — allow localhost redirect URIs so that a
// local Aspire dev loop (any port) can complete the OIDC flow against the shared
// OPC infra Keycloak without any post-provisioning patching.
var isFdev = string.Equals(context.Job.Environment, "fdev", StringComparison.OrdinalIgnoreCase);
var redirectUris = isFdev
? new[] { $"{tenantOrigin}/*", "http://localhost:*/*", "http://*.dev.localhost:*/*" }
: new[] { $"{tenantOrigin}/*" };
var webOrigins = isFdev
? "+" // match all valid redirect URI origins
: tenantOrigin;
await adminClient.CreateClientAsync(realmId, new
{
clientId = "clarity-web-app",
@@ -51,8 +62,8 @@ public class KeycloakStep(
directAccessGrantsEnabled = false,
rootUrl = tenantOrigin,
baseUrl = "/",
redirectUris = new[] { $"{tenantOrigin}/*" },
webOrigins = new[] { tenantOrigin },
redirectUris,
webOrigins = new[] { webOrigins },
}, cancellationToken);
// Ensure tokens issued by clarity-web-app include "clarity-rest-api" in the `aud` claim
+1 -1
View File
@@ -32,7 +32,7 @@ public class LaunchStep(
subdomain: job.Subdomain,
keycloakRealm: $"clarity-{job.Subdomain.ToLowerInvariant()}",
postgresConnectionString: context.TenantConnectionString,
vaultToken: ReadVaultToken(config),
vaultToken: context.VaultToken ?? ReadVaultToken(config),
jobId: job.Id,
cancellationToken: cancellationToken);
+4 -4
View File
@@ -20,10 +20,10 @@ public class MigrationStep(
var job = context.Job;
var dbName = TenantDbName(job.Subdomain);
var adminConnStr = config.GetConnectionString("postgres")
var adminConnStr = config.GetConnectionString("platformdb")
?? throw new InvalidOperationException(
"ConnectionStrings:postgres is missing. " +
"Ensure ControlPlane.Worker has .WithReference(postgres) in AppHost.");
"ConnectionStrings:platformdb is missing. " +
"Ensure ControlPlane.Worker appsettings.json has a platformdb connection string.");
logger.LogInformation("[{JobId}] Provisioning database '{Db}'.", job.Id, dbName);
await CreateDatabaseIfNotExistsAsync(adminConnStr, dbName, cancellationToken);
@@ -44,7 +44,7 @@ public class MigrationStep(
if (string.IsNullOrWhiteSpace(context.TenantConnectionString)) return;
var dbName = TenantDbName(context.Job.Subdomain);
var adminConnStr = config.GetConnectionString("postgres");
var adminConnStr = config.GetConnectionString("platformdb");
if (string.IsNullOrWhiteSpace(adminConnStr)) return;
logger.LogWarning("[{JobId}] Compensating: dropping database '{Db}'.", context.Job.Id, dbName);
+105 -23
View File
@@ -1,6 +1,9 @@
using ControlPlane.Core.Interfaces;
using ControlPlane.Core.Models;
using System.Net.Http.Headers;
using System.Text;
using System.Text.Json;
using System.Text.Json.Nodes;
namespace ControlPlane.Worker.Steps;
@@ -8,38 +11,117 @@ public class VaultStep(ILogger<VaultStep> logger, IConfiguration config) : ISaga
{
public string StepName => "Cryptographic Pre-Flight (Vault)";
public Task ExecuteAsync(SagaContext context, CancellationToken cancellationToken)
// Policy grants the tenant token exactly the three Transit operations Clarity.Server needs:
// GenerateTenantKEKAsync → datakey/plaintext (first boot only)
// DecryptTenantKEKAsync → decrypt (every restart)
// RewrapTenantKEKAsync → rewrap (key rotation)
private const string PolicyTemplate = """
path "clarity-transit/datakey/plaintext/master-key" {
capabilities = ["update"]
}
path "clarity-transit/decrypt/master-key" {
capabilities = ["update"]
}
path "clarity-transit/rewrap/master-key" {
capabilities = ["update"]
}
""";
public async Task ExecuteAsync(SagaContext context, CancellationToken cancellationToken)
{
// TODO: VaultSharp
// 1. Assert Transit engine is active and healthy
// 2. Derive/validate TenantContextId (e.g. FL_COM_001)
// 3. Register TenantContextId in a KV entry or TenantRegistry table
// so Clarity.Server can resolve the derivation path later
//
// Root token is read at runtime from the persisted init.json on the Vault volume:
// var token = ReadRootToken();
logger.LogInformation("[{JobId}] Vault step is a stub - VaultSharp not yet wired.", context.Job.Id);
var rootToken = ReadRootToken();
var vaultAddr = (config["Vault:Address"] ?? "http://localhost:8200").TrimEnd('/');
var subdomain = context.Job.Subdomain.ToLowerInvariant();
var policyName = $"clarity-tenant-{subdomain}";
using var http = new HttpClient { BaseAddress = new Uri(vaultAddr) };
http.DefaultRequestHeaders.Add("X-Vault-Token", rootToken);
// ── 1. Assert Transit engine + master-key are healthy ─────────────────
logger.LogInformation("[{JobId}] Verifying Vault Transit engine and master-key.", context.Job.Id);
var healthRes = await http.GetAsync("v1/clarity-transit/keys/master-key", cancellationToken);
if (!healthRes.IsSuccessStatusCode)
throw new InvalidOperationException(
$"Vault Transit master-key not found at {vaultAddr}. " +
"Ensure OPC infra is running and the entrypoint has bootstrapped Vault.");
// ── 2. Upsert per-tenant policy (idempotent PUT) ──────────────────────
logger.LogInformation("[{JobId}] Writing Vault policy '{Policy}'.", context.Job.Id, policyName);
var policyBody = JsonSerializer.Serialize(new { policy = PolicyTemplate });
var policyRes = await http.PutAsync(
$"v1/sys/policies/acl/{policyName}",
new StringContent(policyBody, Encoding.UTF8, "application/json"),
cancellationToken);
policyRes.EnsureSuccessStatusCode();
// ── 3. Create scoped periodic token bound to tenant policy ────────────
logger.LogInformation("[{JobId}] Creating scoped Vault token for policy '{Policy}'.", context.Job.Id, policyName);
var tokenBody = JsonSerializer.Serialize(new
{
policies = new[] { policyName },
period = "72h",
renewable = true,
metadata = new Dictionary<string, string>
{
["tenant"] = subdomain,
["createdBy"] = "ControlPlane.Worker",
},
});
var tokenRes = await http.PostAsync(
"v1/auth/token/create",
new StringContent(tokenBody, Encoding.UTF8, "application/json"),
cancellationToken);
tokenRes.EnsureSuccessStatusCode();
var tokenJson = JsonNode.Parse(await tokenRes.Content.ReadAsStringAsync(cancellationToken))!;
context.VaultToken = tokenJson["auth"]!["client_token"]!.GetValue<string>();
context.VaultTokenAccessor = tokenJson["auth"]!["accessor"]!.GetValue<string>();
logger.LogInformation("[{JobId}] Vault step complete. Token accessor: {Accessor}",
context.Job.Id, context.VaultTokenAccessor);
context.Job.CompletedSteps |= CompletedSteps.VaultVerified;
return Task.CompletedTask;
}
public Task CompensateAsync(SagaContext context, CancellationToken cancellationToken)
public async Task CompensateAsync(SagaContext context, CancellationToken cancellationToken)
{
logger.LogInformation("[{JobId}] Vault step: no compensation needed.", context.Job.Id);
return Task.CompletedTask;
if (string.IsNullOrWhiteSpace(context.VaultTokenAccessor)) return;
logger.LogWarning("[{JobId}] Compensating Vault — revoking token accessor {Accessor}.",
context.Job.Id, context.VaultTokenAccessor);
try
{
var rootToken = ReadRootToken();
var vaultAddr = (config["Vault:Address"] ?? "http://localhost:8200").TrimEnd('/');
using var http = new HttpClient { BaseAddress = new Uri(vaultAddr) };
http.DefaultRequestHeaders.Add("X-Vault-Token", rootToken);
var body = JsonSerializer.Serialize(new { accessor = context.VaultTokenAccessor });
await http.PostAsync(
"v1/auth/token/revoke-accessor",
new StringContent(body, Encoding.UTF8, "application/json"),
cancellationToken);
}
catch (Exception ex)
{
logger.LogError(ex, "[{JobId}] Failed to revoke Vault token accessor {Accessor} during compensation.",
context.Job.Id, context.VaultTokenAccessor);
}
}
/// <summary>
/// Reads the root token from the init.json written by the Vault entrypoint on first boot.
/// Path is injected via Vault__KeysFile config.
/// </summary>
internal string ReadRootToken()
{
var path = config["Vault__KeysFile"]
?? throw new InvalidOperationException("Vault__KeysFile is not configured.");
var path = config["Vault:KeysFile"] ?? config["Vault__KeysFile"];
if (!string.IsNullOrWhiteSpace(path) && File.Exists(path))
{
using var doc = JsonDocument.Parse(File.ReadAllText(path));
return doc.RootElement.GetProperty("root_token").GetString()
?? throw new InvalidOperationException("root_token not found in Vault init.json.");
if (doc.RootElement.TryGetProperty("root_token", out var tok))
return tok.GetString()!;
}
return config["Vault:Token"]
?? throw new InvalidOperationException(
"Cannot resolve Vault root token: neither Vault:KeysFile nor Vault:Token is configured.");
}
}
@@ -0,0 +1,5 @@
{
"Vault": {
"KeysFile": "C:\\Users\\amadzarak\\source\\repos\\ClarityStack\\OPC\\infra\\vault\\data\\init.json"
}
}
+56
View File
@@ -0,0 +1,56 @@
{
"Logging": {
"LogLevel": {
"Default": "Information",
"Microsoft.Hosting.Lifetime": "Information"
}
},
// ── Keycloak ──────────────────────────────────────────────────────────────────
// Worker runs on the host machine → use localhost URLs for admin API calls.
// These are the shared platform Keycloak credentials from infra/docker-compose.yml.
// Aspire no longer injects these — they live here.
"Keycloak": {
"AuthServerUrl": "http://localhost:8080",
"AdminUser": "admin",
"AdminPassword": "Admin1234!",
"Realm": "master",
"Resource": "admin-cli"
},
// ── Vault ─────────────────────────────────────────────────────────────────────
// Worker uses localhost:8200 for admin calls.
// Vault:KeysFile is machine-specific → set in appsettings.Development.json.
"Vault": {
"Address": "http://localhost:8200",
"ContainerAddress": "http://vault:8200"
},
// ── ClarityInfraOptions (Clarity section) ─────────────────────────────────────
// These values describe what gets injected INTO tenant containers at docker run time.
// Containers live on clarity-net → use Docker DNS names (keycloak, vault, postgres).
// Nginx/dnsmasq surface these at public DNS names for the browser.
"Clarity": {
"Domain": "clarity.test",
"Network": "clarity-net",
"KeycloakPublicUrl": "https://keycloak.clarity.test",
"KeycloakInternalUrl": "http://keycloak:8080",
"VaultInternalUrl": "http://vault:8200",
"NginxCertPath": "/etc/nginx/certs/clarity.test.crt",
"NginxCertKeyPath": "/etc/nginx/certs/clarity.test.key"
},
// ── Docker ───────────────────────────────────────────────────────────────────
"Docker": {
"Socket": "npipe://./pipe/docker_engine",
"ClarityServerImage": "clarity-server:latest"
},
// ── Connection strings ────────────────────────────────────────────────────────
// platformdb: the shared infra postgres from infra/docker-compose.yml.
// Worker connects on localhost:5432 for tenant DB provisioning (MigrationStep).
// Aspire-managed opcdb (port 5433) is injected separately by AppHost via .WithReference.
"ConnectionStrings": {
"platformdb": "Host=localhost;Port=5432;Username=postgres;Password=postgres"
}
}
+59 -1
View File
@@ -13,7 +13,8 @@
"highlight.js": "^11.11.1",
"react": "^18.3.1",
"react-dom": "^18.3.1",
"react-multistep": "^7.0.0"
"react-multistep": "^7.0.0",
"react-router-dom": "^7.14.2"
},
"devDependencies": {
"@eslint/js": "^9.39.4",
@@ -1710,6 +1711,19 @@
"dev": true,
"license": "MIT"
},
"node_modules/cookie": {
"version": "1.1.1",
"resolved": "https://registry.npmjs.org/cookie/-/cookie-1.1.1.tgz",
"integrity": "sha512-ei8Aos7ja0weRpFzJnEA9UHJ/7XQmqglbRwnf2ATjcB9Wq874VKH9kfjjirM6UhU2/E5fFYadylyhFldcqSidQ==",
"license": "MIT",
"engines": {
"node": ">=18"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/express"
}
},
"node_modules/cross-spawn": {
"version": "7.0.6",
"resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.6.tgz",
@@ -3043,6 +3057,44 @@
"react-dom": "^16.8.0 || ^17 || ^18"
}
},
"node_modules/react-router": {
"version": "7.14.2",
"resolved": "https://registry.npmjs.org/react-router/-/react-router-7.14.2.tgz",
"integrity": "sha512-yCqNne6I8IB6rVCH7XUvlBK7/QKyqypBFGv+8dj4QBFJiiRX+FG7/nkdAvGElyvVZ/HQP5N19wzteuTARXi5Gw==",
"license": "MIT",
"dependencies": {
"cookie": "^1.0.1",
"set-cookie-parser": "^2.6.0"
},
"engines": {
"node": ">=20.0.0"
},
"peerDependencies": {
"react": ">=18",
"react-dom": ">=18"
},
"peerDependenciesMeta": {
"react-dom": {
"optional": true
}
}
},
"node_modules/react-router-dom": {
"version": "7.14.2",
"resolved": "https://registry.npmjs.org/react-router-dom/-/react-router-dom-7.14.2.tgz",
"integrity": "sha512-YZcM5ES8jJSM+KrJ9BdvHHqlnGTg5tH3sC5ChFRj4inosKctdyzBDhOyyHdGk597q2OT6NTrCA1OvB/YDwfekQ==",
"license": "MIT",
"dependencies": {
"react-router": "7.14.2"
},
"engines": {
"node": ">=20.0.0"
},
"peerDependencies": {
"react": ">=18",
"react-dom": ">=18"
}
},
"node_modules/react-transition-group": {
"version": "4.4.5",
"resolved": "https://registry.npmjs.org/react-transition-group/-/react-transition-group-4.4.5.tgz",
@@ -3140,6 +3192,12 @@
"upper-case-first": "^2.0.2"
}
},
"node_modules/set-cookie-parser": {
"version": "2.7.2",
"resolved": "https://registry.npmjs.org/set-cookie-parser/-/set-cookie-parser-2.7.2.tgz",
"integrity": "sha512-oeM1lpU/UvhTxw+g3cIfxXHyJRc/uidd3yK1P242gzHds0udQBYzs3y8j4gCCW+ZJ7ad0yctld8RYO+bdurlvw==",
"license": "MIT"
},
"node_modules/shebang-command": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz",
+2 -1
View File
@@ -15,7 +15,8 @@
"highlight.js": "^11.11.1",
"react": "^18.3.1",
"react-dom": "^18.3.1",
"react-multistep": "^7.0.0"
"react-multistep": "^7.0.0",
"react-router-dom": "^7.14.2"
},
"devDependencies": {
"@eslint/js": "^9.39.4",
+38 -30
View File
@@ -1,6 +1,6 @@
import '@blueprintjs/core/lib/css/blueprint.css';
import './App.css';
import { useState } from 'react';
import { Navigate, Route, Routes, useLocation, useNavigate } from 'react-router-dom';
import { Menu, MenuItem, MenuDivider } from '@blueprintjs/core';
import DashboardPage from './pages/DashboardPage';
import PipelinesPage from './pages/PipelinesPage';
@@ -11,12 +11,12 @@ import OpcPage from './opc/OpcPage';
import InfraPage from './pages/InfraPage';
import ChangesetsPage from './pages/ChangesetsPage';
function App() {
const [activeNav, setActiveNav] = useState('opc');
function Sidebar() {
const navigate = useNavigate();
const { pathname } = useLocation();
const at = (path: string) => pathname === path || pathname.startsWith(path + '/');
return (
<div className="cp-shell">
{/* ── Sidebar ── */}
<aside className="cp-sidebar">
<div className="cp-sidebar-brand">
<span className="brand-mark">CP</span>
@@ -25,17 +25,17 @@ function App() {
<div className="cp-sidebar-nav">
<Menu className="cp-sidebar-menu">
<MenuItem icon="cloud-upload" text="Deployments" active={activeNav === 'deployments'} onClick={() => setActiveNav('deployments')} />
<MenuItem icon="git-branch" text="Pipelines" active={activeNav === 'pipelines'} onClick={() => setActiveNav('pipelines')} />
<MenuItem icon="git-merge" text="Branch Ladder" active={activeNav === 'branches'} onClick={() => setActiveNav('branches')} />
<MenuItem icon="build" text="Image Build" active={activeNav === 'image-build'} onClick={() => setActiveNav('image-build')} />
<MenuItem icon="pulse" text="Build Monitor" active={activeNav === 'build-monitor'} onClick={() => setActiveNav('build-monitor')} />
<MenuItem icon="cloud-upload" text="Deployments" active={at('/deployments')} onClick={() => navigate('/deployments')} />
<MenuItem icon="git-branch" text="Pipelines" active={at('/pipelines')} onClick={() => navigate('/pipelines')} />
<MenuItem icon="git-merge" text="Branch Ladder" active={at('/branches')} onClick={() => navigate('/branches')} />
<MenuItem icon="build" text="Image Build" active={at('/image-build')} onClick={() => navigate('/image-build')} />
<MenuItem icon="pulse" text="Build Monitor" active={at('/build-monitor')} onClick={() => navigate('/build-monitor')} />
<MenuDivider />
<MenuItem icon="heat-grid" text="Infrastructure" active={activeNav === 'infra'} onClick={() => setActiveNav('infra')} />
<MenuItem icon="clipboard" text="OPC" active={activeNav === 'opc'} onClick={() => setActiveNav('opc')} />
<MenuItem icon="history" text="Changesets" active={activeNav === 'changesets'} onClick={() => setActiveNav('changesets')} />
<MenuItem icon="people" text="Clients" active={activeNav === 'clients'} onClick={() => setActiveNav('clients')} />
<MenuItem icon="cog" text="Settings" active={activeNav === 'settings'} onClick={() => setActiveNav('settings')} />
<MenuItem icon="heat-grid" text="Infrastructure" active={at('/infra')} onClick={() => navigate('/infra')} />
<MenuItem icon="clipboard" text="OPC" active={at('/opc')} onClick={() => navigate('/opc')} />
<MenuItem icon="history" text="Changesets" active={at('/changesets')} onClick={() => navigate('/changesets')} />
<MenuItem icon="people" text="Clients" active={at('/clients')} onClick={() => navigate('/clients')} />
<MenuItem icon="cog" text="Settings" active={at('/settings')} onClick={() => navigate('/settings')} />
</Menu>
</div>
@@ -49,21 +49,6 @@ function App() {
</div>
</div>
</aside>
{/* ── Main content ── */}
<main className="cp-main">
{activeNav === 'deployments' && <DashboardPage />}
{activeNav === 'pipelines' && <PipelinesPage />}
{activeNav === 'branches' && <BranchPage />}
{activeNav === 'image-build' && <ImageBuildPage />}
{activeNav === 'build-monitor' && <BuildMonitorPage />}
{activeNav === 'infra' && <InfraPage />}
{activeNav === 'opc' && <OpcPage />}
{activeNav === 'changesets' && <ChangesetsPage />}
{activeNav === 'clients' && <PlaceholderPage title="Clients" />}
{activeNav === 'settings' && <PlaceholderPage title="Settings" />}
</main>
</div>
);
}
@@ -76,4 +61,27 @@ function PlaceholderPage({ title }: { title: string }) {
);
}
function App() {
return (
<div className="cp-shell">
<Sidebar />
<main className="cp-main">
<Routes>
<Route path="/" element={<Navigate to="/opc" replace />} />
<Route path="/deployments" element={<DashboardPage />} />
<Route path="/pipelines" element={<PipelinesPage />} />
<Route path="/branches" element={<BranchPage />} />
<Route path="/image-build" element={<ImageBuildPage />} />
<Route path="/build-monitor" element={<BuildMonitorPage />} />
<Route path="/infra" element={<InfraPage />} />
<Route path="/opc" element={<OpcPage />} />
<Route path="/changesets" element={<ChangesetsPage />} />
<Route path="/clients" element={<PlaceholderPage title="Clients" />} />
<Route path="/settings" element={<PlaceholderPage title="Settings" />} />
</Routes>
</main>
</div>
);
}
export default App;
+49
View File
@@ -0,0 +1,49 @@
const BASE_URL = import.meta.env.VITE_API_URL ?? '';
/** A buildable unit discovered by the backend (served from /api/builds/projects). */
export interface ProjectDefinition {
  name: string;
  kind: 'DotnetProject' | 'NpmProject' | 'SolutionBuild';
  relativePath: string; // presumably relative to the repo root — confirm against backend
}

/** One build run as recorded by the backend's build-history endpoint. */
export interface BuildRecord {
  id: string;
  kind: 'DockerImage' | 'DotnetProject' | 'NpmProject' | 'SolutionBuild';
  target: string;
  status: 'Running' | 'Succeeded' | 'Failed';
  startedAt: string;   // timestamp string — assumed ISO 8601; TODO confirm serialization
  finishedAt?: string; // absent while the build is still running
  durationMs?: number;
  commitSha?: string;
  log: string[];       // captured output lines
}
/** Fetches the list of buildable projects from the backend. */
export async function getProjects(): Promise<ProjectDefinition[]> {
  const response = await fetch(`${BASE_URL}/api/builds/projects`);
  if (response.ok) {
    return response.json();
  }
  throw new Error(`Failed to get projects: ${response.statusText}`);
}
/** Retrieves every recorded build run from the backend. */
export async function getBuildHistory(): Promise<BuildRecord[]> {
  const response = await fetch(`${BASE_URL}/api/builds/history`);
  if (response.ok) {
    return response.json();
  }
  throw new Error(`Failed to get build history: ${response.statusText}`);
}
/**
 * Starts a build for `projectName` and streams its output over SSE.
 *
 * @param projectName project to build; URL-encoded into the endpoint path.
 * @param onLine      called once per streamed log line.
 * @param onDone      called with the final BuildRecord; the stream is closed first.
 * @param onError     called on connection errors.
 * @returns the EventSource so callers can close it early.
 */
export function triggerProjectBuild(
  projectName: string,
  onLine: (line: string) => void,
  onDone: (record: BuildRecord) => void,
  onError: (err: Event) => void,
): EventSource {
  const source = new EventSource(`${BASE_URL}/api/builds/${encodeURIComponent(projectName)}`);
  source.onmessage = (e) => {
    try {
      const msg = JSON.parse(e.data);
      if (msg.done && msg.build) { onDone(msg.build as BuildRecord); source.close(); }
      else if (typeof msg.line === 'string') onLine(msg.line);
    } catch { /* ignore malformed frames */ }
  };
  // Close on error: this GET endpoint *starts* a build, so letting EventSource
  // auto-reconnect would silently kick off duplicate builds (the infra stream
  // helpers in this codebase close on error for the same reason).
  source.onerror = (e) => { source.close(); onError(e); };
  return source;
}
+18
View File
@@ -0,0 +1,18 @@
const BASE_URL = import.meta.env.VITE_API_URL ?? '';
export interface GitCommit {
hash: string;
shortHash: string;
author: string;
date: string;
subject: string;
files: string[];
}
/** Loads up to `limit` commits, optionally restricted to commits touching `path`. */
export async function getGitLog(path?: string, limit = 20): Promise<GitCommit[]> {
  const query = new URLSearchParams();
  query.set('limit', String(limit));
  if (path) {
    query.set('path', path);
  }
  const response = await fetch(`${BASE_URL}/api/git/log?${query}`);
  if (!response.ok) {
    throw new Error(`Failed to get git log: ${response.statusText}`);
  }
  return response.json();
}
+51
View File
@@ -0,0 +1,51 @@
const BASE_URL = import.meta.env.VITE_API_URL ?? '';
export interface ImageBuildStatus {
imageName: string | null;
builtAt: string | null;
lastMessage: string;
isBuilding: boolean;
}
export interface BuildHistoryRecord {
id: string;
status: 'Running' | 'Succeeded' | 'Failed';
startedAt: string;
durationMs: number | null;
commitSha: string | null;
imageDigest: string | null;
}
/** Reads the current image-build status (name, timestamp, in-progress flag). */
export async function getImageStatus(): Promise<ImageBuildStatus> {
  const response = await fetch(`${BASE_URL}/api/image/status`);
  if (response.ok) {
    return response.json();
  }
  throw new Error(`Failed to get image status: ${response.statusText}`);
}
/** Fetches up to `limit` image-build history records. */
export async function getImageBuildHistory(limit = 30): Promise<BuildHistoryRecord[]> {
  const url = `${BASE_URL}/api/image/history?limit=${limit}`;
  const response = await fetch(url);
  if (!response.ok) {
    throw new Error(`Failed to get build history: ${response.statusText}`);
  }
  return response.json();
}
/**
 * Starts an image build and streams its log over SSE.
 *
 * @param onLine  called once per streamed log line.
 * @param onDone  called with `true` when the server signals completion, `false` on error.
 * @param onError called on connection errors (after onDone(false)).
 * @returns the EventSource so callers can close it early.
 */
export function triggerImageBuild(
  onLine: (line: string) => void,
  onDone: (success: boolean) => void,
  onError: (err: Event) => void,
): EventSource {
  const source = new EventSource(`${BASE_URL}/api/image/build-stream`);
  source.onmessage = (e) => {
    try {
      const msg = JSON.parse(e.data);
      if (msg.done) { onDone(true); source.close(); }
      else if (msg.line) onLine(msg.line);
    } catch { /* ignore malformed frames */ }
  };
  // Close on error: without this, EventSource auto-reconnects, re-triggering the
  // build endpoint and firing onDone(false)/onError repeatedly for one user action.
  source.onerror = (e) => { source.close(); onDone(false); onError(e); };
  return source;
}
/** Kicks off an image build without streaming output; throws when the trigger fails. */
export async function startImageBuild(): Promise<void> {
  const response = await fetch(`${BASE_URL}/api/image/build`, { method: 'POST' });
  if (!response.ok) {
    throw new Error(`Build trigger failed: ${response.statusText}`);
  }
}
+20
View File
@@ -36,6 +36,26 @@ export function streamComposeUp(onLine: (line: string) => void, onDone: () => vo
return src;
}
/** Force-recreates all containers and removes orphans — fixes name-conflict errors. */
export function streamComposeForceUp(onLine: (line: string) => void, onDone: () => void): EventSource {
  const source = new EventSource(`${BASE_URL}/api/infra/compose/up-force/stream`);
  source.onmessage = (event) => {
    onLine(event.data);
  };
  source.onerror = () => {
    onDone();
    source.close();
  };
  return source;
}
/**
 * Nuke & Recreate — force-removes every known platform container by name first
 * (kills orphans that --remove-orphans won't touch), then runs compose up fresh.
 * Use this when Force Recreate still fails with "container name already in use".
 */
export function streamComposeNuke(onLine: (line: string) => void, onDone: () => void): EventSource {
  const source = new EventSource(`${BASE_URL}/api/infra/compose/nuke/stream`);
  source.onmessage = (event) => {
    onLine(event.data);
  };
  source.onerror = () => {
    onDone();
    source.close();
  };
  return source;
}
export function streamComposeDown(onLine: (line: string) => void, onDone: () => void): EventSource {
const src = new EventSource(`${BASE_URL}/api/infra/compose/down/stream`);
src.onmessage = (e) => onLine(e.data);
+1 -1
View File
@@ -328,7 +328,7 @@ export async function listGiteaBranches(repoKey?: string): Promise<GiteaBranch[]
export async function createGiteaBranch(
opcNumber: string,
opcTitle: string,
from = 'master',
from = 'main',
): Promise<GiteaBranch> {
const res = await fetch(`${BASE_URL}/api/gitea/branches`, {
method: 'POST',
@@ -0,0 +1,220 @@
const BASE_URL = import.meta.env.VITE_API_URL ?? '';
export interface CommitInfo {
sha: string;
shortSha: string;
message: string;
author: string;
date: string;
}
export interface BranchStatus {
branch: string;
exists: boolean;
shortHash: string | null;
lastCommitSummary: string | null;
aheadOfNext: number;
behindNext: number;
unreleasedCommits: CommitInfo[];
tipSha: string | null;
}
export interface PromotionRecord {
id: string;
fromBranch: string;
toBranch: string;
requestedBy: string;
note: string | null;
status: 'Pending' | 'Running' | 'Succeeded' | 'Failed';
createdAt: string;
completedAt: string | null;
commitCount: number;
commitLines: string[];
log: string[];
}
export type ConformanceViolation = 'OK' | 'Missing' | 'Diverged' | 'Stale';
export type ConformanceSeverity = 'OK' | 'Info' | 'Warning' | 'Critical';
export interface BranchConformanceCheck {
branch: string;
sourceBranch: string | null;
violation: ConformanceViolation;
severity: ConformanceSeverity;
detail: string;
aheadOfSource: number;
behindSource: number;
fixSha: string | null;
}
export interface ConformanceReport {
repo: string;
isConformant: boolean;
checks: BranchConformanceCheck[];
}
/** Fetches the promotion-ladder status for every branch of `repo`. */
export async function getLadderStatus(repo = 'Clarity'): Promise<BranchStatus[]> {
  const response = await fetch(`${BASE_URL}/api/promotions/ladder?repo=${encodeURIComponent(repo)}`);
  if (response.ok) {
    return response.json();
  }
  throw new Error(`Failed to get ladder status: ${response.statusText}`);
}
/** Retrieves all recorded promotion runs. */
export async function getPromotionHistory(): Promise<PromotionRecord[]> {
  const response = await fetch(`${BASE_URL}/api/promotions/history`);
  if (response.ok) {
    return response.json();
  }
  throw new Error(`Failed to get promotion history: ${response.statusText}`);
}
/**
 * Starts a branch promotion (`from` → `to`) on the server and streams its
 * progress lines back to the caller by manually parsing the SSE response body.
 *
 * @param onLine  called once per streamed progress line.
 * @param onDone  called with the final PromotionRecord when the server sends `done`.
 * @param onError called with a message on HTTP failure or a thrown/aborted fetch.
 * @returns a cancel function that aborts the request and suppresses further callbacks.
 */
export function triggerPromotion(
  from: string,
  to: string,
  requestedBy: string,
  note: string | undefined,
  onLine: (line: string) => void,
  onDone: (record: PromotionRecord) => void,
  onError: (err: string) => void,
  repo = 'Clarity',
): () => void {
  let cancelled = false;
  const controller = new AbortController();
  // Fire-and-forget worker; all failures are routed through onError.
  (async () => {
    try {
      const res = await fetch(`${BASE_URL}/api/promotions/promote`, {
        method: 'POST',
        headers: { 'Content-Type': 'application/json' },
        body: JSON.stringify({ from, to, requestedBy, note, repo }),
        signal: controller.signal,
      });
      if (!res.ok || !res.body) { onError(res.statusText); return; }
      const reader = res.body.getReader();
      const decoder = new TextDecoder();
      let buffer = '';
      while (!cancelled) {
        const { done, value } = await reader.read();
        if (done) break;
        buffer += decoder.decode(value, { stream: true });
        // SSE frames are separated by blank lines; keep the trailing partial frame.
        const parts = buffer.split('\n\n');
        buffer = parts.pop() ?? '';
        for (const chunk of parts) {
          // Strip the "data:" prefix of the frame.
          // NOTE(review): only a single data line per frame is handled — multi-line
          // SSE data would be mangled; confirm the server emits one line per frame.
          const dataLine = chunk.replace(/^data:\s*/m, '').trim();
          if (!dataLine) continue;
          try {
            const msg = JSON.parse(dataLine);
            if (msg.done && msg.promotion) onDone(msg.promotion as PromotionRecord);
            else if (typeof msg.line === 'string') onLine(msg.line);
          } catch { /* skip */ }
        }
      }
    } catch (e) {
      // AbortError after cancellation is intentional — only surface real errors.
      if (!cancelled) onError(e instanceof Error ? e.message : 'Unknown error');
    }
  })();
  return () => { cancelled = true; controller.abort(); };
}
/**
 * Cherry-picks the given commit `shas` from `from` onto `to` and streams the
 * server's progress lines back to the caller (same SSE-over-fetch protocol as
 * triggerPromotion).
 *
 * @param onLine  called once per streamed progress line.
 * @param onDone  called with the final PromotionRecord when the server sends `done`.
 * @param onError called with a message on HTTP failure or a thrown/aborted fetch.
 * @returns a cancel function that aborts the request and suppresses further callbacks.
 */
export function triggerCherryPick(
  shas: string[],
  from: string,
  to: string,
  requestedBy: string,
  note: string | undefined,
  onLine: (line: string) => void,
  onDone: (record: PromotionRecord) => void,
  onError: (err: string) => void,
  repo = 'Clarity',
): () => void {
  let cancelled = false;
  const controller = new AbortController();
  // Fire-and-forget worker; all failures are routed through onError.
  (async () => {
    try {
      const res = await fetch(`${BASE_URL}/api/promotions/cherry-pick`, {
        method: 'POST',
        headers: { 'Content-Type': 'application/json' },
        body: JSON.stringify({ shas, from, to, requestedBy, note, repo }),
        signal: controller.signal,
      });
      if (!res.ok || !res.body) { onError(res.statusText); return; }
      const reader = res.body.getReader();
      const decoder = new TextDecoder();
      let buffer = '';
      while (!cancelled) {
        const { done, value } = await reader.read();
        if (done) break;
        buffer += decoder.decode(value, { stream: true });
        // SSE frames are separated by blank lines; keep the trailing partial frame.
        const parts = buffer.split('\n\n');
        buffer = parts.pop() ?? '';
        for (const chunk of parts) {
          // Strip the "data:" prefix of the frame (single data line assumed; see
          // the matching note on triggerPromotion).
          const dataLine = chunk.replace(/^data:\s*/m, '').trim();
          if (!dataLine) continue;
          try {
            const msg = JSON.parse(dataLine);
            if (msg.done && msg.promotion) onDone(msg.promotion as PromotionRecord);
            else if (typeof msg.line === 'string') onLine(msg.line);
          } catch { /* skip */ }
        }
      }
    } catch (e) {
      // AbortError after cancellation is intentional — only surface real errors.
      if (!cancelled) onError(e instanceof Error ? e.message : 'Unknown error');
    }
  })();
  return () => { cancelled = true; controller.abort(); };
}
/**
 * Hard-resets `branch` to `toSha` in the given repo via the promotions API.
 * Throws with the server-provided error message when available, otherwise the
 * HTTP status text.
 */
export async function resetBranch(branch: string, toSha: string, repo: string): Promise<void> {
  const response = await fetch(`${BASE_URL}/api/promotions/reset`, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ branch, toSha, repo }),
  });
  if (response.ok) return;
  const payload = await response.json().catch(() => ({}));
  throw new Error((payload as { error?: string }).error ?? response.statusText);
}
/** Fetches the branch-conformance report for one repository (default: Clarity). */
export async function getConformanceReport(repo = 'Clarity'): Promise<ConformanceReport> {
  const url = `${BASE_URL}/api/promotions/conformance?repo=${encodeURIComponent(repo)}`;
  const response = await fetch(url);
  if (response.ok) return response.json();
  throw new Error(`Failed to get conformance report: ${response.statusText}`);
}
/** Fetches conformance reports for every tracked repository. */
export async function getAllConformanceReports(): Promise<ConformanceReport[]> {
  const response = await fetch(`${BASE_URL}/api/promotions/conformance/all`);
  if (response.ok) return response.json();
  throw new Error(`Failed to get conformance reports: ${response.statusText}`);
}
/**
 * Creates a new ladder branch starting at `fromSha` in the given repo.
 * Throws with the server-provided error message when available, otherwise the
 * HTTP status text.
 */
export async function createLadderBranch(branch: string, fromSha: string, repo: string): Promise<void> {
  const response = await fetch(`${BASE_URL}/api/promotions/create-branch`, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ branch, fromSha, repo }),
  });
  if (response.ok) return;
  const payload = await response.json().catch(() => ({}));
  throw new Error((payload as { error?: string }).error ?? response.statusText);
}
// ── Build gate ───────────────────────────────────────────────────────────────────────────────
/** CI build status for a specific commit, used to gate promotions. */
export interface BuildGate {
/** Overall gate verdict for the commit. */
status: 'Green' | 'Red' | 'Running' | 'Unknown';
/** Commit SHA the gate was evaluated for. */
sha: string;
/** Identifier of the associated build, or null if none was found. */
buildId: string | null;
/** Raw pipeline status string, or null if unavailable. */
buildStatus: string | null;
}
/** Looks up the build-gate verdict for a single commit SHA. */
export async function getBuildGate(sha: string): Promise<BuildGate> {
  const url = `${BASE_URL}/api/promotions/build-gate?sha=${encodeURIComponent(sha)}`;
  const response = await fetch(url);
  if (response.ok) return response.json();
  throw new Error(`Failed to get build gate: ${response.statusText}`);
}
+7 -290
View File
@@ -1,290 +1,7 @@
import type { ProvisioningProgressEvent, ProvisioningRequest, TenantRecord } from '../types/provisioning';
const BASE_URL = import.meta.env.VITE_API_URL ?? '';
/** Queues a tenant provisioning job; resolves to the new job's id. */
export async function submitProvisioningJob(request: ProvisioningRequest): Promise<string> {
  const response = await fetch(`${BASE_URL}/api/provision`, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify(request),
  });
  if (!response.ok) {
    throw new Error(`Failed to queue job: ${response.statusText}`);
  }
  const payload = await response.json();
  return payload.id as string;
}
/** Loads the list of provisioned tenants. */
export async function getTenants(): Promise<TenantRecord[]> {
  const response = await fetch(`${BASE_URL}/api/tenants`);
  if (response.ok) return response.json();
  throw new Error(`Failed to load tenants: ${response.statusText}`);
}
/**
 * Subscribes to a tenant's live log stream.
 * The caller owns the returned EventSource and must close() it.
 */
export function subscribeToTenantLogs(
  subdomain: string,
  onLine: (line: string) => void,
  onError: (err: Event) => void
): EventSource {
  const stream = new EventSource(`${BASE_URL}/api/tenants/${subdomain}/logs`);
  stream.addEventListener('message', (e) => {
    if (e.data) onLine(e.data);
  });
  stream.onerror = onError;
  return stream;
}
/**
 * Subscribes to progress events for a provisioning job.
 * The caller owns the returned EventSource and must close() it.
 */
export function subscribeToJobStream(
  jobId: string,
  onEvent: (event: ProvisioningProgressEvent) => void,
  onError: (err: Event) => void
): EventSource {
  const stream = new EventSource(`${BASE_URL}/api/provision/${jobId}/stream`);
  stream.addEventListener('message', (e) => {
    try {
      onEvent(JSON.parse(e.data));
    } catch {
      /* ignore malformed frames */
    }
  });
  stream.onerror = onError;
  return stream;
}
/** Snapshot of the Docker image build state reported by /api/image/status. */
export interface ImageBuildStatus {
/** Name of the last built image, or null if none has been built yet. */
imageName: string | null;
/** When the image was last built, or null if never built. */
builtAt: string | null;
/** Most recent status/log message from the builder. */
lastMessage: string;
/** True while a build is currently in progress. */
isBuilding: boolean;
}
/** Fetches the current Docker image build status. */
export async function getImageStatus(): Promise<ImageBuildStatus> {
  const response = await fetch(`${BASE_URL}/api/image/status`);
  if (response.ok) return response.json();
  throw new Error(`Failed to get image status: ${response.statusText}`);
}
/**
 * Triggers an image build and streams its log lines over SSE.
 * Calls onLine for each log chunk, onDone(true) when the server reports done,
 * and onDone(false) + onError on stream error.
 *
 * Fix: close the EventSource in onerror. EventSource automatically reconnects
 * after an error, and reconnecting to this GET endpoint would silently start
 * another build after onDone(false) had already been reported.
 */
export function triggerImageBuild(
  onLine: (line: string) => void,
  onDone: (success: boolean) => void,
  onError: (err: Event) => void
): EventSource {
  const source = new EventSource(`${BASE_URL}/api/image/build-stream`);
  source.onmessage = (e) => {
    try {
      const msg = JSON.parse(e.data);
      // Server signals completion with { done: true }; close to stop reconnects.
      if (msg.done) { onDone(true); source.close(); }
      else if (msg.line) onLine(msg.line);
    } catch { /* ignore malformed frames */ }
  };
  source.onerror = (e) => {
    // Prevent auto-reconnect from re-triggering the build.
    source.close();
    onDone(false);
    onError(e);
  };
  return source;
}
/** POST to kick off the build — returns immediately; use subscribeToJobStream for progress */
export async function startImageBuild(): Promise<void> {
  const response = await fetch(`${BASE_URL}/api/image/build`, { method: 'POST' });
  if (response.ok) return;
  throw new Error(`Build trigger failed: ${response.statusText}`);
}
// ── Release API ──────────────────────────────────────────────────────────────
/** Per-tenant outcome of a release rollout. */
export interface TenantReleaseResult {
subdomain: string;
containerName: string;
/** Whether the rollout succeeded for this tenant. */
success: boolean;
/** Failure reason, present when success is false. */
error?: string;
}
/** One release run against an environment, with per-tenant results. */
export interface ReleaseRecord {
id: string;
environment: string;
/** Image that was rolled out. */
imageName: string;
status: 'Running' | 'Succeeded' | 'PartialFailure' | 'Failed';
startedAt: string;
/** Unset while the release is still running. */
finishedAt?: string;
tenants: TenantReleaseResult[];
}
/** Fetches the history of past release runs. */
export async function getReleaseHistory(): Promise<ReleaseRecord[]> {
  const response = await fetch(`${BASE_URL}/api/release/history`);
  if (response.ok) return response.json();
  throw new Error(`Failed to get release history: ${response.statusText}`);
}
/**
 * Triggers a release to the given environment and streams log lines as SSE.
 * Calls onLine per log line and onDone with the final ReleaseRecord.
 *
 * Fixes:
 * - Close the EventSource in onerror: EventSource automatically reconnects
 *   after an error, and reconnecting to this GET endpoint would re-trigger
 *   the release.
 * - URL-encode the env path segment, consistent with triggerProjectBuild.
 */
export function triggerRelease(
  env: string,
  onLine: (line: string) => void,
  onDone: (record: ReleaseRecord) => void,
  onError: (err: Event) => void
): EventSource {
  const source = new EventSource(`${BASE_URL}/api/release/${encodeURIComponent(env)}`);
  source.onmessage = (e) => {
    try {
      const msg = JSON.parse(e.data);
      // Final frame carries the completed record; close to stop reconnects.
      if (msg.done && msg.release) { onDone(msg.release as ReleaseRecord); source.close(); }
      else if (typeof msg.line === 'string') onLine(msg.line);
    } catch { /* ignore malformed frames */ }
  };
  source.onerror = (e) => {
    // Prevent auto-reconnect from re-triggering the release.
    source.close();
    onError(e);
  };
  return source;
}
// ── Project Build API ────────────────────────────────────────────────────────
/** A buildable project known to the server. */
export interface ProjectDefinition {
name: string;
kind: 'DotnetProject' | 'NpmProject';
/** Project path — presumably relative to the repository root; confirm with server. */
relativePath: string;
}
/** One build run (Docker image or project) and its captured log. */
export interface BuildRecord {
id: string;
kind: 'DockerImage' | 'DotnetProject' | 'NpmProject';
/** What was built (image or project name). */
target: string;
status: 'Running' | 'Succeeded' | 'Failed';
startedAt: string;
/** Unset while the build is still running. */
finishedAt?: string;
durationMs?: number;
/** Captured log lines for the run. */
log: string[];
}
/** Fetches the set of buildable projects. */
export async function getProjects(): Promise<ProjectDefinition[]> {
  const response = await fetch(`${BASE_URL}/api/builds/projects`);
  if (response.ok) return response.json();
  throw new Error(`Failed to get projects: ${response.statusText}`);
}
/** Fetches the history of past build runs. */
export async function getBuildHistory(): Promise<BuildRecord[]> {
  const response = await fetch(`${BASE_URL}/api/builds/history`);
  if (response.ok) return response.json();
  throw new Error(`Failed to get build history: ${response.statusText}`);
}
/**
 * Triggers a project build and streams log lines as SSE.
 * Calls onLine per log line and onDone with the final BuildRecord.
 *
 * Fix: close the EventSource in onerror. EventSource automatically reconnects
 * after an error, and reconnecting to this GET endpoint would re-trigger the
 * build.
 */
export function triggerProjectBuild(
  projectName: string,
  onLine: (line: string) => void,
  onDone: (record: BuildRecord) => void,
  onError: (err: Event) => void
): EventSource {
  const source = new EventSource(`${BASE_URL}/api/builds/${encodeURIComponent(projectName)}`);
  source.onmessage = (e) => {
    try {
      const msg = JSON.parse(e.data);
      // Final frame carries the completed record; close to stop reconnects.
      if (msg.done && msg.build) { onDone(msg.build as BuildRecord); source.close(); }
      else if (typeof msg.line === 'string') onLine(msg.line);
    } catch { /* ignore malformed frames */ }
  };
  source.onerror = (e) => {
    // Prevent auto-reconnect from re-triggering the build.
    source.close();
    onError(e);
  };
  return source;
}
// ── Git History API ──────────────────────────────────────────────────────────
/** A single commit as returned by /api/git/log. */
export interface GitCommit {
hash: string;
shortHash: string;
author: string;
date: string;
/** First line of the commit message. */
subject: string;
/** Paths touched by the commit. */
files: string[];
}
/** Fetches recent commits, optionally filtered to a path, up to `limit` entries. */
export async function getGitLog(path?: string, limit = 20): Promise<GitCommit[]> {
  const query = new URLSearchParams({ limit: String(limit) });
  if (path) query.set('path', path);
  const response = await fetch(`${BASE_URL}/api/git/log?${query}`);
  if (response.ok) return response.json();
  throw new Error(`Failed to get git log: ${response.statusText}`);
}
// ── Promotion / Branch Ladder API ────────────────────────────────────────────
/** State of one rung of the branch promotion ladder. */
export interface BranchStatus {
branch: string;
/** False when the branch has not been created yet. */
exists: boolean;
shortHash: string | null;
lastCommitSummary: string | null;
/** Divergence counts relative to the next rung in the ladder. */
aheadOfNext: number;
behindNext: number;
/** Summaries of work not yet promoted past this rung. */
unreleasedLines: string[];
}
/** One promotion run between two ladder branches. */
export interface PromotionRecord {
id: string;
fromBranch: string;
toBranch: string;
requestedBy: string;
note: string | null;
status: 'Pending' | 'Running' | 'Succeeded' | 'Failed';
createdAt: string;
/** Null while the promotion is still pending or running. */
completedAt: string | null;
commitCount: number;
/** One-line summaries of the commits included in the promotion. */
commitLines: string[];
/** Log lines captured during the run. */
log: string[];
}
/** Fetches the status of every rung in the branch ladder. */
export async function getLadderStatus(): Promise<BranchStatus[]> {
  const response = await fetch(`${BASE_URL}/api/promotions/ladder`);
  if (response.ok) return response.json();
  throw new Error(`Failed to get ladder status: ${response.statusText}`);
}
/** Fetches the history of past promotion runs. */
export async function getPromotionHistory(): Promise<PromotionRecord[]> {
  const response = await fetch(`${BASE_URL}/api/promotions/history`);
  if (response.ok) return response.json();
  throw new Error(`Failed to get promotion history: ${response.statusText}`);
}
/**
 * Triggers a promotion from one ladder branch to the next and streams SSE
 * progress lines from the POST response. Calls onDone with the final record.
 *
 * @param from        Source branch name.
 * @param to          Target branch name.
 * @param requestedBy User initiating the operation (recorded server-side).
 * @param note        Optional free-form note attached to the promotion record.
 * @param onLine      Invoked once per streamed log line.
 * @param onDone      Invoked with the final PromotionRecord when the server reports done.
 * @param onError     Invoked with a message on HTTP failure or network error (not after cancel).
 * @returns A cancel function: stops the read loop and aborts the in-flight fetch.
 */
export function triggerPromotion(
from: string,
to: string,
requestedBy: string,
note: string | undefined,
onLine: (line: string) => void,
onDone: (record: PromotionRecord) => void,
onError: (err: string) => void,
): () => void {
let cancelled = false;
const controller = new AbortController();
// Fire-and-forget worker; the caller controls it only via the returned cancel fn.
(async () => {
try {
const res = await fetch(`${BASE_URL}/api/promotions/promote`, {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({ from, to, requestedBy, note }),
signal: controller.signal,
});
if (!res.ok || !res.body) { onError(res.statusText); return; }
const reader = res.body.getReader();
const decoder = new TextDecoder();
// Accumulates partial text between reads; SSE events are delimited by blank lines.
let buffer = '';
while (!cancelled) {
const { done, value } = await reader.read();
if (done) break;
// stream: true keeps multi-byte characters split across chunks intact.
buffer += decoder.decode(value, { stream: true });
const parts = buffer.split('\n\n');
// The last element may be an incomplete event — keep it for the next read.
buffer = parts.pop() ?? '';
for (const chunk of parts) {
// Strip the leading "data:" SSE field prefix before parsing the payload.
const dataLine = chunk.replace(/^data:\s*/m, '').trim();
if (!dataLine) continue;
try {
const msg = JSON.parse(dataLine);
if (msg.done && msg.promotion) onDone(msg.promotion as PromotionRecord);
else if (typeof msg.line === 'string') onLine(msg.line);
} catch { /* skip malformed frames */ }
}
}
} catch (e) {
// AbortError after cancel() is expected — only surface errors while active.
if (!cancelled) onError(e instanceof Error ? e.message : 'Unknown error');
}
})();
return () => { cancelled = true; controller.abort(); };
}
// Barrel re-export split into domain modules. Import directly from the specific module for new code.
export * from './tenantApi';
export * from './imageApi';
export * from './releaseApi';
export * from './buildApi';
export * from './gitApi';
export * from './promotionApi';
@@ -0,0 +1,43 @@
const BASE_URL = import.meta.env.VITE_API_URL ?? '';
/** Per-tenant outcome of a release rollout. */
export interface TenantReleaseResult {
subdomain: string;
containerName: string;
/** Whether the rollout succeeded for this tenant. */
success: boolean;
/** Failure reason, present when success is false. */
error?: string;
}
/** One release run against an environment, with per-tenant results. */
export interface ReleaseRecord {
id: string;
environment: string;
/** Image that was rolled out. */
imageName: string;
status: 'Running' | 'Succeeded' | 'PartialFailure' | 'Failed';
startedAt: string;
/** Unset while the release is still running. */
finishedAt?: string;
tenants: TenantReleaseResult[];
/** OPC ticket numbers included in this release — presumably parsed from commit messages; confirm. */
opcNumbers: string[];
}
/** Fetches the history of past release runs. */
export async function getReleaseHistory(): Promise<ReleaseRecord[]> {
const res = await fetch(`${BASE_URL}/api/release/history`);
if (!res.ok) throw new Error(`Failed to get release history: ${res.statusText}`);
return res.json();
}
/**
 * Triggers a release to the given environment and streams log lines as SSE.
 * Calls onLine per log line and onDone with the final ReleaseRecord.
 *
 * Fixes:
 * - Close the EventSource in onerror: EventSource automatically reconnects
 *   after an error, and reconnecting to this GET endpoint would re-trigger
 *   the release.
 * - URL-encode the env path segment for safety.
 */
export function triggerRelease(
  env: string,
  onLine: (line: string) => void,
  onDone: (record: ReleaseRecord) => void,
  onError: (err: Event) => void,
): EventSource {
  const source = new EventSource(`${BASE_URL}/api/release/${encodeURIComponent(env)}`);
  source.onmessage = (e) => {
    try {
      const msg = JSON.parse(e.data);
      // Final frame carries the completed record; close to stop reconnects.
      if (msg.done && msg.release) { onDone(msg.release as ReleaseRecord); source.close(); }
      else if (typeof msg.line === 'string') onLine(msg.line);
    } catch { /* ignore malformed frames */ }
  };
  source.onerror = (e) => {
    // Prevent auto-reconnect from re-triggering the release.
    source.close();
    onError(e);
  };
  return source;
}
+44
View File
@@ -0,0 +1,44 @@
import type { ProvisioningProgressEvent, ProvisioningRequest, TenantRecord } from '../types/provisioning';
const BASE_URL = import.meta.env.VITE_API_URL ?? '';
/** Queues a tenant provisioning job; resolves to the new job's id. */
export async function submitProvisioningJob(request: ProvisioningRequest): Promise<string> {
const res = await fetch(`${BASE_URL}/api/provision`, {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify(request),
});
if (!res.ok) throw new Error(`Failed to queue job: ${res.statusText}`);
const data = await res.json();
return data.id as string;
}
/** Loads the list of provisioned tenants. */
export async function getTenants(): Promise<TenantRecord[]> {
const res = await fetch(`${BASE_URL}/api/tenants`);
if (!res.ok) throw new Error(`Failed to load tenants: ${res.statusText}`);
return res.json();
}
/**
 * Subscribes to a tenant's live log stream.
 * The caller owns the returned EventSource and must close() it.
 */
export function subscribeToTenantLogs(
subdomain: string,
onLine: (line: string) => void,
onError: (err: Event) => void,
): EventSource {
const source = new EventSource(`${BASE_URL}/api/tenants/${subdomain}/logs`);
source.onmessage = (e) => { if (e.data) onLine(e.data); };
source.onerror = onError;
return source;
}
/**
 * Subscribes to progress events for a provisioning job.
 * The caller owns the returned EventSource and must close() it.
 */
export function subscribeToJobStream(
jobId: string,
onEvent: (event: ProvisioningProgressEvent) => void,
onError: (err: Event) => void,
): EventSource {
const source = new EventSource(`${BASE_URL}/api/provision/${jobId}/stream`);
source.onmessage = (e) => {
// Malformed frames are dropped rather than surfaced as errors.
try { onEvent(JSON.parse(e.data)); } catch { /* ignore */ }
};
source.onerror = onError;
return source;
}
@@ -1,74 +1,127 @@
import { useEffect, useState, useRef } from 'react';
import { Button, Drawer, Intent, NonIdealState, Spinner, Tag, Tooltip } from '@blueprintjs/core';
import { useEffect, useState } from 'react';
import { Button, Collapse, Drawer, Icon, Intent, NonIdealState, Spinner, Tag, Tooltip } from '@blueprintjs/core';
import { html as diff2htmlHtml } from 'diff2html';
import 'diff2html/bundles/css/diff2html.min.css';
import hljs from 'highlight.js';
import 'highlight.js/styles/github.css';
import { getCommitDetail, type CommitDetail } from '../api/opcApi';
import { getCommitDetail, type CommitDetail, type CommitFile } from '../api/opcApi';
interface Props {
hash: string | null;
onClose: () => void;
}
/** Maps a file change status to the Blueprint intent used for its status icon. */
function fileStatusIntent(status: string): Intent {
  switch (status) {
    case 'added': return Intent.SUCCESS;
    case 'deleted': return Intent.DANGER;
    case 'renamed': return Intent.WARNING;
    default: return Intent.NONE;
  }
}
/** Maps a file change status to a Blueprint icon name ('edit' for plain modifications). */
function fileStatusIcon(status: string): string {
  switch (status) {
    case 'added': return 'plus';
    case 'deleted': return 'minus';
    case 'renamed': return 'arrow-right';
    default: return 'edit';
  }
}
/**
 * Collapsible per-file diff section: a toggle header (status icon, display
 * path, +/- counts) over a diff2html-rendered patch body.
 */
function FileDiff({ file }: { file: CommitFile }) {
  // Each file section manages its own expanded/collapsed state; open by default.
  const [open, setOpen] = useState(true);
  const diffHtml = file.patch
    ? diff2htmlHtml(file.patch, {
        drawFileList: false,
        matching: 'lines',
        outputFormat: 'line-by-line',
        renderNothingWhenEmpty: true,
      })
    : '';
  // Fix: render renames as "old → new". The previous code concatenated the two
  // paths with no separator, producing an unreadable run-on path.
  const displayPath = file.status === 'renamed' && file.oldPath && file.oldPath !== file.path
    ? `${file.oldPath} → ${file.path}`
    : file.path;
  return (
    <div className="gcd-file-section">
      <button
        className={`gcd-file-header ${open ? 'gcd-file-header--open' : ''}`}
        onClick={() => setOpen(o => !o)}
        type="button"
      >
        <Icon icon={open ? 'chevron-down' : 'chevron-right'} size={14} className="gcd-file-chevron" />
        <Icon icon={fileStatusIcon(file.status)} size={13} intent={fileStatusIntent(file.status)} className="gcd-file-status-icon" />
        <span className="gcd-file-path">{displayPath}</span>
        <span className="gcd-file-stats">
          {file.additions > 0 && <span className="gcd-adds">+{file.additions}</span>}
          {file.deletions > 0 && <span className="gcd-dels">-{file.deletions}</span>}
        </span>
      </button>
      <Collapse isOpen={open} keepChildrenMounted>
        {diffHtml
          ? <div className="git-diff-container" dangerouslySetInnerHTML={{ __html: diffHtml }} />
          : <div className="gcd-no-diff">Binary or empty file no textual diff available.</div>
        }
      </Collapse>
    </div>
  );
}
export function GitCommitDrawer({ hash, onClose }: Props) {
const [detail, setDetail] = useState<CommitDetail | null>(null);
const [loading, setLoading] = useState(false);
const [error, setError] = useState<string | null>(null);
const diffRef = useRef<HTMLDivElement>(null);
useEffect(() => {
if (!hash) { setDetail(null); setError(null); return; }
setLoading(true); setDetail(null); setError(null);
if (!hash) {
// Delay clearing so the closing animation doesn't flash blank
const t = setTimeout(() => { setDetail(null); setError(null); }, 300);
return () => clearTimeout(t);
}
setLoading(true);
setError(null);
getCommitDetail(hash)
.then(setDetail)
.then(d => { setDetail(d); setError(null); })
.catch(e => setError(String(e)))
.finally(() => setLoading(false));
}, [hash]);
// After diff HTML is injected, run highlight.js over code blocks
useEffect(() => {
if (detail && diffRef.current) {
diffRef.current.querySelectorAll<HTMLElement>('code[class]').forEach(el => {
hljs.highlightElement(el);
});
}
}, [detail]);
const combinedPatch = detail?.files.map(f => f.patch).join('\n') ?? '';
const diffHtml = combinedPatch
? diff2htmlHtml(combinedPatch, {
drawFileList: true,
matching: 'lines',
outputFormat: 'line-by-line',
renderNothingWhenEmpty: false,
})
: '';
const totalAdds = detail?.files.reduce((a, f) => a + f.additions, 0) ?? 0;
const totalDels = detail?.files.reduce((a, f) => a + f.deletions, 0) ?? 0;
return (
<Drawer
isOpen={!!hash}
onClose={onClose}
title={detail ? (
title={
detail ? (
<span className="git-drawer-title">
<code className="git-drawer-hash">{detail.shortHash}</code>
<span className="git-drawer-subject">{detail.subject}</span>
</span>
) : 'Commit Diff'}
) : 'Commit Diff'
}
size="70%"
position="right"
className="git-commit-drawer"
>
<div className="git-drawer-body">
{loading && <NonIdealState icon={<Spinner size={24} />} title="Loading diff…" />}
{error && <NonIdealState icon="error" intent={Intent.DANGER} title="Failed to load commit" description={error} />}
{/* Scrollable body */}
<div className="gcd-body">
{/* Loading overlay — keeps old content visible while fetching next */}
{loading && (
<div className="gcd-loading-overlay">
<Spinner size={28} />
</div>
)}
{detail && (
{error && (
<NonIdealState icon="error" intent={Intent.DANGER}
title="Failed to load commit" description={error} />
)}
{!error && detail && (
<>
{/* Metadata bar */}
<div className="git-commit-meta-bar">
<div className="git-commit-meta-left">
<Tooltip content="Copy full hash">
<Tooltip content="Copy full hash" placement="bottom">
<code
className="git-commit-hash-chip"
onClick={() => navigator.clipboard.writeText(detail.hash)}
@@ -81,26 +134,33 @@ export function GitCommitDrawer({ hash, onClose }: Props) {
<span className="git-commit-date">{detail.date}</span>
</div>
<div className="git-commit-meta-right">
<Tag intent={Intent.SUCCESS} minimal round icon="add">
+{detail.files.reduce((a, f) => a + f.additions, 0)}
{totalAdds > 0 && (
<Tag intent={Intent.SUCCESS} minimal round>+{totalAdds}</Tag>
)}
{totalDels > 0 && (
<Tag intent={Intent.DANGER} minimal round>-{totalDels}</Tag>
)}
<Tag minimal round>
{detail.files.length} file{detail.files.length !== 1 ? 's' : ''}
</Tag>
<Tag intent={Intent.DANGER} minimal round icon="remove">
-{detail.files.reduce((a, f) => a + f.deletions, 0)}
</Tag>
<Tag minimal round>{detail.files.length} file{detail.files.length !== 1 ? 's' : ''}</Tag>
</div>
</div>
{/* Commit body if multiline */}
{/* Extended commit message */}
{detail.body.trim() !== detail.subject.trim() && (
<pre className="git-commit-body">{detail.body.trim()}</pre>
)}
{/* Diff */}
{diffHtml
? <div ref={diffRef} className="git-diff-container" dangerouslySetInnerHTML={{ __html: diffHtml }} />
: <NonIdealState icon="git-commit" title="No diff" description="This commit has no file changes." />
}
{/* Per-file diffs */}
{detail.files.length === 0 ? (
<NonIdealState icon="git-commit" title="No file changes" />
) : (
<div className="gcd-files-list">
{detail.files.map(f => (
<FileDiff key={f.path} file={f} />
))}
</div>
)}
</>
)}
@@ -109,7 +169,8 @@ export function GitCommitDrawer({ hash, onClose }: Props) {
)}
</div>
<div className="git-drawer-footer">
{/* Footer — sticky at bottom */}
<div className="gcd-footer">
<Button text="Close" onClick={onClose} />
</div>
</Drawer>
@@ -1,6 +1,6 @@
import { useEffect, useRef, useState } from 'react';
import { Button, Callout, Intent, Tag } from '@blueprintjs/core';
import { getImageStatus, type ImageBuildStatus } from '../api/provisioningApi';
import { getImageStatus, type ImageBuildStatus } from '../api/imageApi';
const BASE_URL = import.meta.env.VITE_API_URL ?? '';
@@ -4,12 +4,14 @@ import ClientDetailsStep from './ClientDetailsStep';
import DeploymentConfigStep from './DeploymentConfigStep';
import ReviewStep from './ReviewStep';
import DeploymentLiveStep from './DeploymentLiveStep';
import { submitProvisioningJob } from '../../api/provisioningApi';
import { submitProvisioningJob } from '../../api/tenantApi';
import { defaultStackConfig } from '../../types/provisioning';
import type { ProvisioningRequest } from '../../types/provisioning';
const EMPTY: ProvisioningRequest = {
clientName: '', stateCode: '', subdomain: '', adminEmail: '',
siteCode: '', environment: 'fdev', tier: 'Shared',
stackConfig: defaultStackConfig('Shared'),
};
const STEP_LABELS = ['Client Details', 'Deployment Config', 'Review', 'Deploying'];
@@ -1,5 +1,6 @@
import { useEffect } from 'react';
import type { ProvisioningRequest, TenantEnvironment, TenantTier } from '../../types/provisioning';
import { ALLOWED_MODES, defaultStackConfig } from '../../types/provisioning';
import type { ComponentMode, ProvisioningRequest, StackConfig, TenantEnvironment, TenantTier } from '../../types/provisioning';
interface Props {
signalParent: (state: { isValid: boolean }) => void;
@@ -8,36 +9,31 @@ interface Props {
}
const ENVIRONMENTS: { value: TenantEnvironment; label: string; description: string }[] = [
{ value: 'fdev', label: 'Dev (fdev)', description: 'Feature development - fast provisioning, no production data.' },
{ value: 'uat', label: 'UAT', description: 'User acceptance testing - mirrors production configuration.' },
{ value: 'prod', label: 'Production', description: 'Live production environment. Full isolation enforced.' },
{ value: 'fdev', label: 'Dev (fdev)', description: 'Feature dev fast provisioning, no production data.' },
{ value: 'uat', label: 'UAT', description: 'User acceptance testing mirrors production config.' },
{ value: 'prod', label: 'Production', description: 'Live production. Full isolation enforced.' },
];
const TIERS: { value: TenantTier; label: string; description: string; badge: string }[] = [
{
value: 'Trial',
label: 'Trial',
badge: 'Sandbox',
description: 'Ephemeral all-in-one sandbox. Bundled Postgres, shared Keycloak and Vault. No persistent data guarantee.',
},
{
value: 'Shared',
label: 'Shared',
badge: 'Standard',
description: 'Shared Keycloak, Vault, Postgres and MinIO. Isolated by realm, namespace and bucket.',
},
{
value: 'Dedicated',
label: 'Dedicated',
badge: 'Professional',
description: 'Own sidecar containers per component (Postgres, Keycloak, Vault, MinIO) on the shared host.',
},
{
value: 'Enterprise',
label: 'Enterprise',
badge: 'Enterprise',
description: 'Full VM isolation per component. VpsDocker or VpsBareMetal, provisioned via Pulumi.',
},
const TIERS: { value: TenantTier; label: string; badge: string; description: string }[] = [
{ value: 'Trial', label: 'Trial', badge: 'Sandbox', description: 'Ephemeral all-in-one sandbox. No persistent data guarantee.' },
{ value: 'Shared', label: 'Shared', badge: 'Standard', description: 'Shared platform services, isolated by realm/schema/bucket.' },
{ value: 'Dedicated', label: 'Dedicated', badge: 'Professional', description: 'Own sidecar containers per component on the shared host.' },
{ value: 'Enterprise', label: 'Enterprise', badge: 'Enterprise', description: 'Full VM isolation per component, provisioned via Pulumi.' },
];
const MODE_LABELS: Record<ComponentMode, string> = {
SharedPlatform: 'Shared Platform',
Bundled: 'Bundled (in image)',
OwnContainer: 'Own Container',
VpsDocker: 'VPS — Docker',
VpsBareMetal: 'VPS — Bare Metal',
};
const COMPONENTS: { key: keyof StackConfig; label: string; description: string }[] = [
{ key: 'postgres', label: 'PostgreSQL', description: 'Relational database for tenant data.' },
{ key: 'keycloak', label: 'Keycloak', description: 'Identity & access management (realms, OIDC clients).' },
{ key: 'vault', label: 'Vault', description: 'Secrets management and dynamic credentials.' },
{ key: 'minio', label: 'MinIO', description: 'Object storage (S3-compatible).' },
];
export default function DeploymentConfigStep({ signalParent, data, onChange }: Props) {
@@ -45,10 +41,22 @@ export default function DeploymentConfigStep({ signalParent, data, onChange }: P
signalParent({ isValid: !!data.tier && !!data.environment });
}, [data.tier, data.environment, signalParent]);
function handleTierChange(tier: TenantTier) {
// Reset stackConfig to the default for the new tier so nothing is invalid
onChange({ tier, stackConfig: defaultStackConfig(tier) });
}
function handleComponentChange(key: keyof StackConfig, mode: ComponentMode) {
onChange({ stackConfig: { ...data.stackConfig, [key]: mode } });
}
const allowed = ALLOWED_MODES[data.tier];
return (
<div className="wizard-step">
<p className="step-description">Choose the deployment environment and infrastructure isolation tier.</p>
<p className="step-description">Choose the deployment environment, isolation tier, and per-component infrastructure mode.</p>
{/* ── Environment ───────────────────────────────────────── */}
<h4 style={{ marginBottom: '0.5rem' }}>Environment</h4>
<div className="tier-cards" style={{ marginBottom: '1.5rem' }}>
{ENVIRONMENTS.map((env) => (
@@ -66,14 +74,15 @@ export default function DeploymentConfigStep({ signalParent, data, onChange }: P
))}
</div>
{/* ── Isolation Tier ────────────────────────────────────── */}
<h4 style={{ marginBottom: '0.5rem' }}>Isolation Tier</h4>
<div className="tier-cards">
<div className="tier-cards" style={{ marginBottom: '1.5rem' }}>
{TIERS.map((tier) => (
<button
key={tier.value}
type="button"
className={`tier-card${data.tier === tier.value ? ' selected' : ''}`}
onClick={() => onChange({ tier: tier.value })}
onClick={() => handleTierChange(tier.value)}
>
<div className="tier-card-header">
<span className="tier-card-label">{tier.label}</span>
@@ -83,6 +92,40 @@ export default function DeploymentConfigStep({ signalParent, data, onChange }: P
</button>
))}
</div>
{/* ── Per-Component Stack Config ────────────────────────── */}
<h4 style={{ marginBottom: '0.25rem' }}>Stack Configuration</h4>
<p style={{ fontSize: '0.85rem', color: '#5f6b7c', marginBottom: '0.75rem' }}>
Defaults are set by the tier. Override individual components as needed.
</p>
<table className="stack-config-table">
<thead>
<tr>
<th>Component</th>
<th>Description</th>
<th>Mode</th>
</tr>
</thead>
<tbody>
{COMPONENTS.map(({ key, label, description }) => (
<tr key={key}>
<td><strong>{label}</strong></td>
<td style={{ color: '#5f6b7c', fontSize: '0.85rem' }}>{description}</td>
<td>
<select
className="stack-config-select"
value={data.stackConfig[key]}
onChange={(e) => handleComponentChange(key, e.target.value as ComponentMode)}
>
{allowed.map((mode) => (
<option key={mode} value={mode}>{MODE_LABELS[mode]}</option>
))}
</select>
</td>
</tr>
))}
</tbody>
</table>
</div>
);
}
@@ -1,6 +1,6 @@
import { useEffect, useRef, useState } from 'react';
import { AnchorButton, Callout, Intent, ProgressBar, Spinner, Tab, Tabs, Tag } from '@blueprintjs/core';
import { subscribeToJobStream } from '../../api/provisioningApi';
import { subscribeToJobStream } from '../../api/tenantApi';
import { tenantUrl } from '../../config';
import type { ProvisioningProgressEvent } from '../../types/provisioning';
@@ -1,15 +1,32 @@
import { Callout, HTMLTable, Intent, Tag } from '@blueprintjs/core';
import { tenantUrl } from '../../config';
import type { ProvisioningRequest } from '../../types/provisioning';
import type { ComponentMode, ProvisioningRequest } from '../../types/provisioning';
interface Props {
signalParent: (state: { isValid: boolean }) => void;
data: ProvisioningRequest;
}
const MODE_LABELS: Record<ComponentMode, string> = {
SharedPlatform: 'Shared Platform',
Bundled: 'Bundled (in image)',
OwnContainer: 'Own Container',
VpsDocker: 'VPS — Docker',
VpsBareMetal: 'VPS — Bare Metal',
};
const MODE_INTENTS: Record<ComponentMode, Intent> = {
SharedPlatform: Intent.NONE,
Bundled: Intent.PRIMARY,
OwnContainer: Intent.WARNING,
VpsDocker: Intent.DANGER,
VpsBareMetal: Intent.DANGER,
};
export default function ReviewStep({ data }: Props) {
const clientUrl = tenantUrl(data.subdomain);
const containerName = data.subdomain;
const sc = data.stackConfig;
return (
<div className="wizard-step">
@@ -28,6 +45,22 @@ export default function ReviewStep({ data }: Props) {
</tbody>
</HTMLTable>
<h4 style={{ margin: '1.25rem 0 0.5rem' }}>Stack Configuration</h4>
<HTMLTable striped bordered className="review-table">
<thead>
<tr>
<th>Component</th>
<th>Mode</th>
</tr>
</thead>
<tbody>
<tr><td>PostgreSQL</td><td><Tag intent={MODE_INTENTS[sc.postgres]} minimal round>{MODE_LABELS[sc.postgres]}</Tag></td></tr>
<tr><td>Keycloak</td><td><Tag intent={MODE_INTENTS[sc.keycloak]} minimal round>{MODE_LABELS[sc.keycloak]}</Tag></td></tr>
<tr><td>Vault</td><td><Tag intent={MODE_INTENTS[sc.vault]} minimal round>{MODE_LABELS[sc.vault]}</Tag></td></tr>
<tr><td>MinIO</td><td><Tag intent={MODE_INTENTS[sc.minio]} minimal round>{MODE_LABELS[sc.minio]}</Tag></td></tr>
</tbody>
</HTMLTable>
<Callout intent={Intent.WARNING} title="This provisions real infrastructure" style={{ marginTop: '1.5rem' }}>
Clicking Deploy will start a <code>{containerName}</code> Docker container running Clarity.Server,
create a Keycloak realm, unseal Vault, and register the subdomain route in the Gateway.
+266 -32
View File
@@ -285,6 +285,39 @@ body {
.review-table { width: 100%; font-size: 0.875rem; }
.review-table td:first-child { width: 150px; color: #738091; padding-right: 1rem; padding-bottom: 0.6rem; }
.review-table td:last-child { font-weight: 500; }
.review-table th { font-size: 0.78rem; text-transform: uppercase; letter-spacing: 0.04em; color: #738091; }
/* Stack config table in DeploymentConfigStep */
.stack-config-table {
width: 100%;
border-collapse: collapse;
font-size: 0.875rem;
}
.stack-config-table th,
.stack-config-table td {
padding: 0.5rem 0.75rem;
border: 1px solid #dce0e6;
vertical-align: middle;
}
.stack-config-table th {
background: #f6f7f9;
font-weight: 600;
text-align: left;
font-size: 0.78rem;
text-transform: uppercase;
letter-spacing: 0.04em;
color: #5f6b7c;
}
.stack-config-table tbody tr:hover { background: #f6f7f9; }
.stack-config-select {
width: 100%;
padding: 0.3rem 0.5rem;
border: 1px solid #b3bac5;
border-radius: 4px;
background: #fff;
font-size: 0.875rem;
cursor: pointer;
}
.wizard-footer-actions { display: flex; gap: 8px; align-items: center; }
@@ -771,30 +804,116 @@ body {
.opc-sdlc-pipeline {
display: flex;
align-items: center;
flex-wrap: wrap;
gap: 0.2rem;
margin-bottom: 0.35rem;
}
.opc-sdlc-stage-item {
display: flex;
align-items: center;
gap: 0.2rem;
align-items: flex-start;
flex-wrap: nowrap;
gap: 0;
overflow-x: auto;
padding-bottom: 0.25rem;
}
.opc-sdlc-arrow {
color: #8f99a8;
font-size: 0.8rem;
font-size: 1rem;
font-weight: 600;
margin: 0 0.1rem;
flex-shrink: 0;
align-self: center;
margin: 0 0.4rem;
user-select: none;
}
.opc-sdlc-furthest {
font-size: 0.75rem;
/* Individual branch box */
.opc-sdlc-box {
flex: 1 1 140px;
min-width: 130px;
max-width: 200px;
display: flex;
flex-direction: column;
border: 1px solid #dce0e6;
border-radius: 6px;
background: #fff;
overflow: hidden;
flex-shrink: 0;
}
.opc-sdlc-box--reached {
border-width: 2px;
}
.opc-sdlc-box-header {
display: flex;
align-items: center;
justify-content: space-between;
padding: 0.3rem 0.5rem;
background: #f6f7f9;
border-bottom: 1px solid #e5e8eb;
flex-shrink: 0;
}
.opc-sdlc-box-count {
font-size: 0.68rem;
color: #738091;
margin-top: 0.3rem;
background: #e5e8eb;
border-radius: 10px;
padding: 0 6px;
line-height: 1.5;
}
/* Scrollable body */
.opc-sdlc-box-body {
flex: 1;
overflow-y: auto;
max-height: 140px;
min-height: 60px;
padding: 0.3rem 0.4rem;
display: flex;
flex-direction: column;
gap: 0.15rem;
}
.opc-sdlc-sha-row {
display: flex;
align-items: baseline;
gap: 0.35rem;
padding: 0.1rem 0.2rem;
border-radius: 3px;
opacity: 0.35;
}
.opc-sdlc-sha-row--reached {
opacity: 1;
}
.opc-sdlc-sha {
font-family: 'Consolas', 'Courier New', monospace;
font-size: 0.7rem;
color: #2d72d2;
flex-shrink: 0;
}
.opc-sdlc-sha-msg {
font-size: 0.68rem;
color: #4a5568;
overflow: hidden;
text-overflow: ellipsis;
white-space: nowrap;
flex: 1;
min-width: 0;
}
.opc-sdlc-box-empty {
font-size: 0.7rem;
color: #a3acb6;
font-style: italic;
padding: 0.2rem 0;
}
.opc-sdlc-box-pending {
font-size: 0.68rem;
color: #a3acb6;
font-style: italic;
margin-top: auto;
padding-top: 0.25rem;
border-top: 1px dashed #e5e8eb;
}
/* Commits section labels */
@@ -817,10 +936,50 @@ body {
}
/* ── Git Commit Drawer ──────────────────────────────────────────────────────── */
.git-commit-drawer .bp5-drawer-header {
/* Drawer shell: full-height flex column */
.git-commit-drawer.bp6-drawer {
display: flex;
flex-direction: column;
height: 100%;
overflow: hidden;
}
.git-commit-drawer .bp6-drawer-header {
flex-shrink: 0;
padding: 0.75rem 1rem;
}
/*
* .gcd-body is the scrollable content area.
* Blueprint v6 renders children directly inside .bp6-drawer — no body wrapper.
*/
.git-commit-drawer .gcd-body {
flex: 1 1 0; /* 0 basis — don't size from content, allow shrink */
min-height: 0; /* flex children won't shrink past content without this */
overflow-y: auto;
overflow-x: hidden;
padding: 1rem;
display: flex;
flex-direction: column;
gap: 1rem;
position: relative; /* loading overlay anchor */
}
/* Children of the scroll container must NOT shrink — if they do, content
* never overflows and the scrollbar never appears. */
.git-commit-drawer .gcd-body > * {
flex-shrink: 0;
}
/* Footer rendered as last child — sits below the scroll area */
.git-commit-drawer .gcd-footer {
flex-shrink: 0;
padding: 0.5rem 1rem;
display: flex;
justify-content: flex-end;
}
.git-drawer-title {
display: flex;
align-items: center;
@@ -838,6 +997,95 @@ body {
font-family: 'JetBrains Mono', 'Fira Code', monospace;
}
/* Loading overlay — keeps old diff visible while fetching next commit */
.gcd-loading-overlay {
position: absolute;
inset: 0;
background: rgba(255, 255, 255, 0.7);
display: flex;
align-items: center;
justify-content: center;
z-index: 10;
pointer-events: none;
}
/* Per-file accordion */
.gcd-files-list {
display: flex;
flex-direction: column;
gap: 0;
border: 1px solid #dce0e6;
border-radius: 6px;
overflow: hidden;
margin: 0.75rem 0;
}
.gcd-file-section {
border-bottom: 1px solid #dce0e6;
}
.gcd-file-section:last-child {
border-bottom: none;
}
.gcd-file-header {
all: unset;
box-sizing: border-box;
display: flex;
align-items: center;
gap: 0.5rem;
width: 100%;
padding: 0.45rem 0.75rem;
background: #f6f8fa;
cursor: pointer;
user-select: none;
transition: background 0.1s;
font-family: 'JetBrains Mono', 'Fira Code', monospace;
font-size: 0.78rem;
color: #1c2127;
}
.gcd-file-header:hover,
.gcd-file-header--open {
background: #edf2f7;
}
.gcd-file-chevron {
flex-shrink: 0;
color: #738091;
}
.gcd-file-status-icon {
flex-shrink: 0;
}
.gcd-file-path {
flex: 1;
white-space: nowrap;
overflow: hidden;
text-overflow: ellipsis;
min-width: 0;
}
.gcd-file-stats {
display: flex;
gap: 0.4rem;
flex-shrink: 0;
font-size: 0.73rem;
font-family: 'JetBrains Mono', 'Fira Code', monospace;
}
.gcd-adds { color: #1a7f37; font-weight: 600; }
.gcd-dels { color: #cf222e; font-weight: 600; }
.gcd-no-diff {
padding: 0.6rem 1rem;
font-size: 0.8rem;
color: #738091;
font-style: italic;
background: #fafafa;
}
.git-drawer-subject {
font-size: 0.92rem;
font-weight: 600;
@@ -847,21 +1095,6 @@ body {
color: #1c2127;
}
.git-drawer-body {
flex: 1;
overflow-y: auto;
padding: 1rem;
display: flex;
flex-direction: column;
gap: 1rem;
}
.git-drawer-footer {
padding: 0.75rem 1rem;
border-top: 1px solid #d3d8de;
display: flex;
justify-content: flex-end;
}
.git-commit-meta-bar {
display: flex;
@@ -928,7 +1161,8 @@ body {
font-size: 0.78rem;
line-height: 1.45;
border-radius: 6px;
overflow: hidden;
overflow-x: auto; /* horizontal scroll for wide diffs, not clip */
overflow-y: visible;
border: 1px solid #d0d7de;
}
+3
View File
@@ -1,10 +1,13 @@
import { StrictMode } from 'react'
import { createRoot } from 'react-dom/client'
import { BrowserRouter } from 'react-router-dom'
import './index.css'
import App from './App.tsx'
createRoot(document.getElementById('root')!).render(
<StrictMode>
<BrowserRouter>
<App />
</BrowserRouter>
</StrictMode>,
)
+30 -33
View File
@@ -1,4 +1,4 @@
import { useState, useMemo, useEffect, useCallback } from 'react';
import { useState, useMemo, useEffect, useCallback, Fragment } from 'react';
import { GitCommitDrawer } from '../components/GitCommitDrawer';
import {
Button, Callout, Divider, Drawer, FormGroup,
@@ -76,18 +76,9 @@ const SDLC_STAGES: { branch: string; label: string; intent: Intent }[] = [
{ branch: 'develop', label: 'Dev', intent: Intent.PRIMARY },
{ branch: 'staging', label: 'Staging', intent: Intent.WARNING },
{ branch: 'uat', label: 'UAT', intent: Intent.DANGER },
{ branch: 'master', label: 'Production', intent: Intent.SUCCESS },
{ branch: 'main', label: 'Production', intent: Intent.SUCCESS },
];
function deriveSdlcSummary(coverage: BranchCoverage[]): { label: string; intent: Intent } | null {
for (let i = SDLC_STAGES.length - 1; i >= 0; i--) {
const stage = SDLC_STAGES[i];
const hit = coverage.find(c => c.branch === stage.branch);
if (hit?.contains) return { label: stage.label, intent: stage.intent };
}
return null;
}
// Aggregate per-repo branch coverage into a single view.
// A stage is "reached" only when every repo that recognised at least one hash
// reports contains=true for that branch. Repos that recognised no hashes are
@@ -487,42 +478,48 @@ function CommitsTab({ opc, isActive }: { opc: Opc; isActive: boolean }) {
{/* SDLC Delivery Chain */}
{coverage.length > 0 && (() => {
const summary = deriveSdlcSummary(coverage);
const allCommits = [
...autoCommits,
...pinned.map(p => ({ repoKey: 'pinned', hash: p.hash, shortHash: p.shortHash, author: p.pinnedBy, date: p.pinnedAt, subject: p.subject, files: [] })),
].filter((c, i, a) => a.findIndex(x => x.hash === c.hash) === i);
return (
<div className="opc-delivery-chain">
<div className="opc-field-label" style={{ marginBottom: '0.6rem' }}>Delivery Chain</div>
<div className="opc-field-label" style={{ marginBottom: '0.75rem' }}>Delivery Chain</div>
<div className="opc-sdlc-pipeline">
{SDLC_STAGES.map((stage, i) => {
const hit = coverage.find(c => c.branch === stage.branch);
const reached = hit?.contains ?? false;
return (
<div key={stage.branch} className="opc-sdlc-stage-item">
<Fragment key={stage.branch}>
{i > 0 && <span className="opc-sdlc-arrow"></span>}
<Tooltip content={
reached
? `All linked commits have reached ${stage.label}`
: hit
? `Not all linked commits have reached ${stage.label} yet`
: `${stage.label} branch not found locally`
}>
<Tag
intent={reached ? stage.intent : Intent.NONE}
icon={reached ? 'tick-circle' : 'circle'}
minimal={!reached}
round
>
<div className={`opc-sdlc-box${reached ? ' opc-sdlc-box--reached' : ''}`} style={{ borderColor: reached ? SDLC_STAGES[i].intent === 'primary' ? '#2d72d2' : SDLC_STAGES[i].intent === 'warning' ? '#c87619' : SDLC_STAGES[i].intent === 'danger' ? '#ac2f33' : '#1c6e42' : '#dce0e6' }}>
{/* Box header */}
<div className="opc-sdlc-box-header">
<Tag intent={reached ? stage.intent : Intent.NONE} minimal={!reached} round style={{ fontWeight: 600, fontSize: '0.72rem' }}>
{stage.label}
</Tag>
</Tooltip>
{reached && <span className="opc-sdlc-box-count">{allCommits.length}</span>}
</div>
{/* Scrollable SHA list */}
<div className="opc-sdlc-box-body">
{allCommits.length === 0 ? (
<span className="opc-sdlc-box-empty">No linked commits</span>
) : allCommits.map(c => (
<div key={c.hash} className={`opc-sdlc-sha-row${reached ? ' opc-sdlc-sha-row--reached' : ''}`} title={c.subject}>
<code className="opc-sdlc-sha">{c.shortHash}</code>
<span className="opc-sdlc-sha-msg">{c.subject}</span>
</div>
))}
{!reached && allCommits.length > 0 && (
<div className="opc-sdlc-box-pending">Not yet promoted</div>
)}
</div>
</div>
</Fragment>
);
})}
</div>
{summary && (
<div className="opc-sdlc-furthest">
Furthest: <strong>{summary.label}</strong>
</div>
)}
</div>
);
})()}
File diff suppressed because it is too large Load Diff
@@ -3,10 +3,8 @@ import {
Button, Callout, Intent, Tag, Spinner, NonIdealState,
Collapse, HTMLTable,
} from '@blueprintjs/core';
import {
getProjects, getBuildHistory, getGitLog,
type ProjectDefinition, type BuildRecord, type GitCommit,
} from '../api/provisioningApi';
import { getProjects, getBuildHistory, type ProjectDefinition, type BuildRecord } from '../api/buildApi';
import { getGitLog, type GitCommit } from '../api/gitApi';
const BASE_URL = import.meta.env.VITE_API_URL ?? '';
@@ -14,6 +12,7 @@ const KIND_INTENT: Record<string, Intent> = {
DotnetProject: Intent.PRIMARY,
NpmProject: Intent.WARNING,
DockerImage: Intent.NONE,
SolutionBuild: Intent.SUCCESS,
};
const STATUS_INTENT: Record<string, Intent> = {
@@ -64,7 +63,7 @@ function GitHistoryPanel({ relativePath }: { relativePath: string }) {
<p style={{ fontSize: '0.75rem', color: '#8f99a8', marginTop: '0.5rem' }}>No commits found for this path.</p>
)}
{commits.length > 0 && (
<HTMLTable className="bp5-html-table-condensed bp5-html-table-striped" style={{ width: '100%', marginTop: '0.5rem', fontSize: '0.72rem' }}>
<HTMLTable className="bp6-html-table-condensed bp6-html-table-striped" style={{ width: '100%', marginTop: '0.5rem', fontSize: '0.72rem' }}>
<thead>
<tr>
<th style={{ width: 60 }}>Commit</th>
@@ -250,9 +249,9 @@ export default function BuildMonitorPage() {
useEffect(() => { (async () => { await load(); })(); }, []);
// Find latest build per project
const lastBuildFor = (name: string): BuildRecord | undefined =>
history.find((b) => b.target.includes(name.split(' ')[0]) || b.target.endsWith(name));
// Find latest build per project — match exactly by relativePath (= build target)
const lastBuildFor = (project: ProjectDefinition): BuildRecord | undefined =>
history.find((b) => b.target === project.relativePath);
return (
<>
@@ -292,7 +291,7 @@ export default function BuildMonitorPage() {
<ProjectCard
key={p.name}
project={p}
lastBuild={lastBuildFor(p.name)}
lastBuild={lastBuildFor(p)}
onBuilt={load}
/>
))}
@@ -2,7 +2,7 @@ import { useEffect, useRef, useState } from 'react';
import { AnchorButton, Button, Callout, Intent, NonIdealState, Spinner, Tab, Tabs, Tag } from '@blueprintjs/core';
import DeployWizard from '../components/wizard/DeployWizard';
import { tenantUrl, CLARITY_DOMAIN } from '../config';
import { getTenants, subscribeToTenantLogs } from '../api/provisioningApi';
import { getTenants, subscribeToTenantLogs } from '../api/tenantApi';
import type { TenantRecord } from '../types/provisioning';
const ENV_INTENT: Record<string, Intent> = {
+321 -26
View File
@@ -1,9 +1,15 @@
import { useEffect, useRef, useState } from 'react';
import {
Button, Callout, Intent, Tag, Spinner,
HTMLTable, Card, Elevation,
HTMLTable, Card, Elevation, Tabs, Tab, type TabId,
FormGroup, InputGroup,
} from '@blueprintjs/core';
import { getImageStatus, getBuildHistory, type ImageBuildStatus, type BuildRecord } from '../api/provisioningApi';
import { getImageStatus, type ImageBuildStatus } from '../api/imageApi';
import { getBuildHistory, type BuildRecord } from '../api/buildApi';
import {
getInfraStatus, streamComposeUp, streamComposeForceUp, streamComposeNuke, streamComposeDown,
type InfraService,
} from '../api/infraApi';
const BASE_URL = import.meta.env.VITE_API_URL ?? '';
@@ -13,9 +19,13 @@ const STATUS_INTENT: Record<string, Intent> = {
Running: Intent.PRIMARY,
};
// ── Live terminal ─────────────────────────────────────────────────────────────
// ── Shared terminal ───────────────────────────────────────────────────────────
function BuildTerminal({ lines }: { lines: string[] }) {
function Terminal({ lines, height = 360, placeholder = 'Waiting for output…' }: {
lines: string[];
height?: number;
placeholder?: string;
}) {
const ref = useRef<HTMLDivElement>(null);
useEffect(() => {
@@ -33,7 +43,7 @@ function BuildTerminal({ lines }: { lines: string[] }) {
color: '#c9d1d9',
padding: '0.75rem 1rem',
borderRadius: 6,
height: 420,
height,
overflowY: 'auto',
whiteSpace: 'pre-wrap',
wordBreak: 'break-all',
@@ -41,10 +51,10 @@ function BuildTerminal({ lines }: { lines: string[] }) {
}}
>
{lines.length === 0 ? (
<span style={{ color: '#484f58' }}>Waiting for build output</span>
<span style={{ color: '#484f58' }}>{placeholder}</span>
) : (
lines.map((l, i) => {
const isError = l.startsWith('✖');
const isError = l.startsWith('✖') || l.toLowerCase().includes('error');
const isSuccess = l.startsWith('✔');
const isSep = l.startsWith('──');
const color = isError ? '#f85149' : isSuccess ? '#3fb950' : isSep ? '#484f58' : undefined;
@@ -89,9 +99,272 @@ function BuildHistoryTable({ records }: { records: BuildRecord[] }) {
);
}
// ── Platform tab ──────────────────────────────────────────────────────────────
function PlatformTab() {
const [services, setServices] = useState<InfraService[]>([]);
const [loading, setLoading] = useState(false);
const [composeBusy, setBusy] = useState<'up' | 'force' | 'nuke' | 'down' | null>(null);
const [lines, setLines] = useState<string[]>([]);
const sseRef = useRef<EventSource | null>(null);
const refresh = () => {
setLoading(true);
getInfraStatus()
.then(d => setServices(d.services))
.catch(() => {})
.finally(() => setLoading(false));
};
useEffect(() => { refresh(); }, []);
function startStream(
streamer: (onLine: (l: string) => void, onDone: () => void) => EventSource,
label: 'up' | 'force' | 'nuke' | 'down',
) {
sseRef.current?.close();
setLines([`▶ compose ${label}`]);
setBusy(label);
const src = streamer(
(line) => setLines(prev => [...prev, line]),
() => { setBusy(null); refresh(); },
);
sseRef.current = src;
}
const running = services.filter(s => s.status === 'running').length;
const statusIntent = services.length === 0 ? Intent.NONE
: running === services.length ? Intent.SUCCESS
: running === 0 ? Intent.DANGER
: Intent.WARNING;
return (
<div style={{ display: 'flex', flexDirection: 'column', gap: '1rem' }}>
<Card elevation={Elevation.ONE} style={{
display: 'flex', alignItems: 'center', gap: '0.75rem',
padding: '0.75rem 1rem', flexWrap: 'wrap',
}}>
{loading ? <Spinner size={16} /> : (
<Tag intent={statusIntent} round large>
{services.length === 0 ? 'Not checked' : `${running} / ${services.length} running`}
</Tag>
)}
<Button small icon="refresh" minimal onClick={refresh} loading={loading}>Refresh</Button>
<div style={{ display: 'flex', gap: '0.4rem', marginLeft: 'auto' }}>
<Button
small icon="play" intent={Intent.SUCCESS}
loading={composeBusy === 'up'} disabled={composeBusy !== null}
onClick={() => startStream(streamComposeUp, 'up')}
>Compose Up</Button>
<Button
small icon="refresh" intent={Intent.WARNING}
loading={composeBusy === 'force'} disabled={composeBusy !== null}
onClick={() => startStream(streamComposeForceUp, 'force')}
title="Force-recreate all containers and remove orphans. Fixes 'container name already in use' errors."
>Force Recreate</Button>
<Button
small icon="flame" intent={Intent.DANGER}
loading={composeBusy === 'nuke'} disabled={composeBusy !== null}
onClick={() => startStream(streamComposeNuke, 'nuke')}
title="Force-removes every platform container by name then runs compose up. Use when Force Recreate still fails with name conflicts."
>Nuke &amp; Recreate</Button>
<Button
small icon="stop" intent={Intent.DANGER}
loading={composeBusy === 'down'} disabled={composeBusy !== null}
onClick={() => startStream(streamComposeDown, 'down')}
>Compose Down</Button>
</div>
</Card>
{services.length > 0 && (
<div style={{ display: 'flex', gap: '0.4rem', flexWrap: 'wrap' }}>
{services.map(s => (
<Tag
key={s.container}
intent={s.status === 'running' ? Intent.SUCCESS : s.status === 'unhealthy' ? Intent.WARNING : Intent.DANGER}
minimal
title={s.uptime ? `Up ${s.uptime}` : undefined}
>
{s.container}
</Tag>
))}
</div>
)}
<Terminal lines={lines} height={300} placeholder="Run Compose Up or Force Recreate to see output…" />
</div>
);
}
// ── Verify tab ────────────────────────────────────────────────────────────────
function VerifyTab() {
const [ehContainer, setEhContainer] = useState('');
const [ehResult, setEhResult] = useState<string | null>(null);
const [ehLoading, setEhLoading] = useState(false);
const [ehError, setEhError] = useState<string | null>(null);
const [dnsContainer, setDnsContainer] = useState('');
const [dnsUrl, setDnsUrl] = useState('https://keycloak.clarity.test/health/ready');
const [dnsResult, setDnsResult] = useState<{ success: boolean; output: string; error: string } | null>(null);
const [dnsLoading, setDnsLoading] = useState(false);
const [subdomain, setSubdomain] = useState('');
const [artifact, setArtifact] = useState<string | null>(null);
const [artLoading, setArtLoading] = useState(false);
const [artError, setArtError] = useState<string | null>(null);
async function checkExtraHosts() {
setEhLoading(true); setEhResult(null); setEhError(null);
try {
const res = await fetch(`${BASE_URL}/api/image/verify/extra-hosts/${encodeURIComponent(ehContainer)}`);
const data = await res.json();
if (!res.ok) { setEhError(data.error ?? 'Not found'); return; }
setEhResult(JSON.stringify(data.extraHosts, null, 2));
} catch (e) {
setEhError(e instanceof Error ? e.message : 'Unknown error');
} finally { setEhLoading(false); }
}
async function runDnsTest() {
setDnsLoading(true); setDnsResult(null);
try {
const res = await fetch(`${BASE_URL}/api/image/verify/dns-test`, {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({ container: dnsContainer, url: dnsUrl }),
});
const data = await res.json();
setDnsResult({ success: data.success, output: data.output ?? '', error: data.error ?? '' });
} catch (e) {
setDnsResult({ success: false, output: '', error: e instanceof Error ? e.message : 'Unknown error' });
} finally { setDnsLoading(false); }
}
async function viewArtifact() {
setArtLoading(true); setArtifact(null); setArtError(null);
try {
const res = await fetch(`${BASE_URL}/api/image/artifact/${encodeURIComponent(subdomain)}`);
const data = await res.json();
if (!res.ok) { setArtError(data.error ?? 'Not found'); return; }
setArtifact(data.content);
} catch (e) {
setArtError(e instanceof Error ? e.message : 'Unknown error');
} finally { setArtLoading(false); }
}
return (
<div style={{ display: 'flex', flexDirection: 'column', gap: '1.25rem' }}>
<Card elevation={Elevation.ONE}>
<h4 style={{ margin: '0 0 0.4rem', fontSize: '0.9rem' }}>1 · Extra Hosts Check</h4>
<p style={{ margin: '0 0 0.75rem', fontSize: '0.8rem', color: '#8f99a8' }}>
Verifies <code>*.clarity.test host-gateway</code> was injected so OIDC discovery
traffic routes through nginx correctly.
</p>
<FormGroup label="Container name" labelFor="eh-container" style={{ marginBottom: '0.5rem' }}>
<InputGroup
id="eh-container"
value={ehContainer}
onChange={e => setEhContainer(e.target.value)}
placeholder="fdev-app-clarity-01000001"
rightElement={
<Button small minimal loading={ehLoading} intent={Intent.PRIMARY}
onClick={checkExtraHosts} disabled={!ehContainer}>
Check
</Button>
}
/>
</FormGroup>
{ehError && <Callout intent={Intent.DANGER} style={{ fontSize: '0.8rem' }}>{ehError}</Callout>}
{ehResult && (
<pre style={{
marginTop: '0.5rem', background: '#0d1117', color: '#3fb950',
padding: '0.5rem 0.75rem', borderRadius: 4, fontSize: '0.8rem',
border: '1px solid #30363d', overflowX: 'auto',
}}>{ehResult}</pre>
)}
</Card>
<Card elevation={Elevation.ONE}>
<h4 style={{ margin: '0 0 0.4rem', fontSize: '0.9rem' }}>2 · DNS Resolution Test</h4>
<p style={{ margin: '0 0 0.75rem', fontSize: '0.8rem', color: '#8f99a8' }}>
Runs <code>curl</code> from inside the container to verify <code>*.clarity.test</code> resolves
through nginx the critical path for Keycloak JWT validation.
</p>
<div style={{ display: 'flex', gap: '0.75rem', flexWrap: 'wrap', alignItems: 'flex-end' }}>
<FormGroup label="Container" labelFor="dns-container" style={{ flex: '1 1 200px', marginBottom: 0 }}>
<InputGroup
id="dns-container"
value={dnsContainer}
onChange={e => setDnsContainer(e.target.value)}
placeholder="fdev-app-clarity-01000001"
/>
</FormGroup>
<FormGroup label="URL" labelFor="dns-url" style={{ flex: '2 1 280px', marginBottom: 0 }}>
<InputGroup id="dns-url" value={dnsUrl} onChange={e => setDnsUrl(e.target.value)} />
</FormGroup>
<Button
intent={Intent.PRIMARY} loading={dnsLoading}
disabled={!dnsContainer || !dnsUrl}
onClick={runDnsTest}
style={{ marginBottom: 0 }}
>Test DNS</Button>
</div>
{dnsResult && (
<Callout
intent={dnsResult.success ? Intent.SUCCESS : Intent.DANGER}
style={{ marginTop: '0.75rem', fontSize: '0.8rem' }}
>
{dnsResult.success
? '✔ Reachable — DNS and nginx routing is working correctly.'
: '✖ Unreachable — check nginx/dnsmasq or extra_hosts injection.'}
{(dnsResult.output || dnsResult.error) && (
<pre style={{ margin: '0.5rem 0 0', fontSize: '0.75rem', whiteSpace: 'pre-wrap', overflowX: 'auto' }}>
{dnsResult.output || dnsResult.error}
</pre>
)}
</Callout>
)}
</Card>
<Card elevation={Elevation.ONE}>
<h4 style={{ margin: '0 0 0.4rem', fontSize: '0.9rem' }}>3 · Compose Artifact</h4>
<p style={{ margin: '0 0 0.75rem', fontSize: '0.8rem', color: '#8f99a8' }}>
View the generated <code>docker-compose.yml</code> saved to{' '}
<code>ClientAssets/{'{subdomain}'}/</code> after provisioning.
</p>
<FormGroup label="Subdomain" labelFor="art-subdomain" style={{ marginBottom: '0.5rem' }}>
<InputGroup
id="art-subdomain"
value={subdomain}
onChange={e => setSubdomain(e.target.value)}
placeholder="acme"
rightElement={
<Button small minimal loading={artLoading} intent={Intent.PRIMARY}
onClick={viewArtifact} disabled={!subdomain}>
View
</Button>
}
/>
</FormGroup>
{artError && <Callout intent={Intent.DANGER} style={{ fontSize: '0.8rem' }}>{artError}</Callout>}
{artifact && (
<pre style={{
marginTop: '0.5rem', background: '#0d1117', color: '#c9d1d9',
padding: '0.75rem 1rem', borderRadius: 4, fontSize: '0.75rem',
border: '1px solid #30363d', overflowX: 'auto', maxHeight: 400, overflowY: 'auto',
}}>{artifact}</pre>
)}
</Card>
</div>
);
}
// ── Page ──────────────────────────────────────────────────────────────────────
export default function ImageBuildPage() {
const [tab, setTab] = useState<TabId>('build');
const [status, setStatus] = useState<ImageBuildStatus | null>(null);
const [history, setHistory] = useState<BuildRecord[]>([]);
const [building, setBuilding] = useState(false);
@@ -162,17 +435,11 @@ export default function ImageBuildPage() {
<div className="page-header">
<div>
<h1>Image Build</h1>
<p>Build the <code style={{ fontSize: '0.85em' }}>clarity-server</code> Docker image from the current repo.</p>
<p>
Build and verify the <code style={{ fontSize: '0.85em' }}>clarity-server</code> Docker image.
Use <strong>Platform</strong> to manage infra, <strong>Verify</strong> to inspect a provisioned tenant.
</p>
</div>
<Button
icon="build"
intent={Intent.WARNING}
large
loading={building}
disabled={building}
onClick={handleBuild}
text={building ? 'Building…' : 'Build Image'}
/>
</div>
{/* ── Status bar ── */}
@@ -192,9 +459,7 @@ export default function ImageBuildPage() {
{status.lastMessage}
</Tag>
)}
{lastBuilt && (
<span style={{ fontSize: '0.8rem', color: '#8f99a8' }}>Last built: {lastBuilt}</span>
)}
{lastBuilt && <span style={{ fontSize: '0.8rem', color: '#8f99a8' }}>Last built: {lastBuilt}</span>}
</>
) : (
<Spinner size={16} />
@@ -207,25 +472,55 @@ export default function ImageBuildPage() {
</Callout>
)}
{/* ── Terminal ── */}
<div style={{ marginBottom: '1.5rem' }}>
{/* ── Tabs ── */}
<Tabs id="ibp-tabs" selectedTabId={tab} onChange={setTab} renderActiveTabPanelOnly>
<Tab
id="build"
title="Build"
panel={
<div style={{ display: 'flex', flexDirection: 'column', gap: '1.25rem', paddingTop: '1rem' }}>
<div>
<div style={{ display: 'flex', justifyContent: 'space-between', alignItems: 'center', marginBottom: '0.5rem' }}>
<h3 style={{ margin: 0, fontSize: '0.9rem', color: '#8f99a8', textTransform: 'uppercase', letterSpacing: '0.05em' }}>Output</h3>
<div style={{ display: 'flex', gap: '0.4rem' }}>
{logs.length > 0 && !building && (
<Button minimal small icon="trash" text="Clear" onClick={() => setLogs([])} />
)}
<Button
icon="build" intent={Intent.WARNING}
loading={building} disabled={building}
onClick={handleBuild}
text={building ? 'Building…' : 'Build Image'}
/>
</div>
<BuildTerminal lines={logs} />
</div>
{/* ── History ── */}
<Terminal lines={logs} />
</div>
<div>
<div style={{ display: 'flex', justifyContent: 'space-between', alignItems: 'center', marginBottom: '0.5rem' }}>
<h3 style={{ margin: 0, fontSize: '0.9rem', color: '#8f99a8', textTransform: 'uppercase', letterSpacing: '0.05em' }}>Build History</h3>
<h3 style={{ margin: 0, fontSize: '0.9rem', color: '#8f99a8', textTransform: 'uppercase', letterSpacing: '0.05em' }}>History</h3>
<Button minimal small icon="refresh" onClick={refreshStatus} />
</div>
<BuildHistoryTable records={history} />
</div>
</div>
}
/>
<Tab
id="platform"
title="Platform"
panel={<div style={{ paddingTop: '1rem' }}><PlatformTab /></div>}
/>
<Tab
id="verify"
title="Verify"
panel={<div style={{ paddingTop: '1rem' }}><VerifyTab /></div>}
/>
</Tabs>
</>
);
}
@@ -3,10 +3,8 @@ import {
Button, Callout, Intent, Tag, Spinner, HTMLTable,
NonIdealState,
} from '@blueprintjs/core';
import {
getReleaseHistory, getBuildHistory,
type ReleaseRecord, type BuildRecord,
} from '../api/provisioningApi';
import { getReleaseHistory, type ReleaseRecord } from '../api/releaseApi';
import { getBuildHistory, type BuildRecord } from '../api/buildApi';
const BASE_URL = import.meta.env.VITE_API_URL ?? '';
@@ -145,6 +143,14 @@ function ReleaseHistoryTable({ records }: { records: ReleaseRecord[] }) {
{expanded === r.id && (
<tr key={r.id + '-detail'}>
<td colSpan={7} style={{ padding: '0.4rem 1rem 0.8rem' }}>
{r.opcNumbers?.length > 0 && (
<div style={{ display: 'flex', gap: '0.3rem', flexWrap: 'wrap', alignItems: 'center', marginBottom: '0.5rem', paddingBottom: '0.5rem', borderBottom: '1px solid #e5e8eb' }}>
<span style={{ fontSize: '0.72rem', color: '#8f99a8' }}>OPCs in this release:</span>
{r.opcNumbers.map(n => (
<Tag key={n} intent={Intent.PRIMARY} minimal round style={{ fontFamily: 'monospace', fontSize: '0.72rem' }}>{n}</Tag>
))}
</div>
)}
{r.tenants.map((t) => (
<div key={t.subdomain} style={{ display: 'flex', gap: '0.5rem', marginBottom: 2 }}>
<Tag intent={t.success ? Intent.SUCCESS : Intent.DANGER} minimal round>
@@ -226,7 +232,7 @@ export default function PipelinesPage() {
try {
const [r, b] = await Promise.all([getReleaseHistory(), getBuildHistory()]);
setReleases(r);
setBuilds(b.filter((b) => b.kind === 'DockerImage'));
setBuilds(b.filter((b) => b.kind === 'SolutionBuild'));
} finally {
setLoading(false);
}
@@ -252,7 +258,7 @@ export default function PipelinesPage() {
</section>
<section>
<h3 style={{ margin: '0 0 0.5rem' }}>Image Build History</h3>
<h3 style={{ margin: '0 0 0.5rem' }}>Solution Build History</h3>
{loading ? <Spinner size={20} /> : <BuildHistoryTable records={builds} />}
</section>
</>
@@ -1,6 +1,52 @@
export type TenantTier = 'Trial' | 'Shared' | 'Dedicated' | 'Enterprise';
export type TenantEnvironment = 'fdev' | 'uat' | 'prod';
export type ComponentMode =
| 'SharedPlatform'
| 'Bundled'
| 'OwnContainer'
| 'VpsDocker'
| 'VpsBareMetal';
export interface StackConfig {
postgres: ComponentMode;
keycloak: ComponentMode;
vault: ComponentMode;
minio: ComponentMode;
}
/** Mirrors StackConfig.DefaultForTier() on the backend. */
export function defaultStackConfig(tier: TenantTier): StackConfig {
switch (tier) {
case 'Trial':
return { postgres: 'Bundled', keycloak: 'SharedPlatform', vault: 'SharedPlatform', minio: 'SharedPlatform' };
case 'Shared':
return { postgres: 'SharedPlatform', keycloak: 'SharedPlatform', vault: 'SharedPlatform', minio: 'SharedPlatform' };
case 'Dedicated':
return { postgres: 'OwnContainer', keycloak: 'OwnContainer', vault: 'OwnContainer', minio: 'OwnContainer' };
case 'Enterprise':
return { postgres: 'VpsDocker', keycloak: 'VpsDocker', vault: 'VpsDocker', minio: 'VpsDocker' };
}
}
/**
* Allowed ComponentMode values per tier.
* Mirrors the table in StackConfig.cs.
*
* | Trial | Shared | Dedicated | Enterprise |
* SharedPlatform | ✅ | ✅ | ✅ | ✅ |
* Bundled | ✅ | ❌ | ❌ | ❌ |
* OwnContainer | ❌ | ❌ | ✅ | ✅ |
* VpsDocker | ❌ | ❌ | ❌ | ✅ |
* VpsBareMetal | ❌ | ❌ | ❌ | ✅ |
*/
export const ALLOWED_MODES: Record<TenantTier, ComponentMode[]> = {
Trial: ['SharedPlatform', 'Bundled'],
Shared: ['SharedPlatform'],
Dedicated: ['SharedPlatform', 'OwnContainer'],
Enterprise: ['SharedPlatform', 'OwnContainer', 'VpsDocker', 'VpsBareMetal'],
};
export interface ProvisioningRequest {
clientName: string;
stateCode: string;
@@ -9,6 +55,7 @@ export interface ProvisioningRequest {
siteCode: string;
environment: TenantEnvironment;
tier: TenantTier;
stackConfig: StackConfig;
}
export interface ProvisioningJob {
+10
View File
@@ -0,0 +1,10 @@
# Resolve *.clarity.test -> 127.0.0.1 so browser hits nginx on the host
address=/.clarity.test/127.0.0.1
# Don't read /etc/resolv.conf or /etc/hosts
no-resolv
no-hosts
# Forward everything else to Cloudflare
server=1.1.1.1
server=1.0.0.1
+33
View File
@@ -20,6 +20,8 @@ networks:
volumes:
postgres-data:
minio-data:
clarity-gitea-data:
external: true
services:
@@ -154,6 +156,37 @@ services:
aliases:
- nginx
# ── Gitea ─────────────────────────────────────────────────────────────────
# Platform source control. Hosts OPC, Clarity, and Gateway repos.
# Accessible at https://opc.clarity.test (nginx terminates TLS).
gitea:
image: gitea/gitea:latest
container_name: clarity-gitea
restart: unless-stopped
ports:
- "3000:3000"
- "2222:22"
environment:
GITEA__database__DB_TYPE: postgres
GITEA__database__HOST: postgres:5432
GITEA__database__NAME: giteadb
GITEA__database__USER: postgres
GITEA__database__PASSWD: postgres
GITEA__server__DOMAIN: opc.clarity.test
GITEA__server__ROOT_URL: https://opc.clarity.test
GITEA__server__SSH_DOMAIN: opc.clarity.test
GITEA__server__SSH_PORT: "2222"
GITEA__service__DISABLE_REGISTRATION: "true"
volumes:
- clarity-gitea-data:/data
depends_on:
postgres:
condition: service_healthy
networks:
clarity-net:
aliases:
- gitea
# ── Dnsmasq ───────────────────────────────────────────────────────────────
# Resolves *.clarity.test → 127.0.0.1 so browser requests hit nginx on the host.
dnsmasq:
@@ -0,0 +1,19 @@
# Auto-generated by ControlPlane.Worker — do not edit manually.
# Tenant: fdev-app-clarity-01000000
server {
listen 443 ssl;
server_name fdev-app-clarity-01000000.clarity.test;
ssl_certificate /etc/nginx/certs/clarity.test.crt;
ssl_certificate_key /etc/nginx/certs/clarity.test.key;
location / {
# Docker DNS resolves the container name on the managed network
set $upstream http://fdev-app-clarity-01000000:8080;
proxy_pass $upstream;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
}
}
@@ -0,0 +1,15 @@
server {
listen 443 ssl;
server_name keycloak.clarity.test;
ssl_certificate /etc/nginx/certs/clarity.test.crt;
ssl_certificate_key /etc/nginx/certs/clarity.test.key;
location / {
proxy_pass http://keycloak:8080;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
}
}
+15
View File
@@ -0,0 +1,15 @@
server {
listen 443 ssl;
server_name opc.clarity.test;
ssl_certificate /etc/nginx/certs/clarity.test.crt;
ssl_certificate_key /etc/nginx/certs/clarity.test.key;
location / {
proxy_pass http://gitea:3000;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
}
}
+21
View File
@@ -0,0 +1,21 @@
events {
worker_connections 1024;
}
http {
resolver 127.0.0.11 valid=5s ipv6=off;
log_format main '$remote_addr - $remote_user [$time_local] "$request"'
'$status $body_bytes_sent "$http_referer"'
'"$http_user_agent"';
access_log /var/log/nginx/access.log main;
error_log /var/log/nginx/error.log warn;
server {
listen 80 default_server;
return 301 https://$host$request_uri;
}
include /etc/nginx/conf.d/*.conf;
}
+1
View File
@@ -7,3 +7,4 @@
-- clarity_{tenant} → Created at provisioning time by ControlPlane.Worker.
SELECT 'CREATE DATABASE keycloakdb' WHERE NOT EXISTS (SELECT FROM pg_database WHERE datname = 'keycloakdb')\gexec
SELECT 'CREATE DATABASE giteadb' WHERE NOT EXISTS (SELECT FROM pg_database WHERE datname = 'giteadb')\gexec
-1
View File
@@ -1 +0,0 @@
{"Value":"AAAAAQKD0fec8IBFILcD9soLfKyeSeb8XQPsZE63naCPPgsEMNad1uWm5AdivoE7aI74pWxn7VHRnjNzgZ1PoM05c37HcP9EM221rVw/xBmLc9go4h7iScu0"}
-1
View File
@@ -1 +0,0 @@
{"Value":"AAAAAQL0hTOPrrWZBzY868/phzE5mKIH9g+Wu0zNC2N1493O4sBin4lJsoN8TP0DMTvyF3X0bvLCiO/hVSQmgyZk/YuEyh1L2DkLtzFNwiCEB4JTUlP0zfrcjH1eew1cj/APRVIVXFvZ1fL11xaYfZ536kf+jvkfsFamElXSLIEFXubfN0QfIGxHRTsb0fU/ZnNRXfwbOi1u+Zwu+GVpXoKKch57wvm9rDVcErKzVFdmkjAFx1Vv7UEv7ScoIr3YA5uogs/w5XoxvT+NxENepZ8bSO7e/4Ook3ITFCV5oJDTVzm9JlKU9vCMvE2kH95l/P86kSmkyOBqqftzzAODhY+OqQoi02pHzIkqghkORnc5hbGCiI3O0InRWe8="}
@@ -1 +0,0 @@
{"Value":"AAAAAQJuSCPrV0/c92Lxj7EijLdwfOiqDEsbKYo3FSxHknoDZ4x6TFWMdSFYLynodU35Zb/F2EgrrG+JVC+NjJGO8LXK"}
-1
View File
@@ -1 +0,0 @@
{"Value":"AAAAAQJ0tHJeOe3hB/Y/v/s6tAr0Yk5f4xPweqX3/NqZ3vhGfFPjqkFRaFh7Zb7vDOrvi+LDzQbZXFVozsNv9qrm2wT7w49db21zQqNJxcKWC559HbdnUh6zPaqJrEMvB1B+absTOQM/Ry/44WU4ksjS+FyLl7PPhirAlBio64Xg3k6/f9mf6Owk7Mppa0THnMjQgxjiN/qdPupZjpoMBjX2NN6lSLVtl72GIAIlykoUP1ilCKVBWRf5eCEfFFbTt7Fppar/QPr38nO2tuwn6datmPe7sCliv2AljfX11/LmGufdRrE+3nbMpavXwU0J57SgSaYdyyEoIUoM9uUc+jBE2J9UJG+p+tbUB5txDXaSnlfTx+4aGlEg9ENIkkhFjG3PttVF3PQwayKG82LT8RBrkXMr8Xxhi0MOtmPjh0Yw"}
-1
View File
@@ -1 +0,0 @@
{"Value":"AAAAAQK+nRD7vrobbyLyrIrj2jy2b5AMK2TCLMLXxZCMnCLMFzuucTUZUnGMC9ZSDxGDuji9FNU6WJLEaVYYWAvUSooWGETCLwqYDzy7O97FX0rOhpButpbQ"}
-1
View File
@@ -1 +0,0 @@
{"Value":"AAAAAQJYHbyPrPgmU/V8OFsoR0WXPA4eEW5g/pCrtrOUpZyfedqc302GnuckUktoS9UvrVR3kDWxkWsF2RuC9GX9N6ILyBtgYAwBPSnZpR+DlXEn/tvbNug="}
-1
View File
@@ -1 +0,0 @@
{"Value":"AAAAAQL7v5kWu26J5/ZHsooCc1PxIeoa3vDQlekxxEDQ884Ez33ihan6K51IeRda1Bmnr22mPiT/mO1jnp9Mj782yUsKtYPVDcLoK3rSTi+T2f6NIZFKWn2OdkPP1G9wNajeOGywgKBIOBknJ41kiu+hvRHy0+P974sOmg8Gy2zeVHjU4jmkp4kojapimD/vF3k4FIwXdCLIOv/5yUzk/j3QD1kPmd0lO6MXHTK5HEmEqmmHsKtybyuHybJPG9y7sHVQY0yIrpnajzK+/rSvIhFAK7w8+7vRnUWb4cytuvdXejCWMiDOEl5Rd1+JDF3zVEJYs8GpMiL1qkuMjeEI9RfzpupTSq6iWCTYa8z96n9ttARjI7ZQabTux7vpoRQtOkbISt8fFEVZB8Ax2P/faG7Y0G/Gn5XcNrP+ShnRMTlq7k3MrdsXGLyWA6Q2"}
-1
View File
@@ -1 +0,0 @@
{"Value":"AAAAAQKAysqDoP86hKhBN25gnwy1oJFNXfStAz9AxR6BEBbABCJ0/Mc3baOeDCDNjWXfkilFN0mX7SFqO9qjsrFJi7Nckx352WfNmnIqBhIVxL4m3DBg4qZB+SGzS8//dqsh64rM5X0dvr8KMTzAjWKXSpGAuRb+CE6G0qiKmOGgJeXY+cg4WNE6qD6n6aDSXD3O"}
-1
View File
@@ -1 +0,0 @@
{"Value":"AAAAAQJ+SDrSC2oJcjhivUOgkb7f0fQjBlmH8Np8FK3/YCvsFm1JDdkHdp2PnsXvLgWjy2JXb65afoI5M7Hig7w+RELQnZZ938EYBDgqZuupK2mLkvBvxwF40NslJZpdSfB7rYGaY4SOUb0Z3xNbm8nBJvO3lVGfhcYgRpOJrAo9l5OI7hQG3/hCmKpJ7+dOhU0x6skKn2lOEzA3HJgU0BXd3lXpWJRGAgeAjTS00T8wEO0Osl3YeE8pWkZJUpAER7FYX/uv4dcC2VTINt87kM5DBXAdXTJ1Ku9F6PPP4fxV4wDHkdK0+EAxFxXE+qefXPkaNHQdpfh1vYb1sm5WNYP0HYLDyNwz9zipmJn2t+VcK9PB4csleA6gmROrSWscLqj9A6ZNdDuT9XiSqO5FuX4xoS5PTcnGSozlv1a+FEs9geBS9nmSbBPQXWcDq7TxhLFMKurwYMK0zoX0HzLqGa/V1Ev9AfrKHQflmCPlgA3xyjFbfirkq6BLEFSEPDTcEcjeh/7s/fimW4VBaWdDZ2htKS/gOG3lsQIAiftcokN/GQOrZLmd5Bx5iPQvkzqqy7p1nX0qSaCB3MUzbPbciuZ4E4uS6NWmSUBEP0HRi4Q6V39QlfR40TiokArfLmQ/6YfARZeo4FdSwKEC41RE3sTx2aPEvAl58KGORHIW++MDQITyq6xY+sdZm0faZRUuW81FGYQul4xa4Ji6DhEnjdqW8VaqMdE45ARm70JUzs8G1xOkE2tp9HmX7VdHrmvKMNn0S6bASYMXZ4Q19RO9fq7P6TmUoiRNIjBGJDlkflXX0EQQQBMyrn8aknicm/vpQggnqllNUAvZHaLTHWxafEXXy4Trfelbrm66JxzoYCxyKuaYUNlO8HTWHe1cMgln9PA="}
-1
View File
@@ -1 +0,0 @@
{"Value":"eyJ0eXBlIjoic2hhbWlyIiwic2VjcmV0X3NoYXJlcyI6MSwic2VjcmV0X3RocmVzaG9sZCI6MSwicGdwX2tleXMiOm51bGwsIm5vbmNlIjoiIiwiYmFja3VwIjpmYWxzZSwic3RvcmVkX3NoYXJlcyI6MSwibmFtZSI6IiJ9"}
-1
View File
@@ -1 +0,0 @@
{"Value":"AAAAAQJofIgTQJifNHQxphLayMBOChaYJlvFJHcc4zNPMBVvDr8uFy1XQzXLAysR6sgZjAZ4jhCWi2Pa0cVmTYI="}
@@ -1 +0,0 @@
{"Value":"AAAAAQLFETk9NejhTi3zbGl5M2UBwmLsnNdsVx1Qj3IEMyuYRa6hcfprmAJq78f9DSsiqD9XwYdKAw9+hhwDHXriG0yX"}
@@ -1 +0,0 @@
{"Value":"AAAAAQKdBPgKRn3+fSLmFoRgmwvnRG8A7VobciPCQ3E4PGOUOqFoqlCA1ClehtDoOEoCzqj378OA2T7C1UcJBFe48EawcsmGWIXzAKW0Dl+npXiU/htSJoI5kiYODIwnGAqmL1d+iG6d1AzFtaM="}
@@ -1 +0,0 @@
{"Value":"eyJuYW1lIjoidmF1bHQtY2x1c3Rlci0xOGE4YTBkYiIsImlkIjoiIn0="}
@@ -1 +0,0 @@
{"Value":"CkyIT1Q8Bh7F49lcdfgUokFwvgfeGgpTBw3VxRMlaD//Xtu/wBP0ELS+hudexNadGr+/WhirNmuo6OI7wpujIRWMKCgmWO6BDCydAsFxKgA="}
-1
View File
@@ -1 +0,0 @@
{"Value":"AAAAAQI4IKlJtz2l7Xc1Yk4lOILn9ms2vTL8//cXd1cVLwZMlGjngu4PyQfHeJHyBuaGJUaCmeTKqpxWKV0YAgr+lWzLAZeJrxEZ8e20f8D/iBC9QMoIWmPdGo6HDkyq02O6ZEjjyFVcWTtUbwfn14ck2UD3aXyesWWZsRjrgC3MZjbh6MlOG2ZFNEgz"}
-1
View File
@@ -1 +0,0 @@
{"Value":"AAAAAQLHOajWUXAr2Y2W+33Q4ewbnAnIJ2/NGfak+XUE02QyJ+htVjlkxZlDVW6ZzqSuvCgAnpApDm/GQRgEXi7qP2j70QBMq8akyPa97eanit20sQq/urbsvA3ts8jdyZ07nG6loQEPLgbdVzv0+QfhnH2tZsowdkn2eSn3k7P3B4OUEk08WHWp9vXrVoWMbpxAPMliP+eOF883gkULsnjMC2e1pCV4UnqF+ygbV50fGqs9FMoUoAIh16JpL9QfSQol7RBirTSBdCKTb6YP0B0hcYiwHdWAaI/ESa9ypBSxROjNdR8+6Iozb/H/KasdrvfQXDQts/28hhdxGsgWlXoWURNQ73ODB8R3odXavZvgtuVth0c/ug0Ti+PQgZ1gi9LLfWoYmSZGResfCeWwKCLsryOSRHABd420f3lq2ueTVRK2VVWRYpxe3lA2oNtzxAseANHcZ1MaVzqJceB94pSWAxfjLG/CJXTAebxJQ6B+D8I2v4SDdIk7Uv+FrxgZfUBfCrn6J/gQVeoYwbzFaG4dSUHdVB7IO9F8eAt0DBHWKJuLUjUNjubUIv5i5J7drogWcd3gdNV4wJC9Bo5KuJfxz2P8/2w4jU0UPOKZjVnGsKsaguOc4YBTuZwnNR8XLtM7V+q+U4bDfPpsuRQFR4E6eWNUj6Uuw8gpr4G4wLrHjbYB5mMeIk/opK2CozXISvIaodqQug=="}
-15
View File
@@ -1,15 +0,0 @@
{
"unseal_keys_b64": [
"5mIIQ/WPHpVbsriMGAZn5ImI56L07C5bJKd1UU6p5ks="
],
"unseal_keys_hex": [
"e6620843f58f1e955bb2b88c180667e48988e7a2f4ec2e5b24a775514ea9e64b"
],
"unseal_shares": 1,
"unseal_threshold": 1,
"recovery_keys_b64": [],
"recovery_keys_hex": [],
"recovery_keys_shares": 0,
"recovery_keys_threshold": 0,
"root_token": "hvs.YLYBCUXgeJM3Gq4C10tWBWjw"
}
@@ -1 +0,0 @@
{"Value":"AAAAAQIfWee0pQLTtQuDDqy8ytv7UNTWBvdFDvtUCPUQKTo/huln/bpaEZ5iJ6nTZ2brU9KR/GJqSSIN0oI/kdEcvP5SQOwXTaV8jG6stYxylbRj5NVfPRTADOcYj7fKs1JThTm5W/f+riRufiFqtuVlLCGkJFxbfuIlQJ3Z6X3lGD8TVwa1D9zfleqYGAwlPDp9gAdTNvq+WCOFaBWzEWMFtP+KQGxOm9/9joJnafGMjVt7JpQccJAwjzt37maIEwA518C/RueqTjX7so1x5YxqWpJGDRYGMxAy6Jtbtuio8HVLyj72kN/zwffO9uvww5a7S2bJTgsdLRVIqeUNFGHGKi39779kooQdi803zwQ0TCKqXe3hMi2qgYTHPfb75wBFnhtY2K5zPac="}
@@ -1 +0,0 @@
{"Value":"AAAAAQK+IKss4D02NfjwAaN0qOZRgCBhPhQbHr9KTlXiQDvEXYUvoSd6/lqjLXaXwCn+V4wGHClEQF8wBAhPUnPCqKU8bew9kg3dkuEe3Vz9NhkeWChUiozVjsBSlShD1FLvPfLhNYZ8ZaZRkNkyiQGa0Gx2HwBiklkM1LML0lZLj9uywRR4+XWmxXXB4oVuunKzMVpr+wlc+65vpZW3yAkz5pZIxQxmMIMKkCI+ixep8Mr0X8YKgPdmZZaQhq7LNtoqGLWjgGek60FfQLkf5ZQ+eOSesz3pUTWom7dPHvIuw2eb/lMdk/eEvFuyyH6VWp0feFejY+J2RvnHFZtMeniWXQfaze3hCMSmy84Bs9PbEbhYtOYtoFIjGH++8RHvW0rrM/S8iql5jmSQL+jQQvIbpCP9VMD1YmNzfYaMGfheYSV8UriOs3qlNuH19F0lc+MH1/jWbD/uMyiySzvQ0GIg8iAx55o+ItB1o05Zx4Z3I01+nZPqIap/G+TElI3HODudde9WiLjoe2ltonVRGj1cd5hQt+ThuU0UxmHCOIOCIPzGtlT9jTJFaTF8X68F5Io4hAGr66xmRJ7iCmEvc3WGPGjLxa82XmLp0mR5dwCSDoxBWyoQi85UZ0ajgbZJwvbu1U4EbcEkXCvugX+wQkOLojsluSvA4OK1AjRv6IjX0yIkZjXe+VphxDrVa5j3hIfQ3s2sQqdsezEOvkvrCoTdK1zLHfuK9cpu7XJsegwA7vuup+RuPhrvBPZuaqTn3vppfdGJHIw6eLNrdhEddUjVkRY/PVAp7up2dyYuKQNGqF1Cd3oEfdVgZIKJxvAUxnDItq48ETxPQewwxoBJjlDJDQSyIoesFCIBbckTN8wxmIfeMtPtUTDObgnFkbR9pU6votcwmPFT5UUuE0gvWiH4fDoQHKUyqQd1ovgGD3WjYDxM7KpIHM8US0l+wbZpkQCvqnc4rQQUCmNb+RIkfw3s+ZygL6kjhykR5fAvy1VvIf4u7chO9nVaazVaNSRp"}
@@ -1 +0,0 @@
{"Value":"AAAAAQI+hY0O2nJzXRYJgPvTsTUSYQLALdyKMtZrzTVj4308UVDuIu9uKVbsjFR0x/4D8zmicpkmO08z8M4nXVKzgyvbDjTR"}
@@ -1 +0,0 @@
{"Value":"AAAAAQKyEH7X6VXpjmJmfx48nMGWltuY98lh4M8W0Xdnfd+qdswry0HKdR0KCy1OPLjJRlECPGCGCtg9TYVaVSL30CwK/M2bzPRnAnTtvrAqguxOEGbpFtlQQ672/vRG"}
File diff suppressed because one or more lines are too long
@@ -1 +0,0 @@
{"Value":"AAAAAQKxfQFcpYjED242EUw3zOu9MqYyj5tcZrzogOixhZcBguLydso0Bpmg0pO8dAJDWtSQ4BTWKxZiNPuDmliAGKo2+90Z5Nqm81l1VdqGmTFsgRGvRes9GtFtyvEwkw/dPhDdDqHwVoiRfYCgIoa3ZU8Nc7KyjDnRncAmfNo+BYrhx3hzTpqrNLG1jWhmiG4Thi4mAT1wxOC/Ces8EiH6N8gKxbVVsBlbN659/5bj3QSHtHTEFwi1twBNKkvWTVam63Bc6d85Cd5Tk7YqkxwLquyObBnyWVLGimgwn8VMCTP/k6UdN8cZPol9FiSmV7101Ubdj3bFTRsRTfwSiN0e+SA2DZVXMIQjOSG4F0lFFrd/T8oJXwuyXeohwpzHGaXvdiY4mmLfvJugS4tHjTfKjYq1BGwqkayMdT6KWef5E8w1KNizKFfIyDrlLvrehRo8325JdH5aEVJ9MWctGlHc0es2tHskVoMIB6oEPG3SVM7MViUNuOyL0arN+0ouopnpAieiSfbsFKjXMPWbwhPXn2LaZaMVAvWzu8cWY5FuNP5R4Ks3mZBHid3xHx4PTJpmWs8KbtUNbTv8LcqMFNeJpIWwveEWrANH7WIj+jPCICcXkqoyZNSnS7TWGA=="}
@@ -1 +0,0 @@
{"Value":"AAAAAQI4psbF6rPEbsOtxr9SQPkVMmkpxNHuLeXiL25zN4w/ljg8mW3eLF9xRsuCPrDa4LEfASEms3KxrwSviel0u8VEF5RaDioKhg7q1rhlt5tdFrU4R+tTqIAmYGGJQ4YAiHIDWDSJ9Z6YesngudEdg+A1hrvj9gGM3ZqRce40k4hBPEY+VBJyQKxZP03JSO+aS1u6XNNMp4I3s/nxj/qLnHJ1YmdD03qe/8CT2EhA98LWDnHxpogNDIZk9StpZUTWecTJhljbW8WZTbR6Tyde18aCCshMnDIKE7dk3R17gOltEEn6eE39+lREnzCpwMncbm33h6Ynv2d/eeNkw/QoMwpY/eL1Dvwg34mJuv0RmZC3ASw3P3l9xEmpPyxiCVm9MMDL08wj4Azfqmalh8pBGeAPmiHBWM3xRrtcCK6KlsCN5IP8c7BsmLTQdiP8ZqJvJLH23HBqJQ/aN3o25EXfsHoGIBtCg0HZBTVYOTwT5thTZKvPiv8P9mIPJvTNySCxdpqB+r0A+W3pIaotVX6XZt3bq5csi7T5nkP6MDF8Hi5XoR8Bc3FzF+w94roXiS72JgKarjqOW+bYGbcsO4jpjT+Ys0rLffLuZuzHP6/wGU9QypXL/gVcCQokYw=="}
@@ -1 +0,0 @@
{"Value":"AAAAAQLLuumPS+6rqyf/5rBi6Af6FibA+r9IP9uJF2gFpQ=="}
@@ -1 +0,0 @@
{"Value":"AAAAAQKluxV2V0Gu7EA54QJPkgqgasCEE9IUZ1NuNH3jzq2Yjua4IQTyDOACKUktyShjzY+X1RjVCIxWWz35RESuyS83fN6mnIiU7vNHJkdgJ8/XMf30eJDSdJM4PZx+zkGHoodbDWF102bQqPdOHNxw4eBQMpxxgsJ08RWnIYUq/3dTc67qKkEN2Lovv6Ov44JDnkoLN8kXo0BB/HRcHUS2Vi3FgLrq6iRJGizAlC9dyR2L3nDSWwUSFzeVnU1wV4LH37I7bosQ0JqIw2bZrrQQT743a+SzktVYPU6ZcNnBNkBhUVDEuB+hin8N4vLf9CABZjTTNJ3jgj+rK4miNxFCiOZabj/+1d5RV65zXdRgWGsgyPWVBZETuNZP7cwGcHE+TwQsm3n7MR1UP9NVxg6NLBeOYK57XxMNecRgdPqyaiIMd/aK05gxlqbvr57RxrdiuE9HCdKeJbxSVdxxh/dImOwS6k7+kTQf9AKuDzUSNPshyAcT0lN+0bbP1q+ath5dB42Ki2hemDmxLMcUa83Own/iwRviHzxxxBUML8fG/5fhiyktIBG720taic2A+noj6ZrBe/Pkaz+EFojc4vmGmYPPDCGPLCOnMT/pGK8t+783kLvMS0hJ4e6R6I5F04n+rxyHaVmppwmznQtxbcfPbbrSDN+8DxVHkfI3jDPgQZv2fwl7VDhpgZzUXSg+2tl0Li3rDHyU047AB62QgQ=="}
@@ -1 +0,0 @@
{"Value":"AAAAAQLLBUYnOkTKLbah0IzcgfzXZsKjVCz8k9Ui7u4MXA=="}
@@ -1 +0,0 @@
{"Value":"AAAAAQKxO0xOXslpRcRsOHd4gN35dAcW0sZSytFGzhvAAcCeSIDC061RamWxhUo6fsZvTz4="}
@@ -1 +0,0 @@
{"Value":"AAAAAQKUSY1mdYq8ms6WtjI9mtP34KbTunps2WO09LRBig=="}
@@ -1 +0,0 @@
{"Value":"AAAAAQJNwfjNdXJVausmf1RtWb57MLRw6vKMlzDiLwac4A=="}
@@ -1 +0,0 @@
{"Value":"AAAAAQLFDgHKNRgP4hzPyA0dJ5sD67Kwl4797lFPUtirEQ=="}
@@ -1 +0,0 @@
{"Value":"AAAAAQLfl24m7T5OMeHVDK/+e0gqmfN01gIs47KB73sXJl5/AjxRh7tFzN8qTOuW5LYhWiClA9BRiKnhZp2Nl9VwjM8UScgkqwXkX1jyKdmva73SqjkfOUuk5HJK5s8/6BzhhpmBkXroLQq0tLFtuwBihXay+pFG6hkmIFZ2tvlkzp0BaOgZePdFO4wsKMAiLBIe8jS2j1HEIsdGAPCJFydnSMnH3q2ylXVIU63nPhGzgD5QqM7Z+lp1CGEPqu24K55bJKd8Pa64RNlk3xg2xyeTYYySuWfmqARWiKMSSF8nsUIy+nInT+ZDUEysfFY3f/zUItf9UhK9zDFU/oh9H/cXAQoCs4kf5ISCJkc5OR55kCjDiksIGNRTn7ZVG7WQmD5lHp/yuZ1Ynsg2AP1obWxrTsBnLqZd0nZc3QU+LbBE5a9l8DVAVcghyyZIyPgHb7fnUCTn8QPkNEmJQreWT5nWPydp6NwLu6KrJn0gMOzcnKV0wwfEeFZvh3mrPYUUCUKu+l3ymyZeJzpmvEXa3Ruly5jeF7h66+FPQnYyidleIDX+5IIlLFD21gFJSYNAdYPMb5XCu/e+9xC1wJaxlLCNwKzu5KOkAgXN7gt0PJs4e8sxGlxUVxBOVGFiwhLMFhdhPxg4bMNkf0rfqLwizmlCqxRh79Q4KQcJ8jpy6VyT3ZPfv2VPrsVPdU9NCM6gIuu+ZhW8VkvdUvYuvx7oLQ=="}
@@ -1 +0,0 @@
{"Value":"AAAAAQK7XXTAHOgHyncRqiva4dCWEi93A2jzJMmlipyWgQ=="}
@@ -1 +0,0 @@
{"Value":"AAAAAQKkkgjIVViD29CtIh/CoqM8fspIkpsOvifO1pfg/LdZtm0="}
@@ -1 +0,0 @@
{"Value":"AAAAAQIfP6MnaRP2pH8kCvUC0ZdV1D4QYaAMiguZUr/7jpTYkiWqzlNx20xqJs+NeOUS0C3gb3nd/cSdjrJtfRfJRR+xOvlRQnHyipJwuuwG8DFubl7pdx2Deg7XBkcwH9T1ViMe3t+siFCTQ85JPeLDzWQ7ne87O6UVMsB09aX9t/SjCfYvqh8/CYnb2tUgBLeH6ZjsJw/VLyufYmEeM32TTWAITXNBX+5RDSHRyuLHYwJ77pfdFi1bzK6tMI6/VRmMmS/Enah+iiDqTtqQWgf017dI9U8pCstlO5IAklLGXTGzLPCYgK51umMDyW0RUSWVyAcgBKIF"}
-1
View File
@@ -1 +0,0 @@
{"Value":"AAAAAQLVJoxJczAsvFC4Q0s3M2+JWP1+mOoYyHQztr44n4iZ7ae57Qs1Yaa8B+UeTZ7Yp9pciH2cx+IoyBXHZH5a+RJswvyGg6NQdYwllvEFZ+tlcHT97wTNu0Kk1xtJtQ/KjuAlEzkifDpbWcil24yNjzYpdwIv9WJvr0Qdi/EQJkYXEB5VPn8KDAOJN/ki4qcKYbbHcGOHV7oMjqiybzkX5xoXumMaKMz48UsxN6JRmJ16VMODHZ2SG2ygm87TwGO9vUd23smkPyHMMPWueizQjBQ/Hdgy8wKiazjW1Nwf2lgzfTeeVJDMF7hV37CmopjQxsNNjNm7CWs8+yKa0sIfZvSTsEKXly6gLCTfvY0vWHKo0YyBSdHWDu4l+M7iE3anhircIYMmgYhimXWeo7Fp2T3Bgbkbd1KkPpgRUIZvIGtJfgvuaFEe5r21QvhruHvnGI8fVZ7KSFpeGSruSi7nQI8i42wZdwbOVFWie7ZpbCkI7ypAxivVHPJW0BeiX5vxsP0FOwxaH2paw/a6j6aBtMtJRbTAi6aXw/8mN6DthHo1yue3Swrd9C+SMS8lCDI7zzAc4I3LA3HTMinGd9TsN0pmCdj7pMYySOQa8QafdLA9K/rUsNB7kPPUomwz7J1fCh1SJ3F0XPI1qozM339PjRDYibUxq4iCnsm8BjatNxoWVQ2qHL5zs3fuNyBNLda/fg09kchvhZ91aAVnuYEYN4gZBmm0d3IEWQ3ZvgXp4JlSNKB9VozNNOqHTlYuBOFOCIbDGjeu6QjD/9KudfP4BrYwyOAugrSMVEHbDEjgmsyq2bKcW8AWhAzAKdkxQvEO34HMLEyIseFWkNMj5IImH9BlYO1RDH821BFXLz+7osJZfxImnYynXK0Y1Eo1Nz/siErIzIo9Um3vy6puERT/7GckGsfuLhrs6XvxH9QkBpPjl0NfNgkc1qhJjArW+MawqrQuD4/9vJrFM7Z9g/TNTGKKDXJsnqyG0B2lzJmQvJ7NzAFks+QhzHOtO3Ub/d+t+lbUFPEpa1t8ZTMZ1fmdLHIKuFKaVOUscMjMgghAAv+OpQ87RongKPOpzMNWtx07z8gD4PAYfVqLu8b14FsMEbBQwfsOXr8inPyaZALHXtIVhAGzWg4XHsFv3UQds+bnQf44nP/Z01sIJYWEQMQm0AQxbsk97VsE+F74kxNIFP3GMvQ76Hdu/O4lrYP/MLemFmjZqtiwJyI+0SO2uRAUjPIJnPYdhGdwLeSKIgiOSOgwwrZm23y5nvYp5E7cX8C4snCT6NPxPdmcmZAUai+0kgwUNurQMuS20CEXtcBARdJUUpfun0T/UoPk4op0+jdw4JhiYMHjZO8ADO0vlAiPJGv4LSqRkey2+RmAyuiGdjhFZjqkhiUaHwpok8erXXwjmIXJGAzZluSNZD2d+Un6PfTqTU4Uhunnk9ll7oiP651jbSjmhDNafC7noGn/k1un0cqhoG1GDvf9rOqfxUsZA/tsWda0NKM38GN1tHXKLO3G0Lr/Cc0/a0lolRW0gjSZadVXxgExBtYELcadLXk2LD9VzmUGQ8YnKi+IBJiKKGodWIKqYwrDZizeHml3jVZVEjH2fs02jTTS7x8W2EK0bUoLqilyQqL9dZ2q1j/t6XVFluXqU51NQyrxlzmYVm9D7shsOR7wxatbhSoBIUo6IApUNDVvVemXcbBnWwxDARiEAeab5t75HBWH/En2S6UMWNPmMOLtLF/FWG4hhkOuomQz6wV+NSGr/FMqSiiWgsC5Jo4RhtNRn4+r/jYOK4tqvmPUBX4A1/2Gn0I0/tu7bu5TG49YeRWIbZzl6Bam9LFfmjeeAdU8q3TyY7Mnu41NOFGk3HOfGUWQ8vYT9XfL/GxxWMB3az/Fdk20GEP7Ps0dAoJMjSzdJijhUPOZY0GqT+80Rz0XQ0dQyFpfBWxticydxNtunjtDZAu06aeP7dAyVJlTwC60vAlrKl2jx4D/5G
rS/CexXPAFY9HthcueI3+E0eVDZaYZOOhypLBsZ4TeTW2dc3JBr4zKIfh8fxpEGCJ/Vhflu/rtLaf+WdrO+M69sHzBqfqYqGh7AFsZikG20kNo9TTNgHLGxJUHkkC2ViQe5OyLdG9wJkSQlJSgVyYy/nGawW+E9aDXrIPeqJ2ljD1tuy9q3dUqPJPWKpb9cdj+B9Klu1NmWpM+zpNdvm7jh4paEU/a5Itf/1hfoTbXpvcRLEOqffoKvwHB3/nW/u8jNyLec3l6rRuFyV6wJiG8qd1uB42mUZ0yqvDqiSrCbJKMgyI/ArQpsG8baOKdImfjOuNGIXVZAg2nREpL0HN//RbAZqvByh2Y2nCQkxVr/miK04eyLiq7zFO8f5EYth35dYKI6hV4P8r0h1fl3wwjJ6e8862tuqEtBOtuqBVqCnuGZAoLW9EeL0rZ7W20dEnGV8/ON8a+Npd2jBgZXJ54ytvgkP0QIwnXUBXjCSqY9fUIeNykiHLCZ56LEaxP1/7RX6mhPOOnZj4fMlipkEwoaYPeDa0x+yk22eO8d06F6MyEMNOA35HQI1wFpyZ9J0at+VFbS+T/GzoOeewoUssNz/ARLMJo6uk8mTmxuLUCO51rKNJ4406ezMwwM5mi/JSJtyQn6jUuTpYYkapHZcSA5MoHCSYcfWJy9J93fw1gsXZnm7SAVA7B8bxoyR3nuDFmc0ii/j+adqQDHDawoYKgUuzVSgKldImdYNWGtT6r0EwpLT1scfIaUwXx1krbAXwJvgup3DClE2GRvAHeM7oL/7Eo8Yp9y5suc3N/os+CYMWDzD62ZBu7Hz+Rp241Wy3IcozE8hJqb5kakoe3VeY+uD1VVX+MDxwl2wLiqIJWFHVBD7OAVsOQ6f4HdZfYor1FORNI7MQCuZf5Enbdq04WIZ5qnUYZ2KLEC21gqUZgm8qLvptPf7xrDCj+nHuEY3uVbCUzVOKzNC8rToVkFf0Xmmvh7Mj3jsjyizkLfz/CXvxl3J0m4hM/fDz8m4R6i+NfbLQdU1ght71F37/4J6tS+ygYlM6fL4yDjTgTUnlitJaGlMm8OVvUTkshcmeIkrp45iHIjC99JI+6AnZyks2KbHOVjvaWUma+Cfmx1k0xJWvaIzob4PiC0UnshsaQkTc+JLPrJ9WnUvH47GD8ozu6Jg9zj2mCB/+AJjDwOm5+CeQMeMGrpYYWiqQo24S8bCeV6frfuE2sI1/HLZ+RrHIgiD+GgrIo9vsfIfOGjsrBSKSCGqI1fAETHIqB4jxQvIV2NxejWG/qpCwGtsm5gbbvggMBoKhO6MNt2Yf97caWEDSn3wThAxd9j0C0KuDPFOzUoiFTeBvxMN544ta8sBUUPI3OZTYtsWImLWSsLEI3O+uoL9Oide5mJ3565VzYIfAQTPeDD6xMnzku30RZkvj2od459Aj8AmcEp7oeduID4qTSQSR8BOeLobSQQApvumfmwkzih5EQXqIszpC0pFTHcT/mJtZiwd5iB5QAvG0juYqyJHMPXvG6l9HkpXn7WlgumrABZxDnce3rZdiDnYjmxDOb+Tgw/Dp03/KLyiNKo+WoHMK1eCTkOTmpH97PFTUCNMyjWbeFPEdJ0wekr9ouG90a8uT4IXKTSJCnrHhoRc5Myv0/+tM="}
@@ -1 +0,0 @@
{"Value":"AAAAAQKXMKfcpQ12dOqQAcfvL15E4sP1Rwr1OgsddOy2axQdG5+3zdzNLK0J98sSs67NRPLVihR0bs3pZL0TxbLf9xRS5qGQRq1U5sKct0MtaQyvr7pNas9PKr5DgDXowFu7ZcldZsGq1GCQlk1k5fZbg50LC6VeRSDYacHpcevXGjD4qqSfJdTdSqLEy8pNwY1LkaN4sY1YfXoW64gg22HbsuaZQueuuj7eBTCoX4Ij772Ry/YIF0xho7aHZ67NGWRPCamqppQkyaKGTxDmJiTbTSJm3fy2ZXGfrPLIHsX+AuIsfZ7N/Q=="}

Some files were not shown because too many files have changed in this diff Show More