Compare commits
3 Commits
98049f3c50
...
e340b42223
| Author | SHA1 | Date | |
|---|---|---|---|
| e340b42223 | |||
| 378daf98d6 | |||
| 35fe82d225 |
@@ -1,5 +1,7 @@
|
||||
using ControlPlane.Api.Services;
|
||||
using System.Diagnostics;
|
||||
using System.Text.Json;
|
||||
using System.Text.RegularExpressions;
|
||||
using ControlPlane.Api.Services;
|
||||
|
||||
namespace ControlPlane.Api.Endpoints;
|
||||
|
||||
@@ -14,6 +16,11 @@ public static class ImageBuildEndpoints
|
||||
group.MapGet("/status", GetStatus);
|
||||
group.MapPost("/build", TriggerBuild);
|
||||
|
||||
// Post-provisioning verification helpers
|
||||
group.MapGet ("/verify/extra-hosts/{containerName}", GetExtraHosts);
|
||||
group.MapPost("/verify/dns-test", DnsTest);
|
||||
group.MapGet ("/artifact/{subdomain}", GetArtifact);
|
||||
|
||||
return app;
|
||||
}
|
||||
|
||||
@@ -72,4 +79,118 @@ public static class ImageBuildEndpoints
|
||||
await ctx.Response.WriteAsync("data: {\"done\":true}\n\n", ct);
|
||||
await ctx.Response.Body.FlushAsync(ct);
|
||||
}
|
||||
|
||||
// ── Post-provisioning verification endpoints ──────────────────────────────
|
||||
|
||||
private static readonly Regex SafeContainerName = new(@"^[a-zA-Z0-9_.\-]+$", RegexOptions.Compiled);
|
||||
|
||||
/// <summary>
|
||||
/// Returns the ExtraHosts list for a running tenant container.
|
||||
/// e.g. GET /api/image/verify/extra-hosts/fdev-app-clarity-01000001
|
||||
/// </summary>
|
||||
private static async Task<IResult> GetExtraHosts(string containerName)
|
||||
{
|
||||
if (!SafeContainerName.IsMatch(containerName))
|
||||
return Results.BadRequest(new { error = "Invalid container name." });
|
||||
|
||||
var (code, output) = await DockerRunAsync($"inspect --format {{{{json .HostConfig.ExtraHosts}}}} {containerName}");
|
||||
if (code != 0 || string.IsNullOrWhiteSpace(output))
|
||||
return Results.NotFound(new { error = $"Container '{containerName}' not found or not running.", detail = output });
|
||||
|
||||
try
|
||||
{
|
||||
var hosts = JsonDocument.Parse(output.Trim()).RootElement;
|
||||
return Results.Ok(new { containerName, extraHosts = hosts });
|
||||
}
|
||||
catch
|
||||
{
|
||||
return Results.Ok(new { containerName, extraHosts = (object?)null, raw = output.Trim() });
|
||||
}
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Runs <c>curl</c> from inside the container to verify *.clarity.test DNS resolves through nginx.
|
||||
/// POST /api/image/verify/dns-test body: { container, url }
|
||||
/// </summary>
|
||||
private static async Task<IResult> DnsTest(DnsTestRequest body)
|
||||
{
|
||||
if (!SafeContainerName.IsMatch(body.Container))
|
||||
return Results.BadRequest(new { error = "Invalid container name." });
|
||||
|
||||
// Only allow http/https URLs — prevents command injection via url field
|
||||
if (!body.Url.StartsWith("http://", StringComparison.OrdinalIgnoreCase) &&
|
||||
!body.Url.StartsWith("https://", StringComparison.OrdinalIgnoreCase))
|
||||
return Results.BadRequest(new { error = "URL must start with http:// or https://." });
|
||||
|
||||
var psi = new ProcessStartInfo("docker",
|
||||
$"exec {body.Container} curl -sf --max-time 10 --write-out \"\\nHTTP %{{http_code}}\" {body.Url}")
|
||||
{
|
||||
RedirectStandardOutput = true,
|
||||
RedirectStandardError = true,
|
||||
UseShellExecute = false,
|
||||
CreateNoWindow = true,
|
||||
};
|
||||
|
||||
using var proc = Process.Start(psi);
|
||||
if (proc is null) return Results.Problem("Failed to start docker process.");
|
||||
|
||||
var stdout = await proc.StandardOutput.ReadToEndAsync();
|
||||
var stderr = await proc.StandardError.ReadToEndAsync();
|
||||
await proc.WaitForExitAsync();
|
||||
|
||||
return Results.Ok(new
|
||||
{
|
||||
success = proc.ExitCode == 0,
|
||||
exitCode = proc.ExitCode,
|
||||
output = stdout.Trim(),
|
||||
error = stderr.Trim(),
|
||||
});
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Reads the generated docker-compose.yml from ClientAssets/{subdomain}/.
|
||||
/// GET /api/image/artifact/{subdomain}
|
||||
/// </summary>
|
||||
private static async Task<IResult> GetArtifact(string subdomain, IConfiguration config)
|
||||
{
|
||||
// Restrict subdomain to safe characters — prevents path traversal
|
||||
if (!SafeContainerName.IsMatch(subdomain))
|
||||
return Results.BadRequest(new { error = "Invalid subdomain." });
|
||||
|
||||
var root = config["ClientAssets__Folder"] ?? config["ClientAssets:Folder"]
|
||||
?? Path.GetFullPath(Path.Combine(AppContext.BaseDirectory, "..", "ClientAssets"));
|
||||
|
||||
// Use Path.GetFileName to ensure no directory traversal
|
||||
var safeSubdomain = Path.GetFileName(subdomain);
|
||||
var composePath = Path.GetFullPath(Path.Combine(root, safeSubdomain, "docker-compose.yml"));
|
||||
|
||||
// Verify the final path is still inside the ClientAssets root
|
||||
if (!composePath.StartsWith(Path.GetFullPath(root), StringComparison.OrdinalIgnoreCase))
|
||||
return Results.BadRequest(new { error = "Invalid subdomain path." });
|
||||
|
||||
if (!File.Exists(composePath))
|
||||
return Results.NotFound(new { error = $"No compose artifact found for '{subdomain}'." });
|
||||
|
||||
var content = await File.ReadAllTextAsync(composePath);
|
||||
return Results.Ok(new { subdomain, path = composePath, content });
|
||||
}
|
||||
|
||||
private static async Task<(int code, string? output)> DockerRunAsync(string args)
|
||||
{
|
||||
var psi = new ProcessStartInfo("docker", args)
|
||||
{
|
||||
RedirectStandardOutput = true,
|
||||
RedirectStandardError = true,
|
||||
UseShellExecute = false,
|
||||
CreateNoWindow = true,
|
||||
};
|
||||
using var proc = Process.Start(psi);
|
||||
if (proc is null) return (-1, null);
|
||||
var output = await proc.StandardOutput.ReadToEndAsync();
|
||||
var err = await proc.StandardError.ReadToEndAsync();
|
||||
await proc.WaitForExitAsync();
|
||||
return (proc.ExitCode, string.IsNullOrWhiteSpace(output) ? err : output);
|
||||
}
|
||||
|
||||
private record DnsTestRequest(string Container, string Url);
|
||||
}
|
||||
|
||||
@@ -15,6 +15,8 @@ public static class InfraEndpoints
|
||||
g.MapPost("/{container}/stop", (string container) => ServiceAction(container, "stop"));
|
||||
g.MapPost("/{container}/restart",(string container) => ServiceAction(container, "restart"));
|
||||
g.MapGet ("/compose/up/stream", ComposeUpStream);
|
||||
g.MapGet ("/compose/up-force/stream", ComposeUpForceStream);
|
||||
g.MapGet ("/compose/nuke/stream", ComposeNukeStream);
|
||||
g.MapGet ("/compose/down/stream", ComposeDownStream);
|
||||
|
||||
return app;
|
||||
@@ -121,20 +123,58 @@ public static class InfraEndpoints
|
||||
: Results.Problem(output ?? "Docker command failed", statusCode: 500);
|
||||
}
|
||||
|
||||
// Starts all platform services; --remove-orphans cleans up containers with stale names
|
||||
// (e.g. a leftover clarity-dnsmasq that causes the "name already in use" conflict).
|
||||
private static Task ComposeUpStream(HttpContext ctx, IConfiguration config, CancellationToken ct) =>
|
||||
StreamComposeOutput(ctx, config, "up --pull missing", ct);
|
||||
StreamComposeOutput(ctx, config, "up -d --remove-orphans", ct);
|
||||
|
||||
// Force-recreates every container regardless of config drift — use after image or compose changes.
|
||||
private static Task ComposeUpForceStream(HttpContext ctx, IConfiguration config, CancellationToken ct) =>
|
||||
StreamComposeOutput(ctx, config, "up -d --force-recreate --remove-orphans", ct);
|
||||
|
||||
// Nuke: force-removes every known platform container by name first (kills orphans that
|
||||
// --remove-orphans won't touch because they belong to a different compose project),
|
||||
// then runs a fresh compose up.
|
||||
private static async Task ComposeNukeStream(HttpContext ctx, IConfiguration config, CancellationToken ct)
|
||||
{
|
||||
ctx.Response.Headers.ContentType = "text/event-stream";
|
||||
ctx.Response.Headers.CacheControl = "no-cache";
|
||||
ctx.Response.Headers.Connection = "keep-alive";
|
||||
|
||||
async Task Send(string line)
|
||||
{
|
||||
await ctx.Response.WriteAsync($"data: {line}\n\n", ct);
|
||||
await ctx.Response.Body.FlushAsync(ct);
|
||||
}
|
||||
|
||||
await Send("▶ Removing all known platform containers…");
|
||||
foreach (var container in PlatformContainers)
|
||||
{
|
||||
var (code, _) = await DockerAsync($"rm -f {container}");
|
||||
await Send(code == 0
|
||||
? $" ✔ removed {container}"
|
||||
: $" · {container} not found (skipped)");
|
||||
}
|
||||
|
||||
await Send("▶ Running compose up…");
|
||||
await StreamComposeOutput(ctx, config, "up -d", ct, skipHeaders: true);
|
||||
}
|
||||
|
||||
private static Task ComposeDownStream(HttpContext ctx, IConfiguration config, CancellationToken ct) =>
|
||||
StreamComposeOutput(ctx, config, "down", ct);
|
||||
|
||||
private static async Task StreamComposeOutput(
|
||||
HttpContext ctx, IConfiguration config, string composeArgs, CancellationToken ct)
|
||||
HttpContext ctx, IConfiguration config, string composeArgs, CancellationToken ct,
|
||||
bool skipHeaders = false)
|
||||
{
|
||||
var infraDir = ResolveInfraPath(config);
|
||||
|
||||
if (!skipHeaders)
|
||||
{
|
||||
ctx.Response.Headers.ContentType = "text/event-stream";
|
||||
ctx.Response.Headers.CacheControl = "no-cache";
|
||||
ctx.Response.Headers.Connection = "keep-alive";
|
||||
}
|
||||
|
||||
var channel = System.Threading.Channels.Channel.CreateUnbounded<string?>(
|
||||
new System.Threading.Channels.UnboundedChannelOptions { SingleWriter = false, SingleReader = true });
|
||||
|
||||
@@ -27,4 +27,19 @@ public class SagaContext
|
||||
// Written by PulumiStep (DedicatedVM/Enterprise tier) — target host details for subsequent steps
|
||||
public string? VmIpAddress { get; set; }
|
||||
public string? VmSshKeyPath { get; set; }
|
||||
|
||||
/// <summary>
|
||||
/// Per-component resolved endpoints for this provisioning job.
|
||||
/// Keyed by component name: "Keycloak", "Vault", "Postgres", "Minio".
|
||||
/// Built by ProvisioningWorker before the saga starts; OwnContainer host ports
|
||||
/// are resolved and written back by InfrastructureProvisioningStep.
|
||||
/// </summary>
|
||||
public Dictionary<string, ResolvedEndpoint> ResolvedTopology { get; init; } =
|
||||
new(StringComparer.OrdinalIgnoreCase);
|
||||
|
||||
/// <summary>
|
||||
/// Absolute path to the generated docker-compose.yml for this tenant.
|
||||
/// Non-null only for OwnContainer tenants.
|
||||
/// </summary>
|
||||
public string? ComposeFilePath { get; set; }
|
||||
}
|
||||
|
||||
@@ -22,5 +22,8 @@ public enum ComponentMode
|
||||
VpsDocker,
|
||||
|
||||
/// <summary>Own VM with the component running as a native OS process (no Docker).</summary>
|
||||
VpsBareMetal
|
||||
VpsBareMetal,
|
||||
|
||||
/// <summary>Component is not provisioned for this tenant (feature not elected).</summary>
|
||||
Disabled
|
||||
}
|
||||
|
||||
@@ -0,0 +1,53 @@
|
||||
namespace ControlPlane.Core.Models;
|
||||
|
||||
/// <summary>
|
||||
/// The fully-resolved network addresses for one infrastructure component for a specific tenant.
|
||||
/// Built by ProvisioningWorker at job start from StackConfig + ClarityInfraOptions.
|
||||
/// Carried through SagaContext and persisted in TenantRecord at saga completion.
|
||||
///
|
||||
/// Design principle: Clarity.Server always talks to PublicUrl (goes through nginx/dnsmasq).
|
||||
/// The Worker uses AdminUrl (direct host-accessible URL) for admin API calls during provisioning.
|
||||
/// InternalUrl is injected into container env vars for container-to-container communication.
|
||||
/// </summary>
|
||||
public sealed record ResolvedEndpoint
|
||||
{
|
||||
/// <summary>Mode elected for this component.</summary>
|
||||
public ComponentMode Mode { get; init; }
|
||||
|
||||
/// <summary>
|
||||
/// URL the Worker process uses to call this component's admin API.
|
||||
/// Worker runs on the host machine:
|
||||
/// SharedPlatform → http://localhost:{exposedPort} (docker-compose exposes to host)
|
||||
/// OwnContainer → http://localhost:{ephemeralPort} (resolved by InfrastructureProvisioningStep)
|
||||
/// VPS → operator-supplied external URL
|
||||
/// </summary>
|
||||
public string AdminUrl { get; init; } = string.Empty;
|
||||
|
||||
/// <summary>
|
||||
/// Public DNS URL injected into Clarity.Server and surfaced in the TenantRecord.
|
||||
/// Always routes through nginx/dnsmasq — no direct Docker DNS leaks to app code.
|
||||
/// SharedPlatform → https://keycloak.clarity.test
|
||||
/// OwnContainer → https://kc.{subdomain}.clarity.test
|
||||
/// </summary>
|
||||
public string PublicUrl { get; init; } = string.Empty;
|
||||
|
||||
/// <summary>
|
||||
/// Docker-internal URL for container-to-container communication on the managed network.
|
||||
/// SharedPlatform → http://keycloak:8080
|
||||
/// OwnContainer → http://kc-{subdomain}:8080
|
||||
/// </summary>
|
||||
public string InternalUrl { get; init; } = string.Empty;
|
||||
|
||||
/// <summary>Docker container name, if the Worker manages this component.</summary>
|
||||
public string? ContainerName { get; init; }
|
||||
|
||||
/// <summary>
|
||||
/// Admin username for this component instance.
|
||||
/// Null for SharedPlatform (read from Keycloak:AdminUser config at call time).
|
||||
/// Explicitly set for OwnContainer sidecars.
|
||||
/// </summary>
|
||||
public string? AdminUser { get; init; }
|
||||
|
||||
/// <summary>Admin password for this component instance. See AdminUser.</summary>
|
||||
public string? AdminPassword { get; init; }
|
||||
}
|
||||
@@ -1,5 +1,8 @@
|
||||
using System.Diagnostics;
|
||||
using System.Text;
|
||||
using ControlPlane.Core.Config;
|
||||
using ControlPlane.Core.Messages;
|
||||
using ControlPlane.Core.Models;
|
||||
using Docker.DotNet;
|
||||
using Docker.DotNet.Models;
|
||||
using MassTransit;
|
||||
@@ -85,6 +88,16 @@ public class ClarityContainerService(
|
||||
{
|
||||
NetworkMode = Infra.Network,
|
||||
RestartPolicy = new RestartPolicy { Name = RestartPolicyKind.UnlessStopped },
|
||||
// Map *.clarity.test domains to the Docker host gateway so that Clarity.Server,
|
||||
// running inside a container, can reach nginx (which routes *.clarity.test).
|
||||
// This is required for Keycloak OIDC discovery and JWT iss-claim validation —
|
||||
// Keycloak issues tokens with iss=https://keycloak.clarity.test/realms/...
|
||||
// and Clarity.Server must be able to reach that URL for OIDC metadata.
|
||||
ExtraHosts =
|
||||
[
|
||||
$"keycloak.{Infra.Domain}:host-gateway",
|
||||
$"{subdomain}.{Infra.Domain}:host-gateway",
|
||||
],
|
||||
},
|
||||
Labels = new Dictionary<string, string>
|
||||
{
|
||||
@@ -107,6 +120,7 @@ public class ClarityContainerService(
|
||||
logger.LogInformation("Started container {Name} on {Network} (image: {Image})", name, Infra.Network, ImageName);
|
||||
|
||||
await WriteNginxConfigAsync(subdomain, name, jobId, cancellationToken);
|
||||
await WriteComposeArtifactAsync(environment, subdomain, keycloakRealm, name, cancellationToken);
|
||||
|
||||
return name;
|
||||
}
|
||||
@@ -355,4 +369,353 @@ public class ClarityContainerService(
|
||||
logger.LogWarning(ex, "Could not connect '{Container}' to '{Network}' — tenant JWT validation may fail.", containerName, network);
|
||||
}
|
||||
}
|
||||
|
||||
// ── ClientAssets / compose artifact helpers ──────────────────────────────
|
||||
|
||||
private string ClientAssetsFolder(string subdomain)
|
||||
{
|
||||
var root = config["ClientAssets__Folder"] ?? config["ClientAssets:Folder"]
|
||||
?? Path.GetFullPath(Path.Combine(AppContext.BaseDirectory, "..", "ClientAssets"));
|
||||
return Path.Combine(root, subdomain);
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Writes a docker-compose.yml to ClientAssets/{subdomain}/ documenting the SharedPlatform
|
||||
/// clarity-server deployment. The file is an audit artifact — it is NOT executed by the Worker.
|
||||
/// </summary>
|
||||
private async Task WriteComposeArtifactAsync(
|
||||
string environment,
|
||||
string subdomain,
|
||||
string keycloakRealm,
|
||||
string containerName,
|
||||
CancellationToken ct)
|
||||
{
|
||||
var folder = ClientAssetsFolder(subdomain);
|
||||
try
|
||||
{
|
||||
Directory.CreateDirectory(folder);
|
||||
|
||||
var content = $$$"""
|
||||
# Auto-generated by ControlPlane.Worker — do not edit manually.
|
||||
# Tenant: {{{subdomain}}}
|
||||
# Tier: SharedPlatform
|
||||
# Generated: {{{DateTimeOffset.UtcNow:O}}}
|
||||
name: clarity-{{{subdomain}}}
|
||||
|
||||
services:
|
||||
app-{{{subdomain}}}:
|
||||
image: {{{ImageName}}}
|
||||
restart: unless-stopped
|
||||
container_name: {{{containerName}}}
|
||||
environment:
|
||||
ASPNETCORE_ENVIRONMENT: Production
|
||||
ASPNETCORE_URLS: http://+:8080
|
||||
TenantSubdomain: {{{subdomain}}}
|
||||
Keycloak__BaseUrl: {{{Infra.KeycloakPublicUrl}}}
|
||||
Keycloak__InternalUrl: {{{Infra.KeycloakInternalUrl}}}
|
||||
Keycloak__Realm: {{{keycloakRealm}}}
|
||||
Vault__Address: {{{Infra.VaultInternalUrl}}}
|
||||
# ConnectionStrings__postgresdb: (persisted in TenantRecord)
|
||||
networks:
|
||||
- clarity-net
|
||||
extra_hosts:
|
||||
- "keycloak.{{{Infra.Domain}}}:host-gateway"
|
||||
- "{{{subdomain}}}.{{{Infra.Domain}}}:host-gateway"
|
||||
labels:
|
||||
clarity.managed: "true"
|
||||
clarity.subdomain: {{{subdomain}}}
|
||||
clarity.env: {{{environment}}}
|
||||
|
||||
networks:
|
||||
clarity-net:
|
||||
external: true
|
||||
""";
|
||||
|
||||
var composePath = Path.Combine(folder, "docker-compose.yml");
|
||||
await File.WriteAllTextAsync(composePath, content, ct);
|
||||
logger.LogInformation("Wrote compose artifact for {Subdomain} → {Path}", subdomain, composePath);
|
||||
}
|
||||
catch (Exception ex)
|
||||
{
|
||||
// Non-fatal — the container is already running; the artifact is an audit record.
|
||||
logger.LogWarning(ex, "Could not write compose artifact for {Subdomain}.", subdomain);
|
||||
}
|
||||
}
|
||||
|
||||
// ── OwnContainer — sidecar lifecycle ─────────────────────────────────────
|
||||
|
||||
/// <summary>
|
||||
/// OwnContainer tier — generates a per-tenant docker-compose.yml for sidecar services
|
||||
/// (Keycloak, Vault, Postgres, MinIO as elected by StackConfig), writes it to
|
||||
/// ClientAssets/{subdomain}/docker-compose.yml, and runs <c>docker compose up -d</c>.
|
||||
/// Returns the absolute path to the compose file.
|
||||
/// </summary>
|
||||
public async Task<string> GenerateAndRunSidecarsAsync(
|
||||
ProvisioningJob job,
|
||||
Dictionary<string, ResolvedEndpoint> topology,
|
||||
CancellationToken ct)
|
||||
{
|
||||
var folder = ClientAssetsFolder(job.Subdomain);
|
||||
Directory.CreateDirectory(folder);
|
||||
|
||||
var content = BuildSidecarCompose(job);
|
||||
var composePath = Path.Combine(folder, "docker-compose.yml");
|
||||
await File.WriteAllTextAsync(composePath, content, ct);
|
||||
logger.LogInformation("[{JobId}] Wrote sidecar compose → {Path}", job.Id, composePath);
|
||||
|
||||
await RunDockerComposeAsync(composePath, "up -d", job.Id, ct);
|
||||
logger.LogInformation("[{JobId}] Sidecar containers started.", job.Id);
|
||||
|
||||
return composePath;
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// After sidecars are started, inspects each OwnContainer component's Docker container
|
||||
/// to resolve its ephemeral host port, then rewrites the topology AdminUrl to
|
||||
/// <c>http://localhost:{hostPort}</c> so downstream saga steps can call admin APIs.
|
||||
/// </summary>
|
||||
public async Task UpdateTopologyWithHostPortsAsync(
|
||||
Dictionary<string, ResolvedEndpoint> topology,
|
||||
CancellationToken ct)
|
||||
{
|
||||
using var docker = CreateClient();
|
||||
|
||||
foreach (var (component, endpoint) in topology.ToList())
|
||||
{
|
||||
if (endpoint.Mode != ComponentMode.OwnContainer) continue;
|
||||
if (string.IsNullOrWhiteSpace(endpoint.ContainerName)) continue;
|
||||
|
||||
try
|
||||
{
|
||||
var inspect = await docker.Containers.InspectContainerAsync(endpoint.ContainerName, ct);
|
||||
var firstBinding = inspect.NetworkSettings.Ports
|
||||
.SelectMany(p => p.Value ?? [])
|
||||
.FirstOrDefault(b => !string.IsNullOrWhiteSpace(b.HostPort));
|
||||
|
||||
if (firstBinding is not null)
|
||||
{
|
||||
topology[component] = endpoint with { AdminUrl = $"http://localhost:{firstBinding.HostPort}" };
|
||||
logger.LogInformation("Resolved {Component} host port → {Url}", component, topology[component].AdminUrl);
|
||||
}
|
||||
else
|
||||
{
|
||||
logger.LogWarning("No host port binding found for {Component} container {Name}.", component, endpoint.ContainerName);
|
||||
}
|
||||
}
|
||||
catch (Exception ex)
|
||||
{
|
||||
logger.LogWarning(ex, "Could not resolve host port for {Component} container {Name}.", component, endpoint.ContainerName);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Tears down all sidecar containers for a tenant by running
|
||||
/// <c>docker compose down -v</c> against the stored compose file.
|
||||
/// Called from InfrastructureProvisioningStep.CompensateAsync.
|
||||
/// </summary>
|
||||
public async Task TearDownComposeProjectAsync(string subdomain, CancellationToken ct)
|
||||
{
|
||||
var composePath = Path.Combine(ClientAssetsFolder(subdomain), "docker-compose.yml");
|
||||
if (!File.Exists(composePath))
|
||||
{
|
||||
logger.LogWarning("No compose file found for {Subdomain} — nothing to tear down.", subdomain);
|
||||
return;
|
||||
}
|
||||
|
||||
await RunDockerComposeAsync(composePath, "down -v", Guid.Empty, ct);
|
||||
logger.LogInformation("Tore down sidecar containers for {Subdomain}.", subdomain);
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Builds the docker-compose YAML content for OwnContainer sidecar services.
|
||||
/// Services are included conditionally based on StackConfig. clarity-net is
|
||||
/// declared as an external network so all sidecars join the shared platform network.
|
||||
///
|
||||
/// All services include <c>extra_hosts: host-gateway</c> entries for *.clarity.test so that
|
||||
/// intra-container calls that go through nginx (e.g. OIDC discovery) route correctly.
|
||||
/// </summary>
|
||||
private string BuildSidecarCompose(ProvisioningJob job)
|
||||
{
|
||||
var s = job.Subdomain;
|
||||
var stack = job.StackConfig;
|
||||
var sb = new StringBuilder();
|
||||
|
||||
sb.AppendLine($"""
|
||||
# Auto-generated by ControlPlane.Worker — do not edit manually.
|
||||
# Tenant: {s} | Tier: {job.Tier}
|
||||
# Generated: {DateTimeOffset.UtcNow:O}
|
||||
name: clarity-{s}
|
||||
|
||||
services:
|
||||
""");
|
||||
|
||||
// ── Postgres ──────────────────────────────────────────────────────────
|
||||
if (stack.Postgres == ComponentMode.OwnContainer)
|
||||
{
|
||||
sb.AppendLine($$"""
|
||||
pg-{{s}}:
|
||||
image: postgres:16
|
||||
restart: unless-stopped
|
||||
environment:
|
||||
POSTGRES_USER: clarity
|
||||
POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-clarity-dev}
|
||||
POSTGRES_DB: clarity
|
||||
expose:
|
||||
- "5432"
|
||||
ports:
|
||||
- "127.0.0.1::5432"
|
||||
healthcheck:
|
||||
test: ["CMD-SHELL", "pg_isready -U clarity"]
|
||||
interval: 10s
|
||||
timeout: 5s
|
||||
retries: 5
|
||||
networks:
|
||||
- clarity-net
|
||||
labels:
|
||||
clarity.managed: "true"
|
||||
clarity.subdomain: {{s}}
|
||||
clarity.component: postgres
|
||||
""");
|
||||
}
|
||||
|
||||
// ── Keycloak ──────────────────────────────────────────────────────────
|
||||
if (stack.Keycloak == ComponentMode.OwnContainer)
|
||||
{
|
||||
var kcHostname = $"kc.{s}.{Infra.Domain}";
|
||||
var dependsBlock = stack.Postgres == ComponentMode.OwnContainer
|
||||
? $"""
|
||||
depends_on:
|
||||
pg-{s}:
|
||||
condition: service_healthy
|
||||
"""
|
||||
: string.Empty;
|
||||
|
||||
sb.AppendLine($$"""
|
||||
kc-{{s}}:
|
||||
image: quay.io/keycloak/keycloak:latest
|
||||
restart: unless-stopped
|
||||
command: start-dev
|
||||
environment:
|
||||
KEYCLOAK_ADMIN: admin
|
||||
KEYCLOAK_ADMIN_PASSWORD: ${KEYCLOAK_ADMIN_PASSWORD:-admin}
|
||||
KC_DB: postgres
|
||||
KC_DB_URL_HOST: pg-{{s}}
|
||||
KC_DB_URL_DATABASE: keycloak
|
||||
KC_DB_USERNAME: clarity
|
||||
KC_DB_PASSWORD: ${POSTGRES_PASSWORD:-clarity-dev}
|
||||
KC_HOSTNAME: {{kcHostname}}
|
||||
KC_HOSTNAME_STRICT: "false"
|
||||
KC_HTTP_ENABLED: "true"
|
||||
expose:
|
||||
- "8080"
|
||||
ports:
|
||||
- "127.0.0.1::8080"
|
||||
networks:
|
||||
- clarity-net
|
||||
extra_hosts:
|
||||
- "{{kcHostname}}:host-gateway"
|
||||
{{dependsBlock}}
|
||||
labels:
|
||||
clarity.managed: "true"
|
||||
clarity.subdomain: {{s}}
|
||||
clarity.component: keycloak
|
||||
""");
|
||||
}
|
||||
|
||||
// ── Vault ─────────────────────────────────────────────────────────────
|
||||
if (stack.Vault == ComponentMode.OwnContainer)
|
||||
{
|
||||
sb.AppendLine($$"""
|
||||
vault-{{s}}:
|
||||
image: hashicorp/vault:latest
|
||||
restart: unless-stopped
|
||||
cap_add:
|
||||
- IPC_LOCK
|
||||
environment:
|
||||
VAULT_DEV_ROOT_TOKEN_ID: ${VAULT_TOKEN:-vault-dev-root}
|
||||
VAULT_DEV_LISTEN_ADDRESS: "0.0.0.0:8200"
|
||||
expose:
|
||||
- "8200"
|
||||
ports:
|
||||
- "127.0.0.1::8200"
|
||||
networks:
|
||||
- clarity-net
|
||||
labels:
|
||||
clarity.managed: "true"
|
||||
clarity.subdomain: {{s}}
|
||||
clarity.component: vault
|
||||
""");
|
||||
}
|
||||
|
||||
// ── MinIO ─────────────────────────────────────────────────────────────
|
||||
if (stack.Minio == ComponentMode.OwnContainer)
|
||||
{
|
||||
sb.AppendLine($$"""
|
||||
minio-{{s}}:
|
||||
image: minio/minio:latest
|
||||
restart: unless-stopped
|
||||
command: server /data --console-address ":9001"
|
||||
environment:
|
||||
MINIO_ROOT_USER: ${MINIO_ROOT_USER:-minio}
|
||||
MINIO_ROOT_PASSWORD: ${MINIO_ROOT_PASSWORD:-minio-dev}
|
||||
expose:
|
||||
- "9000"
|
||||
- "9001"
|
||||
ports:
|
||||
- "127.0.0.1::9000"
|
||||
- "127.0.0.1::9001"
|
||||
networks:
|
||||
- clarity-net
|
||||
labels:
|
||||
clarity.managed: "true"
|
||||
clarity.subdomain: {{s}}
|
||||
clarity.component: minio
|
||||
""");
|
||||
}
|
||||
|
||||
sb.AppendLine("""
|
||||
networks:
|
||||
clarity-net:
|
||||
external: true
|
||||
""");
|
||||
|
||||
return sb.ToString();
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Runs <c>docker compose -f {composePath} {args}</c> as a child process.
|
||||
/// Streams stdout/stderr to the logger and throws on non-zero exit.
|
||||
/// </summary>
|
||||
private async Task RunDockerComposeAsync(string composePath, string args, Guid jobId, CancellationToken ct)
|
||||
{
|
||||
var psi = new ProcessStartInfo("docker")
|
||||
{
|
||||
Arguments = $"compose -f \"{composePath}\" {args}",
|
||||
WorkingDirectory = Path.GetDirectoryName(composePath)!,
|
||||
RedirectStandardOutput = true,
|
||||
RedirectStandardError = true,
|
||||
UseShellExecute = false,
|
||||
};
|
||||
|
||||
using var process = Process.Start(psi)
|
||||
?? throw new InvalidOperationException("Failed to start docker compose process.");
|
||||
|
||||
var stdoutTask = process.StandardOutput.ReadToEndAsync(ct);
|
||||
var stderrTask = process.StandardError.ReadToEndAsync(ct);
|
||||
|
||||
await process.WaitForExitAsync(ct);
|
||||
|
||||
var stdout = await stdoutTask;
|
||||
var stderr = await stderrTask;
|
||||
|
||||
if (!string.IsNullOrWhiteSpace(stdout))
|
||||
logger.LogInformation("[docker compose] {Output}", stdout.Trim());
|
||||
if (!string.IsNullOrWhiteSpace(stderr))
|
||||
logger.LogInformation("[docker compose stderr] {Output}", stderr.Trim());
|
||||
|
||||
if (process.ExitCode != 0)
|
||||
throw new InvalidOperationException(
|
||||
$"'docker compose {args}' exited with code {process.ExitCode}. See logs for details.");
|
||||
}
|
||||
}
|
||||
|
||||
@@ -28,6 +28,28 @@ public class KeycloakAdminClient
|
||||
_http = new HttpClient { BaseAddress = new Uri(_baseUrl) };
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Creates a KeycloakAdminClient for a specific base URL and credentials.
|
||||
/// Used by KeycloakStep to target SharedPlatform or OwnContainer Keycloak instances
|
||||
/// using the resolved topology rather than static DI configuration.
|
||||
/// </summary>
|
||||
public static KeycloakAdminClient ForUrl(
|
||||
string adminUrl, string adminUser, string adminPassword,
|
||||
ILogger<KeycloakAdminClient> logger)
|
||||
=> new(adminUrl, adminUser, adminPassword, logger);
|
||||
|
||||
private KeycloakAdminClient(
|
||||
string adminUrl, string adminUser, string adminPassword,
|
||||
ILogger<KeycloakAdminClient> logger)
|
||||
{
|
||||
_logger = logger;
|
||||
_baseUrl = adminUrl.TrimEnd('/');
|
||||
_adminUser = adminUser;
|
||||
_adminPassword = adminPassword;
|
||||
_logger.LogInformation("KeycloakAdminClient base URL: {Url}, user: {User}", _baseUrl, _adminUser);
|
||||
_http = new HttpClient { BaseAddress = new Uri(_baseUrl) };
|
||||
}
|
||||
|
||||
private async Task AuthorizeAsync(CancellationToken ct)
|
||||
{
|
||||
var form = new FormUrlEncodedContent(new Dictionary<string, string>
|
||||
|
||||
@@ -0,0 +1,108 @@
|
||||
using ControlPlane.Core.Config;
|
||||
using ControlPlane.Core.Interfaces;
|
||||
using ControlPlane.Core.Models;
|
||||
using ControlPlane.Worker.Services;
|
||||
using Microsoft.Extensions.Options;
|
||||
|
||||
namespace ControlPlane.Worker.Steps;
|
||||
|
||||
/// <summary>
|
||||
/// First saga step — maps to "Infrastructure Provisioning" in the frontend step tracker.
|
||||
///
|
||||
/// SharedPlatform tenants:
|
||||
/// Probes Keycloak and Vault health endpoints so the saga fails fast with a clear,
|
||||
/// actionable message if infra/docker-compose.yml isn't running — rather than timing
|
||||
/// out inside KeycloakStep with a cryptic connection refused.
|
||||
///
|
||||
/// OwnContainer tenants (Dedicated / Enterprise tiers):
|
||||
/// Generates a per-tenant docker-compose.yml to ClientAssets/{subdomain}/,
|
||||
/// starts all sidecar containers (Keycloak, Vault, Postgres, MinIO as elected),
|
||||
/// then resolves the ephemeral host ports into SagaContext.ResolvedTopology so
|
||||
/// downstream steps (KeycloakStep etc.) can call sidecar admin APIs from the host.
|
||||
/// </summary>
|
||||
public class InfrastructureProvisioningStep(
    ClarityContainerService containers,
    IConfiguration config,
    IOptions<ClarityInfraOptions> infraOptions,
    ILogger<InfrastructureProvisioningStep> logger) : ISagaStep
{
    // NOTE: `config` and `infraOptions` are injected but not read in this step's
    // visible code path — kept for DI/ctor compatibility with the saga wiring.

    public string StepName => "Infrastructure Provisioning";

    /// <summary>
    /// Executes infrastructure provisioning for the job:
    /// SharedPlatform tier (all four components on the shared platform) only verifies the
    /// platform services are reachable; any OwnContainer election generates a per-tenant
    /// compose manifest, starts the sidecars, and resolves their host ports into
    /// <see cref="SagaContext.ResolvedTopology"/> for downstream steps.
    /// Marks <see cref="CompletedSteps.InfrastructureProvisioned"/> on success.
    /// </summary>
    public async Task ExecuteAsync(SagaContext context, CancellationToken ct)
    {
        var job = context.Job;

        // SharedPlatform across the board → nothing to start, just health-check.
        var allSharedPlatform =
            job.StackConfig.Keycloak == ComponentMode.SharedPlatform &&
            job.StackConfig.Vault == ComponentMode.SharedPlatform &&
            job.StackConfig.Postgres == ComponentMode.SharedPlatform &&
            job.StackConfig.Minio == ComponentMode.SharedPlatform;

        if (allSharedPlatform)
        {
            logger.LogInformation("[{JobId}] SharedPlatform tier — verifying platform services are reachable.", job.Id);
            await VerifySharedPlatformAsync(context, ct);
        }
        else
        {
            logger.LogInformation("[{JobId}] OwnContainer tier — generating compose manifest and starting sidecars.", job.Id);
            var composeFile = await containers.GenerateAndRunSidecarsAsync(job, context.ResolvedTopology, ct);

            // Persist the compose file path so CompensateAsync can tear the project down.
            context.ComposeFilePath = composeFile;

            // Resolve ephemeral host ports so later steps can reach sidecar admin APIs from the host.
            await containers.UpdateTopologyWithHostPortsAsync(context.ResolvedTopology, ct);
            logger.LogInformation("[{JobId}] Sidecars started. Compose file: {File}", job.Id, composeFile);
        }

        // `job` is the same instance as context.Job — use it consistently.
        job.CompletedSteps |= CompletedSteps.InfrastructureProvisioned;
    }

    /// <summary>
    /// Compensation: tears down the tenant's compose project, but only when this step
    /// actually started one (ComposeFilePath is set only on the OwnContainer path).
    /// </summary>
    public async Task CompensateAsync(SagaContext context, CancellationToken ct)
    {
        if (!string.IsNullOrWhiteSpace(context.ComposeFilePath))
        {
            logger.LogWarning("[{JobId}] Compensating: tearing down sidecar containers.", context.Job.Id);
            await containers.TearDownComposeProjectAsync(context.Job.Subdomain, ct);
        }
    }

    // ── SharedPlatform health probes ─────────────────────────────────────────

    /// <summary>
    /// Probes the shared Keycloak and Vault instances. URLs come from the resolved
    /// topology when present, otherwise fall back to the default localhost ports.
    /// Throws (via <see cref="ProbeAsync"/>) when a service is unreachable.
    /// </summary>
    private async Task VerifySharedPlatformAsync(SagaContext context, CancellationToken ct)
    {
        // One short-lived client for the whole verification pass; disposed on exit.
        using var http = new HttpClient { Timeout = TimeSpan.FromSeconds(10) };

        var kcUrl = context.ResolvedTopology.TryGetValue("Keycloak", out var kc)
            ? kc.AdminUrl : "http://localhost:8080";
        var vaultUrl = context.ResolvedTopology.TryGetValue("Vault", out var vault)
            ? vault.AdminUrl : "http://localhost:8200";

        await ProbeAsync(http, $"{kcUrl}/health/ready", "Keycloak", ct);
        // Vault returns non-200 on sealed/standby — any HTTP response means it's running
        await ProbeAsync(http, $"{vaultUrl}/v1/sys/health", "Vault", ct, acceptAnyHttpResponse: true);
    }

    /// <summary>
    /// Issues a single GET against <paramref name="url"/> and translates failures into
    /// <see cref="InvalidOperationException"/> with an actionable message.
    /// </summary>
    /// <param name="acceptAnyHttpResponse">
    /// When true, any HTTP status counts as reachable (used for Vault, whose health
    /// endpoint returns non-2xx while sealed or in standby).
    /// </param>
    private async Task ProbeAsync(
        HttpClient http, string url, string serviceName, CancellationToken ct,
        bool acceptAnyHttpResponse = false)
    {
        try
        {
            // Dispose the response so the underlying connection is released promptly
            // instead of lingering until finalization.
            using var resp = await http.GetAsync(url, ct);
            if (!acceptAnyHttpResponse && !resp.IsSuccessStatusCode)
                throw new InvalidOperationException(
                    $"{serviceName} at {url} returned HTTP {(int)resp.StatusCode}. Is it healthy?");
            logger.LogInformation("{Service} is reachable at {Url} ({Status}).",
                serviceName, url, (int)resp.StatusCode);
        }
        catch (HttpRequestException ex)
        {
            throw new InvalidOperationException(
                $"{serviceName} is not reachable at {url}. " +
                $"Run `docker compose up -d` from OPC/infra/ before provisioning. ({ex.Message})", ex);
        }
        catch (TaskCanceledException ex) when (!ct.IsCancellationRequested)
        {
            // HttpClient reports its own timeout as TaskCanceledException; the `when`
            // filter distinguishes it from caller-requested cancellation, which is
            // allowed to propagate unchanged.
            throw new InvalidOperationException(
                $"{serviceName} health check at {url} timed out. Is the service running?", ex);
        }
    }
}
|
||||
@@ -0,0 +1,56 @@
|
||||
{
|
||||
"Logging": {
|
||||
"LogLevel": {
|
||||
"Default": "Information",
|
||||
"Microsoft.Hosting.Lifetime": "Information"
|
||||
}
|
||||
},
|
||||
|
||||
// ── Keycloak ──────────────────────────────────────────────────────────────────
|
||||
// Worker runs on the host machine → use localhost URLs for admin API calls.
|
||||
// These are the shared platform Keycloak credentials from infra/docker-compose.yml.
|
||||
// Aspire no longer injects these — they live here. NOTE(review): these are
// dev-only defaults for the local compose stack; do not ship real credentials
// in this file — confirm production uses a secret store.
|
||||
"Keycloak": {
|
||||
"AuthServerUrl": "http://localhost:8080",
|
||||
"AdminUser": "admin",
|
||||
"AdminPassword": "Admin1234!",
|
||||
"Realm": "master",
|
||||
"Resource": "admin-cli"
|
||||
},
|
||||
|
||||
// ── Vault ─────────────────────────────────────────────────────────────────────
|
||||
// Worker uses localhost:8200 for admin calls.
|
||||
// Vault__KeysFile is machine-specific → still injected by Aspire AppHost.
|
||||
"Vault": {
|
||||
"Address": "http://localhost:8200",
|
||||
"ContainerAddress": "http://vault:8200"
|
||||
},
|
||||
|
||||
// ── ClarityInfraOptions (Clarity section) ─────────────────────────────────────
|
||||
// These values describe what gets injected INTO tenant containers at docker run time.
|
||||
// Containers live on clarity-net → use Docker DNS names (keycloak, vault, postgres).
|
||||
// Nginx/dnsmasq surface these at public DNS names for the browser.
|
||||
"Clarity": {
|
||||
"Domain": "clarity.test",
|
||||
"Network": "clarity-net",
|
||||
"KeycloakPublicUrl": "https://keycloak.clarity.test",
|
||||
"KeycloakInternalUrl": "http://keycloak:8080",
|
||||
"VaultInternalUrl": "http://vault:8200",
|
||||
"NginxCertPath": "/etc/nginx/certs/clarity.test.crt",
|
||||
"NginxCertKeyPath": "/etc/nginx/certs/clarity.test.key"
|
||||
},
|
||||
|
||||
// ── Docker ───────────────────────────────────────────────────────────────────
|
||||
"Docker": {
|
||||
"Socket": "npipe://./pipe/docker_engine",
|
||||
"ClarityServerImage": "clarity-server:latest"
|
||||
},
|
||||
|
||||
// ── Connection strings ────────────────────────────────────────────────────────
|
||||
// platformdb: the shared infra postgres from infra/docker-compose.yml.
|
||||
// Worker connects on localhost:5432 for tenant DB provisioning (MigrationStep).
|
||||
// Aspire-managed opcdb (port 5433) is injected separately by AppHost via .WithReference.
|
||||
"ConnectionStrings": {
|
||||
"platformdb": "Host=localhost;Port=5432;Username=postgres;Password=postgres"
|
||||
}
|
||||
}
|
||||
@@ -36,6 +36,26 @@ export function streamComposeUp(onLine: (line: string) => void, onDone: () => vo
|
||||
return src;
|
||||
}
|
||||
|
||||
/** Force-recreates all containers and removes orphans — fixes name-conflict errors. */
|
||||
export function streamComposeForceUp(onLine: (line: string) => void, onDone: () => void): EventSource {
|
||||
const src = new EventSource(`${BASE_URL}/api/infra/compose/up-force/stream`);
|
||||
src.onmessage = (e) => onLine(e.data);
|
||||
src.onerror = () => { onDone(); src.close(); };
|
||||
return src;
|
||||
}
|
||||
|
||||
/**
|
||||
* Nuke & Recreate — force-removes every known platform container by name first
|
||||
* (kills orphans that --remove-orphans won't touch), then runs compose up fresh.
|
||||
* Use this when Force Recreate still fails with "container name already in use".
|
||||
*/
|
||||
export function streamComposeNuke(onLine: (line: string) => void, onDone: () => void): EventSource {
|
||||
const src = new EventSource(`${BASE_URL}/api/infra/compose/nuke/stream`);
|
||||
src.onmessage = (e) => onLine(e.data);
|
||||
src.onerror = () => { onDone(); src.close(); };
|
||||
return src;
|
||||
}
|
||||
|
||||
export function streamComposeDown(onLine: (line: string) => void, onDone: () => void): EventSource {
|
||||
const src = new EventSource(`${BASE_URL}/api/infra/compose/down/stream`);
|
||||
src.onmessage = (e) => onLine(e.data);
|
||||
|
||||
@@ -1,9 +1,14 @@
|
||||
import { useEffect, useRef, useState } from 'react';
|
||||
import {
|
||||
Button, Callout, Intent, Tag, Spinner,
|
||||
HTMLTable, Card, Elevation,
|
||||
HTMLTable, Card, Elevation, Tabs, Tab, type TabId,
|
||||
FormGroup, InputGroup,
|
||||
} from '@blueprintjs/core';
|
||||
import { getImageStatus, getBuildHistory, type ImageBuildStatus, type BuildRecord } from '../api/provisioningApi';
|
||||
import {
|
||||
getInfraStatus, streamComposeUp, streamComposeForceUp, streamComposeNuke, streamComposeDown,
|
||||
type InfraService,
|
||||
} from '../api/infraApi';
|
||||
|
||||
const BASE_URL = import.meta.env.VITE_API_URL ?? '';
|
||||
|
||||
@@ -13,9 +18,13 @@ const STATUS_INTENT: Record<string, Intent> = {
|
||||
Running: Intent.PRIMARY,
|
||||
};
|
||||
|
||||
// ── Live terminal ─────────────────────────────────────────────────────────────
|
||||
// ── Shared terminal ───────────────────────────────────────────────────────────
|
||||
|
||||
function BuildTerminal({ lines }: { lines: string[] }) {
|
||||
function Terminal({ lines, height = 360, placeholder = 'Waiting for output…' }: {
|
||||
lines: string[];
|
||||
height?: number;
|
||||
placeholder?: string;
|
||||
}) {
|
||||
const ref = useRef<HTMLDivElement>(null);
|
||||
|
||||
useEffect(() => {
|
||||
@@ -33,7 +42,7 @@ function BuildTerminal({ lines }: { lines: string[] }) {
|
||||
color: '#c9d1d9',
|
||||
padding: '0.75rem 1rem',
|
||||
borderRadius: 6,
|
||||
height: 420,
|
||||
height,
|
||||
overflowY: 'auto',
|
||||
whiteSpace: 'pre-wrap',
|
||||
wordBreak: 'break-all',
|
||||
@@ -41,10 +50,10 @@ function BuildTerminal({ lines }: { lines: string[] }) {
|
||||
}}
|
||||
>
|
||||
{lines.length === 0 ? (
|
||||
<span style={{ color: '#484f58' }}>Waiting for build output…</span>
|
||||
<span style={{ color: '#484f58' }}>{placeholder}</span>
|
||||
) : (
|
||||
lines.map((l, i) => {
|
||||
const isError = l.startsWith('✖');
|
||||
const isError = l.startsWith('✖') || l.toLowerCase().includes('error');
|
||||
const isSuccess = l.startsWith('✔');
|
||||
const isSep = l.startsWith('──');
|
||||
const color = isError ? '#f85149' : isSuccess ? '#3fb950' : isSep ? '#484f58' : undefined;
|
||||
@@ -89,9 +98,272 @@ function BuildHistoryTable({ records }: { records: BuildRecord[] }) {
|
||||
);
|
||||
}
|
||||
|
||||
// ── Platform tab ──────────────────────────────────────────────────────────────
|
||||
|
||||
function PlatformTab() {
|
||||
const [services, setServices] = useState<InfraService[]>([]);
|
||||
const [loading, setLoading] = useState(false);
|
||||
const [composeBusy, setBusy] = useState<'up' | 'force' | 'nuke' | 'down' | null>(null);
|
||||
const [lines, setLines] = useState<string[]>([]);
|
||||
const sseRef = useRef<EventSource | null>(null);
|
||||
|
||||
const refresh = () => {
|
||||
setLoading(true);
|
||||
getInfraStatus()
|
||||
.then(d => setServices(d.services))
|
||||
.catch(() => {})
|
||||
.finally(() => setLoading(false));
|
||||
};
|
||||
|
||||
useEffect(() => { refresh(); }, []);
|
||||
|
||||
function startStream(
|
||||
streamer: (onLine: (l: string) => void, onDone: () => void) => EventSource,
|
||||
label: 'up' | 'force' | 'nuke' | 'down',
|
||||
) {
|
||||
sseRef.current?.close();
|
||||
setLines([`▶ compose ${label}…`]);
|
||||
setBusy(label);
|
||||
const src = streamer(
|
||||
(line) => setLines(prev => [...prev, line]),
|
||||
() => { setBusy(null); refresh(); },
|
||||
);
|
||||
sseRef.current = src;
|
||||
}
|
||||
|
||||
const running = services.filter(s => s.status === 'running').length;
|
||||
const statusIntent = services.length === 0 ? Intent.NONE
|
||||
: running === services.length ? Intent.SUCCESS
|
||||
: running === 0 ? Intent.DANGER
|
||||
: Intent.WARNING;
|
||||
|
||||
return (
|
||||
<div style={{ display: 'flex', flexDirection: 'column', gap: '1rem' }}>
|
||||
<Card elevation={Elevation.ONE} style={{
|
||||
display: 'flex', alignItems: 'center', gap: '0.75rem',
|
||||
padding: '0.75rem 1rem', flexWrap: 'wrap',
|
||||
}}>
|
||||
{loading ? <Spinner size={16} /> : (
|
||||
<Tag intent={statusIntent} round large>
|
||||
{services.length === 0 ? 'Not checked' : `${running} / ${services.length} running`}
|
||||
</Tag>
|
||||
)}
|
||||
<Button small icon="refresh" minimal onClick={refresh} loading={loading}>Refresh</Button>
|
||||
<div style={{ display: 'flex', gap: '0.4rem', marginLeft: 'auto' }}>
|
||||
<Button
|
||||
small icon="play" intent={Intent.SUCCESS}
|
||||
loading={composeBusy === 'up'} disabled={composeBusy !== null}
|
||||
onClick={() => startStream(streamComposeUp, 'up')}
|
||||
>Compose Up</Button>
|
||||
<Button
|
||||
small icon="refresh" intent={Intent.WARNING}
|
||||
loading={composeBusy === 'force'} disabled={composeBusy !== null}
|
||||
onClick={() => startStream(streamComposeForceUp, 'force')}
|
||||
title="Force-recreate all containers and remove orphans. Fixes 'container name already in use' errors."
|
||||
>Force Recreate</Button>
|
||||
<Button
|
||||
small icon="flame" intent={Intent.DANGER}
|
||||
loading={composeBusy === 'nuke'} disabled={composeBusy !== null}
|
||||
onClick={() => startStream(streamComposeNuke, 'nuke')}
|
||||
title="Force-removes every platform container by name then runs compose up. Use when Force Recreate still fails with name conflicts."
|
||||
>Nuke & Recreate</Button>
|
||||
<Button
|
||||
small icon="stop" intent={Intent.DANGER}
|
||||
loading={composeBusy === 'down'} disabled={composeBusy !== null}
|
||||
onClick={() => startStream(streamComposeDown, 'down')}
|
||||
>Compose Down</Button>
|
||||
</div>
|
||||
</Card>
|
||||
|
||||
{services.length > 0 && (
|
||||
<div style={{ display: 'flex', gap: '0.4rem', flexWrap: 'wrap' }}>
|
||||
{services.map(s => (
|
||||
<Tag
|
||||
key={s.container}
|
||||
intent={s.status === 'running' ? Intent.SUCCESS : s.status === 'unhealthy' ? Intent.WARNING : Intent.DANGER}
|
||||
minimal
|
||||
title={s.uptime ? `Up ${s.uptime}` : undefined}
|
||||
>
|
||||
{s.container}
|
||||
</Tag>
|
||||
))}
|
||||
</div>
|
||||
)}
|
||||
|
||||
<Terminal lines={lines} height={300} placeholder="Run Compose Up or Force Recreate to see output…" />
|
||||
</div>
|
||||
);
|
||||
}
|
||||
|
||||
// ── Verify tab ────────────────────────────────────────────────────────────────
|
||||
|
||||
/**
 * Post-provisioning verification tab with three independent checks:
 *  1. Extra Hosts — reads HostConfig.ExtraHosts off a tenant container.
 *  2. DNS Test — curls a URL from inside a container via the dns-test endpoint.
 *  3. Compose Artifact — fetches the generated docker-compose.yml for a subdomain.
 * Each check keeps its own input/result/loading/error state and is fully
 * independent of the others.
 */
function VerifyTab() {
  // ── Check 1: extra-hosts state (eh*) ──
  const [ehContainer, setEhContainer] = useState('');
  const [ehResult, setEhResult] = useState<string | null>(null);
  const [ehLoading, setEhLoading] = useState(false);
  const [ehError, setEhError] = useState<string | null>(null);

  // ── Check 2: in-container DNS test state (dns*) ──
  const [dnsContainer, setDnsContainer] = useState('');
  const [dnsUrl, setDnsUrl] = useState('https://keycloak.clarity.test/health/ready');
  const [dnsResult, setDnsResult] = useState<{ success: boolean; output: string; error: string } | null>(null);
  const [dnsLoading, setDnsLoading] = useState(false);

  // ── Check 3: compose-artifact viewer state (art*) ──
  const [subdomain, setSubdomain] = useState('');
  const [artifact, setArtifact] = useState<string | null>(null);
  const [artLoading, setArtLoading] = useState(false);
  const [artError, setArtError] = useState<string | null>(null);

  // GET the ExtraHosts list of the named container; renders it pretty-printed
  // on success, or the server-provided error on failure.
  async function checkExtraHosts() {
    setEhLoading(true); setEhResult(null); setEhError(null);
    try {
      const res = await fetch(`${BASE_URL}/api/image/verify/extra-hosts/${encodeURIComponent(ehContainer)}`);
      const data = await res.json();
      if (!res.ok) { setEhError(data.error ?? 'Not found'); return; }
      setEhResult(JSON.stringify(data.extraHosts, null, 2));
    } catch (e) {
      // Network failure or non-JSON body — res.json() rejections land here too.
      setEhError(e instanceof Error ? e.message : 'Unknown error');
    } finally { setEhLoading(false); }
  }

  // POST a {container, url} pair; server runs curl inside the container.
  // Note: success/failure comes from the response body, not the HTTP status.
  async function runDnsTest() {
    setDnsLoading(true); setDnsResult(null);
    try {
      const res = await fetch(`${BASE_URL}/api/image/verify/dns-test`, {
        method: 'POST',
        headers: { 'Content-Type': 'application/json' },
        body: JSON.stringify({ container: dnsContainer, url: dnsUrl }),
      });
      const data = await res.json();
      setDnsResult({ success: data.success, output: data.output ?? '', error: data.error ?? '' });
    } catch (e) {
      setDnsResult({ success: false, output: '', error: e instanceof Error ? e.message : 'Unknown error' });
    } finally { setDnsLoading(false); }
  }

  // Fetch the generated compose file content for the entered subdomain.
  async function viewArtifact() {
    setArtLoading(true); setArtifact(null); setArtError(null);
    try {
      const res = await fetch(`${BASE_URL}/api/image/artifact/${encodeURIComponent(subdomain)}`);
      const data = await res.json();
      if (!res.ok) { setArtError(data.error ?? 'Not found'); return; }
      setArtifact(data.content);
    } catch (e) {
      setArtError(e instanceof Error ? e.message : 'Unknown error');
    } finally { setArtLoading(false); }
  }

  return (
    <div style={{ display: 'flex', flexDirection: 'column', gap: '1.25rem' }}>

      {/* ── 1 · Extra Hosts Check ── */}
      <Card elevation={Elevation.ONE}>
        <h4 style={{ margin: '0 0 0.4rem', fontSize: '0.9rem' }}>1 · Extra Hosts Check</h4>
        <p style={{ margin: '0 0 0.75rem', fontSize: '0.8rem', color: '#8f99a8' }}>
          Verifies <code>*.clarity.test → host-gateway</code> was injected so OIDC discovery
          traffic routes through nginx correctly.
        </p>
        <FormGroup label="Container name" labelFor="eh-container" style={{ marginBottom: '0.5rem' }}>
          <InputGroup
            id="eh-container"
            value={ehContainer}
            onChange={e => setEhContainer(e.target.value)}
            placeholder="fdev-app-clarity-01000001"
            rightElement={
              <Button small minimal loading={ehLoading} intent={Intent.PRIMARY}
                onClick={checkExtraHosts} disabled={!ehContainer}>
                Check
              </Button>
            }
          />
        </FormGroup>
        {ehError && <Callout intent={Intent.DANGER} style={{ fontSize: '0.8rem' }}>{ehError}</Callout>}
        {ehResult && (
          <pre style={{
            marginTop: '0.5rem', background: '#0d1117', color: '#3fb950',
            padding: '0.5rem 0.75rem', borderRadius: 4, fontSize: '0.8rem',
            border: '1px solid #30363d', overflowX: 'auto',
          }}>{ehResult}</pre>
        )}
      </Card>

      {/* ── 2 · DNS Resolution Test ── */}
      <Card elevation={Elevation.ONE}>
        <h4 style={{ margin: '0 0 0.4rem', fontSize: '0.9rem' }}>2 · DNS Resolution Test</h4>
        <p style={{ margin: '0 0 0.75rem', fontSize: '0.8rem', color: '#8f99a8' }}>
          Runs <code>curl</code> from inside the container to verify <code>*.clarity.test</code> resolves
          through nginx — the critical path for Keycloak JWT validation.
        </p>
        <div style={{ display: 'flex', gap: '0.75rem', flexWrap: 'wrap', alignItems: 'flex-end' }}>
          <FormGroup label="Container" labelFor="dns-container" style={{ flex: '1 1 200px', marginBottom: 0 }}>
            <InputGroup
              id="dns-container"
              value={dnsContainer}
              onChange={e => setDnsContainer(e.target.value)}
              placeholder="fdev-app-clarity-01000001"
            />
          </FormGroup>
          <FormGroup label="URL" labelFor="dns-url" style={{ flex: '2 1 280px', marginBottom: 0 }}>
            <InputGroup id="dns-url" value={dnsUrl} onChange={e => setDnsUrl(e.target.value)} />
          </FormGroup>
          <Button
            intent={Intent.PRIMARY} loading={dnsLoading}
            disabled={!dnsContainer || !dnsUrl}
            onClick={runDnsTest}
            style={{ marginBottom: 0 }}
          >Test DNS</Button>
        </div>
        {dnsResult && (
          <Callout
            intent={dnsResult.success ? Intent.SUCCESS : Intent.DANGER}
            style={{ marginTop: '0.75rem', fontSize: '0.8rem' }}
          >
            {dnsResult.success
              ? '✔ Reachable — DNS and nginx routing is working correctly.'
              : '✖ Unreachable — check nginx/dnsmasq or extra_hosts injection.'}
            {(dnsResult.output || dnsResult.error) && (
              <pre style={{ margin: '0.5rem 0 0', fontSize: '0.75rem', whiteSpace: 'pre-wrap', overflowX: 'auto' }}>
                {dnsResult.output || dnsResult.error}
              </pre>
            )}
          </Callout>
        )}
      </Card>

      {/* ── 3 · Compose Artifact viewer ── */}
      <Card elevation={Elevation.ONE}>
        <h4 style={{ margin: '0 0 0.4rem', fontSize: '0.9rem' }}>3 · Compose Artifact</h4>
        <p style={{ margin: '0 0 0.75rem', fontSize: '0.8rem', color: '#8f99a8' }}>
          View the generated <code>docker-compose.yml</code> saved to{' '}
          <code>ClientAssets/{'{subdomain}'}/</code> after provisioning.
        </p>
        <FormGroup label="Subdomain" labelFor="art-subdomain" style={{ marginBottom: '0.5rem' }}>
          <InputGroup
            id="art-subdomain"
            value={subdomain}
            onChange={e => setSubdomain(e.target.value)}
            placeholder="acme"
            rightElement={
              <Button small minimal loading={artLoading} intent={Intent.PRIMARY}
                onClick={viewArtifact} disabled={!subdomain}>
                View
              </Button>
            }
          />
        </FormGroup>
        {artError && <Callout intent={Intent.DANGER} style={{ fontSize: '0.8rem' }}>{artError}</Callout>}
        {artifact && (
          <pre style={{
            marginTop: '0.5rem', background: '#0d1117', color: '#c9d1d9',
            padding: '0.75rem 1rem', borderRadius: 4, fontSize: '0.75rem',
            border: '1px solid #30363d', overflowX: 'auto', maxHeight: 400, overflowY: 'auto',
          }}>{artifact}</pre>
        )}
      </Card>
    </div>
  );
}
|
||||
|
||||
// ── Page ──────────────────────────────────────────────────────────────────────
|
||||
|
||||
export default function ImageBuildPage() {
|
||||
const [tab, setTab] = useState<TabId>('build');
|
||||
const [status, setStatus] = useState<ImageBuildStatus | null>(null);
|
||||
const [history, setHistory] = useState<BuildRecord[]>([]);
|
||||
const [building, setBuilding] = useState(false);
|
||||
@@ -162,17 +434,11 @@ export default function ImageBuildPage() {
|
||||
<div className="page-header">
|
||||
<div>
|
||||
<h1>Image Build</h1>
|
||||
<p>Build the <code style={{ fontSize: '0.85em' }}>clarity-server</code> Docker image from the current repo.</p>
|
||||
<p>
|
||||
Build and verify the <code style={{ fontSize: '0.85em' }}>clarity-server</code> Docker image.
|
||||
Use <strong>Platform</strong> to manage infra, <strong>Verify</strong> to inspect a provisioned tenant.
|
||||
</p>
|
||||
</div>
|
||||
<Button
|
||||
icon="build"
|
||||
intent={Intent.WARNING}
|
||||
large
|
||||
loading={building}
|
||||
disabled={building}
|
||||
onClick={handleBuild}
|
||||
text={building ? 'Building…' : 'Build Image'}
|
||||
/>
|
||||
</div>
|
||||
|
||||
{/* ── Status bar ── */}
|
||||
@@ -192,9 +458,7 @@ export default function ImageBuildPage() {
|
||||
{status.lastMessage}
|
||||
</Tag>
|
||||
)}
|
||||
{lastBuilt && (
|
||||
<span style={{ fontSize: '0.8rem', color: '#8f99a8' }}>Last built: {lastBuilt}</span>
|
||||
)}
|
||||
{lastBuilt && <span style={{ fontSize: '0.8rem', color: '#8f99a8' }}>Last built: {lastBuilt}</span>}
|
||||
</>
|
||||
) : (
|
||||
<Spinner size={16} />
|
||||
@@ -207,25 +471,55 @@ export default function ImageBuildPage() {
|
||||
</Callout>
|
||||
)}
|
||||
|
||||
{/* ── Terminal ── */}
|
||||
<div style={{ marginBottom: '1.5rem' }}>
|
||||
{/* ── Tabs ── */}
|
||||
<Tabs id="ibp-tabs" selectedTabId={tab} onChange={setTab} renderActiveTabPanelOnly>
|
||||
|
||||
<Tab
|
||||
id="build"
|
||||
title="Build"
|
||||
panel={
|
||||
<div style={{ display: 'flex', flexDirection: 'column', gap: '1.25rem', paddingTop: '1rem' }}>
|
||||
<div>
|
||||
<div style={{ display: 'flex', justifyContent: 'space-between', alignItems: 'center', marginBottom: '0.5rem' }}>
|
||||
<h3 style={{ margin: 0, fontSize: '0.9rem', color: '#8f99a8', textTransform: 'uppercase', letterSpacing: '0.05em' }}>Output</h3>
|
||||
<div style={{ display: 'flex', gap: '0.4rem' }}>
|
||||
{logs.length > 0 && !building && (
|
||||
<Button minimal small icon="trash" text="Clear" onClick={() => setLogs([])} />
|
||||
)}
|
||||
<Button
|
||||
icon="build" intent={Intent.WARNING}
|
||||
loading={building} disabled={building}
|
||||
onClick={handleBuild}
|
||||
text={building ? 'Building…' : 'Build Image'}
|
||||
/>
|
||||
</div>
|
||||
<BuildTerminal lines={logs} />
|
||||
</div>
|
||||
|
||||
{/* ── History ── */}
|
||||
<Terminal lines={logs} />
|
||||
</div>
|
||||
<div>
|
||||
<div style={{ display: 'flex', justifyContent: 'space-between', alignItems: 'center', marginBottom: '0.5rem' }}>
|
||||
<h3 style={{ margin: 0, fontSize: '0.9rem', color: '#8f99a8', textTransform: 'uppercase', letterSpacing: '0.05em' }}>Build History</h3>
|
||||
<h3 style={{ margin: 0, fontSize: '0.9rem', color: '#8f99a8', textTransform: 'uppercase', letterSpacing: '0.05em' }}>History</h3>
|
||||
<Button minimal small icon="refresh" onClick={refreshStatus} />
|
||||
</div>
|
||||
<BuildHistoryTable records={history} />
|
||||
</div>
|
||||
</div>
|
||||
}
|
||||
/>
|
||||
|
||||
<Tab
|
||||
id="platform"
|
||||
title="Platform"
|
||||
panel={<div style={{ paddingTop: '1rem' }}><PlatformTab /></div>}
|
||||
/>
|
||||
|
||||
<Tab
|
||||
id="verify"
|
||||
title="Verify"
|
||||
panel={<div style={{ paddingTop: '1rem' }}><VerifyTab /></div>}
|
||||
/>
|
||||
|
||||
</Tabs>
|
||||
</>
|
||||
);
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user