using ControlPlane.Core.Models;
using ControlPlane.Core.Services;
using Docker.DotNet;
using Docker.DotNet.Models;

namespace ControlPlane.Api.Services;

/// <summary>
/// Drives <c>docker build</c> for the clarity-server image via the Docker SDK.
/// Streams each build log line to the provided callback so the API endpoint
/// can forward it as SSE to the control plane UI in real time.
/// Persists build history via <see cref="BuildHistoryService"/>.
/// </summary>
public class ImageBuildService(
    IConfiguration config,
    BuildHistoryService history,
    ILogger<ImageBuildService> logger)
{
    // Static so the "one build at a time" invariant holds process-wide even if
    // DI ever hands out more than one instance of this service.
    private static readonly SemaphoreSlim _lock = new(1, 1);

    /// <summary>True while a docker build is currently in flight.</summary>
    public bool IsBuilding => _lock.CurrentCount == 0;

    /// <summary>Image name:tag to build (configurable via "Docker:ClarityServerImage").</summary>
    public string ImageName => config["Docker:ClarityServerImage"] ?? "clarity-server:latest";

    /// <summary>
    /// Returns the outcome of the most recent docker-image build, plus whether
    /// a build is running right now.
    /// </summary>
    public async Task<ImageBuildStatus> GetStatusAsync()
    {
        var builds = await history.GetBuildsAsync();
        var last = builds.FirstOrDefault(b => b.Kind == BuildKind.DockerImage);
        return new ImageBuildStatus(
            last?.Target,
            last?.FinishedAt,
            last?.Status.ToString() ?? "Never built",
            IsBuilding);
    }

    /// <summary>
    /// Runs docker build and streams each log line to <paramref name="onLine"/>.
    /// </summary>
    /// <param name="repoRoot">Directory packed up as the Docker build context.</param>
    /// <param name="onLine">Receives each build-log line as it is produced.</param>
    /// <param name="ct">Cancels the build.</param>
    /// <returns>
    /// <c>true</c> on success; <c>false</c> if the build failed or another
    /// build was already running.
    /// </returns>
    public async Task<bool> BuildAsync(
        string repoRoot,
        Action<string> onLine,
        CancellationToken ct)
    {
        // Non-blocking acquire: a second concurrent request is rejected, not queued.
        if (!await _lock.WaitAsync(TimeSpan.Zero, ct))
        {
            onLine("⚠️ A build is already in progress.");
            return false;
        }

        var record = await history.CreateBuildAsync(BuildKind.DockerImage, ImageName);
        try
        {
            var socketUri = config["Docker:Socket"] ?? "npipe://./pipe/docker_engine";
            using var docker =
                new DockerClientConfiguration(new Uri(socketUri)).CreateClient();

            var (repo, tag) = SplitImageTag(ImageName);
            var dockerfilePath = "Clarity/Clarity.Server/Dockerfile";

            // Mirror every line into the persisted build record as well as the
            // live SSE stream.
            void Log(string line)
            {
                onLine(line);
                record.Log.Add(line);
            }

            Log($"▶ Building {ImageName} from {repoRoot}");
            Log($"  Dockerfile: {dockerfilePath}");
            Log("──────────────────────────────────────");

            var buildParams = new ImageBuildParameters
            {
                Dockerfile = dockerfilePath,
                Tags = [$"{repo}:{tag}"],
                Remove = true,       // remove intermediate containers on success
                ForceRemove = true,  // ...and on failure
            };

            bool success = true;
            string? errorDetail = null;

            // Dispose the context stream once the daemon has consumed it
            // (previously leaked).
            using var contextStream = await CreateTarballAsync(repoRoot, ct);

            // System.Progress<T> posts callbacks to the sync context / thread
            // pool, so the final error message could land AFTER the await below
            // completes and `success` would be read stale. SyncProgress invokes
            // the handler synchronously, keeping flag and log in order.
            await docker.Images.BuildImageFromDockerfileAsync(
                buildParams,
                contextStream,
                authConfigs: null,
                headers: null,
                new SyncProgress<JSONMessage>(msg =>
                {
                    if (!string.IsNullOrWhiteSpace(msg.Stream))
                        Log(msg.Stream.TrimEnd('\n'));
                    if (msg.Error is not null)
                    {
                        success = false;
                        errorDetail = msg.Error.Message;
                        Log($"✖ {msg.Error.Message}");
                    }
                }),
                ct);

            Log("──────────────────────────────────────");
            if (success)
                Log($"✔ {ImageName} built successfully at {DateTimeOffset.UtcNow:u}");
            else
                Log($"✖ Build failed: {errorDetail}");

            await history.CompleteBuildAsync(
                record, success ? BuildStatus.Succeeded : BuildStatus.Failed);
            logger.LogInformation("Image build {Result} for {Image}",
                success ? "succeeded" : "failed", ImageName);
            return success;
        }
        catch (Exception ex)
        {
            record.Log.Add($"Exception: {ex.Message}");
            await history.CompleteBuildAsync(record, BuildStatus.Failed);
            onLine($"✖ Exception during build: {ex.Message}");
            logger.LogError(ex, "Image build threw an exception.");
            return false;
        }
        finally
        {
            _lock.Release();
        }
    }

    /// <summary>
    /// Packs the entire repo root into a tar stream for the Docker build context.
    /// NOTE(review): whether .dockerignore is honored depends on TarHelper.Pack —
    /// confirm; the daemon itself does not filter a pre-built context.
    /// </summary>
    private static async Task<Stream> CreateTarballAsync(string repoRoot, CancellationToken ct)
    {
        // Managed tar of the directory, buffered in memory. NOTE(review): large
        // repos may warrant streaming to a temp file instead of a MemoryStream.
        var ms = new MemoryStream();
        await Task.Run(() => TarHelper.Pack(repoRoot, ms), ct);
        ms.Position = 0;
        return ms;
    }

    /// <summary>Splits "repo:tag" into its parts, defaulting the tag to "latest".</summary>
    private static (string repo, string tag) SplitImageTag(string image)
    {
        var colon = image.LastIndexOf(':');
        return colon < 0
            ? (image, "latest")
            : (image[..colon], image[(colon + 1)..]);
    }

    /// <summary>
    /// <see cref="IProgress{T}"/> implementation that invokes the handler
    /// synchronously on the reporting thread, unlike <see cref="Progress{T}"/>
    /// which posts to the captured sync context / thread pool.
    /// </summary>
    private sealed class SyncProgress<T>(Action<T> handler) : IProgress<T>
    {
        public void Report(T value) => handler(value);
    }
}

/// <summary>Snapshot of the most recent clarity-server image build.</summary>
public record ImageBuildStatus(
    string? ImageName,
    DateTimeOffset? BuiltAt,
    string LastMessage,
    bool IsBuilding);