OPC # 0001: Extract OPC into standalone repo

amadzarak
2026-04-25 17:26:42 -04:00
commit 42383bdc03
170 changed files with 21365 additions and 0 deletions
@@ -0,0 +1,160 @@
using Scalar.Aspire;
var builder = DistributedApplication.CreateBuilder(args);
// ─────────────────────────────────────────────────────────────────────────────
// Platform infrastructure (Keycloak, Vault, MinIO, Nginx, Dnsmasq) is
// managed by infra/docker-compose.yml — NOT Aspire.
// Run `docker compose up -d` from the infra/ folder before starting this host.
//
// Fixed dev URLs (hardcoded to match infra/docker-compose.yml):
// Keycloak → http://localhost:8080
// Vault → http://localhost:8200
// MinIO → http://localhost:9000
//
// ControlPlane owns: opc-postgres (opcdb + giteadb), RabbitMQ, Gitea.
// ─────────────────────────────────────────────────────────────────────────────
// Shared paths
var clientAssetsPath = Path.GetFullPath(Path.Combine(builder.AppHostDirectory, "..", "ClientAssets"));
var nginxConfDPath = Path.GetFullPath(Path.Combine(builder.AppHostDirectory, "..", "infra", "nginx", "conf.d"));
var vaultKeysFile = Path.GetFullPath(Path.Combine(builder.AppHostDirectory, "..", "infra", "vault", "data", "init.json"));
#region CONTROLPLANE POSTGRES
// ControlPlane owns this — isolated from platform infra postgres.
// Override via: dotnet user-secrets set "Parameters:cp-postgres-password" "yourpassword"
var cpPostgresPassword = builder.AddParameter("cp-postgres-password", "controlplane-dev", secret: true);
var cpPostgres = builder.AddPostgres("opc-postgres", password: cpPostgresPassword)
.WithLifetime(ContainerLifetime.Persistent)
.WithDataVolume("opc-postgres-data")
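// Host port 5433 avoids the platform postgres from infra/docker-compose.yml, which uses 5432 (see ConnectionStrings__platformdb below).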
.WithHostPort(5433)
.WithPgAdmin();
var controlPlaneDb = cpPostgres.AddDatabase("opcdb");
var giteaDb = cpPostgres.AddDatabase("giteadb");
#endregion
#region GITEA
// Gitea is ControlPlane's code management component — owns its own DB on opc-postgres.
var gitea = builder.AddContainer("gitea", "gitea/gitea", "latest")
.WithHttpEndpoint(port: 3000, targetPort: 3000, name: "http")
.WithEndpoint(port: 2222, targetPort: 22, name: "ssh")
.WithVolume("clarity-gitea-data", "/data")
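// Gitea runs in its own container, so it reaches the Aspire-managed postgres through the host mapping (host.docker.internal:5433) rather than service discovery.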
.WithEnvironment("GITEA__database__DB_TYPE", "postgres")
.WithEnvironment("GITEA__database__HOST", "host.docker.internal:5433")
.WithEnvironment("GITEA__database__NAME", "giteadb")
.WithEnvironment("GITEA__database__USER", "postgres")
.WithEnvironment("GITEA__database__PASSWD", "controlplane-dev")
.WithEnvironment("GITEA__server__DOMAIN", "opc.clarity.test")
.WithEnvironment("GITEA__server__ROOT_URL", "http://opc.clarity.test")
.WithEnvironment("GITEA__server__SSH_DOMAIN", "opc.clarity.test")
.WithEnvironment("GITEA__server__SSH_PORT", "2222")
.WithEnvironment("GITEA__service__DISABLE_REGISTRATION", "true")
.WaitFor(giteaDb)
.WithLifetime(ContainerLifetime.Persistent);
#endregion
#region RABBITMQ
var rabbitPassword = builder.AddParameter("rabbitmq-password", "clarity-rabbit", secret: true);
var rabbit = builder.AddRabbitMQ("rabbitmq", password: rabbitPassword)
.WithLifetime(ContainerLifetime.Persistent)
.WithManagementPlugin();
#endregion
#region CONTROLPLANE API
var api = builder.AddProject<Projects.ControlPlane_Api>("controlplane-api")
.WithReference(rabbit)
.WaitFor(rabbit)
.WithReference(controlPlaneDb)
.WaitFor(controlPlaneDb)
.WithEnvironment("Gitea__BaseUrl", gitea.GetEndpoint("http"))
.WithEnvironment("ClientAssets__Folder", clientAssetsPath)
.WithEnvironment("Docker__RepoRoot", builder.AppHostDirectory.Replace("ControlPlane.AppHost", "").TrimEnd('\\', '/'))
.WithExternalHttpEndpoints();
#endregion
#region PROVISIONING WORKER
builder.AddProject<Projects.ControlPlane_Worker>("controlplane-worker")
.WithReference(rabbit)
.WaitFor(rabbit)
// Vault — fixed dev address from infra/docker-compose.yml
.WithEnvironment("Vault__Address", "http://localhost:8200")
.WithEnvironment("Vault__ContainerAddress", "http://vault:8200")
.WithEnvironment("Vault__KeysFile", vaultKeysFile)
// Keycloak — fixed dev address from infra/docker-compose.yml
.WithEnvironment("Keycloak__AuthServerUrl", "http://localhost:8080")
.WithEnvironment("Keycloak__ContainerUrl", "https://keycloak.clarity.test")
.WithEnvironment("Keycloak__Realm", "master")
.WithEnvironment("Keycloak__Resource", "admin-cli")
.WithEnvironment("Keycloak__AdminUser", "admin")
.WithEnvironment("Keycloak__AdminPassword", "Admin1234!")
// Gateway
.WithEnvironment("Gateway__TenantBaseUrl", "https://{subdomain}.clarity.test")
// ClarityInfraOptions
.WithEnvironment("Clarity__Domain", "clarity.test")
.WithEnvironment("Clarity__Network", "clarity-net")
.WithEnvironment("Clarity__KeycloakPublicUrl", "https://keycloak.clarity.test")
.WithEnvironment("Clarity__KeycloakInternalUrl", "http://keycloak:8080")
.WithEnvironment("Clarity__VaultInternalUrl", "http://vault:8200")
.WithEnvironment("Clarity__NginxCertPath", "/etc/nginx/certs/clarity.test.crt")
.WithEnvironment("Clarity__NginxCertKeyPath", "/etc/nginx/certs/clarity.test.key")
// Nginx conf.d — points to infra/nginx/conf.d so platform nginx picks up tenant configs
.WithEnvironment("Nginx__ConfDPath", nginxConfDPath)
.WithEnvironment("ClientAssets__Folder", clientAssetsPath)
// Platform Postgres connection string for tenant database provisioning (infra/docker-compose.yml)
.WithEnvironment("ConnectionStrings__platformdb",
"Host=localhost;Port=5432;Username=postgres;Password=postgres")
.WithReference(controlPlaneDb)
.WaitFor(controlPlaneDb);
#endregion
#region CONTROLPLANE UI
builder.AddViteApp("controlplane-ui", "../clarity.controlplane")
.WithReference((IResourceBuilder<IResourceWithServiceDiscovery>)api)
.WaitFor(api);
#endregion
#region CLARITY-NET connect RabbitMQ to platform network
// Ensures RabbitMQ (the one container Aspire owns) is reachable from tenant containers
// on clarity-net. All other platform containers are already on clarity-net via docker-compose.
builder.Eventing.Subscribe<AfterResourcesCreatedEvent>(async (@event, ct) =>
{
const string network = "clarity-net";
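// Short settle delay before shelling out to the Docker CLI; assumes the RabbitMQ container exists shortly after this event fires.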
await Task.Delay(TimeSpan.FromSeconds(4), ct);
var (inspectCode, _) = await DockerOutputAsync($"network inspect {network}", ct);
if (inspectCode != 0)
await DockerOutputAsync($"network create {network}", ct);
var (idCode, idOut) = await DockerOutputAsync("ps --filter name=rabbitmq --format {{.ID}}", ct);
if (idCode == 0 && !string.IsNullOrWhiteSpace(idOut))
{
var containerId = idOut.Trim().Split('\n')[0].Trim();
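// The connect result is intentionally ignored; reconnecting an already-connected container fails harmlessly.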
await DockerOutputAsync($"network connect --alias rabbitmq {network} {containerId}", ct);
}
});
#endregion
#region SCALAR API DOCS
var scalar = builder.AddScalarApiReference();
scalar.WithApiReference(api);
#endregion
builder.Build().Run();
static async Task<(int ExitCode, string Output)> DockerOutputAsync(string args, CancellationToken ct)
{
var psi = new System.Diagnostics.ProcessStartInfo("docker", args)
{
RedirectStandardOutput = true,
RedirectStandardError = true,
UseShellExecute = false
};
using var proc = System.Diagnostics.Process.Start(psi)!;
// Read both streams concurrently; leaving a redirected stderr unread can deadlock the child process once its buffer fills.
var stdoutTask = proc.StandardOutput.ReadToEndAsync(ct);
var stderrTask = proc.StandardError.ReadToEndAsync(ct);
var output = await stdoutTask;
_ = await stderrTask;
await proc.WaitForExitAsync(ct);
return (proc.ExitCode, output);
}
@@ -0,0 +1,38 @@
<Project Sdk="Aspire.AppHost.Sdk/13.2.2">
<PropertyGroup>
<OutputType>Exe</OutputType>
<TargetFramework>net10.0</TargetFramework>
<ImplicitUsings>enable</ImplicitUsings>
<Nullable>enable</Nullable>
<UserSecretsId>controlplane-apphost-$(MSBuildProjectName)</UserSecretsId>
</PropertyGroup>
<ItemGroup>
<ProjectReference Include="..\ControlPlane.Api\ControlPlane.Api.csproj" />
<ProjectReference Include="..\ControlPlane.Worker\ControlPlane.Worker.csproj" />
</ItemGroup>
<ItemGroup>
<PackageReference Include="Aspire.Hosting.JavaScript" />
<PackageReference Include="Aspire.Hosting.PostgreSQL" />
<PackageReference Include="Aspire.Hosting.Keycloak" />
<PackageReference Include="Aspire.Hosting.RabbitMQ" />
<PackageReference Include="CommunityToolkit.Aspire.Hosting.Minio" />
<PackageReference Include="Scalar.Aspire" />
</ItemGroup>
<ItemGroup>
<None Update="KeycloakConfig\realm-export.json">
<CopyToOutputDirectory>PreserveNewest</CopyToOutputDirectory>
</None>
<None Update="VaultConfig\vault.hcl">
<CopyToOutputDirectory>PreserveNewest</CopyToOutputDirectory>
</None>
<None Update="VaultConfig\entrypoint.sh">
<CopyToOutputDirectory>PreserveNewest</CopyToOutputDirectory>
</None>
</ItemGroup>
</Project>
@@ -0,0 +1,17 @@
# Resolve all *.clarity.test subdomains to the loopback address.
# nginx (bound to ports 80/443 on the host) then routes by subdomain to the correct tenant container.
address=/.clarity.test/127.0.0.1
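# Quick check from the host (illustrative): dig @127.0.0.1 +short anything.clarity.test should print 127.0.0.1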
# Don't read /etc/resolv.conf or /etc/hosts from the container — we are the resolver
no-resolv
no-hosts
# Forward everything that isn't clarity.test to public upstream resolvers (Cloudflare and Google)
server=1.1.1.1
server=8.8.8.8
# Listen on all interfaces inside the container
listen-address=0.0.0.0
# Log queries — useful during initial setup, can be removed later
log-queries
File diff suppressed because it is too large.
@@ -0,0 +1,19 @@
-----BEGIN CERTIFICATE-----
MIIDGzCCAgOgAwIBAgIUS0kgcdXIrlOk/K6g2bfLDRycqk8wDQYJKoZIhvcNAQEL
BQAwGTEXMBUGA1UEAwwOKi5jbGFyaXR5LnRlc3QwHhcNMjYwNDI0MjIwMDUzWhcN
MjgwNzI3MjIwMDUzWjAZMRcwFQYDVQQDDA4qLmNsYXJpdHkudGVzdDCCASIwDQYJ
KoZIhvcNAQEBBQADggEPADCCAQoCggEBAMWAJ62tsrnMaMnF3NR2Yfv1LKS9IRfm
sTtTWba7D8fcs9JXGlEn+vMa10AjV91yaSQoQdwLCOwkF58CmLBs0K+vvPoLgvcZ
BQxVrBj0t1YlTwLcez8vEgb2tHKGo914T/YLh+clF8oig9tIIiTNbngUGabpWUym
vPllDQ8nB0m4IkHbMAhgdDUG9X5Vc/lWHW6gxhRiUQt7HLqWJ2lLleQR5qEqRQx+
RmtseS11jhzwDYf1VVzQ2AE2tUaq82p0cZAF8uFZnESuv1Hcu+1KBfjCaGXJ/485
gg1q01sYhAkX0LAK/CqRBOd7zp9cDm3NX0tLBj4Gek6h0kFGkmRtAmcCAwEAAaNb
MFkwHQYDVR0OBBYEFJNI82Atz7k2pa2IZECO9aG30dnHMA8GA1UdEwEB/wQFMAMB
Af8wJwYDVR0RBCAwHoIOKi5jbGFyaXR5LnRlc3SCDGNsYXJpdHkudGVzdDANBgkq
hkiG9w0BAQsFAAOCAQEAO5MyjFXcOZeEwPJRel8Mvg1HRwu97tL/BB9Hb13JWzdx
FBBqwOdRrG8IB7byXLjH1ng4xMM+WI9yeZ29bV/PcrZwermGNzU+ob1SrvJYh0hb
sX0zeXKjKDGMsdlyZAERnvGOxlPzNtYRpeSD7h3qKtuzJiReCNdGzSh+2bLfxEIb
wTJJNgnXRA4GGK5zghmzOEpq/w8sqpB4hLz9OK8a33QOKp79LrfyT1B9uZq4uHZ8
SvTX89KZOGmUQraF/6QvL3CcMutwzf4unKxyaStflrcGjCn/eEe8Ea3IWL1EwU8K
9JvyDvWgv7oib7FA2BZGbYvT+wsFjiFBzTcWUX132g==
-----END CERTIFICATE-----
@@ -0,0 +1,28 @@
-----BEGIN PRIVATE KEY-----
MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDFgCetrbK5zGjJ
xdzUdmH79SykvSEX5rE7U1m2uw/H3LPSVxpRJ/rzGtdAI1fdcmkkKEHcCwjsJBef
ApiwbNCvr7z6C4L3GQUMVawY9LdWJU8C3Hs/LxIG9rRyhqPdeE/2C4fnJRfKIoPb
SCIkzW54FBmm6VlMprz5ZQ0PJwdJuCJB2zAIYHQ1BvV+VXP5Vh1uoMYUYlELexy6
lidpS5XkEeahKkUMfkZrbHktdY4c8A2H9VVc0NgBNrVGqvNqdHGQBfLhWZxErr9R
3LvtSgX4wmhlyf+POYINatNbGIQJF9CwCvwqkQTne86fXA5tzV9LSwY+BnpOodJB
RpJkbQJnAgMBAAECggEAGc9MICXNb/t3DDtHxxorZuZc7bBrpTh4G9UiKb+badZ9
R3UrksSDRobQ72hPALkFZXy/Upa8lUOINLb9pjyqLvNr4k9jz4/c+YYupdpBJUhd
4XVXw+OOWwudfEP9ISGqbXCHU50k1T0adysfjyirkZSq34WqLlqx4nOit8K1cJwc
5+jvApwOPz6zf9kFJYjybbUSPO8bFLVTpjs3hgUzaCMkYMn6R/5bR5SMeqCbZILB
fkGm+KaeS3cIY7PhDhSoiWJUR5/ZsaoT5s1IM5aGTe62XVY5eoMixYEibx/e68XC
eL3eWO304QU6AgMKHFhtTKFpnJHlyV/gu084/xWC7QKBgQD9lrkRgDDMXfuDtFRr
LiQ3QFEmmj0m2ekHIpdZDY3rJ0bbQzTw4cqWs437qMKcTczK70mfxp/IjPoky+8i
bSlm/pR+U/YwsgK0dxGLzHbIQYYQdI4BjBsysNOvxnKUxRciAMpIW5ULGKYUkCde
dhH5c2Rmve0yq6MYJ8DCOTXCwwKBgQDHYOd50Tjw5i+a5wcHEsfY+r/Vsu1u1BrS
/sdpJ+dKxx50TQO4F7tnrugwJ9cvxPDGQApDHFbIwn70zQuDNvYLD2CTtwHoJHx/
wuP3p0Rw3DmhKI9CN0oXclqNV3PZ54PZ2M5HEl0zkpoIse4YtWc0uyO6RKVHHtPr
jGjTKeZ/jQKBgAc7XinGmx2o7HxUDzhDR5sfxXCxY18RRdkDPoe2oD59j0K/hun7
tnhXxIvRw0ML4PREoLfixTnF83hLLJWxwUWDqx5zLIk0+mjFIIX5HcYWQEmF2Wrn
4PqwGklgAnKFsGQy25H2sqhvWoUpm0XRXi/b/5gCgJo6VNtiftfLI+JbAoGAC496
3H1dJ9qw9/JdXfOg0tv3M5TkX4C87W8IcPh3WMai5Wtxw8Lcgu6JWAF3YLWyoEwm
TC3gelOMuPUKrdkJ+yoxF1+NJMC410+dmEaCmWirjsSjSdua2DExPvDLLt9VrdP8
YfKWpN7jP43RmG0sRspzD+HbE3yeHRJPIa9URiECgYEAyxOOXDCQSPifgIRZe5hr
u+WsMukUypizXq36/ydCfMD7HcPOgO6bNkNsh6WlaaNrFQwR2O96V0BvrSAI242a
bTEyUx7fTwoZmn/8O6/WIwkyYolixNYbClcAIopbOXxJ9bJ1KqS47mHv1RrQ8FqN
OpJWMvrAktqNT5tjDeIj6mc=
-----END PRIVATE KEY-----
@@ -0,0 +1,2 @@
# Placeholder so the conf.d directory is tracked by git and exists at container mount time.
# The provisioning worker writes per-tenant .conf files here at runtime.
@@ -0,0 +1,19 @@
# Auto-generated by ControlPlane.Worker — do not edit manually.
# Tenant: fdev-app-clarity-01000000
server {
listen 443 ssl;
server_name fdev-app-clarity-01000000.clarity.test;
ssl_certificate /etc/nginx/certs/clarity.test.crt;
ssl_certificate_key /etc/nginx/certs/clarity.test.key;
location / {
# Docker DNS resolves the container name on the managed network
set $upstream http://fdev-app-clarity-01000000:8080;
proxy_pass $upstream;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
}
}
@@ -0,0 +1,19 @@
# Auto-generated by ControlPlane.Worker — do not edit manually.
# Tenant: fdev-app-clarity-02000000
server {
listen 443 ssl;
server_name fdev-app-clarity-02000000.clarity.test;
ssl_certificate /etc/nginx/certs/clarity.test.crt;
ssl_certificate_key /etc/nginx/certs/clarity.test.key;
location / {
# Docker DNS resolves the container name on the managed network
set $upstream http://fdev-app-clarity-02000000:8080;
proxy_pass $upstream;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
}
}
@@ -0,0 +1,19 @@
# Auto-generated by ControlPlane.Worker — do not edit manually.
# Tenant: fdev-app-clarity-03000000
server {
listen 443 ssl;
server_name fdev-app-clarity-03000000.clarity.test;
ssl_certificate /etc/nginx/certs/clarity.test.crt;
ssl_certificate_key /etc/nginx/certs/clarity.test.key;
location / {
# Docker DNS resolves the container name on the managed network
set $upstream http://fdev-app-clarity-03000000:8080;
proxy_pass $upstream;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
}
}
@@ -0,0 +1,19 @@
# Auto-generated by ControlPlane.Worker — do not edit manually.
# Tenant: fdev-app-clarity-04000000
server {
listen 443 ssl;
server_name fdev-app-clarity-04000000.clarity.test;
ssl_certificate /etc/nginx/certs/clarity.test.crt;
ssl_certificate_key /etc/nginx/certs/clarity.test.key;
location / {
# Docker DNS resolves the container name on the managed network
set $upstream http://fdev-app-clarity-04000000:8080;
proxy_pass $upstream;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
}
}
@@ -0,0 +1,21 @@
server {
listen 443 ssl;
server_name opc.clarity.test;
ssl_certificate /etc/nginx/certs/clarity.test.crt;
ssl_certificate_key /etc/nginx/certs/clarity.test.key;
# Git over HTTP needs larger body and longer timeouts
client_max_body_size 100m;
proxy_read_timeout 300s;
proxy_send_timeout 300s;
location / {
set $upstream http://gitea:3000;
proxy_pass $upstream;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
}
}
@@ -0,0 +1,16 @@
server {
listen 443 ssl;
server_name keycloak.clarity.test;
ssl_certificate /etc/nginx/certs/clarity.test.crt;
ssl_certificate_key /etc/nginx/certs/clarity.test.key;
location / {
set $upstream http://keycloak:8080;
proxy_pass $upstream;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
}
}
@@ -0,0 +1,27 @@
events {
worker_connections 1024;
}
http {
# Use Docker's embedded DNS resolver so container names resolve dynamically.
# This is critical — without it nginx resolves upstream names at startup only
# and won't pick up newly provisioned tenant containers.
resolver 127.0.0.11 valid=5s ipv6=off;
# Shared log format
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent"';
access_log /var/log/nginx/access.log main;
error_log /var/log/nginx/error.log warn;
# Redirect all HTTP → HTTPS
server {
listen 80 default_server;
return 301 https://$host$request_uri;
}
# Pick up per-tenant server blocks dropped by the provisioning worker
include /etc/nginx/conf.d/*.conf;
}
@@ -0,0 +1,31 @@
{
"$schema": "https://json.schemastore.org/launchsettings.json",
"profiles": {
"https": {
"commandName": "Project",
"dotnetRunMessages": true,
"launchBrowser": true,
"applicationUrl": "https://controlplane.dev.localhost:17000;http://controlplane.dev.localhost:15000",
"environmentVariables": {
"ASPNETCORE_ENVIRONMENT": "Development",
"DOTNET_ENVIRONMENT": "Development",
"ASPIRE_DASHBOARD_OTLP_ENDPOINT_URL": "https://localhost:21000",
"ASPIRE_DASHBOARD_MCP_ENDPOINT_URL": "https://localhost:21001",
"ASPIRE_RESOURCE_SERVICE_ENDPOINT_URL": "https://localhost:21002"
}
},
"http": {
"commandName": "Project",
"dotnetRunMessages": true,
"launchBrowser": true,
"applicationUrl": "http://controlplane.dev.localhost:15000",
"environmentVariables": {
"ASPNETCORE_ENVIRONMENT": "Development",
"DOTNET_ENVIRONMENT": "Development",
"ASPIRE_DASHBOARD_OTLP_ENDPOINT_URL": "http://localhost:21000",
"ASPIRE_DASHBOARD_MCP_ENDPOINT_URL": "http://localhost:21001",
"ASPIRE_RESOURCE_SERVICE_ENDPOINT_URL": "http://localhost:21002"
}
}
}
}
@@ -0,0 +1,48 @@
#!/bin/sh
set -e
KEYS_FILE="/vault/file/init.json"
VAULT_ADDR="http://127.0.0.1:8200"
export VAULT_ADDR
# Start Vault server in the background
vault server -config=/vault/config/vault.hcl &
VAULT_PID=$!
# Wait for Vault to be ready
echo "[vault-init] Waiting for Vault to start..."
until vault status > /dev/null 2>&1 || vault status 2>&1 | grep -q "Sealed\|Initialized"; do
sleep 1
done
echo "[vault-init] Vault is up."
# Check if already initialised
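# Parsed with grep rather than jq, which is assumed absent from the stock vault image.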
INIT_STATUS=$(vault status -format=json 2>/dev/null | grep '"initialized"' | grep -c "true" || true)
if [ "$INIT_STATUS" = "0" ]; then
echo "[vault-init] First run — initialising Vault..."
vault operator init -key-shares=1 -key-threshold=1 -format=json > "$KEYS_FILE"
echo "[vault-init] Keys saved to $KEYS_FILE"
fi
# Unseal using saved key
UNSEAL_KEY=$(grep '"unseal_keys_b64"' "$KEYS_FILE" -A1 | grep '"' | tail -1 | tr -d ' ",' )
ROOT_TOKEN=$(grep '"root_token"' "$KEYS_FILE" | sed 's/.*: *"\(.*\)".*/\1/')
echo "[vault-init] Unsealing..."
vault operator unseal "$UNSEAL_KEY"
echo "[vault-init] Vault is unsealed. Root token is stored in $KEYS_FILE"
# Authenticate and bootstrap Transit engine + master key (idempotent)
export VAULT_TOKEN="$ROOT_TOKEN"
echo "[vault-init] Enabling Transit secrets engine..."
vault secrets enable -path=clarity-transit transit 2>/dev/null || echo "[vault-init] clarity-transit already enabled."
echo "[vault-init] Creating master-key..."
vault write -f clarity-transit/keys/master-key 2>/dev/null || echo "[vault-init] master-key already exists."
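# Illustrative smoke test once bootstrapped:
#   vault write clarity-transit/encrypt/master-key plaintext=$(echo -n "hello" | base64)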
echo "[vault-init] Vault bootstrap complete."
# Keep container alive by waiting on the Vault process
wait $VAULT_PID
@@ -0,0 +1,13 @@
storage "file" {
path = "/vault/file"
}
listener "tcp" {
address = "0.0.0.0:8200"
tls_disable = true
}
ui = true
disable_mlock = true
# Unsealing is handled by entrypoint.sh using a stored Shamir key; dev convenience only, never use in prod
@@ -0,0 +1,19 @@
-----BEGIN CERTIFICATE-----
MIIDDTCCAfWgAwIBAgIURU3028kH3veUBjTtDis5N5SYI9AwDQYJKoZIhvcNAQEL
BQAwGTEXMBUGA1UEAwwOKi5sb2NhbHRlc3QubWUwHhcNMjYwNDI0MTYwNzU3WhcN
MzYwNDIxMTYwNzU3WjAZMRcwFQYDVQQDDA4qLmxvY2FsdGVzdC5tZTCCASIwDQYJ
KoZIhvcNAQEBBQADggEPADCCAQoCggEBALiZjuDCZ7uBicnk1ko6nlJIf/Zn2thr
ArBA9FD1wtMm0tWMA66fQ+STlkTw2LOlsjIk9d4A3s7jGhVyAikLqylm8in3WVWT
X4Ms5FB7lXqGEsuMI6Fq8l+Xw5boWE15XRGoOEPqaazfIvy4utF9Dk1TLXAv+Svv
dTTek7phU3hzWxzOTdk9fVhHdYqJy0ZjaxJxyUbTDPRf+IHad/0iWWpZaRuP5QEz
J0zujXEvJdFUVXOcPqSs0SdkaKqYbxegHwUK5ALQSVzH7CYHR4+Np6ChUw8+RFid
b9dQH2pzm9h7iaKD58AWLLB/D2uHBnSPkOahWY8oizlNRxsSuY7/x4cCAwEAAaNN
MEswJwYDVR0RBCAwHoIOKi5sb2NhbHRlc3QubWWCDGxvY2FsdGVzdC5tZTALBgNV
HQ8EBAMCBaAwEwYDVR0lBAwwCgYIKwYBBQUHAwEwDQYJKoZIhvcNAQELBQADggEB
ALZ/RP2JFDz4QODzy+ESg5DlgQQ3CTyDn9DwR8Pojzpq+MdJQ3+g48qsCS2FwR8W
h18DCfeemrutGHGBcX6dNbjy43oFwbvdDEaK1/m82Rmr4F/u3AdpxJpXXGEBoO9O
rg2+nXQEGFwZapUnAVGUB3Iihx5FRw1Rbi910aF6TN67Og6pUf/8Jut/M5TzAiDN
scil2PpC2mWvHzGV+gBZT0lOpfo+dRlE+zzEBWt4WpZWj3bF+WbwzR2bsd2JGZsp
OtV4ErupppsGYliKi2cJG9ceqG0zEc/hUtG2SfmZvfKOxZ2p0M6SXJDHueoAOkh1
zu/AQ0cjPBLoOy6ahVHvg20=
-----END CERTIFICATE-----
@@ -0,0 +1,28 @@
-----BEGIN PRIVATE KEY-----
MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQC4mY7gwme7gYnJ
5NZKOp5SSH/2Z9rYawKwQPRQ9cLTJtLVjAOun0Pkk5ZE8NizpbIyJPXeAN7O4xoV
cgIpC6spZvIp91lVk1+DLORQe5V6hhLLjCOhavJfl8OW6FhNeV0RqDhD6mms3yL8
uLrRfQ5NUy1wL/kr73U03pO6YVN4c1sczk3ZPX1YR3WKictGY2sScclG0wz0X/iB
2nf9IllqWWkbj+UBMydM7o1xLyXRVFVznD6krNEnZGiqmG8XoB8FCuQC0Elcx+wm
B0ePjaegoVMPPkRYnW/XUB9qc5vYe4mig+fAFiywfw9rhwZ0j5DmoVmPKIs5TUcb
ErmO/8eHAgMBAAECggEAAnNe5AnCZXYCbBpQhv9XcG6BZgJRksJZd4D7Fm62G3XB
T1pCs9IvwRujj8gsN6kIn1NI2xNOZWNZ7QpITovP6HOSRYbsElL34BXzQPiZT5gc
ePtiR+0VQkt8vxf6lHNRWmDAPREQ3UxDs7zKhEqBCLzslXYkSH0892Ibf6nImF8w
7meMsH4SxPFY16WBxWjyJNdy+TVw0BYFdPiUxE52PaIplgVZJqvmmuMUYcmOVale
lXGeWGMdvFp3Tilbj2rpnJ5p7I5av59TmIzXon/bGguhYhwus+1e8rs3WYWqibHf
bwB03kuGFaiSvuVncX3DvdBnvrz9tlCaipU+aciGUQKBgQDaQ378oDbmX1gk96/7
3ZiU67Vqnone4X88SxiYOafwmT5NVnJYMjtbN775NCUK4aR7lYo2lodl9CW096UN
Xic186jFGey3NoqCLoVodeFe/XscZMSS+TE5FLi4B4Ih/bgpcDzDQ8++5oRYiwWk
Z1/GKOc8MxXhhZDf9wOhgWBfVwKBgQDYhBPbeJJaE3k7pREBF7abDERFbfruC4Xh
181kCIZ3oMKGj4YKtIjoLnCocOAo/uhM9DnY/cBvR+CykWpH0nBfcDE9lknvpxUn
fTitwytfjKWwE3/Z9BRK/ieBaYXwEn38KgYZJJNseZLlYTgDfAKKt4tppAQ3Tdww
9DFo47IrUQKBgQCPSWBEWKmx80XafwB5SLCyk0s2A35fY4oz+tjaln855GCSRP4s
CE4PRDmLQEBRNHDW8QUbcRbSR8W5WBpy/CyhrqRNQQe1/4hOjlvmh/y8b4wyx7SF
CDLYVlIt/j/gMMCF87jwN8RaftrDhgDePT8SyCeFzcO/mf/SCEfJ7zVlYQKBgH5A
be/RG83ogw3Tj9nKQRGiEoFFw0dhcr0hgEOvcPF6zVN3h1rgsOBqjAi8YQmmskCF
POIZ/Ucma5DUmFuvCxWrrxrRcuWK0RwIua8hGj6KHedRR4EJAXhFQTYGGTLHJa2P
t6SbnldngM++Y9IsUrMeme2M1WSGQzpMei9GbpMxAoGBAIgFav2bGZCScvp+Y917
j5rKMLv8AN6nC3BQoraxMKl0YpCS8F58YHAfxKlmgR1Ll16reJLv5qAzSM7jViII
7vmiPGrpRnz1mUHVhVBfNF1UKIRmmJKtARlrbrVibGtLubtzBZOLh0bfzmnYH9Z8
ncBozZmPeJAtzGfvw+7BNoM9
-----END PRIVATE KEY-----
@@ -0,0 +1,16 @@
[req]
distinguished_name = req_distinguished_name
x509_extensions = v3_req
prompt = no
[req_distinguished_name]
CN = *.localtest.me
[v3_req]
subjectAltName = @alt_names
keyUsage = digitalSignature, keyEncipherment
extendedKeyUsage = serverAuth
[alt_names]
DNS.1 = *.localtest.me
DNS.2 = localtest.me
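# Illustrative generation command (assumed; the actual invocation is not recorded here):
#   openssl req -x509 -nodes -newkey rsa:2048 -days 3650 \
#     -keyout localtest.me.key -out localtest.me.crt -config localtest.me.cnf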