Better launchpad

2025-12-13 16:16:15 +08:00
parent 0e0ff24195
commit 0d2f78b4e7
14 changed files with 989 additions and 283 deletions

36
.env.example Normal file

@@ -0,0 +1,36 @@
# This is an example .env file for the Turbine project.
# Copy this to .env and fill in the values.
# --- Third-Party Image Names ---
CACHE_IMAGE=valkey/valkey:latest
QUEUE_IMAGE=nats:latest
# --- Service Ports ---
# Ports for .NET services
RING_PORT=5002
PASS_PORT=5003
DRIVE_PORT=5004
SPHERE_PORT=5005
DEVELOP_PORT=5006
INSIGHT_PORT=5007
ZONE_PORT=5008
# Alternate ports for gRPC/HTTPS, to avoid collisions in local development
RING_ALTPORT=5012
PASS_ALTPORT=5013
DRIVE_ALTPORT=5014
SPHERE_ALTPORT=5015
DEVELOP_ALTPORT=5016
INSIGHT_ALTPORT=5017
ZONE_ALTPORT=5018
# --- .NET Service Image Names ---
# These should point to the images you have built or pulled.
GATEWAY_IMAGE=turbine/gateway:latest
RING_IMAGE=turbine/ring:latest
PASS_IMAGE=turbine/pass:latest
DRIVE_IMAGE=turbine/drive:latest
SPHERE_IMAGE=turbine/sphere:latest
DEVELOP_IMAGE=turbine/develop:latest
INSIGHT_IMAGE=turbine/insight:latest
ZONE_IMAGE=turbine/zone:latest

1
.gitignore vendored Normal file

@@ -0,0 +1 @@
.env

README.md

@@ -6,34 +6,54 @@ A modular service framework.
The `launchpad` is a CLI tool located in `pkg/launchpad` designed to simplify development and production workflows for the entire Turbine project. It acts as a process manager that can run all defined services concurrently for development and generate a `docker-compose.yml` file for production deployments.
### Configuration (`launchpad.toml`)
The launchpad is configured via a `launchpad.toml` file in the project root. This file defines all the services, their configurations, and the networks they use.
**`launchpad.toml` example:**
```toml
# Defines variables required by the configuration.
# These should be supplied in a .env file.
[variables]
required = ["CACHE_PASSWORD", "QUEUE_PASSWORD", "RING_IMAGE", "RING_PORT"]

# Defines docker networks.
[networks]
aspire = {}

# Service definitions
[[services]]
name = "cache"
type = "docker" # For third-party docker images

[services.prod]
image = "docker.io/library/redis:7.4"
command = ["/bin/sh", "-c", "redis-server --requirepass $$REDIS_PASSWORD"]
environment = ["REDIS_PASSWORD=${CACHE_PASSWORD}"]
expose = ["6379"]
networks = ["aspire"]

[[services]]
name = "ring"
type = "dotnet"
path = "../turbine-dotnet-services/ring"

[services.dev]
command = "dotnet watch run"

[services.prod]
image = "${RING_IMAGE}"
environment = [
    "HTTP_PORTS=${RING_PORT}",
    "ConnectionStrings__cache=cache:6379,password=${CACHE_PASSWORD}",
]
volumes = ["./keys:/app/keys", "./settings/ring.json:/app/appsettings.json"]
expose = ["${RING_PORT}", "5001"]
networks = ["aspire"]
depends_on = ["cache", "queue"]
```
### Environment Variables (`.env`)
For the `deploy` command to work, you must create a `.env` file in the project root containing the variables defined in `launchpad.toml`. An example is provided in `.env.example`.
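For example, a quick way to set this up is to start from the template (a minimal sketch; the variable names come from `.env.example` and the `[variables]` section of `launchpad.toml`):
```bash
# Copy the template, then fill in real values (passwords, image names, ports).
cp .env.example .env
"${EDITOR:-vi}" .env
```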
### Commands
To use the launchpad, run its `main.go` file with one of the following commands:
@@ -45,13 +65,14 @@ Starts all services defined in `launchpad.toml` in development mode. Each servic
```bash
go run ./pkg/launchpad/main.go dev
```
#### Production Deployment (`deploy`)
Generates a `docker-compose.yml` file in the project root based on the `prod` configuration of all services in `launchpad.toml`. It substitutes variables from your `.env` file. This file can be used to build and run all services as Docker containers.
```bash
go run ./pkg/launchpad/main.go deploy
```
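Once generated, the file can be brought up with Docker Compose in the usual way (a sketch; it assumes Docker Compose v2 and that the images referenced by the `*_IMAGE` variables are available locally or pullable):
```bash
docker compose up -d          # start every service described in the generated file
docker compose logs -f ring   # follow a single service; names come from launchpad.toml
docker compose down           # stop and remove the containers again
```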
## Registrar
The Registrar is the service discovery system of the DysonNetwork.

194
docker-compose.yml Normal file

@@ -0,0 +1,194 @@
version: "3.8"
services:
cache:
image: docker.io/library/redis:7.4
command:
- /bin/sh
- -c
- redis-server --requirepass $$REDIS_PASSWORD
expose:
- "6379"
environment:
REDIS_PASSWORD: your_strong_redis_password
networks:
- solar-network
develop:
image: turbine/develop:latest
expose:
- ${DEVELOP_PORT}
- ${DEVELOP_ALTPORT}
environment:
ASPNETCORE_FORWARDEDHEADERS_ENABLED: "true"
HTTP_PORTS: "5006"
HTTPS_PORTS: "5001"
OTEL_SERVICE_NAME: develop
volumes:
- ./settings/develop.json:/app/appsettings.json
depends_on:
- cache
- queue
- pass
- ring
- sphere
- drive
networks:
- solar-network
drive:
image: turbine/drive:latest
expose:
- ${DRIVE_PORT}
- ${DRIVE_ALTPORT}
environment:
ASPNETCORE_FORWARDEDHEADERS_ENABLED: "true"
HTTP_PORTS: "5004"
HTTPS_PORTS: "5001"
OTEL_SERVICE_NAME: drive
volumes:
- ./settings/drive.json:/app/appsettings.json
depends_on:
- cache
- queue
- pass
- ring
networks:
- solar-network
gateway:
image: turbine/gateway:latest
ports:
- 5001:8080
expose:
- "8080"
environment:
ASPNETCORE_ENVIRONMENT: Production
OTEL_SERVICE_NAME: gateway
volumes:
- ./keys:/app/keys
- ./settings/gateway.json:/app/appsettings.json
depends_on:
- ring
- pass
- drive
- sphere
- develop
- insight
- zone
networks:
- solar-network
insight:
image: turbine/insight:latest
expose:
- ${INSIGHT_PORT}
- ${INSIGHT_ALTPORT}
environment:
ASPNETCORE_FORWARDEDHEADERS_ENABLED: "true"
HTTP_PORTS: "5007"
HTTPS_PORTS: "5001"
OTEL_SERVICE_NAME: insight
volumes:
- ./settings/insight.json:/app/appsettings.json
depends_on:
- cache
- queue
- pass
- ring
- sphere
- drive
networks:
- solar-network
pass:
image: turbine/pass:latest
expose:
- ${PASS_PORT}
- ${PASS_ALTPORT}
environment:
ASPNETCORE_FORWARDEDHEADERS_ENABLED: "true"
HTTP_PORTS: "5003"
HTTPS_PORTS: "5001"
OTEL_SERVICE_NAME: pass
volumes:
- ./keys:/app/keys
- ./settings/pass.json:/app/appsettings.json
depends_on:
- cache
- queue
- ring
- develop
- drive
networks:
- solar-network
queue:
image: docker.io/library/nats:2.11
command:
- --user
- nats
- --pass
- ${QUEUE_PASSWORD}
- -js
expose:
- "4222"
networks:
- solar-network
ring:
image: turbine/ring:latest
expose:
- ${RING_PORT}
- ${RING_ALTPORT}
environment:
ASPNETCORE_FORWARDEDHEADERS_ENABLED: "true"
HTTP_PORTS: "5002"
HTTPS_PORTS: "5001"
OTEL_SERVICE_NAME: ring
volumes:
- ./keys:/app/keys
- ./settings/ring.json:/app/appsettings.json
depends_on:
- cache
- queue
- pass
networks:
- solar-network
sphere:
image: turbine/sphere:latest
expose:
- ${SPHERE_PORT}
- ${SPHERE_ALTPORT}
environment:
ASPNETCORE_FORWARDEDHEADERS_ENABLED: "true"
HTTP_PORTS: "5005"
HTTPS_PORTS: "5001"
OTEL_SERVICE_NAME: sphere
volumes:
- ./keys:/app/keys
- ./settings/sphere.json:/app/appsettings.json
depends_on:
- cache
- queue
- pass
- ring
- drive
networks:
- solar-network
zone:
image: turbine/zone:latest
ports:
- 8192:${ZONE_PORT}
expose:
- ${ZONE_PORT}
- ${ZONE_ALTPORT}
environment:
ASPNETCORE_FORWARDEDHEADERS_ENABLED: "true"
HTTP_PORTS: "5008"
HTTPS_PORTS: "5001"
OTEL_SERVICE_NAME: zone
volumes:
- ./sites:/app/sites
- ./settings/zone.json:/app/appsettings.json
depends_on:
- cache
- queue
- pass
- ring
- sphere
- drive
networks:
- solar-network

2
go.mod

@@ -4,9 +4,11 @@ go 1.25.5
require (
    github.com/gofiber/fiber/v3 v3.0.0-rc.3
    github.com/joho/godotenv v1.5.1
    github.com/rs/zerolog v1.34.0
    github.com/spf13/viper v1.21.0
    go.etcd.io/etcd/client/v3 v3.6.6
    gopkg.in/yaml.v3 v3.0.1
)

require (

2
go.sum

@@ -35,6 +35,8 @@ github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 h1:5ZPtiqj0JL5oKWmcsq4VMaAW5ukBEgSGXEN89zeH1Jo=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3/go.mod h1:ndYquD05frm2vACXE1nsccT4oJzjhw2arTS2cpUD1PI=
github.com/joho/godotenv v1.5.1 h1:7eLL/+HRGLY0ldzfGMeQkb7vMd0as4CfYvUVzLqw0N0=
github.com/joho/godotenv v1.5.1/go.mod h1:f4LDr5Voq0i2e/R5DDNOoa2zzDfwtkZa6DnEwAbqwq4=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.18.1 h1:bcSGx7UbpBqMChDtsF28Lw6v/G94LPrrbMbdC3JH2co=

launchpad.toml

@@ -1,61 +1,280 @@
# This file configures the Launchpad tool for the Turbine project.
# It defines all services, their development commands, and their production deployment configuration.

[variables]
required = [
"GATEWAY_IMAGE",
"CACHE_IMAGE",
"QUEUE_IMAGE",
"RING_IMAGE",
"PASS_IMAGE",
"DRIVE_IMAGE",
"SPHERE_IMAGE",
"DEVELOP_IMAGE",
"INSIGHT_IMAGE",
"ZONE_IMAGE",
"RING_PORT",
"PASS_PORT",
"DRIVE_PORT",
"SPHERE_PORT",
"DEVELOP_PORT",
"INSIGHT_PORT",
"ZONE_PORT",
"RING_ALTPORT",
"PASS_ALTPORT",
"DRIVE_ALTPORT",
"SPHERE_ALTPORT",
"DEVELOP_ALTPORT",
"INSIGHT_ALTPORT",
"ZONE_ALTPORT",
]
[networks]
solar-network = {}
# -------------------------------------------------
# Service Definitions
# -------------------------------------------------
# --- Go Services ---
[[services]]
name = "gateway"
type = "go"
path = "./pkg/gateway"

[services.dev]
command = "go run ./main.go"

[services.dev.healthcheck]
tcp_ports = [8080]

[services.prod]
dockerfile = "./pkg/gateway/Dockerfile"
image = "turbine/gateway:latest"
ports = ["8080:8080"]
networks = ["solar-network"]

[services.prod.depends_on]
etcd = { condition = "service_healthy" }
config = { condition = "service_started" }
[[services]]
name = "config"
type = "go"
path = "./pkg/config"

[services.dev]
command = "go run ./main.go"

[services.dev.healthcheck]
tcp_ports = [8081]

[services.prod]
dockerfile = "./pkg/config/Dockerfile"
image = "turbine/config:latest"
networks = ["solar-network"]
[services.prod.depends_on]
etcd = { condition = "service_healthy" }
# --- Third-Party Services ---
[[services]]
name = "cache"
type = "docker"
[services.dev.healthcheck]
tcp_ports = [6379]
[services.prod]
image = "valkey/valkey" # Valkey
expose = ["6379"]
networks = ["solar-network"]
[services.prod.healthcheck]
test = ["CMD", "valkey-cli", "ping"]
interval = "10s"
timeout = "5s"
retries = 5
[[services]]
name = "queue"
type = "docker"

[services.dev.healthcheck]
tcp_ports = [4222]

[services.prod]
image = "nats" # NATS
command = ["-js"]
expose = ["4222"]
networks = ["solar-network"]

[services.prod.healthcheck]
test = ["CMD", "nats", "healthz"]
interval = "10s"
timeout = "5s"
retries = 5
[[services]]
name = "etcd"
type = "docker"
[services.dev.healthcheck]
tcp_ports = [2379]
[services.prod]
image = "bitnami/etcd"
environment = [
"ALLOW_NONE_AUTHENTICATION=yes",
"ETCD_ADVERTISE_CLIENT_URLS=http://etcd:2379",
]
ports = ["2379:2379"]
networks = ["solar-network"]
[services.prod.healthcheck]
test = ["CMD", "etcdctl", "endpoint", "health"]
interval = "10s"
timeout = "5s"
retries = 5
# --- .NET Services (omitting the rest for brevity in this example) ---
[[services]]
name = "ring"
type = "dotnet"
path = "../DysonNetwork/DysonNetwork.Ring"
[services.dev]
command = "dotnet watch run"
[services.prod]
image = "${RING_IMAGE}"
environment = [
"ASPNETCORE_FORWARDEDHEADERS_ENABLED=true",
"HTTP_PORTS=${RING_PORT}",
"HTTPS_PORTS=${RING_ALTPORT}",
"OTEL_SERVICE_NAME=ring",
]
volumes = ["./keys:/app/keys", "./settings/ring.json:/app/appsettings.json"]
expose = ["${RING_PORT}", "${RING_ALTPORT}"]
networks = ["solar-network"]
[services.prod.depends_on]
cache = { condition = "service_healthy" }
queue = { condition = "service_healthy" }
[[services]]
name = "pass"
type = "dotnet"
path = "../DysonNetwork/DysonNetwork.Pass"
[services.dev]
command = "dotnet watch run"
[services.prod]
image = "${PASS_IMAGE}"
environment = [
"ASPNETCORE_FORWARDEDHEADERS_ENABLED=true",
"HTTP_PORTS=${PASS_PORT}",
"HTTPS_PORTS=${PASS_ALTPORT}",
"OTEL_SERVICE_NAME=pass",
]
volumes = ["./keys:/app/keys", "./settings/pass.json:/app/appsettings.json"]
expose = ["${PASS_PORT}", "${PASS_ALTPORT}"]
networks = ["solar-network"]
[services.prod.depends_on]
cache = { condition = "service_healthy" }
queue = { condition = "service_healthy" }
[[services]]
name = "sphere"
type = "dotnet"
path = "../DysonNetwork/DysonNetwork.Sphere"
[services.dev]
command = "dotnet watch run"
[services.prod]
image = "${SPHERE_IMAGE}"
environment = [
"ASPNETCORE_FORWARDEDHEADERS_ENABLED=true",
"HTTP_PORTS=${SPHERE_PORT}",
"HTTPS_PORTS=${SPHERE_ALTPORT}",
"OTEL_SERVICE_NAME=sphere",
]
volumes = ["./keys:/app/keys", "./settings/sphere.json:/app/appsettings.json"]
expose = ["${SPHERE_PORT}", "${SPHERE_ALTPORT}"]
networks = ["solar-network"]
[services.prod.depends_on]
cache = { condition = "service_healthy" }
queue = { condition = "service_healthy" }
[[services]]
name = "drive"
type = "dotnet"
path = "../DysonNetwork/DysonNetwork.Drive"
[services.dev]
command = "dotnet watch run"
[services.prod]
image = "${DRIVE_IMAGE}"
environment = [
"ASPNETCORE_FORWARDEDHEADERS_ENABLED=true",
"HTTP_PORTS=${DRIVE_PORT}",
"HTTPS_PORTS=${DRIVE_ALTPORT}",
"OTEL_SERVICE_NAME=drive",
]
volumes = ["./keys:/app/keys", "./settings/drive.json:/app/appsettings.json"]
expose = ["${DRIVE_PORT}", "${DRIVE_ALTPORT}"]
networks = ["solar-network"]
[services.prod.depends_on]
cache = { condition = "service_healthy" }
queue = { condition = "service_healthy" }
[[services]]
name = "develop"
type = "dotnet"
path = "../DysonNetwork/DysonNetwork.Develop"
[services.dev]
command = "dotnet watch run"
[services.prod]
image = "${DEVELOP_IMAGE}"
environment = [
"ASPNETCORE_FORWARDEDHEADERS_ENABLED=true",
"HTTP_PORTS=${DEVELOP_PORT}",
"HTTPS_PORTS=${DEVELOP_ALTPORT}",
"OTEL_SERVICE_NAME=develop",
]
volumes = ["./keys:/app/keys", "./settings/develop.json:/app/appsettings.json"]
expose = ["${DEVELOP_PORT}", "${DEVELOP_ALTPORT}"]
networks = ["solar-network"]
[services.prod.depends_on]
cache = { condition = "service_healthy" }
queue = { condition = "service_healthy" }
[[services]]
name = "insight"
type = "dotnet"
path = "../DysonNetwork/DysonNetwork.Insight"
[services.dev]
command = "dotnet watch run"
[services.prod]
image = "${INSIGHT_IMAGE}"
environment = [
"ASPNETCORE_FORWARDEDHEADERS_ENABLED=true",
"HTTP_PORTS=${INSIGHT_PORT}",
"HTTPS_PORTS=${INSIGHT_ALTPORT}",
"OTEL_SERVICE_NAME=insight",
]
volumes = ["./keys:/app/keys", "./settings/insight.json:/app/appsettings.json"]
expose = ["${INSIGHT_PORT}", "${INSIGHT_ALTPORT}"]
networks = ["solar-network"]
[services.prod.depends_on]
cache = { condition = "service_healthy" }
queue = { condition = "service_healthy" }
[[services]]
name = "zone"
type = "dotnet"
path = "../DysonNetwork/DysonNetwork.Zone"
[services.dev]
command = "dotnet watch run"
[services.prod]
image = "${ZONE_IMAGE}"
environment = [
"ASPNETCORE_FORWARDEDHEADERS_ENABLED=true",
"HTTP_PORTS=${ZONE_PORT}",
"HTTPS_PORTS=${ZONE_ALTPORT}",
"OTEL_SERVICE_NAME=zone",
]
volumes = ["./keys:/app/keys", "./settings/zone.json:/app/appsettings.json"]
expose = ["${ZONE_PORT}", "${ZONE_ALTPORT}"]
networks = ["solar-network"]
[services.prod.depends_on]
cache = { condition = "service_healthy" }
queue = { condition = "service_healthy" }


@@ -8,5 +8,5 @@ host = "127.0.0.1"
# ETCD configuration for service registration
[etcd]
endpoints = ["127.0.0.1:2379"]
insecure = true


@@ -1,5 +1 @@
[connection_strings]


@@ -1,11 +1,10 @@
listen = ":2999"

[etcd]
endpoints = ["127.0.0.1:2379"]
insecure = true

# Route overrides. The key is the incoming path prefix.
# The value is the destination in the format "/<service_name>/<path_prefix>"
[routes]
"/websocket" = "/chatter/ws"

pkg/launchpad/config/config.go Normal file

@@ -0,0 +1,76 @@
package config
import (
"github.com/rs/zerolog/log"
"github.com/spf13/viper"
)
// LaunchpadConfig is the top-level configuration structure for the launchpad.
type LaunchpadConfig struct {
Variables struct {
Required []string
}
Networks map[string]interface{}
Services []Service
}
// Service defines a single service that can be managed by the launchpad.
type Service struct {
Name string
Type string
Path string
Dev struct {
Command string
Image string
ExposePorts []int `mapstructure:"expose_ports"` // Ports to check for health in dev mode
Healthcheck Healthcheck
}
Prod struct {
Image string
Command interface{}
Dockerfile string
BuildContext string `mapstructure:"build_context"`
Ports []string
Expose []string
Environment []string
Volumes []string
DependsOn map[string]Dependency `mapstructure:"depends_on"`
Networks []string
Healthcheck Healthcheck
}
}
// Healthcheck defines a health check for a service.
type Healthcheck struct {
Test []string `yaml:"test,omitempty"`
Interval string `yaml:"interval,omitempty"`
Timeout string `yaml:"timeout,omitempty"`
Retries int `yaml:"retries,omitempty"`
// For dev mode TCP checks
TcpPorts []int `mapstructure:"tcp_ports"`
Path string
}
// Dependency defines a dependency condition for docker-compose.
type Dependency struct {
Condition string `yaml:"condition"`
}
// Load reads and parses the launchpad.toml file from the project root.
func Load() LaunchpadConfig {
v := viper.New()
v.SetConfigName("launchpad")
v.AddConfigPath(".")
v.SetConfigType("toml")
if err := v.ReadInConfig(); err != nil {
log.Fatal().Err(err).Msg("Failed to read launchpad.toml")
}
var config LaunchpadConfig
if err := v.Unmarshal(&config); err != nil {
log.Fatal().Err(err).Msg("Failed to parse launchpad.toml")
}
return config
}

pkg/launchpad/deploy/deploy.go Normal file

@@ -0,0 +1,102 @@
package deploy
import (
"os"
"strings"
"git.solsynth.dev/goatworks/turbine/pkg/launchpad/config"
"github.com/rs/zerolog/log"
"gopkg.in/yaml.v3"
)
// --- Docker Compose Structures for YAML Marshalling ---
type ComposeFile struct {
Version string `yaml:"version"`
Services map[string]ComposeService `yaml:"services"`
Networks map[string]interface{} `yaml:"networks,omitempty"`
}
type ComposeService struct {
Image string `yaml:"image,omitempty"`
Build *ComposeBuild `yaml:"build,omitempty"`
Command interface{} `yaml:"command,omitempty"`
Ports []string `yaml:"ports,omitempty"`
Expose []string `yaml:"expose,omitempty"`
Environment map[string]string `yaml:"environment,omitempty"`
Volumes []string `yaml:"volumes,omitempty"`
DependsOn map[string]config.Dependency `yaml:"depends_on,omitempty"`
Networks []string `yaml:"networks,omitempty"`
Healthcheck *config.Healthcheck `yaml:"healthcheck,omitempty"`
}
type ComposeBuild struct {
Context string `yaml:"context,omitempty"`
Dockerfile string `yaml:"dockerfile,omitempty"`
}
// GenerateDockerCompose creates a docker-compose.yml file from the launchpad config.
func GenerateDockerCompose(cfg config.LaunchpadConfig) {
compose := ComposeFile{
Version: "3.8",
Services: make(map[string]ComposeService),
Networks: cfg.Networks,
}
for _, s := range cfg.Services {
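// subst expands ${VAR} / $VAR references using the current process
// environment (populated from .env by main via godotenv before this runs).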
subst := func(val string) string {
return os.ExpandEnv(val)
}
composeService := ComposeService{
Image: subst(s.Prod.Image),
Command: s.Prod.Command,
Ports: s.Prod.Ports,
Expose: s.Prod.Expose,
Volumes: s.Prod.Volumes,
DependsOn: s.Prod.DependsOn,
Networks: s.Prod.Networks,
}
// Add healthcheck if defined
if len(s.Prod.Healthcheck.Test) > 0 {
composeService.Healthcheck = &s.Prod.Healthcheck
}
if s.Prod.Dockerfile != "" {
context := "."
if s.Prod.BuildContext != "" {
context = s.Prod.BuildContext
}
composeService.Build = &ComposeBuild{
Context: context,
Dockerfile: s.Prod.Dockerfile,
}
}
if len(s.Prod.Environment) > 0 {
envMap := make(map[string]string)
for _, env := range s.Prod.Environment {
parts := strings.SplitN(subst(env), "=", 2)
if len(parts) == 2 {
envMap[parts[0]] = parts[1]
}
}
composeService.Environment = envMap
}
compose.Services[s.Name] = composeService
}
yamlData, err := yaml.Marshal(&compose)
if err != nil {
log.Fatal().Err(err).Msg("Failed to generate YAML for docker-compose")
}
outFile := "docker-compose.yml"
if err := os.WriteFile(outFile, yamlData, 0o644); err != nil {
log.Fatal().Err(err).Msgf("Failed to write %s", outFile)
}
log.Info().Msgf("Successfully generated %s", outFile)
}

263
pkg/launchpad/dev/dev.go Normal file

@@ -0,0 +1,263 @@
package dev
import (
"bufio"
"context"
"fmt"
"io"
"net"
"os"
"os/exec"
"os/signal"
"strings"
"sync"
"syscall"
"time"
"git.solsynth.dev/goatworks/turbine/pkg/launchpad/config"
"github.com/rs/zerolog/log"
)
var colors = []string{
"\033[32m", "\033[33m", "\033[34m", "\033[35m", "\033[36m", "\033[31m",
}
const colorReset = "\033[0m"
// RunDev starts all services defined in the config in development mode.
func RunDev(cfg config.LaunchpadConfig) {
log.Info().Msg("Starting services in development mode with dependency checks...")
ctx, cancel := context.WithCancel(context.Background())
var wg sync.WaitGroup
sigChan := make(chan os.Signal, 1)
signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM)
go func() {
sig := <-sigChan
log.Info().Msgf("Received signal: %v. Shutting down all services...", sig)
cancel()
}()
devNetwork := "turbine-dev-net"
if len(cfg.Networks) > 0 {
for name := range cfg.Networks {
devNetwork = name
break
}
}
createDockerNetwork(devNetwork)
// --- Dependency-aware startup ---
serviceMap := make(map[string]config.Service)
for _, s := range cfg.Services {
serviceMap[s.Name] = s
}
started := make(map[string]chan bool)
for _, s := range cfg.Services {
if _, exists := started[s.Name]; !exists {
startServiceWithDeps(ctx, &wg, s, serviceMap, started, devNetwork)
}
}
wg.Wait()
log.Info().Msg("All services have been shut down.")
}
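// startServiceWithDeps recursively launches the dependencies listed in the
// service's prod depends_on table, waits for each dependency's health channel
// to close, then starts the service itself and closes its own channel once its
// dev healthcheck ports respond.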
func startServiceWithDeps(ctx context.Context, wg *sync.WaitGroup, s config.Service, serviceMap map[string]config.Service, started map[string]chan bool, network string) {
if _, exists := started[s.Name]; exists {
return
}
// Create a channel that will be closed when this service is healthy
healthyChan := make(chan bool)
started[s.Name] = healthyChan
// First, recursively start dependencies
var depNames []string
for depName := range s.Prod.DependsOn {
depNames = append(depNames, depName)
}
for _, depName := range depNames {
if dep, ok := serviceMap[depName]; ok {
startServiceWithDeps(ctx, wg, dep, serviceMap, started, network)
}
}
// Wait for dependencies to be healthy
log.Info().Str("service", s.Name).Msgf("Waiting for dependencies to be healthy: %v", depNames)
for _, depName := range depNames {
if depChan, ok := started[depName]; ok {
log.Info().Str("service", s.Name).Msgf("Waiting for %s...", depName)
select {
case <-depChan:
log.Info().Str("service", s.Name).Msgf("Dependency %s is healthy.", depName)
case <-ctx.Done():
log.Warn().Str("service", s.Name).Msg("Shutdown signal received, aborting startup.")
return
}
}
}
// Now, start the actual service
wg.Add(1)
go func(s config.Service, color string) {
defer wg.Done()
var healthCheckPorts []int
if s.Type == "docker" {
// For docker, we use the dev healthcheck ports to also map them
healthCheckPorts = s.Dev.Healthcheck.TcpPorts
startDockerService(ctx, s, color, network, healthCheckPorts)
} else if s.Dev.Command != "" {
healthCheckPorts = s.Dev.Healthcheck.TcpPorts
startSourceService(ctx, s, color)
} else {
log.Warn().Str("service", s.Name).Msg("No dev.command or docker type, skipping.")
close(healthyChan) // Mark as "healthy" so other things can proceed
return
}
// Perform health check on the service we just started
waitForHealth(ctx, s.Name, healthCheckPorts)
close(healthyChan) // Signal that this service is now healthy
// Block until context is cancelled to keep the service running
// and ensure wg.Done is called at the right time.
<-ctx.Done()
}(s, colors[len(started)%len(colors)])
}
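// waitForHealth blocks until every listed TCP port on 127.0.0.1 accepts a
// connection (polling every two seconds) or the context is cancelled.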
func waitForHealth(ctx context.Context, serviceName string, ports []int) {
if len(ports) == 0 {
log.Info().Str("service", serviceName).Msg("No healthcheck ports defined, assuming healthy immediately.")
return
}
for _, port := range ports {
    address := fmt.Sprintf("127.0.0.1:%d", port)
    log.Info().Str("service", serviceName).Msgf("Waiting for %s to be available...", address)
waitLoop:
    for {
        select {
        case <-ctx.Done():
            log.Warn().Str("service", serviceName).Msg("Shutdown signal received, aborting health check.")
            return
        default:
            conn, err := net.DialTimeout("tcp", address, 1*time.Second)
            if err == nil {
                conn.Close()
                log.Info().Str("service", serviceName).Msgf("%s is healthy!", address)
                // This port is reachable; stop polling and move on to the next one.
                break waitLoop
            }
            time.Sleep(2 * time.Second)
        }
    }
}
}
// startSourceService runs a service from its source code.
func startSourceService(ctx context.Context, s config.Service, color string) {
prefix := fmt.Sprintf("%s[%-10s]%s ", color, s.Name, colorReset)
log.Info().Str("service", s.Name).Str("command", s.Dev.Command).Msg("Starting from source")
parts := strings.Fields(s.Dev.Command)
cmd := exec.CommandContext(ctx, parts[0], parts[1:]...)
cmd.Dir = s.Path
runAndMonitorCommand(ctx, cmd, s.Name, prefix)
}
// startDockerService runs a pre-built Docker image.
func startDockerService(ctx context.Context, s config.Service, color string, network string, portsToMap []int) {
prefix := fmt.Sprintf("%s[%-10s]%s ", color, s.Name, colorReset)
log.Info().Str("service", s.Name).Str("image", s.Prod.Image).Msg("Starting from Docker image")
containerName := fmt.Sprintf("%s-dev", s.Name)
exec.Command("docker", "rm", "-f", containerName).Run()
args := []string{"run", "--rm", "-i", "--name", containerName}
if network != "" {
args = append(args, "--network", network)
}
for _, p := range portsToMap {
args = append(args, "-p", fmt.Sprintf("%d:%d", p, p))
}
for _, e := range s.Prod.Environment {
args = append(args, "-e", os.ExpandEnv(e))
}
args = append(args, os.ExpandEnv(s.Prod.Image))
if s.Prod.Command != nil {
switch v := s.Prod.Command.(type) {
case string:
args = append(args, strings.Fields(v)...)
case []interface{}:
for _, item := range v {
args = append(args, fmt.Sprintf("%v", item))
}
}
}
go func() {
<-ctx.Done()
log.Info().Str("service", s.Name).Msg("Stopping docker container...")
stopCmd := exec.Command("docker", "stop", containerName)
if err := stopCmd.Run(); err != nil {
log.Warn().Err(err).Str("service", s.Name).Msg("Failed to stop container.")
}
}()
cmd := exec.Command("docker", args...)
runAndMonitorCommand(ctx, cmd, s.Name, prefix)
}
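// runAndMonitorCommand starts the command, streams its stdout and stderr with
// the service's colored prefix, and logs how the process eventually exits.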
func runAndMonitorCommand(ctx context.Context, cmd *exec.Cmd, serviceName, prefix string) {
stdout, _ := cmd.StdoutPipe()
stderr, _ := cmd.StderrPipe()
if err := cmd.Start(); err != nil {
log.Error().Err(err).Str("service", serviceName).Msg("Failed to start")
return
}
go streamOutput(stdout, prefix)
go streamOutput(stderr, prefix)
go func() {
err := cmd.Wait()
if ctx.Err() != nil {
log.Info().Str("service", serviceName).Msg("Process stopped.")
} else if err != nil {
log.Error().Err(err).Str("service", serviceName).Msg("Exited with error")
} else {
log.Info().Str("service", serviceName).Msg("Exited")
}
}()
}
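// streamOutput writes each line read from the pipe to stdout, prepending the
// given prefix.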
func streamOutput(pipe io.ReadCloser, prefix string) {
scanner := bufio.NewScanner(pipe)
for scanner.Scan() {
fmt.Printf("%s%s\n", prefix, scanner.Text())
}
}
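// createDockerNetwork creates the named docker network if it does not already exist.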
func createDockerNetwork(networkName string) {
log.Info().Str("network", networkName).Msg("Ensuring docker network exists")
cmd := exec.Command("docker", "network", "inspect", networkName)
if err := cmd.Run(); err == nil {
log.Info().Str("network", networkName).Msg("Network already exists.")
return
}
cmd = exec.Command("docker", "network", "create", networkName)
if err := cmd.Run(); err != nil {
log.Warn().Err(err).Msg("Could not create docker network.")
}
}

pkg/launchpad/main.go

@@ -1,250 +1,45 @@
package main

import (
    "os"

    "git.solsynth.dev/goatworks/turbine/pkg/launchpad/config"
    "git.solsynth.dev/goatworks/turbine/pkg/launchpad/deploy"
    "git.solsynth.dev/goatworks/turbine/pkg/launchpad/dev"
    "github.com/joho/godotenv"
    "github.com/rs/zerolog"
    "github.com/rs/zerolog/log"
)

func init() {
    log.Logger = log.Output(zerolog.ConsoleWriter{Out: os.Stderr})
}

func main() {
    // Load .env file if it exists
    if err := godotenv.Load(); err != nil {
        log.Info().Msg("No .env file found, relying on environment variables.")
    }

    // Check for command-line arguments
    if len(os.Args) < 2 {
        log.Fatal().Msg("Usage: launchpad <command>\nCommands: dev, deploy")
    }

    // Load the main launchpad configuration
    cfg := config.Load()
    command := os.Args[1]

    // Dispatch to the correct handler
    switch command {
    case "dev":
        dev.RunDev(cfg)
    case "deploy":
        log.Info().Msg("Generating docker-compose.yml for production deployment...")
        deploy.GenerateDockerCompose(cfg)
    default:
        log.Fatal().Msgf("Unknown command: %s", command)
    }
}