Compare commits
54 Commits
refactor/r
...
master
Author | SHA1 | Date | |
---|---|---|---|
5ccbc592b7 | |||
b006d29f53 | |||
a11af366ef | |||
2240ac30c6 | |||
34f20e02ae | |||
11a9a4a929 | |||
2481d2b616 | |||
35612d2f18 | |||
99e4a64b5a | |||
498eb05514 | |||
8a5cc34bb4 | |||
006a22cd7b | |||
2cfc9cd7fa | |||
2125239d42 | |||
70e5e5eddf | |||
f22affc05c | |||
dd36e2ab1a | |||
ae12eb2a15 | |||
f66f144f2e | |||
632d37caf5 | |||
28d3a3fa06 | |||
232ded4cd4 | |||
e158cb8819 | |||
90e7b03975 | |||
9a2192aa43 | |||
283a646eb4 | |||
17322108ff | |||
64272e786b | |||
6607e1dc5e | |||
d2710d1718 | |||
7b544f370d | |||
09c4800143 | |||
ef10fab47d | |||
11cf35cf0d | |||
28fd5aca8b | |||
e55077c7e9 | |||
910a9849f7 | |||
2860542e44 | |||
302491741f | |||
72d23c35f5 | |||
1cebff59e1 | |||
74d8b3848f | |||
7a3c489797 | |||
440851a305 | |||
61d30315ec | |||
450250c419 | |||
97df54a315 | |||
e8c39f38cc | |||
1ab4c5984e | |||
da90d12a3a | |||
bfce13fc74 | |||
4bdddf72e9 | |||
b906edc022 | |||
7ad17d9417 |
@ -1,11 +0,0 @@
|
||||
{
|
||||
"configurations": [
|
||||
{
|
||||
"type": "go",
|
||||
"name": "Run RoadSign",
|
||||
"goExecPath": "C:\\Tools\\Scoop\\shims\\go.exe",
|
||||
"buildParams": ["code.smartsheep.studio/goatworks/roadsign/pkg/cmd"],
|
||||
},
|
||||
|
||||
]
|
||||
}
|
4
.github/workflows/nightly.yml
vendored
4
.github/workflows/nightly.yml
vendored
@ -2,7 +2,7 @@ name: release-nightly
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [ refactor/rust ]
|
||||
branches: [master]
|
||||
|
||||
jobs:
|
||||
build-image:
|
||||
@ -25,4 +25,4 @@ jobs:
|
||||
context: .
|
||||
push: true
|
||||
file: ./Dockerfile
|
||||
tags: xsheep2010/roadsign:sigma
|
||||
tags: xsheep2010/roadsign:delta
|
||||
|
9
.gitignore
vendored
9
.gitignore
vendored
@ -1,8 +1,5 @@
|
||||
/config
|
||||
/certs
|
||||
/test/data
|
||||
/letsencrypt
|
||||
/certs
|
||||
/dist
|
||||
|
||||
# Added by cargo
|
||||
|
||||
/target
|
||||
.DS_Store
|
@ -7,12 +7,10 @@
|
||||
</component>
|
||||
<component name="Go" enabled="true" />
|
||||
<component name="NewModuleRootManager">
|
||||
<content url="file://$MODULE_DIR$">
|
||||
<sourceFolder url="file://$MODULE_DIR$/src" isTestSource="false" />
|
||||
<excludeFolder url="file://$MODULE_DIR$/target" />
|
||||
</content>
|
||||
<content url="file://$MODULE_DIR$" />
|
||||
<orderEntry type="inheritedJdk" />
|
||||
<orderEntry type="sourceFolder" forTests="false" />
|
||||
<orderEntry type="library" name="Python 3.9 interpreter library" level="application" />
|
||||
<orderEntry type="library" name="daisyui" level="application" />
|
||||
</component>
|
||||
</module>
|
7
.idea/inspectionProfiles/Project_Default.xml
Normal file
7
.idea/inspectionProfiles/Project_Default.xml
Normal file
@ -0,0 +1,7 @@
|
||||
<component name="InspectionProjectProfileManager">
|
||||
<profile version="1.0">
|
||||
<option name="myName" value="Project Default" />
|
||||
<inspection_tool class="DuplicatedCode" enabled="false" level="WEAK WARNING" enabled_by_default="false" />
|
||||
<inspection_tool class="ExceptionCaughtLocallyJS" enabled="false" level="WARNING" enabled_by_default="false" />
|
||||
</profile>
|
||||
</component>
|
6
.idea/jsLibraryMappings.xml
Normal file
6
.idea/jsLibraryMappings.xml
Normal file
@ -0,0 +1,6 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<project version="4">
|
||||
<component name="JavaScriptLibraryMappings">
|
||||
<file url="PROJECT" libraries="{daisyui}" />
|
||||
</component>
|
||||
</project>
|
6
.idea/misc.xml
Normal file
6
.idea/misc.xml
Normal file
@ -0,0 +1,6 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<project version="4">
|
||||
<component name="Black">
|
||||
<option name="sdkName" value="Python 3.9" />
|
||||
</component>
|
||||
</project>
|
16
.vscode/launch.json
vendored
16
.vscode/launch.json
vendored
@ -1,16 +0,0 @@
|
||||
{
|
||||
// Use IntelliSense to learn about possible attributes.
|
||||
// Hover to view descriptions of existing attributes.
|
||||
// For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
|
||||
"version": "0.2.0",
|
||||
"configurations": [
|
||||
{
|
||||
"name": "Run RoadSign",
|
||||
"type": "go",
|
||||
"request": "launch",
|
||||
"mode": "auto",
|
||||
"program": "${workspaceFolder}/pkg/cmd/main.go",
|
||||
"cwd": "${workspaceFolder}"
|
||||
}
|
||||
]
|
||||
}
|
2182
Cargo.lock
generated
2182
Cargo.lock
generated
File diff suppressed because it is too large
Load Diff
38
Cargo.toml
38
Cargo.toml
@ -1,38 +0,0 @@
|
||||
[package]
|
||||
name = "roadsign"
|
||||
version = "0.1.0"
|
||||
edition = "2021"
|
||||
|
||||
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
||||
|
||||
[dependencies]
|
||||
actix-files = "0.6.5"
|
||||
actix-proxy = "0.2.0"
|
||||
actix-web = { version = "4.5.1", features = ["rustls-0_22"] }
|
||||
actix-web-httpauth = "0.8.1"
|
||||
awc = { version = "3.4.0", features = ["tls-rustls-0_22"] }
|
||||
config = { version = "0.14.0", features = ["toml"] }
|
||||
lazy_static = "1.4.0"
|
||||
mime = "0.3.17"
|
||||
percent-encoding = "2.3.1"
|
||||
queryst = "3.0.0"
|
||||
rand = "0.8.5"
|
||||
regex = "1.10.2"
|
||||
serde = "1.0.195"
|
||||
serde_json = "1.0.111"
|
||||
tokio = { version = "1.35.1", features = [
|
||||
"rt-multi-thread",
|
||||
"macros",
|
||||
"time",
|
||||
"full",
|
||||
] }
|
||||
toml = "0.8.8"
|
||||
tracing = "0.1.40"
|
||||
tracing-subscriber = "0.3.18"
|
||||
wildmatch = "2.3.0"
|
||||
derive_more = "0.99.17"
|
||||
rustls = "0.22.2"
|
||||
rustls-pemfile = "2.0.0"
|
||||
futures = "0.3.30"
|
||||
actix-web-actors = "4.3.0"
|
||||
actix = "0.13.3"
|
16
Dockerfile
16
Dockerfile
@ -1,13 +1,17 @@
|
||||
# Building Backend
|
||||
FROM rust:alpine as roadsign-server
|
||||
|
||||
RUN apk add libressl-dev build-base
|
||||
FROM golang:alpine as roadsign-server
|
||||
|
||||
WORKDIR /source
|
||||
COPY . .
|
||||
ENV RUSTFLAGS="-C target-feature=-crt-static"
|
||||
RUN cargo build --release
|
||||
RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -buildvcs -o /dist ./pkg/cmd/server/main.go
|
||||
|
||||
# Runtime
|
||||
FROM golang:alpine
|
||||
|
||||
RUN apk add zip
|
||||
|
||||
COPY --from=roadsign-server /dist /roadsign/server
|
||||
|
||||
EXPOSE 81
|
||||
|
||||
CMD ["/source/target/release/roadsign"]
|
||||
CMD ["/roadsign/server"]
|
47
README.md
47
README.md
@ -1,23 +1,30 @@
|
||||
# 🚦 RoadSign
|
||||
|
||||
A blazing fast reverse proxy with a lot of shining features.
|
||||
A blazing fast http server with a lot of shining features.
|
||||
|
||||
## Features
|
||||
|
||||
1. Reverse proxy
|
||||
2. Static file hosting
|
||||
3. ~~Analytics and Metrics~~
|
||||
4. Integrate with CI/CD
|
||||
5. Webhook integration
|
||||
6. ~~Web management panel~~
|
||||
2. WebSocket Support
|
||||
3. Static File Hosting
|
||||
4. Low Configuration
|
||||
5. Analytics and Metrics
|
||||
6. Integrate with CI/CD
|
||||
7. One-liner CLI
|
||||
8. **Blazing fast ⚡**
|
||||
8. Open-source and free
|
||||
9. **Blazing fast ⚡**
|
||||
|
||||
> Deleted item means under construction, check out our roadmap!
|
||||
But, as we said, this isn't a reverse proxy. It's a http server.
|
||||
So we didn't recommend you directly use it as a gateway to users, because it doesn't support all the features of http.
|
||||
For example like doesn't support multiple site HTTPS, HTTP/2.0, HTTP/3 and Server-Side Events.
|
||||
|
||||
You can use RoadSign behind a reverse proxy like caddy, and let it handle the HTTP/2.0, HTTP/3 and all the other
|
||||
cutting-edge stuff.
|
||||
|
||||
### How fast is it?
|
||||
|
||||
We use roadsign and nginx to host a same static file, and test them with [go-wrk](https://github.com/tsliwowicz/go-wrk).
|
||||
We use roadsign and nginx to host the same static file
|
||||
and test them with [go-wrk](https://github.com/tsliwowicz/go-wrk).
|
||||
Here's the result:
|
||||
|
||||
| **Software** | Total Requests | Requests per Seconds | Transfer per Seconds | Avg Time | Fastest Time | Slowest Time | Errors Count |
|
||||
@ -42,7 +49,7 @@ We strongly recommend you install RoadSign via docker compose.
|
||||
version: "3"
|
||||
services:
|
||||
roadsign:
|
||||
image: code.smartsheep.studio/goatworks/roadsign:nightly
|
||||
image: xsheep2010/roadsign:nightly
|
||||
restart: always
|
||||
volumes:
|
||||
- "./certs:/certs" # Optional, use for storage certificates
|
||||
@ -55,28 +62,28 @@ services:
|
||||
- "81:81"
|
||||
```
|
||||
|
||||
After that, you can manage your roadsign instance with RoadSign CLI aka. RDS CLI.
|
||||
After that, you can manage your roadsign instance with RoadSign CLI aka. RDC.
|
||||
To install it, run this command. (Make sure you have golang toolchain on your computer)
|
||||
|
||||
```shell
|
||||
go install -buildvcs code.smartsheep.studio/goatworks/roadsign/pkg/cmd/rds@latest
|
||||
# Tips: Add `buildvsc` flag to provide more detail compatibility check.
|
||||
go install git.solsynth.dev/goatworks/roadsign/pkg/cmd/rdc@latest
|
||||
```
|
||||
|
||||
## Usage
|
||||
|
||||
To use roadsign, you need to add a configuration for it. Create a file locally.
|
||||
Name whatever you like. And follow our [documentation](https://wiki.smartsheep.studio/roadsign/configuration/index.html) to
|
||||
Name whatever you like. And follow our [documentation](https://wiki.smartsheep.studio/roadsign/configuration/index.html)
|
||||
to
|
||||
write it.
|
||||
|
||||
After configure, you need sync your config to remote server. Before that, add a connection between roadsign server and
|
||||
rds cli with this command.
|
||||
|
||||
```shell
|
||||
rds connect <id> <url> <password>
|
||||
# ID will allow you find this server.py.rs in after commands.
|
||||
# URL is to your roadsign server.py.rs sideload api.
|
||||
# Password is your roadsign server.py.rs credential.
|
||||
rdc connect <id> <url> <password>
|
||||
# ID will allow you find this server in after commands.
|
||||
# URL is to your roadsign server sideload api.
|
||||
# Password is your roadsign server credential.
|
||||
# ======================================================================
|
||||
# !WARNING! All these things will storage in your $HOME/.roadsignrc.yaml
|
||||
# ======================================================================
|
||||
@ -85,8 +92,8 @@ rds connect <id> <url> <password>
|
||||
Then, sync your local config to remote.
|
||||
|
||||
```shell
|
||||
rds sync <server.py.rs id> <site id> <config file>
|
||||
# Server ID is your server.py.rs added by last command.
|
||||
rdc sync <server id> <region id> <config file>
|
||||
# Server ID is your server added by last command.
|
||||
# Site ID is your new site id or old site id if you need update it.
|
||||
# Config File is your local config file path.
|
||||
```
|
||||
|
@ -1,17 +0,0 @@
|
||||
regions = "./regions"
|
||||
secret = "aEXcED5xJ3"
|
||||
|
||||
[sideload]
|
||||
bind_addr = "0.0.0.0:81"
|
||||
|
||||
[[proxies.bind]]
|
||||
addr = "0.0.0.0:80"
|
||||
tls = false
|
||||
[[proxies.bind]]
|
||||
addr = "0.0.0.0:443"
|
||||
tls = false
|
||||
|
||||
[[certificates]]
|
||||
domain = "localhost"
|
||||
certs = "certs/fullchain.pem"
|
||||
key = "certs/privkey.pem"
|
177
cli/.gitignore
vendored
Normal file
177
cli/.gitignore
vendored
Normal file
@ -0,0 +1,177 @@
|
||||
# Based on https://raw.githubusercontent.com/github/gitignore/main/Node.gitignore
|
||||
|
||||
# Logs
|
||||
|
||||
logs
|
||||
_.log
|
||||
npm-debug.log_
|
||||
yarn-debug.log*
|
||||
yarn-error.log*
|
||||
lerna-debug.log*
|
||||
.pnpm-debug.log*
|
||||
|
||||
# Caches
|
||||
|
||||
.cache
|
||||
|
||||
# Diagnostic reports (https://nodejs.org/api/report.html)
|
||||
|
||||
report.[0-9]_.[0-9]_.[0-9]_.[0-9]_.json
|
||||
|
||||
# Runtime data
|
||||
|
||||
pids
|
||||
_.pid
|
||||
_.seed
|
||||
*.pid.lock
|
||||
|
||||
# Directory for instrumented libs generated by jscoverage/JSCover
|
||||
|
||||
lib-cov
|
||||
|
||||
# Coverage directory used by tools like istanbul
|
||||
|
||||
coverage
|
||||
*.lcov
|
||||
|
||||
# nyc test coverage
|
||||
|
||||
.nyc_output
|
||||
|
||||
# Grunt intermediate storage (https://gruntjs.com/creating-plugins#storing-task-files)
|
||||
|
||||
.grunt
|
||||
|
||||
# Bower dependency directory (https://bower.io/)
|
||||
|
||||
bower_components
|
||||
|
||||
# node-waf configuration
|
||||
|
||||
.lock-wscript
|
||||
|
||||
# Compiled binary addons (https://nodejs.org/api/addons.html)
|
||||
|
||||
build/Release
|
||||
|
||||
# Dependency directories
|
||||
|
||||
node_modules/
|
||||
jspm_packages/
|
||||
|
||||
# Snowpack dependency directory (https://snowpack.dev/)
|
||||
|
||||
web_modules/
|
||||
|
||||
# TypeScript cache
|
||||
|
||||
*.tsbuildinfo
|
||||
|
||||
# Optional npm cache directory
|
||||
|
||||
.npm
|
||||
|
||||
# Optional eslint cache
|
||||
|
||||
.eslintcache
|
||||
|
||||
# Optional stylelint cache
|
||||
|
||||
.stylelintcache
|
||||
|
||||
# Microbundle cache
|
||||
|
||||
.rpt2_cache/
|
||||
.rts2_cache_cjs/
|
||||
.rts2_cache_es/
|
||||
.rts2_cache_umd/
|
||||
|
||||
# Optional REPL history
|
||||
|
||||
.node_repl_history
|
||||
|
||||
# Output of 'npm pack'
|
||||
|
||||
*.tgz
|
||||
|
||||
# Yarn Integrity file
|
||||
|
||||
.yarn-integrity
|
||||
|
||||
# dotenv environment variable files
|
||||
|
||||
.env
|
||||
.env.development.local
|
||||
.env.test.local
|
||||
.env.production.local
|
||||
.env.local
|
||||
|
||||
# parcel-bundler cache (https://parceljs.org/)
|
||||
|
||||
.parcel-cache
|
||||
|
||||
# Next.js build output
|
||||
|
||||
.next
|
||||
out
|
||||
|
||||
# Nuxt.js build / generate output
|
||||
|
||||
.nuxt
|
||||
dist
|
||||
|
||||
# Gatsby files
|
||||
|
||||
# Comment in the public line in if your project uses Gatsby and not Next.js
|
||||
|
||||
# https://nextjs.org/blog/next-9-1#public-directory-support
|
||||
|
||||
# public
|
||||
|
||||
# vuepress build output
|
||||
|
||||
.vuepress/dist
|
||||
|
||||
# vuepress v2.x temp and cache directory
|
||||
|
||||
.temp
|
||||
|
||||
# Docusaurus cache and generated files
|
||||
|
||||
.docusaurus
|
||||
|
||||
# Serverless directories
|
||||
|
||||
.serverless/
|
||||
|
||||
# FuseBox cache
|
||||
|
||||
.fusebox/
|
||||
|
||||
# DynamoDB Local files
|
||||
|
||||
.dynamodb/
|
||||
|
||||
# TernJS port file
|
||||
|
||||
.tern-port
|
||||
|
||||
# Stores VSCode versions used for testing VSCode extensions
|
||||
|
||||
.vscode-test
|
||||
|
||||
# yarn v2
|
||||
|
||||
.yarn/cache
|
||||
.yarn/unplugged
|
||||
.yarn/build-state.yml
|
||||
.yarn/install-state.gz
|
||||
.pnp.*
|
||||
|
||||
# IntelliJ based IDEs
|
||||
.idea
|
||||
|
||||
# Finder (MacOS) folder config
|
||||
.DS_Store
|
||||
|
||||
/test/static-files
|
13
cli/.roadsignrc
Normal file
13
cli/.roadsignrc
Normal file
@ -0,0 +1,13 @@
|
||||
{
|
||||
"sync": {
|
||||
"region": "static-files",
|
||||
"configPath": "test/static-files.toml"
|
||||
},
|
||||
"deployments": [
|
||||
{
|
||||
"path": "test/static-files",
|
||||
"region": "static-files",
|
||||
"site": "static-files-des"
|
||||
}
|
||||
]
|
||||
}
|
3
cli/README.md
Normal file
3
cli/README.md
Normal file
@ -0,0 +1,3 @@
|
||||
# RoadSign CLI
|
||||
|
||||
RoadSign CLI is a command-line interface (CLI) tool that allows you to manage your RoadSign servers and applications.
|
BIN
cli/bun.lockb
Executable file
BIN
cli/bun.lockb
Executable file
Binary file not shown.
42
cli/index.ts
Executable file
42
cli/index.ts
Executable file
@ -0,0 +1,42 @@
|
||||
import { Builtins, Cli } from "clipanion"
|
||||
import figlet from "figlet"
|
||||
import chalk from "chalk"
|
||||
|
||||
import { LoginCommand } from "./src/cmd/login.ts"
|
||||
import { LogoutCommand } from "./src/cmd/logout.ts"
|
||||
import { ListServerCommand } from "./src/cmd/list.ts"
|
||||
import { StatusCommand } from "./src/cmd/status.ts"
|
||||
import { InfoCommand } from "./src/cmd/info.ts"
|
||||
import { ProcessCommand } from "./src/cmd/process-info.ts"
|
||||
import { DeployCommand } from "./src/cmd/deploy.ts"
|
||||
import { SyncCommand } from "./src/cmd/sync.ts"
|
||||
import { ReloadCommand } from "./src/cmd/reload.ts"
|
||||
|
||||
const [node, app, ...args] = process.argv
|
||||
|
||||
const ENABLE_STARTUP_ASCII_ART = false
|
||||
|
||||
if (process.env["ENABLE_STARTUP_ASCII_ART"] || ENABLE_STARTUP_ASCII_ART) {
|
||||
console.log(
|
||||
chalk.yellow(figlet.textSync("RoadSign CLI", { horizontalLayout: "default", verticalLayout: "default" }))
|
||||
)
|
||||
}
|
||||
|
||||
const cli = new Cli({
|
||||
binaryLabel: `RoadSign CLI`,
|
||||
binaryName: `${node} ${app}`,
|
||||
binaryVersion: `1.0.0`
|
||||
})
|
||||
|
||||
cli.register(Builtins.VersionCommand)
|
||||
cli.register(Builtins.HelpCommand)
|
||||
cli.register(LoginCommand)
|
||||
cli.register(LogoutCommand)
|
||||
cli.register(ListServerCommand)
|
||||
cli.register(StatusCommand)
|
||||
cli.register(InfoCommand)
|
||||
cli.register(ProcessCommand)
|
||||
cli.register(DeployCommand)
|
||||
cli.register(SyncCommand)
|
||||
cli.register(ReloadCommand)
|
||||
cli.runExit(args)
|
36
cli/package.json
Normal file
36
cli/package.json
Normal file
@ -0,0 +1,36 @@
|
||||
{
|
||||
"name": "roadsign-cli",
|
||||
"module": "index.ts",
|
||||
"version": "1.0.2",
|
||||
"repository": "https://github.com/solsynth/roadsign",
|
||||
"type": "module",
|
||||
"scripts": {
|
||||
"build": "rimraf dist && rollup -c rollup.config.js"
|
||||
},
|
||||
"bin": {
|
||||
"rdcli": "./dist/index.cjs"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@rollup/plugin-commonjs": "^28.0.0",
|
||||
"@rollup/plugin-json": "^6.1.0",
|
||||
"@rollup/plugin-node-resolve": "^15.3.0",
|
||||
"@rollup/plugin-typescript": "^12.1.0",
|
||||
"@types/bun": "latest",
|
||||
"@types/cli-progress": "^3.11.6",
|
||||
"@types/figlet": "^1.5.8",
|
||||
"rimraf": "^6.0.1",
|
||||
"rollup": "^4.24.0",
|
||||
"rollup-plugin-typescript2": "^0.36.0"
|
||||
},
|
||||
"peerDependencies": {
|
||||
"typescript": "^5.6.2"
|
||||
},
|
||||
"dependencies": {
|
||||
"chalk": "^5.3.0",
|
||||
"cli-progress": "^3.12.0",
|
||||
"cli-table3": "^0.6.5",
|
||||
"clipanion": "^4.0.0-rc.4",
|
||||
"figlet": "^1.7.0",
|
||||
"ora": "^8.1.0"
|
||||
}
|
||||
}
|
22
cli/rollup.config.js
Normal file
22
cli/rollup.config.js
Normal file
@ -0,0 +1,22 @@
|
||||
import resolve from "@rollup/plugin-node-resolve"
|
||||
import commonjs from "@rollup/plugin-commonjs"
|
||||
import typescript from "@rollup/plugin-typescript"
|
||||
import json from "@rollup/plugin-json"
|
||||
|
||||
export default {
|
||||
input: "index.ts",
|
||||
output: {
|
||||
banner: "#!/usr/bin/env node",
|
||||
file: "dist/index.cjs",
|
||||
format: "cjs",
|
||||
inlineDynamicImports: true,
|
||||
},
|
||||
plugins: [
|
||||
resolve(),
|
||||
commonjs(),
|
||||
json(),
|
||||
typescript({
|
||||
tsconfig: "./tsconfig.json"
|
||||
})
|
||||
],
|
||||
}
|
124
cli/src/cmd/deploy.ts
Normal file
124
cli/src/cmd/deploy.ts
Normal file
@ -0,0 +1,124 @@
|
||||
import { RsConfig } from "../utils/config.ts"
|
||||
import { Command, Option, type Usage } from "clipanion"
|
||||
import chalk from "chalk"
|
||||
import ora from "ora"
|
||||
import * as fs from "node:fs"
|
||||
import * as child_process from "node:child_process"
|
||||
import * as path from "node:path"
|
||||
import { createAuthHeader } from "../utils/auth.ts"
|
||||
import { RsLocalConfig, type RsLocalConfigDeploymentPostActionData } from "../utils/config-local.ts"
|
||||
import * as os from "node:os"
|
||||
|
||||
export class DeployCommand extends Command {
|
||||
static paths = [[`deploy`]]
|
||||
static usage: Usage = {
|
||||
category: `Building`,
|
||||
description: `Deploying App / Static Site onto RoadSign`,
|
||||
details: `Deploying an application or hosting a static site via RoadSign, you need preconfigured the RoadSign, or sync the configurations via sync command.`,
|
||||
examples: [
|
||||
["Deploying to RoadSign", `deploy <server> <region> <site> <file / directory>`],
|
||||
["Deploying to RoadSign with .roadsignrc file", `deploy <server>`]
|
||||
]
|
||||
}
|
||||
|
||||
server = Option.String({ required: true })
|
||||
region = Option.String({ required: false })
|
||||
site = Option.String({ required: false })
|
||||
input = Option.String({ required: false })
|
||||
|
||||
async deploy(serverLabel: string, region: string, site: string, input: string, postDeploy: RsLocalConfigDeploymentPostActionData | null = null) {
|
||||
const cfg = await RsConfig.getInstance()
|
||||
const server = cfg.config.servers.find(item => item.label === serverLabel)
|
||||
if (server == null) {
|
||||
this.context.stdout.write(chalk.red(`Server with label ${chalk.bold(this.server)} was not found.\n`))
|
||||
return
|
||||
}
|
||||
|
||||
if (!fs.existsSync(input)) {
|
||||
this.context.stdout.write(chalk.red(`Input file ${chalk.bold(this.input)} was not found.\n`))
|
||||
return
|
||||
}
|
||||
|
||||
let isDirectory = false
|
||||
if (fs.statSync(input).isDirectory()) {
|
||||
const compressPrefStart = performance.now()
|
||||
const compressSpinner = ora(`Compressing ${chalk.bold(input)}...`).start()
|
||||
const destName = path.join(os.tmpdir(), `${Date.now()}-roadsign-archive.zip`)
|
||||
child_process.execSync(`cd ${input} && zip -r ${destName} .`)
|
||||
const compressPrefTook = performance.now() - compressPrefStart
|
||||
compressSpinner.succeed(`Compressing completed in ${(compressPrefTook / 1000).toFixed(2)}s 🎉`)
|
||||
input = destName
|
||||
isDirectory = true
|
||||
}
|
||||
|
||||
const destBreadcrumb = [region, site].join(" ➜ ")
|
||||
const spinner = ora(`Deploying ${chalk.bold(destBreadcrumb)} to ${chalk.bold(this.server)}...`).start()
|
||||
|
||||
const prefStart = performance.now()
|
||||
|
||||
try {
|
||||
const payload = new FormData()
|
||||
payload.set("attachments", await fs.openAsBlob(input), isDirectory ? "dist.zip" : path.basename(input))
|
||||
|
||||
if(postDeploy) {
|
||||
if(postDeploy.command) {
|
||||
payload.set("post-deploy-script", postDeploy.command)
|
||||
} else if(postDeploy.scriptPath) {
|
||||
payload.set("post-deploy-script", fs.readFileSync(postDeploy.scriptPath, "utf8"))
|
||||
} else {
|
||||
this.context.stdout.write(chalk.yellow(`Configured post deploy action but no script provided, skip performing post deploy action...\n`))
|
||||
}
|
||||
payload.set("post-deploy-environment", postDeploy.environment?.join("\n") ?? "")
|
||||
}
|
||||
|
||||
const res = await fetch(`${server.url}/webhooks/publish/${region}/${site}?mimetype=application/zip`, {
|
||||
method: "PUT",
|
||||
body: payload,
|
||||
headers: {
|
||||
Authorization: createAuthHeader(server.credential)
|
||||
}
|
||||
})
|
||||
if (res.status !== 200) {
|
||||
throw new Error(await res.text())
|
||||
}
|
||||
const prefTook = performance.now() - prefStart
|
||||
spinner.succeed(`Deploying completed in ${(prefTook / 1000).toFixed(2)}s 🎉`)
|
||||
} catch (e) {
|
||||
this.context.stdout.write(`Failed to deploy to remote: ${e}\n`)
|
||||
spinner.fail(`Server with label ${chalk.bold(this.server)} is not running! 😢`)
|
||||
} finally {
|
||||
if (isDirectory && input.endsWith(".zip")) {
|
||||
fs.unlinkSync(input)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async execute() {
|
||||
if (this.region && this.site && this.input) {
|
||||
await this.deploy(this.server, this.region, this.site, this.input)
|
||||
} else {
|
||||
let localCfg: RsLocalConfig
|
||||
try {
|
||||
localCfg = await RsLocalConfig.getInstance()
|
||||
} catch (e) {
|
||||
this.context.stdout.write(chalk.red(`Unable to load .roadsignrc: ${e}\n`))
|
||||
return
|
||||
}
|
||||
|
||||
if (!localCfg.config.deployments) {
|
||||
this.context.stdout.write(chalk.red(`No deployments found in .roadsignrc, exiting...\n`))
|
||||
return
|
||||
}
|
||||
|
||||
let idx = 0
|
||||
for (const deployment of localCfg.config.deployments ?? []) {
|
||||
this.context.stdout.write(chalk.cyan(`Deploying ${idx + 1} out of ${localCfg.config.deployments.length} deployments...\n`))
|
||||
await this.deploy(this.server, deployment.region, deployment.site, deployment.path, deployment.postDeploy)
|
||||
}
|
||||
|
||||
this.context.stdout.write(chalk.green(`All deployments has been deployed!\n`))
|
||||
}
|
||||
|
||||
process.exit(0)
|
||||
}
|
||||
}
|
175
cli/src/cmd/info.ts
Normal file
175
cli/src/cmd/info.ts
Normal file
@ -0,0 +1,175 @@
|
||||
import { Command, Option, type Usage } from "clipanion"
|
||||
import { RsConfig, type RsConfigServerData } from "../utils/config.ts"
|
||||
import { createAuthHeader } from "../utils/auth.ts"
|
||||
import chalk from "chalk"
|
||||
import ora from "ora"
|
||||
|
||||
export class InfoCommand extends Command {
|
||||
static paths = [[`info`], [`if`]]
|
||||
static usage: Usage = {
|
||||
category: `Networking`,
|
||||
description: `Fetching the stats of RoadSign Server`,
|
||||
details: `Fetching the configured things amount and other things of a connected server`,
|
||||
examples: [["Fetch stats from labeled server", `info <label> [area]`]]
|
||||
}
|
||||
|
||||
label = Option.String({ required: true })
|
||||
area = Option.String({ required: false })
|
||||
loop = Option.Boolean("--loop,--follow,-f", false, { description: "Keep updating the results" })
|
||||
|
||||
private static formatUptime(ms: number): string {
|
||||
let seconds: number = Math.floor(ms / 1000)
|
||||
let minutes: number = Math.floor(seconds / 60)
|
||||
let hours: number = Math.floor(minutes / 60)
|
||||
let days: number = Math.floor(hours / 24)
|
||||
|
||||
seconds = seconds % 60
|
||||
minutes = minutes % 60
|
||||
hours = hours % 24
|
||||
|
||||
const uptimeParts: string[] = []
|
||||
|
||||
if (days > 0) uptimeParts.push(`${days} day${days > 1 ? "s" : ""}`)
|
||||
if (hours > 0) uptimeParts.push(`${hours} hour${hours > 1 ? "s" : ""}`)
|
||||
if (minutes > 0) uptimeParts.push(`${minutes} minute${minutes > 1 ? "s" : ""}`)
|
||||
if (seconds > 0 || uptimeParts.length === 0) uptimeParts.push(`${seconds} second${seconds > 1 ? "s" : ""}`)
|
||||
|
||||
return uptimeParts.join(", ")
|
||||
}
|
||||
|
||||
async fetchOverview(server: RsConfigServerData) {
|
||||
try {
|
||||
const res = await fetch(`${server.url}/cgi/stats`, {
|
||||
headers: {
|
||||
Authorization: createAuthHeader(server.credential)
|
||||
}
|
||||
})
|
||||
if (res.status !== 200) {
|
||||
throw new Error(await res.text())
|
||||
}
|
||||
|
||||
const data: any = await res.json()
|
||||
this.context.stdout.write('\n')
|
||||
this.context.stdout.write(`\nServer stats of ${chalk.bold(this.label)}\n`)
|
||||
this.context.stdout.write(` • Uptime: ${chalk.bold(InfoCommand.formatUptime(data["uptime"]))}\n`)
|
||||
this.context.stdout.write(` • Traffic since last startup: ${chalk.bold(data["traffic"]["total"])}\n`)
|
||||
this.context.stdout.write(` • Unique clients since last startup: ${chalk.bold(data["traffic"]["unique_client"])}\n`)
|
||||
this.context.stdout.write(`\nServer info of ${chalk.bold(this.label)}\n`)
|
||||
this.context.stdout.write(` • Warden Applications: ${chalk.bold(data["applications"])}\n`)
|
||||
this.context.stdout.write(` • Destinations: ${chalk.bold(data["destinations"])}\n`)
|
||||
this.context.stdout.write(` • Locations: ${chalk.bold(data["locations"])}\n`)
|
||||
this.context.stdout.write(` • Regions: ${chalk.bold(data["regions"])}\n`)
|
||||
this.context.stdout.write('\n')
|
||||
} catch (e) {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
async fetchTrace(server: RsConfigServerData) {
|
||||
const res = await fetch(`${server.url}/cgi/traces`, {
|
||||
headers: {
|
||||
Authorization: createAuthHeader(server.credential)
|
||||
}
|
||||
})
|
||||
if (res.status !== 200) {
|
||||
throw new Error(await res.text())
|
||||
}
|
||||
|
||||
const data: any = await res.json()
|
||||
for (const trace of data) {
|
||||
const ts = new Date(trace["timestamp"]).toLocaleString()
|
||||
const path = [trace["region"], trace["location"], trace["destination"]].join(" ➜ ")
|
||||
const uri = trace["uri"].split("?").length == 1 ? trace["uri"] : trace["uri"].split("?")[0] + ` ${chalk.grey(`w/ query parameters`)}`
|
||||
this.context.stdout.write(`${chalk.bgGrey(`[${ts}]`)} ${chalk.bold(path)} ${chalk.cyan(trace["ip_address"])} ${uri}\n`)
|
||||
}
|
||||
}
|
||||
|
||||
async fetchRegions(server: RsConfigServerData) {
|
||||
const res = await fetch(`${server.url}/cgi/regions`, {
|
||||
headers: {
|
||||
Authorization: createAuthHeader(server.credential)
|
||||
}
|
||||
})
|
||||
if (res.status !== 200) {
|
||||
throw new Error(await res.text())
|
||||
}
|
||||
|
||||
const data: any = await res.json()
|
||||
this.context.stdout.write("\n\n")
|
||||
for (const region of data) {
|
||||
this.context.stdout.write(` • ${chalk.bgGrey('region#')}${chalk.bold(region.id)} ${chalk.gray(`(${region.locations.length} locations)`)}\n`)
|
||||
for (const location of region.locations) {
|
||||
this.context.stdout.write(` • ${chalk.bgGrey('location#')} ${chalk.bold(location.id)} ${chalk.gray(`(${location.destinations.length} destinations)`)}\n`)
|
||||
for (const destination of location.destinations) {
|
||||
this.context.stdout.write(` • ${chalk.bgGrey('destination#')}${chalk.bold(destination.id)}\n`)
|
||||
}
|
||||
}
|
||||
this.context.stdout.write("\n")
|
||||
}
|
||||
}
|
||||
|
||||
async execute() {
|
||||
const config = await RsConfig.getInstance()
|
||||
|
||||
const server = config.config.servers.find(item => item.label === this.label)
|
||||
if (server == null) {
|
||||
this.context.stdout.write(chalk.red(`Server with label ${chalk.bold(this.label)} was not found.\n`))
|
||||
return
|
||||
}
|
||||
|
||||
if (this.area == null) {
|
||||
this.area = "overview"
|
||||
}
|
||||
|
||||
const spinner = ora(`Fetching stats from server ${this.label}...`).start()
|
||||
const prefStart = performance.now()
|
||||
|
||||
switch (this.area) {
|
||||
case "overview":
|
||||
try {
|
||||
await this.fetchOverview(server)
|
||||
const prefTook = performance.now() - prefStart
|
||||
spinner.succeed(`Fetching completed in ${(prefTook / 1000).toFixed(2)}s 🎉`)
|
||||
} catch (e) {
|
||||
spinner.fail(`Server with label ${chalk.bold(this.label)} is not running! 😢`)
|
||||
}
|
||||
break
|
||||
case "trace":
|
||||
while (true) {
|
||||
try {
|
||||
await this.fetchTrace(server)
|
||||
const prefTook = performance.now() - prefStart
|
||||
if (!this.loop) {
|
||||
spinner.succeed(`Fetching completed in ${(prefTook / 1000).toFixed(2)}s 🎉`)
|
||||
}
|
||||
} catch (e) {
|
||||
spinner.fail(`Server with label ${chalk.bold(this.label)} is not running! 😢`)
|
||||
return
|
||||
}
|
||||
|
||||
if (!this.loop) {
|
||||
break
|
||||
} else {
|
||||
spinner.text = "Updating..."
|
||||
await new Promise(resolve => setTimeout(resolve, 3000))
|
||||
this.context.stdout.write("\x1Bc")
|
||||
}
|
||||
}
|
||||
break
|
||||
case "regions":
|
||||
try {
|
||||
await this.fetchRegions(server)
|
||||
const prefTook = performance.now() - prefStart
|
||||
spinner.succeed(`Fetching completed in ${(prefTook / 1000).toFixed(2)}s 🎉`)
|
||||
} catch (e) {
|
||||
spinner.fail(`Server with label ${chalk.bold(this.label)} is not running! 😢`)
|
||||
return
|
||||
}
|
||||
break
|
||||
default:
|
||||
spinner.fail(chalk.red(`Info area was not exists ${chalk.bold(this.area)}...`))
|
||||
}
|
||||
|
||||
process.exit(0)
|
||||
}
|
||||
}
|
26
cli/src/cmd/list.ts
Normal file
26
cli/src/cmd/list.ts
Normal file
@ -0,0 +1,26 @@
|
||||
import { Command, type Usage } from "clipanion"
|
||||
import { RsConfig } from "../utils/config.ts"
|
||||
import chalk from "chalk"
|
||||
|
||||
export class ListServerCommand extends Command {
|
||||
static paths = [[`list`], [`ls`]]
|
||||
static usage: Usage = {
|
||||
category: `Networking`,
|
||||
description: `List all connected RoadSign Sideload Services`,
|
||||
details: `Listing all servers that already saved in RoadSign CLI configuration file`,
|
||||
examples: [["List all", `list`]]
|
||||
}
|
||||
|
||||
async execute() {
|
||||
const config = await RsConfig.getInstance()
|
||||
|
||||
for (let idx = 0; idx < config.config.servers.length; idx++) {
|
||||
const server = config.config.servers[idx]
|
||||
this.context.stdout.write(`${idx + 1}. ${chalk.bold(server.label)} ${chalk.gray(`(${server.url})`)}\n`)
|
||||
}
|
||||
|
||||
this.context.stdout.write("\n" + chalk.cyan(`Connected ${config.config.servers.length} server(s) in total.`) + "\n")
|
||||
|
||||
process.exit(0)
|
||||
}
|
||||
}
|
55
cli/src/cmd/login.ts
Normal file
55
cli/src/cmd/login.ts
Normal file
@ -0,0 +1,55 @@
|
||||
import { Command, Option, type Usage } from "clipanion"
|
||||
import { createAuthHeader } from "../utils/auth.ts"
|
||||
import { RsConfig } from "../utils/config.ts"
|
||||
import ora, { oraPromise } from "ora"
|
||||
|
||||
export class LoginCommand extends Command {
|
||||
static paths = [[`login`]]
|
||||
static usage: Usage = {
|
||||
category: `Networking`,
|
||||
description: `Login to RoadSign Sideload Service`,
|
||||
details: `Login to RoadSign Server`,
|
||||
examples: [["Login with credentials", `login <label> <host> <password>`]]
|
||||
}
|
||||
|
||||
label = Option.String({ required: true })
|
||||
host = Option.String({ required: true })
|
||||
credentials = Option.String({ required: true })
|
||||
|
||||
async execute() {
|
||||
const config = await RsConfig.getInstance()
|
||||
const spinner = ora(`Connecting to ${this.host}...`).start()
|
||||
|
||||
if (!this.host.includes(":")) {
|
||||
this.host += ":81"
|
||||
}
|
||||
if (!this.host.startsWith("http")) {
|
||||
this.host = "http://" + this.host
|
||||
}
|
||||
|
||||
try {
|
||||
const pingRes = await fetch(`${this.host}/cgi/metadata`, {
|
||||
headers: {
|
||||
Authorization: createAuthHeader(this.credentials)
|
||||
}
|
||||
})
|
||||
if (pingRes.status !== 200) {
|
||||
throw new Error(await pingRes.text())
|
||||
} else {
|
||||
const info: any = await pingRes.json()
|
||||
spinner.succeed(`Connected to ${this.host}, remote version ${info["version"]}`)
|
||||
|
||||
config.config.servers.push({
|
||||
label: this.label,
|
||||
url: this.host,
|
||||
credential: this.credentials
|
||||
})
|
||||
await oraPromise(config.writeConfig(), { text: "Saving changes..." })
|
||||
}
|
||||
} catch (e) {
|
||||
spinner.fail(`Unable connect to remote: ${e}`)
|
||||
}
|
||||
|
||||
process.exit(0)
|
||||
}
|
||||
}
|
31
cli/src/cmd/logout.ts
Normal file
31
cli/src/cmd/logout.ts
Normal file
@ -0,0 +1,31 @@
|
||||
import { Command, Option, type Usage } from "clipanion"
|
||||
import { RsConfig } from "../utils/config.ts"
|
||||
import { oraPromise } from "ora"
|
||||
import chalk from "chalk"
|
||||
|
||||
export class LogoutCommand extends Command {
|
||||
static paths = [[`logout`]]
|
||||
static usage: Usage = {
|
||||
category: `Networking`,
|
||||
description: `Logout from RoadSign Sideload Service`,
|
||||
details: `Logout from RoadSign Server`,
|
||||
examples: [["Logout with server label", `logout <label>`]]
|
||||
}
|
||||
|
||||
label = Option.String({ required: true })
|
||||
|
||||
async execute() {
|
||||
const config = await RsConfig.getInstance()
|
||||
|
||||
const server = config.config.servers.findIndex(item => item.label === this.label)
|
||||
if (server === -1) {
|
||||
this.context.stdout.write(chalk.red(`Server with label ${chalk.bold(this.label)} was not found.\n`))
|
||||
} else {
|
||||
config.config.servers.splice(server, 1)
|
||||
this.context.stdout.write(chalk.green(`Server with label ${chalk.bold(this.label)} was successfully removed.\n`))
|
||||
await oraPromise(config.writeConfig(), { text: "Saving changes..." })
|
||||
}
|
||||
|
||||
process.exit(0)
|
||||
}
|
||||
}
|
146
cli/src/cmd/process-info.ts
Normal file
146
cli/src/cmd/process-info.ts
Normal file
@ -0,0 +1,146 @@
|
||||
import { Command, Option, type Usage } from "clipanion"
|
||||
import { RsConfig } from "../utils/config.ts"
|
||||
import { createAuthHeader } from "../utils/auth.ts"
|
||||
import Table from "cli-table3"
|
||||
import chalk from "chalk"
|
||||
import ora from "ora"
|
||||
|
||||
export class ProcessCommand extends Command {
|
||||
static paths = [[`process`], [`ps`]]
|
||||
static usage: Usage = {
|
||||
category: `Networking`,
|
||||
description: `Loading the application of RoadSign Server`,
|
||||
details: `Fetching the configured things amount and other things of a connected server`,
|
||||
examples: [
|
||||
["Fetch app directory from labeled server", `ps <label>`],
|
||||
["Fetch app logs from labeled server", `ps <label> <applicationId> logs`]
|
||||
]
|
||||
}
|
||||
|
||||
label = Option.String({ required: true })
|
||||
applicationId = Option.String({ required: false })
|
||||
subcommand = Option.String({ required: false })
|
||||
loop = Option.Boolean("--loop,--follow,-f", false, { description: "Keep updating the results" })
|
||||
|
||||
async execute() {
|
||||
const config = await RsConfig.getInstance()
|
||||
|
||||
const server = config.config.servers.find(item => item.label === this.label)
|
||||
if (server == null) {
|
||||
this.context.stdout.write(chalk.red(`Server with label ${chalk.bold(this.label)} was not found.\n`))
|
||||
return
|
||||
}
|
||||
|
||||
const spinner = ora(`Fetching stats from server ${this.label}...`).start()
|
||||
const prefStart = performance.now()
|
||||
|
||||
|
||||
if (this.applicationId == null) {
|
||||
try {
|
||||
const res = await fetch(`${server.url}/cgi/applications`, {
|
||||
headers: {
|
||||
Authorization: createAuthHeader(server.credential)
|
||||
}
|
||||
})
|
||||
if (res.status !== 200) {
|
||||
throw new Error(await res.text())
|
||||
}
|
||||
const prefTook = performance.now() - prefStart
|
||||
if (!this.loop) {
|
||||
spinner.succeed(`Fetching completed in ${(prefTook / 1000).toFixed(2)}s 🎉`)
|
||||
}
|
||||
|
||||
const table = new Table({
|
||||
head: ["ID", "Status", "Command"],
|
||||
colWidths: [20, 10, 48]
|
||||
})
|
||||
|
||||
const statusMapping = ["Created", "Starting", "Started", "Exited", "Failed"]
|
||||
|
||||
const data: any = await res.json()
|
||||
for (const app of data) {
|
||||
table.push([app["id"], statusMapping[app["status"]], app["command"].join(" ")])
|
||||
}
|
||||
|
||||
this.context.stdout.write(table.toString())
|
||||
} catch (e) {
|
||||
spinner.fail(`Server with label ${chalk.bold(this.label)} is not running! 😢`)
|
||||
return
|
||||
}
|
||||
} else {
|
||||
switch (this.subcommand) {
|
||||
case "logs":
|
||||
while (true) {
|
||||
try {
|
||||
const res = await fetch(`${server.url}/cgi/applications/${this.applicationId}/logs`, {
|
||||
headers: {
|
||||
Authorization: createAuthHeader(server.credential)
|
||||
}
|
||||
})
|
||||
if (res.status === 404) {
|
||||
spinner.fail(`App with id ${chalk.bold(this.applicationId)} was not found! 😢`)
|
||||
return
|
||||
}
|
||||
if (res.status !== 200) {
|
||||
throw new Error(await res.text())
|
||||
}
|
||||
const prefTook = performance.now() - prefStart
|
||||
if (!this.loop) {
|
||||
spinner.succeed(`Fetching completed in ${(prefTook / 1000).toFixed(2)}s 🎉`)
|
||||
}
|
||||
|
||||
this.context.stdout.write(await res.text())
|
||||
} catch (e) {
|
||||
spinner.fail(`Server with label ${chalk.bold(this.label)} is not running! 😢`)
|
||||
return
|
||||
}
|
||||
|
||||
if (!this.loop) {
|
||||
break
|
||||
} else {
|
||||
spinner.text = "Updating..."
|
||||
await new Promise(resolve => setTimeout(resolve, 3000))
|
||||
this.context.stdout.write("\x1Bc")
|
||||
}
|
||||
}
|
||||
break
|
||||
case "start":
|
||||
case "stop":
|
||||
case "restart":
|
||||
try {
|
||||
const res = await fetch(`${server.url}/cgi/applications/${this.applicationId}/${this.subcommand}`, {
|
||||
method: "POST",
|
||||
headers: {
|
||||
Authorization: createAuthHeader(server.credential)
|
||||
}
|
||||
})
|
||||
if (res.status === 404) {
|
||||
spinner.fail(`App with id ${chalk.bold(this.applicationId)} was not found! 😢`)
|
||||
return
|
||||
}
|
||||
if (res.status === 500) {
|
||||
this.context.stdout.write(chalk.red(`Server failed to perform action for application: ${await res.text()}\n`))
|
||||
spinner.fail(`Failed to perform action ${chalk.bold(this.applicationId)}... 😢`)
|
||||
return
|
||||
}
|
||||
if (res.status !== 200) {
|
||||
throw new Error(await res.text())
|
||||
}
|
||||
const prefTook = performance.now() - prefStart
|
||||
if (!this.loop) {
|
||||
spinner.succeed(`Fetching completed in ${(prefTook / 1000).toFixed(2)}s 🎉`)
|
||||
}
|
||||
} catch (e) {
|
||||
spinner.fail(`Server with label ${chalk.bold(this.label)} is not running! 😢`)
|
||||
return
|
||||
}
|
||||
spinner.succeed(`Action for application ${chalk.bold(this.applicationId)} has been performed. 🎉`)
|
||||
break
|
||||
default:
|
||||
this.context.stdout.write(chalk.red(`Subcommand ${chalk.bold(this.subcommand)} was not found.\n`))
|
||||
}
|
||||
}
|
||||
|
||||
process.exit(0)
|
||||
}
|
||||
}
|
53
cli/src/cmd/reload.ts
Normal file
53
cli/src/cmd/reload.ts
Normal file
@ -0,0 +1,53 @@
|
||||
import { RsConfig } from "../utils/config.ts"
|
||||
import { Command, Option, type Usage } from "clipanion"
|
||||
import chalk from "chalk"
|
||||
import ora from "ora"
|
||||
import * as fs from "node:fs"
|
||||
import { createAuthHeader } from "../utils/auth.ts"
|
||||
import { RsLocalConfig } from "../utils/config-local.ts"
|
||||
|
||||
export class ReloadCommand extends Command {
|
||||
static paths = [[`reload`]]
|
||||
static usage: Usage = {
|
||||
category: `Building`,
|
||||
description: `Reload configuration on RoadSign`,
|
||||
details: `Reload configuration on remote RoadSign to make changes applied.`,
|
||||
examples: [
|
||||
["Reload an connected server", `reload <server>`],
|
||||
]
|
||||
}
|
||||
|
||||
server = Option.String({ required: true })
|
||||
|
||||
async execute() {
|
||||
const cfg = await RsConfig.getInstance()
|
||||
const server = cfg.config.servers.find(item => item.label === this.server)
|
||||
if (server == null) {
|
||||
this.context.stdout.write(chalk.red(`Server with label ${chalk.bold(this.server)} was not found.\n`))
|
||||
return
|
||||
}
|
||||
|
||||
const spinner = ora(`Reloading server ${chalk.bold(this.server)}...`).start()
|
||||
|
||||
const prefStart = performance.now()
|
||||
|
||||
try {
|
||||
const res = await fetch(`${server.url}/cgi/reload`, {
|
||||
method: "POST",
|
||||
headers: {
|
||||
Authorization: createAuthHeader(server.credential)
|
||||
}
|
||||
})
|
||||
if (res.status !== 200) {
|
||||
throw new Error(await res.text())
|
||||
}
|
||||
const prefTook = performance.now() - prefStart
|
||||
spinner.succeed(`Reloading completed in ${(prefTook / 1000).toFixed(2)}s 🎉`)
|
||||
} catch (e) {
|
||||
this.context.stdout.write(`Failed to reload remote: ${e}\n`)
|
||||
spinner.fail(`Server with label ${chalk.bold(this.server)} is not running! 😢`)
|
||||
}
|
||||
|
||||
process.exit(0)
|
||||
}
|
||||
}
|
46
cli/src/cmd/status.ts
Normal file
46
cli/src/cmd/status.ts
Normal file
@ -0,0 +1,46 @@
|
||||
import { Command, Option, type Usage } from "clipanion"
|
||||
import { RsConfig } from "../utils/config.ts"
|
||||
import { createAuthHeader } from "../utils/auth.ts"
|
||||
import chalk from "chalk"
|
||||
import ora from "ora"
|
||||
|
||||
export class StatusCommand extends Command {
|
||||
static paths = [[`status`]]
|
||||
static usage: Usage = {
|
||||
category: `Networking`,
|
||||
description: `Check the status of RoadSign Sideload Service`,
|
||||
details: `Check the running status of a connected server`,
|
||||
examples: [["Check the status of labeled server", `status <label>`]]
|
||||
}
|
||||
|
||||
label = Option.String({ required: true })
|
||||
|
||||
async execute() {
|
||||
const config = await RsConfig.getInstance()
|
||||
|
||||
const server = config.config.servers.find(item => item.label === this.label)
|
||||
if (server == null) {
|
||||
this.context.stdout.write(chalk.red(`Server with label ${chalk.bold(this.label)} was not found.\n`))
|
||||
return
|
||||
}
|
||||
|
||||
const spinner = ora(`Checking status of ${this.label}...`).start()
|
||||
|
||||
try {
|
||||
const res = await fetch(`${server.url}/cgi/metadata`, {
|
||||
headers: {
|
||||
Authorization: createAuthHeader(server.credential)
|
||||
}
|
||||
})
|
||||
if (res.status !== 200) {
|
||||
throw new Error(await res.text())
|
||||
}
|
||||
spinner.succeed(`Server with label ${chalk.bold(this.label)} is up and running! 🎉`)
|
||||
} catch (e) {
|
||||
spinner.fail(`Server with label ${chalk.bold(this.label)} is not running! 😢`)
|
||||
return
|
||||
}
|
||||
|
||||
process.exit(0)
|
||||
}
|
||||
}
|
87
cli/src/cmd/sync.ts
Normal file
87
cli/src/cmd/sync.ts
Normal file
@ -0,0 +1,87 @@
|
||||
import { RsConfig } from "../utils/config.ts"
|
||||
import { Command, Option, type Usage } from "clipanion"
|
||||
import chalk from "chalk"
|
||||
import ora from "ora"
|
||||
import * as fs from "node:fs"
|
||||
import { createAuthHeader } from "../utils/auth.ts"
|
||||
import { RsLocalConfig } from "../utils/config-local.ts"
|
||||
|
||||
export class SyncCommand extends Command {
|
||||
static paths = [[`sync`]]
|
||||
static usage: Usage = {
|
||||
category: `Building`,
|
||||
description: `Sync configuration to RoadSign over Sideload`,
|
||||
details: `Update remote RoadSign configuration with local ones.`,
|
||||
examples: [
|
||||
["Sync to RoadSign", `sync <server> <region> <file>`],
|
||||
["Sync to RoadSign with .roadsignrc file", `sync <server>`]
|
||||
]
|
||||
}
|
||||
|
||||
server = Option.String({ required: true })
|
||||
region = Option.String({ required: false })
|
||||
input = Option.String({ required: false })
|
||||
|
||||
async sync(serverLabel: string, region: string, input: string) {
|
||||
const cfg = await RsConfig.getInstance()
|
||||
const server = cfg.config.servers.find(item => item.label === serverLabel)
|
||||
if (server == null) {
|
||||
this.context.stdout.write(chalk.red(`Server with label ${chalk.bold(this.server)} was not found.\n`))
|
||||
return
|
||||
}
|
||||
|
||||
if (!fs.existsSync(input)) {
|
||||
this.context.stdout.write(chalk.red(`Input file ${chalk.bold(this.input)} was not found.\n`))
|
||||
return
|
||||
}
|
||||
if (!fs.statSync(input).isFile()) {
|
||||
this.context.stdout.write(chalk.red(`Input file ${chalk.bold(this.input)} is not a file.\n`))
|
||||
return
|
||||
}
|
||||
|
||||
const spinner = ora(`Syncing ${chalk.bold(region)} to ${chalk.bold(this.server)}...`).start()
|
||||
|
||||
const prefStart = performance.now()
|
||||
|
||||
try {
|
||||
const res = await fetch(`${server.url}/webhooks/sync/${region}`, {
|
||||
method: "PUT",
|
||||
body: fs.readFileSync(input, "utf8"),
|
||||
headers: {
|
||||
Authorization: createAuthHeader(server.credential)
|
||||
}
|
||||
})
|
||||
if (res.status !== 200) {
|
||||
throw new Error(await res.text())
|
||||
}
|
||||
const prefTook = performance.now() - prefStart
|
||||
spinner.succeed(`Syncing completed in ${(prefTook / 1000).toFixed(2)}s 🎉`)
|
||||
} catch (e) {
|
||||
this.context.stdout.write(`Failed to sync to remote: ${e}\n`)
|
||||
spinner.fail(`Server with label ${chalk.bold(this.server)} is not running! 😢`)
|
||||
}
|
||||
}
|
||||
|
||||
async execute() {
|
||||
if (this.region && this.input) {
|
||||
await this.sync(this.server, this.region, this.input)
|
||||
} else {
|
||||
let localCfg: RsLocalConfig
|
||||
try {
|
||||
localCfg = await RsLocalConfig.getInstance()
|
||||
} catch (e) {
|
||||
this.context.stdout.write(chalk.red(`Unable to load .roadsignrc: ${e}\n`))
|
||||
return
|
||||
}
|
||||
|
||||
if (!localCfg.config.sync) {
|
||||
this.context.stdout.write(chalk.red(`No sync configuration found in .roadsignrc, exiting...\n`))
|
||||
return
|
||||
}
|
||||
|
||||
await this.sync(this.server, localCfg.config.sync.region, localCfg.config.sync.configPath)
|
||||
}
|
||||
|
||||
process.exit(0)
|
||||
}
|
||||
}
|
4
cli/src/utils/auth.ts
Normal file
4
cli/src/utils/auth.ts
Normal file
@ -0,0 +1,4 @@
|
||||
export function createAuthHeader(password: string, username: string = "RoadSign CLI") {
|
||||
const credentials = Buffer.from(`${username}:${password}`).toString("base64")
|
||||
return `Basic ${credentials}`
|
||||
}
|
67
cli/src/utils/config-local.ts
Normal file
67
cli/src/utils/config-local.ts
Normal file
@ -0,0 +1,67 @@
|
||||
import * as path from "node:path"
|
||||
import * as fs from "node:fs"
|
||||
|
||||
interface RsLocalConfigData {
|
||||
sync?: RsLocalConfigSyncData
|
||||
deployments?: RsLocalConfigDeploymentData[]
|
||||
}
|
||||
|
||||
interface RsLocalConfigSyncData {
|
||||
configPath: string
|
||||
region: string
|
||||
}
|
||||
|
||||
interface RsLocalConfigDeploymentData {
|
||||
path: string
|
||||
region: string
|
||||
site: string
|
||||
postDeploy?: RsLocalConfigDeploymentPostActionData
|
||||
autoBuild?: RsLocalConfigDeploymentAutoBuildData
|
||||
}
|
||||
|
||||
interface RsLocalConfigDeploymentAutoBuildData {
|
||||
command: string
|
||||
environment?: string[]
|
||||
}
|
||||
|
||||
interface RsLocalConfigDeploymentPostActionData {
|
||||
command?: string
|
||||
scriptPath?: string
|
||||
environment?: string[]
|
||||
}
|
||||
|
||||
class RsLocalConfig {
|
||||
private static instance: RsLocalConfig
|
||||
|
||||
public config: RsLocalConfigData = {}
|
||||
|
||||
private constructor() {
|
||||
}
|
||||
|
||||
public static async getInstance(): Promise<RsLocalConfig> {
|
||||
if (!RsLocalConfig.instance) {
|
||||
RsLocalConfig.instance = new RsLocalConfig()
|
||||
await RsLocalConfig.instance.readConfig()
|
||||
}
|
||||
return RsLocalConfig.instance
|
||||
}
|
||||
|
||||
public async readConfig() {
|
||||
const basepath = process.cwd()
|
||||
const filepath = path.join(basepath, ".roadsignrc")
|
||||
if (!fs.existsSync(filepath)) {
|
||||
throw new Error(`.roadsignrc file was not found at ${filepath}`)
|
||||
}
|
||||
|
||||
const data = fs.readFileSync(filepath, "utf8")
|
||||
this.config = JSON.parse(data)
|
||||
}
|
||||
|
||||
public async writeConfig() {
|
||||
const basepath = process.cwd()
|
||||
const filepath = path.join(basepath, ".roadsignrc")
|
||||
fs.writeFileSync(filepath, JSON.stringify(this.config))
|
||||
}
|
||||
}
|
||||
|
||||
export { RsLocalConfig, type RsLocalConfigData, type RsLocalConfigDeploymentPostActionData }
|
51
cli/src/utils/config.ts
Normal file
51
cli/src/utils/config.ts
Normal file
@ -0,0 +1,51 @@
|
||||
import * as os from "node:os"
|
||||
import * as path from "node:path"
|
||||
import * as fs from "node:fs"
|
||||
|
||||
interface RsConfigData {
|
||||
servers: RsConfigServerData[]
|
||||
}
|
||||
|
||||
interface RsConfigServerData {
|
||||
label: string
|
||||
url: string
|
||||
credential: string
|
||||
}
|
||||
|
||||
class RsConfig {
|
||||
private static instance: RsConfig
|
||||
|
||||
public config: RsConfigData = {
|
||||
servers: []
|
||||
}
|
||||
|
||||
private constructor() {
|
||||
}
|
||||
|
||||
public static async getInstance(): Promise<RsConfig> {
|
||||
if (!RsConfig.instance) {
|
||||
RsConfig.instance = new RsConfig()
|
||||
await RsConfig.instance.readConfig()
|
||||
}
|
||||
return RsConfig.instance
|
||||
}
|
||||
|
||||
public async readConfig() {
|
||||
const basepath = os.homedir()
|
||||
const filepath = path.join(basepath, ".roadsignrc")
|
||||
if (!fs.existsSync(filepath)) {
|
||||
fs.writeFileSync(filepath, JSON.stringify(this.config))
|
||||
}
|
||||
|
||||
const data = fs.readFileSync(filepath, "utf8")
|
||||
this.config = JSON.parse(data)
|
||||
}
|
||||
|
||||
public async writeConfig() {
|
||||
const basepath = os.homedir()
|
||||
const filepath = path.join(basepath, ".roadsignrc")
|
||||
fs.writeFileSync(filepath, JSON.stringify(this.config))
|
||||
}
|
||||
}
|
||||
|
||||
export { RsConfig, type RsConfigData, type RsConfigServerData }
|
9
cli/test/static-files.toml
Normal file
9
cli/test/static-files.toml
Normal file
@ -0,0 +1,9 @@
|
||||
id = "static-files-num2"
|
||||
|
||||
[[locations]]
|
||||
id = "static-files-loc-num2"
|
||||
hosts = ["127.0.0.1:8000"]
|
||||
paths = ["/"]
|
||||
[[locations.destinations]]
|
||||
id = "static-files-des-num2"
|
||||
uri = "files://../data/static-files"
|
12
cli/test/static-files/index.html
Normal file
12
cli/test/static-files/index.html
Normal file
@ -0,0 +1,12 @@
|
||||
<!DOCTYPE html>
|
||||
<html>
|
||||
<head>
|
||||
<meta charset="utf-8">
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1">
|
||||
<title>Hello, World!</title>
|
||||
</head>
|
||||
<body>
|
||||
<p>Hello, there!</p>
|
||||
<p>Here's the newer version of static files hosted by roadsign!</p>
|
||||
</body>
|
||||
</html>
|
27
cli/tsconfig.json
Normal file
27
cli/tsconfig.json
Normal file
@ -0,0 +1,27 @@
|
||||
{
|
||||
"compilerOptions": {
|
||||
"lib": ["ESNext"],
|
||||
"target": "ESNext",
|
||||
"module": "NodeNext",
|
||||
"allowJs": true,
|
||||
|
||||
// Bundler mode
|
||||
"esModuleInterop": true,
|
||||
"moduleResolution": "NodeNext",
|
||||
"allowImportingTsExtensions": true,
|
||||
"verbatimModuleSyntax": true,
|
||||
"noEmit": true,
|
||||
"resolveJsonModule": true,
|
||||
|
||||
// Best practices
|
||||
"strict": true,
|
||||
"skipLibCheck": true,
|
||||
"noFallthroughCasesInSwitch": true,
|
||||
|
||||
// Some stricter flags (disabled by default)
|
||||
"noUnusedLocals": false,
|
||||
"noUnusedParameters": false,
|
||||
"noPropertyAccessFromIndexSignature": false,
|
||||
"useUnknownInCatchVariables": false,
|
||||
}
|
||||
}
|
32
config/example.toml
Normal file
32
config/example.toml
Normal file
@ -0,0 +1,32 @@
|
||||
id = "example-region"
|
||||
|
||||
# [[locations]]
|
||||
# id = "example-websocket"
|
||||
# host = ["localhost:8000"]
|
||||
# path = ["/ws"]
|
||||
# [[locations.destinations]]
|
||||
# id = "example-websocket-destination"
|
||||
# uri = "http://localhost:8765"
|
||||
|
||||
# [[locations]]
|
||||
# id = "example-warden"
|
||||
# host = ["localhost:4321"]
|
||||
# path = ["/"]
|
||||
# [[locations.destinations]]
|
||||
# id = "example-warden-destination"
|
||||
# uri = "http://localhost:4321"
|
||||
|
||||
# [[applications]]
|
||||
# id = "example-warden-app"
|
||||
# workdir = "test/data/warden"
|
||||
# command = ["node", "dist/server/entry.mjs"]
|
||||
# environment = ["PUBLIC_CMS=https://smartsheep.studio"]
|
||||
|
||||
[[locations]]
|
||||
id = "example"
|
||||
host = ["localhost:8000"]
|
||||
path = ["/"]
|
||||
[[locations.destinations]]
|
||||
id = "example-destination"
|
||||
uri = "https://example.com"
|
||||
helmet = { x_frame_options = "SAMEORIGIN" }
|
63
go.mod
Normal file
@ -0,0 +1,63 @@
module git.solsynth.dev/goatworks/roadsign

go 1.21.4

require (
	github.com/fasthttp/websocket v1.5.7
	github.com/gofiber/fiber/v2 v2.52.5
	github.com/gofiber/template/html/v2 v2.1.0
	github.com/google/uuid v1.6.0
	github.com/json-iterator/go v1.1.12
	github.com/rs/zerolog v1.31.0
	github.com/samber/lo v1.38.1
	github.com/saracen/fastzip v0.1.11
	github.com/spf13/viper v1.17.0
	github.com/urfave/cli/v2 v2.26.0
	github.com/valyala/fasthttp v1.56.0
)

require (
	github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
	github.com/gofiber/template v1.8.2 // indirect
	github.com/gofiber/utils v1.1.0 // indirect
	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
	github.com/modern-go/reflect2 v1.0.2 // indirect
	github.com/russross/blackfriday/v2 v2.1.0 // indirect
	github.com/saracen/zipextra v0.0.0-20220303013732-0187cb0159ea // indirect
	github.com/savsgio/gotils v0.0.0-20230208104028-c358bd845dee // indirect
	github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
	golang.org/x/net v0.29.0 // indirect
	golang.org/x/sync v0.8.0 // indirect
)

require (
	github.com/andybalholm/brotli v1.1.0 // indirect
	github.com/fsnotify/fsnotify v1.6.0 // indirect
	github.com/hashicorp/hcl v1.0.0 // indirect
	github.com/klauspost/compress v1.17.10 // indirect
	github.com/magiconair/properties v1.8.7 // indirect
	github.com/mattn/go-colorable v0.1.13 // indirect
	github.com/mattn/go-isatty v0.0.20 // indirect
	github.com/mattn/go-runewidth v0.0.16 // indirect
	github.com/mitchellh/mapstructure v1.5.0 // indirect
	github.com/pelletier/go-toml/v2 v2.1.1
	github.com/philhofer/fwd v1.1.2 // indirect
	github.com/rivo/uniseg v0.4.7 // indirect
	github.com/sagikazarmark/locafero v0.3.0 // indirect
	github.com/sagikazarmark/slog-shim v0.1.0 // indirect
	github.com/sourcegraph/conc v0.3.0 // indirect
	github.com/spf13/afero v1.11.0 // indirect
	github.com/spf13/cast v1.5.1 // indirect
	github.com/spf13/pflag v1.0.5 // indirect
	github.com/subosito/gotenv v1.6.0 // indirect
	github.com/tinylib/msgp v1.1.8 // indirect
	github.com/valyala/bytebufferpool v1.0.0 // indirect
	github.com/valyala/tcplisten v1.0.0 // indirect
	go.uber.org/atomic v1.9.0 // indirect
	go.uber.org/multierr v1.9.0 // indirect
	golang.org/x/exp v0.0.0-20230905200255-921286631fa9 // indirect
	golang.org/x/sys v0.25.0 // indirect
	golang.org/x/text v0.18.0 // indirect
	gopkg.in/ini.v1 v1.67.0 // indirect
	gopkg.in/yaml.v3 v3.0.1 // indirect
)
174
go.sum
Normal file
@ -0,0 +1,174 @@
|
||||
github.com/andybalholm/brotli v1.1.0 h1:eLKJA0d02Lf0mVpIDgYnqXcUn0GqVmEFny3VuID1U3M=
|
||||
github.com/andybalholm/brotli v1.1.0/go.mod h1:sms7XGricyQI9K10gOSf56VKKWS4oLer58Q+mhRPtnY=
|
||||
github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/fasthttp/websocket v1.5.7 h1:0a6o2OfeATvtGgoMKleURhLT6JqWPg7fYfWnH4KHau4=
|
||||
github.com/fasthttp/websocket v1.5.7/go.mod h1:bC4fxSono9czeXHQUVKxsC0sNjbm7lPJR04GDFqClfU=
|
||||
github.com/frankban/quicktest v1.14.4 h1:g2rn0vABPOOXmZUj+vbmUp0lPoXEMuhTpIluN0XL9UY=
|
||||
github.com/frankban/quicktest v1.14.4/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
|
||||
github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY=
|
||||
github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw=
|
||||
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
|
||||
github.com/gofiber/fiber/v2 v2.52.5 h1:tWoP1MJQjGEe4GB5TUGOi7P2E0ZMMRx5ZTG4rT+yGMo=
|
||||
github.com/gofiber/fiber/v2 v2.52.5/go.mod h1:KEOE+cXMhXG0zHc9d8+E38hoX+ZN7bhOtgeF2oT6jrQ=
|
||||
github.com/gofiber/template v1.8.2 h1:PIv9s/7Uq6m+Fm2MDNd20pAFFKt5wWs7ZBd8iV9pWwk=
|
||||
github.com/gofiber/template v1.8.2/go.mod h1:bs/2n0pSNPOkRa5VJ8zTIvedcI/lEYxzV3+YPXdBvq8=
|
||||
github.com/gofiber/template/html/v2 v2.1.0 h1:FjwzqhhdJpnhyCvav60Z1ytnBqOUr5sGO/aTeob9/ng=
|
||||
github.com/gofiber/template/html/v2 v2.1.0/go.mod h1:txXsRQN/G7Fr2cqGfr6zhVHgreCfpsBS+9+DJyrddJc=
|
||||
github.com/gofiber/utils v1.1.0 h1:vdEBpn7AzIUJRhe+CiTOJdUcTg4Q9RK+pEa0KPbLdrM=
|
||||
github.com/gofiber/utils v1.1.0/go.mod h1:poZpsnhBykfnY1Mc0KeEa6mSHrS3dV0+oBWyeQmb2e0=
|
||||
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
|
||||
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
||||
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
|
||||
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
|
||||
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
|
||||
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
|
||||
github.com/klauspost/compress v1.17.10 h1:oXAz+Vh0PMUvJczoi+flxpnBEPxoER1IaAnU/NMPtT0=
|
||||
github.com/klauspost/compress v1.17.10/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0=
|
||||
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
||||
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
|
||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||
github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY=
|
||||
github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0=
|
||||
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
|
||||
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
|
||||
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
|
||||
github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
||||
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
|
||||
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
||||
github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc=
|
||||
github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
|
||||
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
|
||||
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
|
||||
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
|
||||
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
|
||||
github.com/pelletier/go-toml/v2 v2.1.1 h1:LWAJwfNvjQZCFIDKWYQaM62NcYeYViCmWIwmOStowAI=
|
||||
github.com/pelletier/go-toml/v2 v2.1.1/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc=
|
||||
github.com/philhofer/fwd v1.1.2 h1:bnDivRJ1EWPjUIRXV5KfORO897HTbpFAQddBdE8t7Gw=
|
||||
github.com/philhofer/fwd v1.1.2/go.mod h1:qkPdfjR2SIEbspLqpe1tO4n5yICnr2DY7mqEx2tUTP0=
|
||||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
|
||||
github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ=
|
||||
github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
|
||||
github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8=
|
||||
github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
|
||||
github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=
|
||||
github.com/rs/zerolog v1.31.0 h1:FcTR3NnLWW+NnTwwhFWiJSZr4ECLpqCm6QsEnyvbV4A=
|
||||
github.com/rs/zerolog v1.31.0/go.mod h1:/7mN4D5sKwJLZQ2b/znpjC3/GQWY/xaDXUM0kKWRHss=
|
||||
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
|
||||
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/sagikazarmark/locafero v0.3.0 h1:zT7VEGWC2DTflmccN/5T1etyKvxSxpHsjb9cJvm4SvQ=
|
||||
github.com/sagikazarmark/locafero v0.3.0/go.mod h1:w+v7UsPNFwzF1cHuOajOOzoq4U7v/ig1mpRjqV+Bu1U=
|
||||
github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE=
|
||||
github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ=
|
||||
github.com/samber/lo v1.38.1 h1:j2XEAqXKb09Am4ebOg31SpvzUTTs6EN3VfgeLUhPdXM=
|
||||
github.com/samber/lo v1.38.1/go.mod h1:+m/ZKRl6ClXCE2Lgf3MsQlWfh4bn1bz6CXEOxnEXnEA=
|
||||
github.com/saracen/fastzip v0.1.11 h1:NnExbTEJbya7148cov09BCxwfur9tQ5BQ1QyQH6XleA=
|
||||
github.com/saracen/fastzip v0.1.11/go.mod h1:/lN5BiU451/OZMS+hfhVsSDj/RNrxYmO9EYxCtMrFrY=
|
||||
github.com/saracen/zipextra v0.0.0-20220303013732-0187cb0159ea h1:8czYLkvzZRE+AElIQeDffQdgR+CC3wKEFILYU/1PeX4=
|
||||
github.com/saracen/zipextra v0.0.0-20220303013732-0187cb0159ea/go.mod h1:hnzuad9d2wdd3z8fC6UouHQK5qZxqv3F/E6MMzXc7q0=
|
||||
github.com/savsgio/gotils v0.0.0-20230208104028-c358bd845dee h1:8Iv5m6xEo1NR1AvpV+7XmhI4r39LGNzwUL4YpMuL5vk=
|
||||
github.com/savsgio/gotils v0.0.0-20230208104028-c358bd845dee/go.mod h1:qwtSXrKuJh/zsFQ12yEE89xfCrGKK63Rr7ctU/uCo4g=
|
||||
github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo=
|
||||
github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0=
|
||||
github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8=
|
||||
github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY=
|
||||
github.com/spf13/cast v1.5.1 h1:R+kOtfhWQE6TVQzY+4D7wJLBgkdVasCEFxSUBYBYIlA=
|
||||
github.com/spf13/cast v1.5.1/go.mod h1:b9PdjNptOpzXr7Rq1q9gJML/2cdGQAo69NKzQ10KN48=
|
||||
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
|
||||
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||
github.com/spf13/viper v1.17.0 h1:I5txKw7MJasPL/BrfkbA0Jyo/oELqVmux4pR/UxOMfI=
|
||||
github.com/spf13/viper v1.17.0/go.mod h1:BmMMMLQXSbcHK6KAOiFLz0l5JHrU89OdIRHvsk0+yVI=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
|
||||
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||
github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
|
||||
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
|
||||
github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8=
|
||||
github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU=
|
||||
github.com/tinylib/msgp v1.1.8 h1:FCXC1xanKO4I8plpHGH2P7koL/RzZs12l/+r7vakfm0=
|
||||
github.com/tinylib/msgp v1.1.8/go.mod h1:qkpG+2ldGg4xRFmx+jfTvZPxfGFhi64BcnL9vkCm/Tw=
|
||||
github.com/urfave/cli/v2 v2.26.0 h1:3f3AMg3HpThFNT4I++TKOejZO8yU55t3JnnSr4S4QEI=
|
||||
github.com/urfave/cli/v2 v2.26.0/go.mod h1:8qnjx1vcq5s2/wpsqoZFndg2CE5tNFyrTvS6SinrnYQ=
|
||||
github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
|
||||
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
|
||||
github.com/valyala/fasthttp v1.56.0 h1:bEZdJev/6LCBlpdORfrLu/WOZXXxvrUQSiyniuaoW8U=
|
||||
github.com/valyala/fasthttp v1.56.0/go.mod h1:sReBt3XZVnudxuLOx4J/fMrJVorWRiWY2koQKgABiVI=
|
||||
github.com/valyala/tcplisten v1.0.0 h1:rBHj/Xf+E1tRGZyWIWwJDiRY0zc1Js+CV5DqwacVSA8=
|
||||
github.com/valyala/tcplisten v1.0.0/go.mod h1:T0xQ8SeCZGxckz9qRXTfG43PvQ/mcWh7FwZEA7Ioqkc=
|
||||
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 h1:bAn7/zixMGCfxrRTfdpNzjtPYqr8smhKouy9mxVdGPU=
|
||||
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsrSj3ccvlPHLoLsHnpR27oXr4ZE984MbSER8=
|
||||
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
|
||||
go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE=
|
||||
go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
|
||||
go.uber.org/multierr v1.9.0 h1:7fIwc/ZtS0q++VgcfqFDxSBZVv/Xo49/SYnDFupUwlI=
|
||||
go.uber.org/multierr v1.9.0/go.mod h1:X2jQV1h+kxSjClGpnseKVIxpmcjrj7MNnI0bnlfKTVQ=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/exp v0.0.0-20230905200255-921286631fa9 h1:GoHiUyI/Tp2nVkLI2mCxVkOjsbSXD66ic0XW0js0R9g=
|
||||
golang.org/x/exp v0.0.0-20230905200255-921286631fa9/go.mod h1:S2oDrQGGwySpoQPVqRShND87VCbxmc6bL1Yd2oYrm6k=
|
||||
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
|
||||
golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
||||
golang.org/x/net v0.3.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=
|
||||
golang.org/x/net v0.29.0 h1:5ORfpBpCs4HzDYoodCDBbwHzdR5UrLBZ3sOnUJmFoHo=
|
||||
golang.org/x/net v0.29.0/go.mod h1:gLkgy8jTGERgjzMic6DS9+SP0ajcu6Xu3Orq/SpETg0=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ=
|
||||
golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34=
|
||||
golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||
golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||
golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224=
|
||||
golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
|
||||
golang.org/x/tools v0.4.0/go.mod h1:UE5sM2OK9E/d67R0ANs2xJizIymRP5gJU295PvKXxjQ=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
|
||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA=
|
||||
gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
661
license
Normal file
@ -0,0 +1,661 @@
|
||||
GNU AFFERO GENERAL PUBLIC LICENSE
|
||||
Version 3, 19 November 2007
|
||||
|
||||
Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
|
||||
Everyone is permitted to copy and distribute verbatim copies
|
||||
of this license document, but changing it is not allowed.
|
||||
|
||||
Preamble
|
||||
|
||||
The GNU Affero General Public License is a free, copyleft license for
|
||||
software and other kinds of works, specifically designed to ensure
|
||||
cooperation with the community in the case of network server software.
|
||||
|
||||
The licenses for most software and other practical works are designed
|
||||
to take away your freedom to share and change the works. By contrast,
|
||||
our General Public Licenses are intended to guarantee your freedom to
|
||||
share and change all versions of a program--to make sure it remains free
|
||||
software for all its users.
|
||||
|
||||
When we speak of free software, we are referring to freedom, not
|
||||
price. Our General Public Licenses are designed to make sure that you
|
||||
have the freedom to distribute copies of free software (and charge for
|
||||
them if you wish), that you receive source code or can get it if you
|
||||
want it, that you can change the software or use pieces of it in new
|
||||
free programs, and that you know you can do these things.
|
||||
|
||||
Developers that use our General Public Licenses protect your rights
|
||||
with two steps: (1) assert copyright on the software, and (2) offer
|
||||
you this License which gives you legal permission to copy, distribute
|
||||
and/or modify the software.
|
||||
|
||||
A secondary benefit of defending all users' freedom is that
|
||||
improvements made in alternate versions of the program, if they
|
||||
receive widespread use, become available for other developers to
|
||||
incorporate. Many developers of free software are heartened and
|
||||
encouraged by the resulting cooperation. However, in the case of
|
||||
software used on network servers, this result may fail to come about.
|
||||
The GNU General Public License permits making a modified version and
|
||||
letting the public access it on a server without ever releasing its
|
||||
source code to the public.
|
||||
|
||||
The GNU Affero General Public License is designed specifically to
|
||||
ensure that, in such cases, the modified source code becomes available
|
||||
to the community. It requires the operator of a network server to
|
||||
provide the source code of the modified version running there to the
|
||||
users of that server. Therefore, public use of a modified version, on
|
||||
a publicly accessible server, gives the public access to the source
|
||||
code of the modified version.
|
||||
|
||||
An older license, called the Affero General Public License and
|
||||
published by Affero, was designed to accomplish similar goals. This is
|
||||
a different license, not a version of the Affero GPL, but Affero has
|
||||
released a new version of the Affero GPL which permits relicensing under
|
||||
this license.
|
||||
|
||||
The precise terms and conditions for copying, distribution and
|
||||
modification follow.
|
||||
|
||||
TERMS AND CONDITIONS
|
||||
|
||||
0. Definitions.
|
||||
|
||||
"This License" refers to version 3 of the GNU Affero General Public License.
|
||||
|
||||
"Copyright" also means copyright-like laws that apply to other kinds of
|
||||
works, such as semiconductor masks.
|
||||
|
||||
"The Program" refers to any copyrightable work licensed under this
|
||||
License. Each licensee is addressed as "you". "Licensees" and
|
||||
"recipients" may be individuals or organizations.
|
||||
|
||||
To "modify" a work means to copy from or adapt all or part of the work
|
||||
in a fashion requiring copyright permission, other than the making of an
|
||||
exact copy. The resulting work is called a "modified version" of the
|
||||
earlier work or a work "based on" the earlier work.
|
||||
|
||||
A "covered work" means either the unmodified Program or a work based
|
||||
on the Program.
|
||||
|
||||
To "propagate" a work means to do anything with it that, without
|
||||
permission, would make you directly or secondarily liable for
|
||||
infringement under applicable copyright law, except executing it on a
|
||||
computer or modifying a private copy. Propagation includes copying,
|
||||
distribution (with or without modification), making available to the
|
||||
public, and in some countries other activities as well.
|
||||
|
||||
To "convey" a work means any kind of propagation that enables other
|
||||
parties to make or receive copies. Mere interaction with a user through
|
||||
a computer network, with no transfer of a copy, is not conveying.
|
||||
|
||||
An interactive user interface displays "Appropriate Legal Notices"
|
||||
to the extent that it includes a convenient and prominently visible
|
||||
feature that (1) displays an appropriate copyright notice, and (2)
|
||||
tells the user that there is no warranty for the work (except to the
|
||||
extent that warranties are provided), that licensees may convey the
|
||||
work under this License, and how to view a copy of this License. If
|
||||
the interface presents a list of user commands or options, such as a
|
||||
menu, a prominent item in the list meets this criterion.
|
||||
|
||||
1. Source Code.
|
||||
|
||||
The "source code" for a work means the preferred form of the work
|
||||
for making modifications to it. "Object code" means any non-source
|
||||
form of a work.
|
||||
|
||||
A "Standard Interface" means an interface that either is an official
|
||||
standard defined by a recognized standards body, or, in the case of
|
||||
interfaces specified for a particular programming language, one that
|
||||
is widely used among developers working in that language.
|
||||
|
||||
The "System Libraries" of an executable work include anything, other
|
||||
than the work as a whole, that (a) is included in the normal form of
|
||||
packaging a Major Component, but which is not part of that Major
|
||||
Component, and (b) serves only to enable use of the work with that
|
||||
Major Component, or to implement a Standard Interface for which an
|
||||
implementation is available to the public in source code form. A
|
||||
"Major Component", in this context, means a major essential component
|
||||
(kernel, window system, and so on) of the specific operating system
|
||||
(if any) on which the executable work runs, or a compiler used to
|
||||
produce the work, or an object code interpreter used to run it.
|
||||
|
||||
The "Corresponding Source" for a work in object code form means all
|
||||
the source code needed to generate, install, and (for an executable
|
||||
work) run the object code and to modify the work, including scripts to
|
||||
control those activities. However, it does not include the work's
|
||||
System Libraries, or general-purpose tools or generally available free
|
||||
programs which are used unmodified in performing those activities but
|
||||
which are not part of the work. For example, Corresponding Source
|
||||
includes interface definition files associated with source files for
|
||||
the work, and the source code for shared libraries and dynamically
|
||||
linked subprograms that the work is specifically designed to require,
|
||||
such as by intimate data communication or control flow between those
|
||||
subprograms and other parts of the work.
|
||||
|
||||
The Corresponding Source need not include anything that users
|
||||
can regenerate automatically from other parts of the Corresponding
|
||||
Source.
|
||||
|
||||
The Corresponding Source for a work in source code form is that
|
||||
same work.
|
||||
|
||||
2. Basic Permissions.
|
||||
|
||||
All rights granted under this License are granted for the term of
|
||||
copyright on the Program, and are irrevocable provided the stated
|
||||
conditions are met. This License explicitly affirms your unlimited
|
||||
permission to run the unmodified Program. The output from running a
|
||||
covered work is covered by this License only if the output, given its
|
||||
content, constitutes a covered work. This License acknowledges your
|
||||
rights of fair use or other equivalent, as provided by copyright law.
|
||||
|
||||
You may make, run and propagate covered works that you do not
|
||||
convey, without conditions so long as your license otherwise remains
|
||||
in force. You may convey covered works to others for the sole purpose
|
||||
of having them make modifications exclusively for you, or provide you
|
||||
with facilities for running those works, provided that you comply with
|
||||
the terms of this License in conveying all material for which you do
|
||||
not control copyright. Those thus making or running the covered works
|
||||
for you must do so exclusively on your behalf, under your direction
|
||||
and control, on terms that prohibit them from making any copies of
|
||||
your copyrighted material outside their relationship with you.
|
||||
|
||||
Conveying under any other circumstances is permitted solely under
|
||||
the conditions stated below. Sublicensing is not allowed; section 10
|
||||
makes it unnecessary.
|
||||
|
||||
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
|
||||
|
||||
No covered work shall be deemed part of an effective technological
|
||||
measure under any applicable law fulfilling obligations under article
|
||||
11 of the WIPO copyright treaty adopted on 20 December 1996, or
|
||||
similar laws prohibiting or restricting circumvention of such
|
||||
measures.
|
||||
|
||||
When you convey a covered work, you waive any legal power to forbid
|
||||
circumvention of technological measures to the extent such circumvention
|
||||
is effected by exercising rights under this License with respect to
|
||||
the covered work, and you disclaim any intention to limit operation or
|
||||
modification of the work as a means of enforcing, against the work's
|
||||
users, your or third parties' legal rights to forbid circumvention of
|
||||
technological measures.
|
||||
|
||||
4. Conveying Verbatim Copies.
|
||||
|
||||
You may convey verbatim copies of the Program's source code as you
|
||||
receive it, in any medium, provided that you conspicuously and
|
||||
appropriately publish on each copy an appropriate copyright notice;
|
||||
keep intact all notices stating that this License and any
|
||||
non-permissive terms added in accord with section 7 apply to the code;
|
||||
keep intact all notices of the absence of any warranty; and give all
|
||||
recipients a copy of this License along with the Program.
|
||||
|
||||
You may charge any price or no price for each copy that you convey,
|
||||
and you may offer support or warranty protection for a fee.
|
||||
|
||||
5. Conveying Modified Source Versions.
|
||||
|
||||
You may convey a work based on the Program, or the modifications to
|
||||
produce it from the Program, in the form of source code under the
|
||||
terms of section 4, provided that you also meet all of these conditions:
|
||||
|
||||
a) The work must carry prominent notices stating that you modified
|
||||
it, and giving a relevant date.
|
||||
|
||||
b) The work must carry prominent notices stating that it is
|
||||
released under this License and any conditions added under section
|
||||
7. This requirement modifies the requirement in section 4 to
|
||||
"keep intact all notices".
|
||||
|
||||
c) You must license the entire work, as a whole, under this
|
||||
License to anyone who comes into possession of a copy. This
|
||||
License will therefore apply, along with any applicable section 7
|
||||
additional terms, to the whole of the work, and all its parts,
|
||||
regardless of how they are packaged. This License gives no
|
||||
permission to license the work in any other way, but it does not
|
||||
invalidate such permission if you have separately received it.
|
||||
|
||||
d) If the work has interactive user interfaces, each must display
|
||||
Appropriate Legal Notices; however, if the Program has interactive
|
||||
interfaces that do not display Appropriate Legal Notices, your
|
||||
work need not make them do so.
|
||||
|
||||
A compilation of a covered work with other separate and independent
|
||||
works, which are not by their nature extensions of the covered work,
|
||||
and which are not combined with it such as to form a larger program,
|
||||
in or on a volume of a storage or distribution medium, is called an
|
||||
"aggregate" if the compilation and its resulting copyright are not
|
||||
used to limit the access or legal rights of the compilation's users
|
||||
beyond what the individual works permit. Inclusion of a covered work
|
||||
in an aggregate does not cause this License to apply to the other
|
||||
parts of the aggregate.
|
||||
|
||||
6. Conveying Non-Source Forms.
|
||||
|
||||
You may convey a covered work in object code form under the terms
|
||||
of sections 4 and 5, provided that you also convey the
|
||||
machine-readable Corresponding Source under the terms of this License,
|
||||
in one of these ways:
|
||||
|
||||
a) Convey the object code in, or embodied in, a physical product
|
||||
(including a physical distribution medium), accompanied by the
|
||||
Corresponding Source fixed on a durable physical medium
|
||||
customarily used for software interchange.
|
||||
|
||||
b) Convey the object code in, or embodied in, a physical product
|
||||
(including a physical distribution medium), accompanied by a
|
||||
written offer, valid for at least three years and valid for as
|
||||
long as you offer spare parts or customer support for that product
|
||||
model, to give anyone who possesses the object code either (1) a
|
||||
copy of the Corresponding Source for all the software in the
|
||||
product that is covered by this License, on a durable physical
|
||||
medium customarily used for software interchange, for a price no
|
||||
more than your reasonable cost of physically performing this
|
||||
conveying of source, or (2) access to copy the
|
||||
Corresponding Source from a network server at no charge.
|
||||
|
||||
c) Convey individual copies of the object code with a copy of the
|
||||
written offer to provide the Corresponding Source. This
|
||||
alternative is allowed only occasionally and noncommercially, and
|
||||
only if you received the object code with such an offer, in accord
|
||||
with subsection 6b.
|
||||
|
||||
d) Convey the object code by offering access from a designated
|
||||
place (gratis or for a charge), and offer equivalent access to the
|
||||
Corresponding Source in the same way through the same place at no
|
||||
further charge. You need not require recipients to copy the
|
||||
Corresponding Source along with the object code. If the place to
|
||||
copy the object code is a network server, the Corresponding Source
|
||||
may be on a different server (operated by you or a third party)
|
||||
that supports equivalent copying facilities, provided you maintain
|
||||
clear directions next to the object code saying where to find the
|
||||
Corresponding Source. Regardless of what server hosts the
|
||||
Corresponding Source, you remain obligated to ensure that it is
|
||||
available for as long as needed to satisfy these requirements.
|
||||
|
||||
e) Convey the object code using peer-to-peer transmission, provided
|
||||
you inform other peers where the object code and Corresponding
|
||||
Source of the work are being offered to the general public at no
|
||||
charge under subsection 6d.
|
||||
|
||||
A separable portion of the object code, whose source code is excluded
|
||||
from the Corresponding Source as a System Library, need not be
|
||||
included in conveying the object code work.
|
||||
|
||||
A "User Product" is either (1) a "consumer product", which means any
|
||||
tangible personal property which is normally used for personal, family,
|
||||
or household purposes, or (2) anything designed or sold for incorporation
|
||||
into a dwelling. In determining whether a product is a consumer product,
|
||||
doubtful cases shall be resolved in favor of coverage. For a particular
|
||||
product received by a particular user, "normally used" refers to a
|
||||
typical or common use of that class of product, regardless of the status
|
||||
of the particular user or of the way in which the particular user
|
||||
actually uses, or expects or is expected to use, the product. A product
|
||||
is a consumer product regardless of whether the product has substantial
|
||||
commercial, industrial or non-consumer uses, unless such uses represent
|
||||
the only significant mode of use of the product.
|
||||
|
||||
"Installation Information" for a User Product means any methods,
|
||||
procedures, authorization keys, or other information required to install
|
||||
and execute modified versions of a covered work in that User Product from
|
||||
a modified version of its Corresponding Source. The information must
|
||||
suffice to ensure that the continued functioning of the modified object
|
||||
code is in no case prevented or interfered with solely because
|
||||
modification has been made.
|
||||
|
||||
If you convey an object code work under this section in, or with, or
|
||||
specifically for use in, a User Product, and the conveying occurs as
|
||||
part of a transaction in which the right of possession and use of the
|
||||
User Product is transferred to the recipient in perpetuity or for a
|
||||
fixed term (regardless of how the transaction is characterized), the
|
||||
Corresponding Source conveyed under this section must be accompanied
|
||||
by the Installation Information. But this requirement does not apply
|
||||
if neither you nor any third party retains the ability to install
|
||||
modified object code on the User Product (for example, the work has
|
||||
been installed in ROM).
|
||||
|
||||
The requirement to provide Installation Information does not include a
|
||||
requirement to continue to provide support service, warranty, or updates
|
||||
for a work that has been modified or installed by the recipient, or for
|
||||
the User Product in which it has been modified or installed. Access to a
|
||||
network may be denied when the modification itself materially and
|
||||
adversely affects the operation of the network or violates the rules and
|
||||
protocols for communication across the network.
|
||||
|
||||
Corresponding Source conveyed, and Installation Information provided,
|
||||
in accord with this section must be in a format that is publicly
|
||||
documented (and with an implementation available to the public in
|
||||
source code form), and must require no special password or key for
|
||||
unpacking, reading or copying.
|
||||
|
||||
7. Additional Terms.
|
||||
|
||||
"Additional permissions" are terms that supplement the terms of this
|
||||
License by making exceptions from one or more of its conditions.
|
||||
Additional permissions that are applicable to the entire Program shall
|
||||
be treated as though they were included in this License, to the extent
|
||||
that they are valid under applicable law. If additional permissions
|
||||
apply only to part of the Program, that part may be used separately
|
||||
under those permissions, but the entire Program remains governed by
|
||||
this License without regard to the additional permissions.
|
||||
|
||||
When you convey a copy of a covered work, you may at your option
|
||||
remove any additional permissions from that copy, or from any part of
|
||||
it. (Additional permissions may be written to require their own
|
||||
removal in certain cases when you modify the work.) You may place
|
||||
additional permissions on material, added by you to a covered work,
|
||||
for which you have or can give appropriate copyright permission.
|
||||
|
||||
Notwithstanding any other provision of this License, for material you
|
||||
add to a covered work, you may (if authorized by the copyright holders of
|
||||
that material) supplement the terms of this License with terms:
|
||||
|
||||
a) Disclaiming warranty or limiting liability differently from the
|
||||
terms of sections 15 and 16 of this License; or
|
||||
|
||||
b) Requiring preservation of specified reasonable legal notices or
|
||||
author attributions in that material or in the Appropriate Legal
|
||||
Notices displayed by works containing it; or
|
||||
|
||||
c) Prohibiting misrepresentation of the origin of that material, or
|
||||
requiring that modified versions of such material be marked in
|
||||
reasonable ways as different from the original version; or
|
||||
|
||||
d) Limiting the use for publicity purposes of names of licensors or
|
||||
authors of the material; or
|
||||
|
||||
e) Declining to grant rights under trademark law for use of some
|
||||
trade names, trademarks, or service marks; or
|
||||
|
||||
f) Requiring indemnification of licensors and authors of that
|
||||
material by anyone who conveys the material (or modified versions of
|
||||
it) with contractual assumptions of liability to the recipient, for
|
||||
any liability that these contractual assumptions directly impose on
|
||||
those licensors and authors.
|
||||
|
||||
All other non-permissive additional terms are considered "further
|
||||
restrictions" within the meaning of section 10. If the Program as you
|
||||
received it, or any part of it, contains a notice stating that it is
|
||||
governed by this License along with a term that is a further
|
||||
restriction, you may remove that term. If a license document contains
|
||||
a further restriction but permits relicensing or conveying under this
|
||||
License, you may add to a covered work material governed by the terms
|
||||
of that license document, provided that the further restriction does
|
||||
not survive such relicensing or conveying.
|
||||
|
||||
If you add terms to a covered work in accord with this section, you
|
||||
must place, in the relevant source files, a statement of the
|
||||
additional terms that apply to those files, or a notice indicating
|
||||
where to find the applicable terms.
|
||||
|
||||
Additional terms, permissive or non-permissive, may be stated in the
|
||||
form of a separately written license, or stated as exceptions;
|
||||
the above requirements apply either way.
|
||||
|
||||
8. Termination.
|
||||
|
||||
You may not propagate or modify a covered work except as expressly
|
||||
provided under this License. Any attempt otherwise to propagate or
|
||||
modify it is void, and will automatically terminate your rights under
|
||||
this License (including any patent licenses granted under the third
|
||||
paragraph of section 11).
|
||||
|
||||
However, if you cease all violation of this License, then your
|
||||
license from a particular copyright holder is reinstated (a)
|
||||
provisionally, unless and until the copyright holder explicitly and
|
||||
finally terminates your license, and (b) permanently, if the copyright
|
||||
holder fails to notify you of the violation by some reasonable means
|
||||
prior to 60 days after the cessation.
|
||||
|
||||
Moreover, your license from a particular copyright holder is
|
||||
reinstated permanently if the copyright holder notifies you of the
|
||||
violation by some reasonable means, this is the first time you have
|
||||
received notice of violation of this License (for any work) from that
|
||||
copyright holder, and you cure the violation prior to 30 days after
|
||||
your receipt of the notice.
|
||||
|
||||
Termination of your rights under this section does not terminate the
|
||||
licenses of parties who have received copies or rights from you under
|
||||
this License. If your rights have been terminated and not permanently
|
||||
reinstated, you do not qualify to receive new licenses for the same
|
||||
material under section 10.
|
||||
|
||||
9. Acceptance Not Required for Having Copies.
|
||||
|
||||
You are not required to accept this License in order to receive or
|
||||
run a copy of the Program. Ancillary propagation of a covered work
|
||||
occurring solely as a consequence of using peer-to-peer transmission
|
||||
to receive a copy likewise does not require acceptance. However,
|
||||
nothing other than this License grants you permission to propagate or
|
||||
modify any covered work. These actions infringe copyright if you do
|
||||
not accept this License. Therefore, by modifying or propagating a
|
||||
covered work, you indicate your acceptance of this License to do so.
|
||||
|
||||
10. Automatic Licensing of Downstream Recipients.
|
||||
|
||||
Each time you convey a covered work, the recipient automatically
|
||||
receives a license from the original licensors, to run, modify and
|
||||
propagate that work, subject to this License. You are not responsible
|
||||
for enforcing compliance by third parties with this License.
|
||||
|
||||
An "entity transaction" is a transaction transferring control of an
|
||||
organization, or substantially all assets of one, or subdividing an
|
||||
organization, or merging organizations. If propagation of a covered
|
||||
work results from an entity transaction, each party to that
|
||||
transaction who receives a copy of the work also receives whatever
|
||||
licenses to the work the party's predecessor in interest had or could
|
||||
give under the previous paragraph, plus a right to possession of the
|
||||
Corresponding Source of the work from the predecessor in interest, if
|
||||
the predecessor has it or can get it with reasonable efforts.
|
||||
|
||||
You may not impose any further restrictions on the exercise of the
|
||||
rights granted or affirmed under this License. For example, you may
|
||||
not impose a license fee, royalty, or other charge for exercise of
|
||||
rights granted under this License, and you may not initiate litigation
|
||||
(including a cross-claim or counterclaim in a lawsuit) alleging that
|
||||
any patent claim is infringed by making, using, selling, offering for
|
||||
sale, or importing the Program or any portion of it.
|
||||
|
||||
11. Patents.
|
||||
|
||||
A "contributor" is a copyright holder who authorizes use under this
|
||||
License of the Program or a work on which the Program is based. The
|
||||
work thus licensed is called the contributor's "contributor version".
|
||||
|
||||
A contributor's "essential patent claims" are all patent claims
|
||||
owned or controlled by the contributor, whether already acquired or
|
||||
hereafter acquired, that would be infringed by some manner, permitted
|
||||
by this License, of making, using, or selling its contributor version,
|
||||
but do not include claims that would be infringed only as a
|
||||
consequence of further modification of the contributor version. For
|
||||
purposes of this definition, "control" includes the right to grant
|
||||
patent sublicenses in a manner consistent with the requirements of
|
||||
this License.
|
||||
|
||||
Each contributor grants you a non-exclusive, worldwide, royalty-free
|
||||
patent license under the contributor's essential patent claims, to
|
||||
make, use, sell, offer for sale, import and otherwise run, modify and
|
||||
propagate the contents of its contributor version.
|
||||
|
||||
In the following three paragraphs, a "patent license" is any express
|
||||
agreement or commitment, however denominated, not to enforce a patent
|
||||
(such as an express permission to practice a patent or covenant not to
|
||||
sue for patent infringement). To "grant" such a patent license to a
|
||||
party means to make such an agreement or commitment not to enforce a
|
||||
patent against the party.
|
||||
|
||||
If you convey a covered work, knowingly relying on a patent license,
|
||||
and the Corresponding Source of the work is not available for anyone
|
||||
to copy, free of charge and under the terms of this License, through a
|
||||
publicly available network server or other readily accessible means,
|
||||
then you must either (1) cause the Corresponding Source to be so
|
||||
available, or (2) arrange to deprive yourself of the benefit of the
|
||||
patent license for this particular work, or (3) arrange, in a manner
|
||||
consistent with the requirements of this License, to extend the patent
|
||||
license to downstream recipients. "Knowingly relying" means you have
|
||||
actual knowledge that, but for the patent license, your conveying the
|
||||
covered work in a country, or your recipient's use of the covered work
|
||||
in a country, would infringe one or more identifiable patents in that
|
||||
country that you have reason to believe are valid.
|
||||
|
||||
If, pursuant to or in connection with a single transaction or
|
||||
arrangement, you convey, or propagate by procuring conveyance of, a
|
||||
covered work, and grant a patent license to some of the parties
|
||||
receiving the covered work authorizing them to use, propagate, modify
|
||||
or convey a specific copy of the covered work, then the patent license
|
||||
you grant is automatically extended to all recipients of the covered
|
||||
work and works based on it.
|
||||
|
||||
A patent license is "discriminatory" if it does not include within
|
||||
the scope of its coverage, prohibits the exercise of, or is
|
||||
conditioned on the non-exercise of one or more of the rights that are
|
||||
specifically granted under this License. You may not convey a covered
|
||||
work if you are a party to an arrangement with a third party that is
|
||||
in the business of distributing software, under which you make payment
|
||||
to the third party based on the extent of your activity of conveying
|
||||
the work, and under which the third party grants, to any of the
|
||||
parties who would receive the covered work from you, a discriminatory
|
||||
patent license (a) in connection with copies of the covered work
|
||||
conveyed by you (or copies made from those copies), or (b) primarily
|
||||
for and in connection with specific products or compilations that
|
||||
contain the covered work, unless you entered into that arrangement,
|
||||
or that patent license was granted, prior to 28 March 2007.
|
||||
|
||||
Nothing in this License shall be construed as excluding or limiting
|
||||
any implied license or other defenses to infringement that may
|
||||
otherwise be available to you under applicable patent law.
|
||||
|
||||
12. No Surrender of Others' Freedom.
|
||||
|
||||
If conditions are imposed on you (whether by court order, agreement or
|
||||
otherwise) that contradict the conditions of this License, they do not
|
||||
excuse you from the conditions of this License. If you cannot convey a
|
||||
covered work so as to satisfy simultaneously your obligations under this
|
||||
License and any other pertinent obligations, then as a consequence you may
|
||||
not convey it at all. For example, if you agree to terms that obligate you
|
||||
to collect a royalty for further conveying from those to whom you convey
|
||||
the Program, the only way you could satisfy both those terms and this
|
||||
License would be to refrain entirely from conveying the Program.
|
||||
|
||||
13. Remote Network Interaction; Use with the GNU General Public License.
|
||||
|
||||
Notwithstanding any other provision of this License, if you modify the
|
||||
Program, your modified version must prominently offer all users
|
||||
interacting with it remotely through a computer network (if your version
|
||||
supports such interaction) an opportunity to receive the Corresponding
|
||||
Source of your version by providing access to the Corresponding Source
|
||||
from a network server at no charge, through some standard or customary
|
||||
means of facilitating copying of software. This Corresponding Source
|
||||
shall include the Corresponding Source for any work covered by version 3
|
||||
of the GNU General Public License that is incorporated pursuant to the
|
||||
following paragraph.
|
||||
|
||||
Notwithstanding any other provision of this License, you have
|
||||
permission to link or combine any covered work with a work licensed
|
||||
under version 3 of the GNU General Public License into a single
|
||||
combined work, and to convey the resulting work. The terms of this
|
||||
License will continue to apply to the part which is the covered work,
|
||||
but the work with which it is combined will remain governed by version
|
||||
3 of the GNU General Public License.
|
||||
|
||||
14. Revised Versions of this License.
|
||||
|
||||
The Free Software Foundation may publish revised and/or new versions of
|
||||
the GNU Affero General Public License from time to time. Such new versions
|
||||
will be similar in spirit to the present version, but may differ in detail to
|
||||
address new problems or concerns.
|
||||
|
||||
Each version is given a distinguishing version number. If the
|
||||
Program specifies that a certain numbered version of the GNU Affero General
|
||||
Public License "or any later version" applies to it, you have the
|
||||
option of following the terms and conditions either of that numbered
|
||||
version or of any later version published by the Free Software
|
||||
Foundation. If the Program does not specify a version number of the
|
||||
GNU Affero General Public License, you may choose any version ever published
|
||||
by the Free Software Foundation.
|
||||
|
||||
If the Program specifies that a proxy can decide which future
|
||||
versions of the GNU Affero General Public License can be used, that proxy's
|
||||
public statement of acceptance of a version permanently authorizes you
|
||||
to choose that version for the Program.
|
||||
|
||||
Later license versions may give you additional or different
|
||||
permissions. However, no additional obligations are imposed on any
|
||||
author or copyright holder as a result of your choosing to follow a
|
||||
later version.
|
||||
|
||||
15. Disclaimer of Warranty.
|
||||
|
||||
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
|
||||
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
|
||||
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
|
||||
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
|
||||
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
|
||||
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
|
||||
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
|
||||
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
|
||||
|
||||
16. Limitation of Liability.
|
||||
|
||||
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
|
||||
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
|
||||
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
|
||||
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
|
||||
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
|
||||
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
|
||||
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
|
||||
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
|
||||
SUCH DAMAGES.
|
||||
|
||||
17. Interpretation of Sections 15 and 16.
|
||||
|
||||
If the disclaimer of warranty and limitation of liability provided
|
||||
above cannot be given local legal effect according to their terms,
|
||||
reviewing courts shall apply local law that most closely approximates
|
||||
an absolute waiver of all civil liability in connection with the
|
||||
Program, unless a warranty or assumption of liability accompanies a
|
||||
copy of the Program in return for a fee.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
How to Apply These Terms to Your New Programs
|
||||
|
||||
If you develop a new program, and you want it to be of the greatest
|
||||
possible use to the public, the best way to achieve this is to make it
|
||||
free software which everyone can redistribute and change under these terms.
|
||||
|
||||
To do so, attach the following notices to the program. It is safest
|
||||
to attach them to the start of each source file to most effectively
|
||||
state the exclusion of warranty; and each file should have at least
|
||||
the "copyright" line and a pointer to where the full notice is found.
|
||||
|
||||
<one line to give the program's name and a brief idea of what it does.>
|
||||
Copyright (C) <year> <name of author>
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU Affero General Public License as published
|
||||
by the Free Software Foundation, either version 3 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU Affero General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU Affero General Public License
|
||||
along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
Also add information on how to contact you by electronic and paper mail.
|
||||
|
||||
If your software can interact with users remotely through a computer
|
||||
network, you should also make sure that it provides a way for users to
|
||||
get its source. For example, if your program is a web application, its
|
||||
interface could display a "Source" link that leads users to an archive
|
||||
of the code. There are many ways you could offer source, and different
|
||||
solutions will be better for different programs; see section 13 for the
|
||||
specific requirements.
|
||||
|
||||
You should also get your employer (if you work as a programmer) or school,
|
||||
if any, to sign a "copyright disclaimer" for the program, if necessary.
|
||||
For more information on this, and how to apply and follow the GNU AGPL, see
|
||||
<https://www.gnu.org/licenses/>.
|
78
pkg/cmd/server/main.go
Normal file
78
pkg/cmd/server/main.go
Normal file
@ -0,0 +1,78 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"os"
|
||||
"os/signal"
|
||||
"strings"
|
||||
"syscall"
|
||||
|
||||
roadsign "git.solsynth.dev/goatworks/roadsign/pkg"
|
||||
"git.solsynth.dev/goatworks/roadsign/pkg/hypertext"
|
||||
"git.solsynth.dev/goatworks/roadsign/pkg/navi"
|
||||
"git.solsynth.dev/goatworks/roadsign/pkg/sideload"
|
||||
"github.com/google/uuid"
|
||||
"github.com/rs/zerolog"
|
||||
"github.com/rs/zerolog/log"
|
||||
"github.com/spf13/viper"
|
||||
)
|
||||
|
||||
func init() {
|
||||
zerolog.TimeFieldFormat = zerolog.TimeFormatUnix
|
||||
log.Logger = log.Output(zerolog.ConsoleWriter{Out: os.Stdout})
|
||||
}
|
||||
|
||||
func main() {
|
||||
// Configure settings
|
||||
viper.AddConfigPath(".")
|
||||
viper.AddConfigPath("..")
|
||||
viper.AddConfigPath("/")
|
||||
viper.SetConfigName("settings")
|
||||
viper.SetConfigType("toml")
|
||||
|
||||
// Load settings
|
||||
if err := viper.ReadInConfig(); err != nil {
|
||||
log.Panic().Err(err).Msg("An error occurred when loading settings.")
|
||||
}
|
||||
|
||||
// Present settings
|
||||
if len(viper.GetString("security.credential")) <= 0 {
|
||||
credential := strings.ReplaceAll(uuid.NewString(), "-", "")
|
||||
viper.Set("security.credential", credential)
|
||||
_ = viper.WriteConfig()
|
||||
|
||||
log.Warn().Msg("There isn't any API credential configured in settings.toml, so one has been generated automatically for API access.")
|
||||
log.Warn().Msgf("RoadSign auto-generated API credential is %s", credential)
|
||||
}
|
||||
|
||||
// Load & init navigator
|
||||
if err := navi.ReadInConfig(viper.GetString("paths.configs")); err != nil {
|
||||
log.Panic().Err(err).Msg("An error occurred when loading configurations.")
|
||||
} else {
|
||||
log.Info().Int("count", len(navi.R.Regions)).Msg("All configurations have been loaded.")
|
||||
}
|
||||
|
||||
// Init warden
|
||||
navi.InitializeWarden(navi.R.Regions)
|
||||
|
||||
// Init hypertext server
|
||||
hypertext.RunServer(
|
||||
hypertext.InitServer(),
|
||||
viper.GetStringSlice("hypertext.ports"),
|
||||
viper.GetStringSlice("hypertext.secured_ports"),
|
||||
)
|
||||
|
||||
// Init sideload server
|
||||
hypertext.RunServer(
|
||||
sideload.InitSideload(),
|
||||
viper.GetStringSlice("sideload.ports"),
|
||||
[]string{},
|
||||
)
|
||||
|
||||
log.Info().Msgf("RoadSign v%s is started...", roadsign.AppVersion)
|
||||
|
||||
quit := make(chan os.Signal, 1)
|
||||
signal.Notify(quit, syscall.SIGINT, syscall.SIGTERM)
|
||||
<-quit
|
||||
|
||||
log.Info().Msgf("RoadSign v%s is quitting...", roadsign.AppVersion)
|
||||
}
|
138 pkg/hypertext/proxies.go Normal file
@ -0,0 +1,138 @@
|
||||
package hypertext
|
||||
|
||||
import (
|
||||
"math/rand"
|
||||
"regexp"
|
||||
|
||||
"github.com/spf13/viper"
|
||||
|
||||
"git.solsynth.dev/goatworks/roadsign/pkg/navi"
|
||||
"github.com/gofiber/fiber/v2"
|
||||
"github.com/samber/lo"
|
||||
)
|
||||
|
||||
func ProxiesHandler(ctx *fiber.Ctx) error {
|
||||
host := ctx.Hostname()
|
||||
path := ctx.Path()
|
||||
queries := ctx.Queries()
|
||||
headers := ctx.GetReqHeaders()
|
||||
|
||||
// Filtering sites
|
||||
for _, region := range navi.R.Regions {
|
||||
// Matching rules
|
||||
for _, location := range region.Locations {
|
||||
if !lo.Contains(location.Hosts, host) {
|
||||
continue
|
||||
}
|
||||
|
||||
if !func() bool {
|
||||
flag := false
|
||||
for _, pattern := range location.Paths {
|
||||
if ok, _ := regexp.MatchString(pattern, path); ok {
|
||||
flag = true
|
||||
break
|
||||
}
|
||||
}
|
||||
return flag
|
||||
}() {
|
||||
continue
|
||||
}
|
||||
|
||||
// Filter query strings
|
||||
flag := true
|
||||
for rk, rv := range location.Queries {
|
||||
for ik, iv := range queries {
|
||||
if rk != ik && rv != iv {
|
||||
flag = false
|
||||
break
|
||||
}
|
||||
}
|
||||
if !flag {
|
||||
break
|
||||
}
|
||||
}
|
||||
if !flag {
|
||||
continue
|
||||
}
|
||||
|
||||
// Filter headers
|
||||
for rk, rv := range location.Headers {
|
||||
for ik, iv := range headers {
|
||||
if rk == ik {
|
||||
for _, ov := range iv {
|
||||
if !lo.Contains(rv, ov) {
|
||||
flag = false
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
if !flag {
|
||||
break
|
||||
}
|
||||
}
|
||||
if !flag {
|
||||
break
|
||||
}
|
||||
}
|
||||
if !flag {
|
||||
continue
|
||||
}
|
||||
|
||||
idx := rand.Intn(len(location.Destinations))
|
||||
dest := location.Destinations[idx]
|
||||
|
||||
// Passing all the rules means the site is what we are looking for.
|
||||
// Let us respond to our client!
|
||||
return makeResponse(ctx, region, &location, &dest)
|
||||
}
|
||||
}
|
||||
|
||||
// There is no site available for this request.
|
||||
// Just ignore it and give our client a not found status.
|
||||
// Do not care about the user experience, we can do it in custom error handler.
|
||||
return fiber.ErrNotFound
|
||||
}
|
||||
|
||||
func makeResponse(c *fiber.Ctx, region *navi.Region, location *navi.Location, dest *navi.Destination) error {
|
||||
uri := c.Request().URI().String()
|
||||
|
||||
// Modify request
|
||||
for _, transformer := range dest.Transformers {
|
||||
if err := transformer.TransformRequest(c); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Forward
|
||||
err := navi.R.Forward(c, dest)
|
||||
|
||||
// Modify response
|
||||
for _, transformer := range dest.Transformers {
|
||||
if err := transformer.TransformResponse(c); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Collect trace
|
||||
if viper.GetBool("telemetry.capture_traces") {
|
||||
var message string
|
||||
if err != nil {
|
||||
message = err.Error()
|
||||
}
|
||||
|
||||
go navi.R.Metrics.AddTrace(navi.RoadTrace{
|
||||
Region: region.ID,
|
||||
Location: location.ID,
|
||||
Destination: dest.ID,
|
||||
Uri: uri,
|
||||
IpAddress: c.IP(),
|
||||
UserAgent: c.Get(fiber.HeaderUserAgent),
|
||||
Error: navi.RoadTraceError{
|
||||
IsNull: err == nil,
|
||||
Message: message,
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
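ProxiesHandler above resolves a request in three steps: the hostname must appear in Location.Hosts, the path must match at least one entry of Location.Paths (treated as a regular expression), and any configured query strings and headers must agree with the request; one of the location's destinations is then picked at random. A minimal sketch of a location such a handler could match — the IDs, host, and upstream address below are illustrative assumptions, not values taken from this repository:

package main

import (
	"fmt"
	"regexp"

	"git.solsynth.dev/goatworks/roadsign/pkg/navi"
)

func main() {
	// Hypothetical location: everything under /api on example.com is
	// forwarded to a single hypertext upstream.
	location := navi.Location{
		ID:    "api",
		Hosts: []string{"example.com"},
		Paths: []string{"^/api"},
		Destinations: []navi.Destination{
			{ID: "backend", Uri: "http://127.0.0.1:8080"},
		},
	}

	// The same check ProxiesHandler performs for every configured path pattern.
	matched, _ := regexp.MatchString(location.Paths[0], "/api/v1/users")
	fmt.Println(matched) // true
}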
139 pkg/hypertext/server.go Normal file
@ -0,0 +1,139 @@
|
||||
package hypertext
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"git.solsynth.dev/goatworks/roadsign/pkg/hypertext/status"
|
||||
jsoniter "github.com/json-iterator/go"
|
||||
"net"
|
||||
"net/http"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/gofiber/fiber/v2"
|
||||
"github.com/gofiber/fiber/v2/middleware/limiter"
|
||||
"github.com/gofiber/fiber/v2/middleware/logger"
|
||||
"github.com/gofiber/template/html/v2"
|
||||
"github.com/rs/zerolog/log"
|
||||
"github.com/spf13/viper"
|
||||
)
|
||||
|
||||
func InitServer() *fiber.App {
|
||||
views := html.NewFileSystem(http.FS(status.FS), ".gohtml")
|
||||
app := fiber.New(fiber.Config{
|
||||
ViewsLayout: "views/index",
|
||||
AppName: "RoadSign",
|
||||
ServerHeader: "RoadSign",
|
||||
DisableStartupMessage: true,
|
||||
EnableIPValidation: true,
|
||||
Views: views,
|
||||
ErrorHandler: status.StatusPageHandler,
|
||||
JSONDecoder: jsoniter.ConfigCompatibleWithStandardLibrary.Unmarshal,
|
||||
JSONEncoder: jsoniter.ConfigCompatibleWithStandardLibrary.Marshal,
|
||||
ProxyHeader: fiber.HeaderXForwardedFor,
|
||||
Prefork: viper.GetBool("performance.prefork"),
|
||||
BodyLimit: viper.GetInt("hypertext.limitation.max_body_size"),
|
||||
})
|
||||
|
||||
if viper.GetBool("hypertext.force_https") {
|
||||
app.Use(func(c *fiber.Ctx) error {
|
||||
if !c.Secure() {
|
||||
return c.Redirect(
|
||||
strings.Replace(c.Request().URI().String(), "http", "https", 1),
|
||||
fiber.StatusMovedPermanently,
|
||||
)
|
||||
}
|
||||
|
||||
return c.Next()
|
||||
})
|
||||
}
|
||||
|
||||
if viper.GetBool("telemetry.request_logging") {
|
||||
app.Use(logger.New(logger.Config{
|
||||
Output: log.Logger,
|
||||
Format: "[Proxies] [${time}] ${status} - ${latency} ${method} ${path}\n",
|
||||
}))
|
||||
}
|
||||
|
||||
if viper.GetInt("hypertext.limitation.max_qps") > 0 {
|
||||
app.Use(limiter.New(limiter.Config{
|
||||
Max: viper.GetInt("hypertext.limitation.max_qps"),
|
||||
Expiration: 1 * time.Second,
|
||||
LimitReached: func(c *fiber.Ctx) error {
|
||||
return fiber.ErrTooManyRequests
|
||||
},
|
||||
}))
|
||||
}
|
||||
|
||||
app.All("/*", ProxiesHandler)
|
||||
|
||||
return app
|
||||
}
|
||||
|
||||
type CertificateConfig struct {
|
||||
Key string `json:"key"`
|
||||
Pem string `json:"pem"`
|
||||
}
|
||||
|
||||
func RunServer(app *fiber.App, ports []string, securedPorts []string) {
|
||||
var certs []CertificateConfig
|
||||
raw, _ := jsoniter.Marshal(viper.Get("hypertext.certificate"))
|
||||
_ = jsoniter.Unmarshal(raw, &certs)
|
||||
|
||||
tlsCfg := &tls.Config{
|
||||
MinVersion: tls.VersionTLS12,
|
||||
Certificates: []tls.Certificate{},
|
||||
}
|
||||
|
||||
for _, info := range certs {
|
||||
cert, err := tls.LoadX509KeyPair(info.Pem, info.Key)
|
||||
if err != nil {
|
||||
log.Error().Err(err).
|
||||
Str("pem", info.Pem).
|
||||
Str("key", info.Key).
|
||||
Msg("An error occurred when loading certificate.")
|
||||
} else {
|
||||
tlsCfg.Certificates = append(tlsCfg.Certificates, cert)
|
||||
}
|
||||
}
|
||||
|
||||
for _, port := range ports {
|
||||
port := port
|
||||
go func() {
|
||||
if viper.GetBool("hypertext.redirect_to_https") {
|
||||
redirector := fiber.New(fiber.Config{
|
||||
AppName: "RoadSign",
|
||||
ServerHeader: "RoadSign",
|
||||
DisableStartupMessage: true,
|
||||
EnableIPValidation: true,
|
||||
})
|
||||
redirector.All("/", func(c *fiber.Ctx) error {
|
||||
return c.Redirect(strings.Replace(string(c.Request().URI().FullURI()), "http", "https", 1))
|
||||
})
|
||||
if err := redirector.Listen(port); err != nil {
|
||||
log.Panic().Err(err).Msg("An error occurred when listening on hypertext non-TLS ports.")
|
||||
}
|
||||
} else {
|
||||
if err := app.Listen(port); err != nil {
|
||||
log.Panic().Err(err).Msg("An error occurred when listening on hypertext non-TLS ports.")
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
log.Info().Msgf("Listening for %s... http://0.0.0.0%s", app.Config().AppName, port)
|
||||
}
|
||||
|
||||
for _, port := range securedPorts {
|
||||
port := port
|
||||
go func() {
|
||||
listener, err := net.Listen("tcp", port)
|
||||
if err != nil {
|
||||
log.Panic().Err(err).Msg("An error occurred when listening on hypertext TLS ports.")
|
||||
}
|
||||
if err := app.Listener(tls.NewListener(listener, tlsCfg)); err != nil {
|
||||
log.Panic().Err(err).Msg("An error occurred when listening on hypertext TLS ports.")
|
||||
}
|
||||
}()
|
||||
|
||||
log.Info().Msgf("Listening for %s... https://0.0.0.0%s", app.Config().AppName, port)
|
||||
}
|
||||
}
|
6 pkg/hypertext/status/embed.go Normal file
@ -0,0 +1,6 @@
|
||||
package status
|
||||
|
||||
import "embed"
|
||||
|
||||
//go:embed all:views
|
||||
var FS embed.FS
|
56 pkg/hypertext/status/serve.go Normal file
@ -0,0 +1,56 @@
|
||||
package status
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
roadsign "git.solsynth.dev/goatworks/roadsign/pkg"
|
||||
"github.com/gofiber/fiber/v2"
|
||||
"github.com/spf13/viper"
|
||||
)
|
||||
|
||||
type ErrorPayload struct {
|
||||
Title string `json:"title"`
|
||||
Message string `json:"message"`
|
||||
Version string `json:"version"`
|
||||
}
|
||||
|
||||
func StatusPageHandler(c *fiber.Ctx, err error) error {
|
||||
var reqErr *fiber.Error
|
||||
var status = fiber.StatusInternalServerError
|
||||
if errors.As(err, &reqErr) {
|
||||
status = reqErr.Code
|
||||
}
|
||||
|
||||
c.Status(status)
|
||||
|
||||
payload := ErrorPayload{
|
||||
Version: roadsign.AppVersion,
|
||||
}
|
||||
|
||||
switch status {
|
||||
case fiber.StatusNotFound:
|
||||
payload.Title = "Not Found"
|
||||
payload.Message = fmt.Sprintf("no resource for \"%s\"", c.OriginalURL())
|
||||
return c.Render("views/not-found", payload)
|
||||
case fiber.StatusTooManyRequests:
|
||||
payload.Title = "Request Too Fast"
|
||||
payload.Message = fmt.Sprintf("you have sent over %d request(s) in a second", viper.GetInt("hypertext.limitation.max_qps"))
|
||||
return c.Render("views/too-many-requests", payload)
|
||||
case fiber.StatusRequestEntityTooLarge:
|
||||
payload.Title = "Request Too Large"
|
||||
payload.Message = fmt.Sprintf("you have sent a request over %d bytes", viper.GetInt("hypertext.limitation.max_body_size"))
|
||||
return c.Render("views/request-too-large", payload)
|
||||
case fiber.StatusBadGateway:
|
||||
payload.Title = "Backend Down"
|
||||
payload.Message = fmt.Sprintf("all destinations configured to handle your request are down: %s", err.Error())
|
||||
return c.Render("views/bad-gateway", payload)
|
||||
case fiber.StatusGatewayTimeout:
|
||||
payload.Title = "Backend Took Too Long To Respond"
|
||||
payload.Message = fmt.Sprintf("the destination took too long to respond to your request: %s", err.Error())
|
||||
return c.Render("views/gateway-timeout", payload)
|
||||
default:
|
||||
payload.Title = "Oops"
|
||||
payload.Message = err.Error()
|
||||
return c.Render("views/fallback", payload)
|
||||
}
|
||||
}
|
6 pkg/hypertext/status/views/bad-gateway.gohtml Normal file
@ -0,0 +1,6 @@
|
||||
<h1 class="text-2xl font-bold">502</h1>
|
||||
<h2 class="text-lg">No one is standing...</h2>
|
||||
|
||||
<div class="mt-3 mx-auto p-5 w-[360px] max-w-screen bg-neutral text-neutral-content rounded">
|
||||
<code class="capitalize">{{ .Message }}</code>
|
||||
</div>
|
6 pkg/hypertext/status/views/fallback.gohtml Normal file
@ -0,0 +1,6 @@
|
||||
<h1 class="text-2xl font-bold">Oops</h1>
|
||||
<h2 class="text-lg">Something went wrong...</h2>
|
||||
|
||||
<div class="mt-3 mx-auto p-5 w-[360px] max-w-screen bg-neutral text-neutral-content rounded">
|
||||
<code class="capitalize">{{ .Message }}</code>
|
||||
</div>
|
6 pkg/hypertext/status/views/gateway-timeout.gohtml Normal file
@ -0,0 +1,6 @@
|
||||
<h1 class="text-2xl font-bold">504</h1>
|
||||
<h2 class="text-lg">Looks like the server in the back fell asleep</h2>
|
||||
|
||||
<div class="mt-3 mx-auto p-5 w-[360px] max-w-screen bg-neutral text-neutral-content rounded">
|
||||
<code class="capitalize">{{ .Message }}</code>
|
||||
</div>
|
83 pkg/hypertext/status/views/index.gohtml Normal file
@ -0,0 +1,83 @@
|
||||
<!doctype html>
|
||||
<html lang="en">
|
||||
<head>
|
||||
<meta charset="UTF-8">
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
||||
|
||||
<link href="https://cdn.jsdelivr.net/npm/daisyui@4.6.1/dist/full.min.css" rel="stylesheet" type="text/css" />
|
||||
<script src="https://cdn.tailwindcss.com"></script>
|
||||
|
||||
<script>
|
||||
tailwind.config = {
|
||||
daisyui: {
|
||||
themes: [
|
||||
{
|
||||
light: {
|
||||
primary: "#4750a3",
|
||||
secondary: "#93c5fd",
|
||||
accent: "#0f766e",
|
||||
info: "#67e8f9",
|
||||
success: "#15803d",
|
||||
warning: "#f97316",
|
||||
error: "#dc2626",
|
||||
neutral: "#2B3440",
|
||||
"secondary-content": "oklch(98.71% 0.0106 342.55)",
|
||||
"neutral-content": "#D7DDE4",
|
||||
"base-100": "oklch(100% 0 0)",
|
||||
"base-200": "#F2F2F2",
|
||||
"base-300": "#E5E6E6",
|
||||
"base-content": "#1f2937",
|
||||
"color-scheme": "light",
|
||||
"--rounded-box": "0",
|
||||
"--rounded-btn": "0",
|
||||
"--rounded-badge": "0",
|
||||
"--tab-radius": "0"
|
||||
}
|
||||
},
|
||||
{
|
||||
dark: {
|
||||
primary: "#4750a3",
|
||||
secondary: "#93c5fd",
|
||||
accent: "#0f766e",
|
||||
info: "#67e8f9",
|
||||
success: "#15803d",
|
||||
warning: "#f97316",
|
||||
error: "#dc2626",
|
||||
neutral: "#2a323c",
|
||||
"neutral-content": "#A6ADBB",
|
||||
"base-100": "#1d232a",
|
||||
"base-200": "#191e24",
|
||||
"base-300": "#15191e",
|
||||
"base-content": "#A6ADBB",
|
||||
"color-scheme": "dark",
|
||||
"--rounded-box": "0",
|
||||
"--rounded-btn": "0",
|
||||
"--rounded-badge": "0",
|
||||
"--tab-radius": "0"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
</script>
|
||||
|
||||
<title>{{ .Title }} | RoadSign</title>
|
||||
</head>
|
||||
<body>
|
||||
<main class="w-full h-screen flex justify-center items-center">
|
||||
<div class="text-center">
|
||||
{{embed}}
|
||||
|
||||
<footer class="mt-3 text-sm">
|
||||
<p>
|
||||
Powered by
|
||||
<a href="https://wiki.smartsheep.studio/roadsign/index.html" target="_blank" class="link link-primary">
|
||||
RoadSign
|
||||
</a>
|
||||
</p>
|
||||
<p class="text-xs">v{{ .Version }}</p>
|
||||
</footer>
|
||||
</div>
|
||||
</main>
|
||||
</body>
|
||||
</html>
|
6 pkg/hypertext/status/views/not-found.gohtml Normal file
@ -0,0 +1,6 @@
|
||||
<h1 class="text-2xl font-bold">404</h1>
|
||||
<h2 class="text-lg">Not Found</h2>
|
||||
|
||||
<div class="mt-3 mx-auto p-5 w-[360px] max-w-screen bg-neutral text-neutral-content rounded">
|
||||
<code class="capitalize">{{ .Message }}</code>
|
||||
</div>
|
6 pkg/hypertext/status/views/request-too-large.gohtml Normal file
@ -0,0 +1,6 @@
|
||||
<h1 class="text-2xl font-bold">413</h1>
|
||||
<h2 class="text-lg">Ouch, your request is too big.</h2>
|
||||
|
||||
<div class="mt-3 mx-auto p-5 w-[360px] max-w-screen bg-neutral text-neutral-content rounded">
|
||||
<code class="capitalize">{{ .Message }}</code>
|
||||
</div>
|
6 pkg/hypertext/status/views/too-many-requests.gohtml Normal file
@ -0,0 +1,6 @@
|
||||
<h1 class="text-2xl font-bold">429</h1>
|
||||
<h2 class="text-lg">Stop it, you are just too fast!</h2>
|
||||
|
||||
<div class="mt-3 mx-auto p-5 w-[360px] max-w-screen bg-neutral text-neutral-content rounded">
|
||||
<code class="capitalize">{{ .Message }}</code>
|
||||
</div>
|
17 pkg/meta.go Normal file
@ -0,0 +1,17 @@
|
||||
package roadsign
|
||||
|
||||
import (
|
||||
"runtime/debug"
|
||||
)
|
||||
|
||||
func init() {
|
||||
if info, ok := debug.ReadBuildInfo(); ok {
|
||||
for _, setting := range info.Settings {
|
||||
if setting.Key == "vcs.revision" {
|
||||
AppVersion += "#" + setting.Value
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var AppVersion = "2.1.0"
|
58 pkg/navi/config.go Normal file
@ -0,0 +1,58 @@
|
||||
package navi
|
||||
|
||||
import (
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/pelletier/go-toml/v2"
|
||||
)
|
||||
|
||||
var R *RoadApp
|
||||
|
||||
func ReadInConfig(root string) error {
|
||||
instance := &RoadApp{
|
||||
Regions: make([]*Region, 0),
|
||||
Metrics: &RoadMetrics{
|
||||
Traces: make([]RoadTrace, 0),
|
||||
Traffic: make(map[string]int64),
|
||||
TrafficFrom: make(map[string]int64),
|
||||
TotalTraffic: 0,
|
||||
StartupAt: time.Now(),
|
||||
},
|
||||
}
|
||||
|
||||
if err := filepath.Walk(root, func(fp string, info os.FileInfo, _ error) error {
|
||||
var region Region
|
||||
if info.IsDir() {
|
||||
return nil
|
||||
} else if !strings.HasSuffix(info.Name(), ".toml") {
|
||||
return nil
|
||||
} else if file, err := os.OpenFile(fp, os.O_RDONLY, 0755); err != nil {
|
||||
return err
|
||||
} else if data, err := io.ReadAll(file); err != nil {
|
||||
return err
|
||||
} else if err := toml.Unmarshal(data, ®ion); err != nil {
|
||||
return err
|
||||
} else {
|
||||
defer file.Close()
|
||||
|
||||
if region.Disabled {
|
||||
return nil
|
||||
}
|
||||
|
||||
instance.Regions = append(instance.Regions, ®ion)
|
||||
}
|
||||
|
||||
return nil
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Hot swap
|
||||
R = instance
|
||||
|
||||
return nil
|
||||
}
|
89 pkg/navi/helmet.go Normal file
@ -0,0 +1,89 @@
|
||||
package navi
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/gofiber/fiber/v2"
|
||||
)
|
||||
|
||||
type HelmetConfig struct {
|
||||
XSSProtection string `json:"xss_protection" toml:"xss_protection"`
|
||||
ContentTypeNosniff string `json:"content_type_nosniff" toml:"content_type_nosniff"`
|
||||
XFrameOptions string `json:"x_frame_options" toml:"x_frame_options"`
|
||||
HSTSMaxAge int `json:"hsts_max_age" toml:"hsts_max_age"`
|
||||
HSTSExcludeSubdomains bool `json:"hsts_exclude_subdomains" toml:"hsts_exclude_subdomains"`
|
||||
ContentSecurityPolicy string `json:"content_security_policy" toml:"content_security_policy"`
|
||||
CSPReportOnly bool `json:"csp_report_only" toml:"csp_report_only"`
|
||||
HSTSPreloadEnabled bool `json:"hsts_preload_enabled" toml:"hsts_preload_enabled"`
|
||||
ReferrerPolicy string `json:"referrer_policy" toml:"referrer_policy"`
|
||||
PermissionPolicy string `json:"permission_policy" toml:"permission_policy"`
|
||||
CrossOriginEmbedderPolicy string `json:"cross_origin_embedder_policy" toml:"cross_origin_embedder_policy"`
|
||||
CrossOriginOpenerPolicy string `json:"cross_origin_opener_policy" toml:"cross_origin_opener_policy"`
|
||||
CrossOriginResourcePolicy string `json:"cross_origin_resource_policy" toml:"cross_origin_resource_policy"`
|
||||
OriginAgentCluster string `json:"origin_agent_cluster" toml:"origin_agent_cluster"`
|
||||
XDNSPrefetchControl string `json:"xdns_prefetch_control" toml:"xdns_prefetch_control"`
|
||||
XDownloadOptions string `json:"x_download_options" toml:"x_download_options"`
|
||||
XPermittedCrossDomain string `json:"x_permitted_cross_domain" toml:"x_permitted_cross_domain"`
|
||||
}
|
||||
|
||||
func (cfg HelmetConfig) Apply(c *fiber.Ctx) {
|
||||
// Apply other headers
|
||||
if cfg.XSSProtection != "" {
|
||||
c.Set(fiber.HeaderXXSSProtection, cfg.XSSProtection)
|
||||
}
|
||||
if cfg.ContentTypeNosniff != "" {
|
||||
c.Set(fiber.HeaderXContentTypeOptions, cfg.ContentTypeNosniff)
|
||||
}
|
||||
if cfg.XFrameOptions != "" {
|
||||
c.Set(fiber.HeaderXFrameOptions, cfg.XFrameOptions)
|
||||
}
|
||||
if cfg.CrossOriginEmbedderPolicy != "" {
|
||||
c.Set("Cross-Origin-Embedder-Policy", cfg.CrossOriginEmbedderPolicy)
|
||||
}
|
||||
if cfg.CrossOriginOpenerPolicy != "" {
|
||||
c.Set("Cross-Origin-Opener-Policy", cfg.CrossOriginOpenerPolicy)
|
||||
}
|
||||
if cfg.CrossOriginResourcePolicy != "" {
|
||||
c.Set("Cross-Origin-Resource-Policy", cfg.CrossOriginResourcePolicy)
|
||||
}
|
||||
if cfg.OriginAgentCluster != "" {
|
||||
c.Set("Origin-Agent-Cluster", cfg.OriginAgentCluster)
|
||||
}
|
||||
if cfg.ReferrerPolicy != "" {
|
||||
c.Set("Referrer-Policy", cfg.ReferrerPolicy)
|
||||
}
|
||||
if cfg.XDNSPrefetchControl != "" {
|
||||
c.Set("X-DNS-Prefetch-Control", cfg.XDNSPrefetchControl)
|
||||
}
|
||||
if cfg.XDownloadOptions != "" {
|
||||
c.Set("X-Download-Options", cfg.XDownloadOptions)
|
||||
}
|
||||
if cfg.XPermittedCrossDomain != "" {
|
||||
c.Set("X-Permitted-Cross-Domain-Policies", cfg.XPermittedCrossDomain)
|
||||
}
|
||||
|
||||
// Handle HSTS headers
|
||||
if c.Protocol() == "https" && cfg.HSTSMaxAge != 0 {
|
||||
subdomains := ""
|
||||
if !cfg.HSTSExcludeSubdomains {
|
||||
subdomains = "; includeSubDomains"
|
||||
}
|
||||
if cfg.HSTSPreloadEnabled {
|
||||
subdomains = fmt.Sprintf("%s; preload", subdomains)
|
||||
}
|
||||
c.Set(fiber.HeaderStrictTransportSecurity, fmt.Sprintf("max-age=%d%s", cfg.HSTSMaxAge, subdomains))
|
||||
}
|
||||
|
||||
// Handle Content-Security-Policy headers
|
||||
if cfg.ContentSecurityPolicy != "" {
|
||||
if cfg.CSPReportOnly {
|
||||
c.Set(fiber.HeaderContentSecurityPolicyReportOnly, cfg.ContentSecurityPolicy)
|
||||
} else {
|
||||
c.Set(fiber.HeaderContentSecurityPolicy, cfg.ContentSecurityPolicy)
|
||||
}
|
||||
}
|
||||
|
||||
// Handle Permissions-Policy headers
|
||||
if cfg.PermissionPolicy != "" {
|
||||
c.Set(fiber.HeaderPermissionsPolicy, cfg.PermissionPolicy)
|
||||
}
|
||||
}
|
63 pkg/navi/metrics.go Normal file
@ -0,0 +1,63 @@
|
||||
package navi
|
||||
|
||||
import (
|
||||
"github.com/spf13/viper"
|
||||
"time"
|
||||
)
|
||||
|
||||
type RoadMetrics struct {
|
||||
Traces []RoadTrace `json:"-"`
|
||||
|
||||
Traffic map[string]int64 `json:"traffic"`
|
||||
TrafficFrom map[string]int64 `json:"traffic_from"`
|
||||
TotalTraffic int64 `json:"total_traffic"`
|
||||
StartupAt time.Time `json:"startup_at"`
|
||||
}
|
||||
|
||||
type RoadTrace struct {
|
||||
Timestamp time.Time `json:"timestamp"`
|
||||
Region string `json:"region"`
|
||||
Location string `json:"location"`
|
||||
Destination string `json:"destination"`
|
||||
Uri string `json:"uri"`
|
||||
IpAddress string `json:"ip_address"`
|
||||
UserAgent string `json:"user_agent"`
|
||||
Error RoadTraceError `json:"error"`
|
||||
}
|
||||
|
||||
type RoadTraceError struct {
|
||||
IsNull bool `json:"is_null"`
|
||||
Message string `json:"message"`
|
||||
}
|
||||
|
||||
func (v *RoadMetrics) AddTrace(trace RoadTrace) {
|
||||
if viper.GetBool("performance.low_memory") {
|
||||
return
|
||||
}
|
||||
|
||||
v.TotalTraffic++
|
||||
trace.Timestamp = time.Now()
|
||||
if _, ok := v.Traffic[trace.Region]; !ok {
|
||||
v.Traffic[trace.Region] = 1
|
||||
} else {
|
||||
v.Traffic[trace.Region]++
|
||||
}
|
||||
if _, ok := v.TrafficFrom[trace.IpAddress]; !ok {
|
||||
v.TrafficFrom[trace.IpAddress] = 1
|
||||
} else {
|
||||
v.TrafficFrom[trace.IpAddress]++
|
||||
}
|
||||
|
||||
v.Traces = append(v.Traces, trace)
|
||||
|
||||
// Garbage recycle
|
||||
if len(v.Traffic) > viper.GetInt("performance.traces_limit") {
|
||||
clear(v.Traffic)
|
||||
}
|
||||
if len(v.TrafficFrom) > viper.GetInt("performance.traces_limit") {
|
||||
clear(v.TrafficFrom)
|
||||
}
|
||||
if len(v.Traces) > viper.GetInt("performance.traces_limit") {
|
||||
v.Traces = v.Traces[:0]
|
||||
}
|
||||
}
|
201 pkg/navi/responder.go Normal file
@ -0,0 +1,201 @@
|
||||
package navi
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"github.com/fasthttp/websocket"
|
||||
"github.com/gofiber/fiber/v2"
|
||||
"github.com/gofiber/fiber/v2/middleware/proxy"
|
||||
"github.com/gofiber/fiber/v2/utils"
|
||||
"github.com/rs/zerolog/log"
|
||||
"github.com/samber/lo"
|
||||
"github.com/valyala/fasthttp"
|
||||
"io/fs"
|
||||
"net/http"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
func makeUnifiedResponse(c *fiber.Ctx, dest *Destination) error {
|
||||
if websocket.FastHTTPIsWebSocketUpgrade(c.Context()) {
|
||||
// Handle websocket
|
||||
return makeWebsocketResponse(c, dest)
|
||||
} else {
|
||||
// TODO Impl SSE with https://github.com/gofiber/recipes/blob/master/sse/main.go
|
||||
// Handle normal http request
|
||||
return makeHypertextResponse(c, dest)
|
||||
}
|
||||
}
|
||||
|
||||
func makeHypertextResponse(c *fiber.Ctx, dest *Destination) error {
|
||||
_, queries := dest.GetRawUri()
|
||||
raw := lo.Ternary(len(queries.Get("timeout")) > 0, queries.Get("timeout"), "5000")
|
||||
num, err := strconv.Atoi(raw)
|
||||
if err != nil {
|
||||
num = 5000
|
||||
}
|
||||
|
||||
limit := time.Duration(num) * time.Millisecond
|
||||
uri := dest.BuildUri(c)
|
||||
return proxy.Do(c, uri, &fasthttp.Client{
|
||||
ReadTimeout: limit,
|
||||
WriteTimeout: limit,
|
||||
})
|
||||
}
|
||||
|
||||
var wsUpgrader = websocket.FastHTTPUpgrader{}
|
||||
|
||||
func makeWebsocketResponse(c *fiber.Ctx, dest *Destination) error {
|
||||
uri := dest.MakeWebsocketUri(c)
|
||||
|
||||
// Upgrade connection
|
||||
return wsUpgrader.Upgrade(c.Context(), func(conn *websocket.Conn) {
|
||||
// Dial the destination
|
||||
remote, _, err := websocket.DefaultDialer.Dial(uri, nil)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer remote.Close()
|
||||
|
||||
// Read messages from remote
|
||||
disconnect := make(chan struct{})
|
||||
signal := make(chan struct {
|
||||
head int
|
||||
data []byte
|
||||
})
|
||||
go func() {
|
||||
defer close(disconnect)
|
||||
for {
|
||||
mode, message, err := remote.ReadMessage()
|
||||
if err != nil {
|
||||
log.Warn().Err(err).Msg("An error occurred during the websocket proxying...")
|
||||
return
|
||||
} else {
|
||||
signal <- struct {
|
||||
head int
|
||||
data []byte
|
||||
}{head: mode, data: message}
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
// Relay the destination websocket to client
|
||||
for {
|
||||
select {
|
||||
case <-disconnect:
|
||||
case val := <-signal:
|
||||
if err := conn.WriteMessage(val.head, val.data); err != nil {
|
||||
return
|
||||
}
|
||||
default:
|
||||
if head, data, err := conn.ReadMessage(); err != nil {
|
||||
return
|
||||
} else {
|
||||
remote.WriteMessage(head, data)
|
||||
}
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func makeFileResponse(c *fiber.Ctx, dest *Destination) error {
|
||||
uri, queries := dest.GetRawUri()
|
||||
root := http.Dir(uri)
|
||||
|
||||
method := c.Method()
|
||||
|
||||
// We only serve static assets for GET and HEAD methods
|
||||
if method != fiber.MethodGet && method != fiber.MethodHead {
|
||||
return c.Next()
|
||||
}
|
||||
|
||||
// Strip prefix
|
||||
prefix := c.Route().Path
|
||||
path := strings.TrimPrefix(c.Path(), prefix)
|
||||
if !strings.HasPrefix(path, "/") {
|
||||
path = "/" + path
|
||||
}
|
||||
|
||||
// Add prefix
|
||||
if queries.Get("prefix") != "" {
|
||||
path = queries.Get("prefix") + path
|
||||
}
|
||||
|
||||
if len(path) > 1 {
|
||||
path = utils.TrimRight(path, '/')
|
||||
}
|
||||
|
||||
file, err := root.Open(path)
|
||||
if err != nil && errors.Is(err, fs.ErrNotExist) {
|
||||
if queries.Get("suffix") != "" {
|
||||
file, err = root.Open(path + queries.Get("suffix"))
|
||||
}
|
||||
if err != nil && queries.Get("fallback") != "" {
|
||||
file, err = root.Open(queries.Get("fallback"))
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
return fiber.ErrNotFound
|
||||
}
|
||||
return fmt.Errorf("failed to open: %w", err)
|
||||
}
|
||||
|
||||
stat, err := file.Stat()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to stat: %w", err)
|
||||
}
|
||||
|
||||
// Serve index if the path is a directory
|
||||
if stat.IsDir() {
|
||||
indexFile := lo.Ternary(len(queries.Get("index")) > 0, queries.Get("index"), "index.html")
|
||||
indexPath := filepath.Join(path, indexFile)
|
||||
index, err := root.Open(indexPath)
|
||||
if err == nil {
|
||||
indexStat, err := index.Stat()
|
||||
if err == nil {
|
||||
file = index
|
||||
stat = indexStat
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
c.Status(fiber.StatusOK)
|
||||
|
||||
modTime := stat.ModTime()
|
||||
contentLength := int(stat.Size())
|
||||
|
||||
// Set Content-Type header
|
||||
if queries.Get("charset") == "" {
|
||||
c.Type(filepath.Ext(stat.Name()))
|
||||
} else {
|
||||
c.Type(filepath.Ext(stat.Name()), queries.Get("charset"))
|
||||
}
|
||||
|
||||
// Set Last-Modified header
|
||||
if !modTime.IsZero() {
|
||||
c.Set(fiber.HeaderLastModified, modTime.UTC().Format(http.TimeFormat))
|
||||
}
|
||||
|
||||
if method == fiber.MethodGet {
|
||||
maxAge, err := strconv.Atoi(queries.Get("maxAge"))
|
||||
if lo.Ternary(err == nil, maxAge, 0) > 0 {
|
||||
c.Set(fiber.HeaderCacheControl, "public, max-age="+queries.Get("maxAge"))
|
||||
}
|
||||
c.Response().SetBodyStream(file, contentLength)
|
||||
return nil
|
||||
}
|
||||
if method == fiber.MethodHead {
|
||||
c.Request().ResetBody()
|
||||
c.Response().SkipBody = true
|
||||
c.Response().Header.SetContentLength(contentLength)
|
||||
if err := file.Close(); err != nil {
|
||||
return fmt.Errorf("failed to close: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
return fiber.ErrNotFound
|
||||
}
|
57 pkg/navi/route.go Normal file
@ -0,0 +1,57 @@
|
||||
package navi
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
roadsign "git.solsynth.dev/goatworks/roadsign/pkg"
|
||||
"git.solsynth.dev/goatworks/roadsign/pkg/navi/transformers"
|
||||
"github.com/spf13/viper"
|
||||
|
||||
"github.com/gofiber/fiber/v2"
|
||||
)
|
||||
|
||||
type RoadApp struct {
|
||||
Regions []*Region `json:"regions"`
|
||||
Metrics *RoadMetrics `json:"metrics"`
|
||||
}
|
||||
|
||||
func (v *RoadApp) Forward(c *fiber.Ctx, dest *Destination) error {
|
||||
// Add reverse proxy headers
|
||||
ip := c.IP()
|
||||
scheme := c.Protocol()
|
||||
protocol := string(c.Request().Header.Protocol())
|
||||
c.Request().Header.Set(fiber.HeaderXForwardedFor, ip)
|
||||
c.Request().Header.Set(fiber.HeaderXForwardedHost, c.Get(fiber.HeaderHost))
|
||||
c.Request().Header.Set(fiber.HeaderXForwardedProto, scheme)
|
||||
c.Request().Header.Set(
|
||||
fiber.HeaderVia,
|
||||
fmt.Sprintf("%s %s", protocol, viper.GetString("central")),
|
||||
)
|
||||
c.Request().Header.Set(
|
||||
fiber.HeaderForwarded,
|
||||
fmt.Sprintf("by=%s; for=%s; host=%s; proto=%s", c.IP(), c.IP(), c.Get(fiber.HeaderHost), scheme),
|
||||
)
|
||||
|
||||
// Response body
|
||||
var err error
|
||||
switch dest.GetType() {
|
||||
case DestinationHypertext:
|
||||
err = makeUnifiedResponse(c, dest)
|
||||
case DestinationStaticFile:
|
||||
err = makeFileResponse(c, dest)
|
||||
default:
|
||||
err = fiber.ErrBadGateway
|
||||
}
|
||||
|
||||
// Apply helmet
|
||||
if dest.Helmet != nil {
|
||||
dest.Helmet.Apply(c)
|
||||
}
|
||||
|
||||
// Apply watermark
|
||||
c.Response().Header.Set(fiber.HeaderServer, "RoadSign")
|
||||
c.Response().Header.Set(fiber.HeaderXPoweredBy, fmt.Sprintf("RoadSign %s", roadsign.AppVersion))
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
type RequestTransformerConfig = transformers.TransformerConfig
|
89 pkg/navi/struct.go Normal file
@ -0,0 +1,89 @@
|
||||
package navi
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/url"
|
||||
"strings"
|
||||
|
||||
"git.solsynth.dev/goatworks/roadsign/pkg/navi/transformers"
|
||||
"git.solsynth.dev/goatworks/roadsign/pkg/warden"
|
||||
"github.com/gofiber/fiber/v2"
|
||||
"github.com/samber/lo"
|
||||
)
|
||||
|
||||
type Region struct {
|
||||
ID string `json:"id" toml:"id"`
|
||||
Disabled bool `json:"disabled" toml:"disabled"`
|
||||
Locations []Location `json:"locations" toml:"locations"`
|
||||
Applications []warden.Application `json:"applications" toml:"applications"`
|
||||
}
|
||||
|
||||
type Location struct {
|
||||
ID string `json:"id" toml:"id"`
|
||||
Hosts []string `json:"hosts" toml:"hosts"`
|
||||
Paths []string `json:"paths" toml:"paths"`
|
||||
Queries map[string]string `json:"queries" toml:"queries"`
|
||||
Headers map[string][]string `json:"headers" toml:"headers"`
|
||||
Destinations []Destination `json:"destinations" toml:"destinations"`
|
||||
}
|
||||
|
||||
type DestinationType = int8
|
||||
|
||||
const (
|
||||
DestinationHypertext = DestinationType(iota)
|
||||
DestinationStaticFile
|
||||
DestinationUnknown
|
||||
)
|
||||
|
||||
type Destination struct {
|
||||
ID string `json:"id" toml:"id"`
|
||||
Uri string `json:"uri" toml:"uri"`
|
||||
Helmet *HelmetConfig `json:"helmet" toml:"helmet"`
|
||||
Transformers []transformers.TransformerConfig `json:"transformers" toml:"transformers"`
|
||||
}
|
||||
|
||||
func (v *Destination) GetProtocol() string {
|
||||
return strings.SplitN(v.Uri, "://", 2)[0]
|
||||
}
|
||||
|
||||
func (v *Destination) GetType() DestinationType {
|
||||
protocol := v.GetProtocol()
|
||||
switch protocol {
|
||||
case "http", "https":
|
||||
return DestinationHypertext
|
||||
case "file", "files":
|
||||
return DestinationStaticFile
|
||||
}
|
||||
return DestinationUnknown
|
||||
}
|
||||
|
||||
func (v *Destination) GetRawUri() (string, url.Values) {
|
||||
uri := strings.SplitN(v.Uri, "://", 2)[1]
|
||||
data := strings.SplitN(uri, "?", 2)
|
||||
data = append(data, " ") // Make the data array at least have two elements
|
||||
qs, _ := url.ParseQuery(data[1])
|
||||
|
||||
return data[0], qs
|
||||
}
|
||||
|
||||
func (v *Destination) BuildUri(ctx *fiber.Ctx) string {
|
||||
var queries []string
|
||||
for k, v := range ctx.Queries() {
|
||||
parsed, _ := url.QueryUnescape(v)
|
||||
value := url.QueryEscape(parsed)
|
||||
queries = append(queries, fmt.Sprintf("%s=%s", k, value))
|
||||
}
|
||||
|
||||
path := string(ctx.Request().URI().Path())
|
||||
hash := string(ctx.Request().URI().Hash())
|
||||
protocol := v.GetProtocol()
|
||||
uri, _ := v.GetRawUri()
|
||||
|
||||
return protocol + "://" + uri + path +
|
||||
lo.Ternary(len(queries) > 0, "?"+strings.Join(queries, "&"), "") +
|
||||
lo.Ternary(len(hash) > 0, "#"+hash, "")
|
||||
}
|
||||
|
||||
func (v *Destination) MakeWebsocketUri(ctx *fiber.Ctx) string {
|
||||
return strings.Replace(v.BuildUri(ctx), "http", "ws", 1)
|
||||
}
|
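A note on the destination URIs above: GetType switches on the scheme before "://", while GetRawUri splits the remainder into an address and a query string, so file-serving options such as index, fallback, prefix, charset and maxAge (consumed by makeFileResponse in pkg/navi/responder.go) simply ride along as query parameters. A minimal sketch under that assumption — the concrete URI below is an illustrative example, not one taken from the repository:

package main

import (
	"fmt"

	"git.solsynth.dev/goatworks/roadsign/pkg/navi"
)

func main() {
	// Hypothetical static-file destination: serve ./dist, fall back to
	// index.html for unknown paths, and cache responses for an hour.
	dest := navi.Destination{
		ID:  "static-demo",
		Uri: "files://./dist?index=index.html&fallback=index.html&maxAge=3600",
	}

	fmt.Println(dest.GetType() == navi.DestinationStaticFile) // true

	addr, queries := dest.GetRawUri()
	fmt.Println(addr)                  // ./dist
	fmt.Println(queries.Get("maxAge")) // 3600
}

Hypertext destinations work the same way with an http or https scheme; there the timeout query parameter is read in makeHypertextResponse.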
41 pkg/navi/transformers/compress.go Normal file
@ -0,0 +1,41 @@
|
||||
package transformers
|
||||
|
||||
import (
|
||||
"github.com/gofiber/fiber/v2"
|
||||
"github.com/valyala/fasthttp"
|
||||
)
|
||||
|
||||
var CompressResponse = Transformer{
|
||||
ModifyResponse: func(options any, ctx *fiber.Ctx) error {
|
||||
opts := DeserializeOptions[struct {
|
||||
Level int `json:"level" toml:"level"`
|
||||
}](options)
|
||||
|
||||
fctx := func(c *fasthttp.RequestCtx) {}
|
||||
var compressor fasthttp.RequestHandler
|
||||
switch opts.Level {
|
||||
// Best Speed Mode
|
||||
case 1:
|
||||
compressor = fasthttp.CompressHandlerBrotliLevel(fctx,
|
||||
fasthttp.CompressBrotliBestSpeed,
|
||||
fasthttp.CompressBestSpeed,
|
||||
)
|
||||
// Best Compression Mode
|
||||
case 2:
|
||||
compressor = fasthttp.CompressHandlerBrotliLevel(fctx,
|
||||
fasthttp.CompressBrotliBestCompression,
|
||||
fasthttp.CompressBestCompression,
|
||||
)
|
||||
// Default Mode
|
||||
default:
|
||||
compressor = fasthttp.CompressHandlerBrotliLevel(fctx,
|
||||
fasthttp.CompressBrotliDefaultCompression,
|
||||
fasthttp.CompressDefaultCompression,
|
||||
)
|
||||
}
|
||||
|
||||
compressor(ctx.Context())
|
||||
|
||||
return nil
|
||||
},
|
||||
}
|
61 pkg/navi/transformers/module.go Normal file
@ -0,0 +1,61 @@
|
||||
package transformers
|
||||
|
||||
import (
|
||||
"github.com/gofiber/fiber/v2"
|
||||
jsoniter "github.com/json-iterator/go"
|
||||
)
|
||||
|
||||
// Definitions
|
||||
|
||||
var json = jsoniter.ConfigCompatibleWithStandardLibrary
|
||||
|
||||
type Transformer struct {
|
||||
ModifyRequest func(options any, ctx *fiber.Ctx) error
|
||||
ModifyResponse func(options any, ctx *fiber.Ctx) error
|
||||
}
|
||||
|
||||
type TransformerConfig struct {
|
||||
Type string `json:"type" toml:"type"`
|
||||
Options any `json:"options" toml:"options"`
|
||||
}
|
||||
|
||||
func (v *TransformerConfig) TransformRequest(ctx *fiber.Ctx) error {
|
||||
for k, f := range Transformers {
|
||||
if k == v.Type {
|
||||
if f.ModifyRequest != nil {
|
||||
return f.ModifyRequest(v.Options, ctx)
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (v *TransformerConfig) TransformResponse(ctx *fiber.Ctx) error {
|
||||
for k, f := range Transformers {
|
||||
if k == v.Type {
|
||||
if f.ModifyResponse != nil {
|
||||
return f.ModifyResponse(v.Options, ctx)
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Helpers
|
||||
|
||||
func DeserializeOptions[T any](data any) T {
|
||||
var out T
|
||||
raw, _ := json.Marshal(data)
|
||||
_ = json.Unmarshal(raw, &out)
|
||||
return out
|
||||
}
|
||||
|
||||
// Map of Transformers
|
||||
// Every transformer needs to be mapped here in order to take effect.
|
||||
|
||||
var Transformers = map[string]Transformer{
|
||||
"replacePath": ReplacePath,
|
||||
"compressResponse": CompressResponse,
|
||||
}
|
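Because TransformRequest and TransformResponse dispatch purely on the type string through the Transformers map above, adding a new transformer is just another entry in that map. A minimal sketch of a hypothetical addHeader transformer — its name, option fields, and registration key are assumptions for illustration only, not part of this repository:

package transformers

import "github.com/gofiber/fiber/v2"

// Hypothetical transformer: stamp one extra header onto every response
// of a destination that enables it.
var AddHeader = Transformer{
	ModifyResponse: func(options any, ctx *fiber.Ctx) error {
		opts := DeserializeOptions[struct {
			Name  string `json:"name" toml:"name"`
			Value string `json:"value" toml:"value"`
		}](options)

		if opts.Name != "" {
			ctx.Set(opts.Name, opts.Value)
		}
		return nil
	},
}

// It would then be registered alongside the built-ins, e.g.
//
//	var Transformers = map[string]Transformer{
//		"replacePath":      ReplacePath,
//		"compressResponse": CompressResponse,
//		"addHeader":        AddHeader,
//	}

A destination would reference it with a transformer entry whose type field is "addHeader" and whose options carry the name and value.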
26 pkg/navi/transformers/replace_path.go Normal file
@ -0,0 +1,26 @@
|
||||
package transformers
|
||||
|
||||
import (
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
"github.com/gofiber/fiber/v2"
|
||||
)
|
||||
|
||||
var ReplacePath = Transformer{
|
||||
ModifyRequest: func(options any, ctx *fiber.Ctx) error {
|
||||
opts := DeserializeOptions[struct {
|
||||
Pattern string `json:"pattern" toml:"pattern"`
|
||||
Value string `json:"value" toml:"value"`
|
||||
Repl string `json:"repl" toml:"repl"` // Use when complex mode(regexp) enabled
|
||||
Complex bool `json:"complex" toml:"complex"`
|
||||
}](options)
|
||||
path := string(ctx.Request().URI().Path())
|
||||
if !opts.Complex {
|
||||
ctx.Path(strings.ReplaceAll(path, opts.Pattern, opts.Value))
|
||||
} else if ex := regexp.MustCompile(opts.Pattern); ex != nil {
|
||||
ctx.Path(ex.ReplaceAllString(path, opts.Repl))
|
||||
}
|
||||
return nil
|
||||
},
|
||||
}
|
19 pkg/navi/warden.go Normal file
@ -0,0 +1,19 @@
|
||||
package navi
|
||||
|
||||
import "git.solsynth.dev/goatworks/roadsign/pkg/warden"
|
||||
|
||||
func InitializeWarden(regions []*Region) {
|
||||
pool := make([]*warden.AppInstance, 0)
|
||||
|
||||
for _, region := range regions {
|
||||
for _, application := range region.Applications {
|
||||
pool = append(pool, &warden.AppInstance{
|
||||
Manifest: application,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// Hot swap
|
||||
warden.InstancePool = pool
|
||||
warden.StartPool()
|
||||
}
|
73 pkg/sideload/applications.go Normal file
@ -0,0 +1,73 @@
|
||||
package sideload
|
||||
|
||||
import (
|
||||
"git.solsynth.dev/goatworks/roadsign/pkg/navi"
|
||||
"git.solsynth.dev/goatworks/roadsign/pkg/warden"
|
||||
"github.com/gofiber/fiber/v2"
|
||||
"github.com/samber/lo"
|
||||
)
|
||||
|
||||
func getApplications(c *fiber.Ctx) error {
|
||||
applications := lo.FlatMap(navi.R.Regions, func(item *navi.Region, idx int) []warden.ApplicationInfo {
|
||||
return lo.Map(item.Applications, func(item warden.Application, index int) warden.ApplicationInfo {
|
||||
return warden.ApplicationInfo{
|
||||
Application: item,
|
||||
Status: warden.GetFromPool(item.ID).Status,
|
||||
}
|
||||
})
|
||||
})
|
||||
|
||||
return c.JSON(applications)
|
||||
}
|
||||
|
||||
func getApplicationLogs(c *fiber.Ctx) error {
|
||||
if instance, ok := lo.Find(warden.InstancePool, func(item *warden.AppInstance) bool {
|
||||
return item.Manifest.ID == c.Params("id")
|
||||
}); !ok {
|
||||
return fiber.NewError(fiber.StatusNotFound)
|
||||
} else {
|
||||
return c.SendString(instance.Logs())
|
||||
}
|
||||
}
|
||||
|
||||
func letApplicationStart(c *fiber.Ctx) error {
|
||||
if instance, ok := lo.Find(warden.InstancePool, func(item *warden.AppInstance) bool {
|
||||
return item.Manifest.ID == c.Params("id")
|
||||
}); !ok {
|
||||
return fiber.NewError(fiber.StatusNotFound)
|
||||
} else {
|
||||
if err := instance.Wake(); err != nil {
|
||||
return fiber.NewError(fiber.StatusInternalServerError, err.Error())
|
||||
}
|
||||
return c.SendStatus(fiber.StatusOK)
|
||||
}
|
||||
}
|
||||
|
||||
func letApplicationStop(c *fiber.Ctx) error {
|
||||
if instance, ok := lo.Find(warden.InstancePool, func(item *warden.AppInstance) bool {
|
||||
return item.Manifest.ID == c.Params("id")
|
||||
}); !ok {
|
||||
return fiber.NewError(fiber.StatusNotFound)
|
||||
} else {
|
||||
if err := instance.Stop(); err != nil {
|
||||
return fiber.NewError(fiber.StatusInternalServerError, err.Error())
|
||||
}
|
||||
return c.SendStatus(fiber.StatusOK)
|
||||
}
|
||||
}
|
||||
|
||||
func letApplicationRestart(c *fiber.Ctx) error {
|
||||
if instance, ok := lo.Find(warden.InstancePool, func(item *warden.AppInstance) bool {
|
||||
return item.Manifest.ID == c.Params("id")
|
||||
}); !ok {
|
||||
return fiber.NewError(fiber.StatusNotFound)
|
||||
} else {
|
||||
if err := instance.Stop(); err != nil {
|
||||
return fiber.NewError(fiber.StatusInternalServerError, err.Error())
|
||||
}
|
||||
if err := instance.Start(); err != nil {
|
||||
return fiber.NewError(fiber.StatusInternalServerError, err.Error())
|
||||
}
|
||||
return c.SendStatus(fiber.StatusOK)
|
||||
}
|
||||
}
|
13 pkg/sideload/metadata.go Normal file
@ -0,0 +1,13 @@
|
||||
package sideload
|
||||
|
||||
import (
|
||||
roadsign "git.solsynth.dev/goatworks/roadsign/pkg"
|
||||
"github.com/gofiber/fiber/v2"
|
||||
)
|
||||
|
||||
func getMetadata(c *fiber.Ctx) error {
|
||||
return c.Status(fiber.StatusOK).JSON(fiber.Map{
|
||||
"server": "RoadSign",
|
||||
"version": roadsign.AppVersion,
|
||||
})
|
||||
}
|
14 pkg/sideload/metrics.go Normal file
@ -0,0 +1,14 @@
|
||||
package sideload
|
||||
|
||||
import (
|
||||
"git.solsynth.dev/goatworks/roadsign/pkg/navi"
|
||||
"github.com/gofiber/fiber/v2"
|
||||
)
|
||||
|
||||
func getTraffic(c *fiber.Ctx) error {
|
||||
return c.JSON(navi.R.Metrics)
|
||||
}
|
||||
|
||||
func getTraces(c *fiber.Ctx) error {
|
||||
return c.JSON(navi.R.Metrics.Traces)
|
||||
}
|
107 pkg/sideload/publish.go Normal file
@ -0,0 +1,107 @@
|
||||
package sideload
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"git.solsynth.dev/goatworks/roadsign/pkg/warden"
|
||||
"github.com/rs/zerolog/log"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"git.solsynth.dev/goatworks/roadsign/pkg/navi"
|
||||
"github.com/gofiber/fiber/v2"
|
||||
"github.com/google/uuid"
|
||||
"github.com/samber/lo"
|
||||
"github.com/saracen/fastzip"
|
||||
)
|
||||
|
||||
func doPublish(c *fiber.Ctx) error {
|
||||
var workdir string
|
||||
var destination *navi.Destination
|
||||
var application *warden.Application
|
||||
for _, item := range navi.R.Regions {
|
||||
if item.ID == c.Params("site") {
|
||||
for _, location := range item.Locations {
|
||||
for _, dest := range location.Destinations {
|
||||
if dest.ID == c.Params("slug") {
|
||||
destination = &dest
|
||||
workdir, _ = dest.GetRawUri()
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
for _, app := range item.Applications {
|
||||
if app.ID == c.Params("slug") {
|
||||
application = &app
|
||||
workdir = app.Workdir
|
||||
break
|
||||
}
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
var instance *warden.AppInstance
|
||||
if application != nil {
|
||||
if instance = warden.GetFromPool(application.ID); instance != nil {
|
||||
if err := instance.Stop(); err != nil {
|
||||
log.Warn().Err(err).Str("id", application.ID).Msg("Failed to stop application when publishing...")
|
||||
}
|
||||
}
|
||||
} else if destination != nil && destination.GetType() != navi.DestinationStaticFile {
|
||||
return fiber.ErrUnprocessableEntity
|
||||
} else if destination == nil {
|
||||
return fiber.ErrNotFound
|
||||
}
|
||||
|
||||
if c.QueryBool("overwrite", true) {
|
||||
files, _ := filepath.Glob(filepath.Join(workdir, "*"))
|
||||
for _, file := range files {
|
||||
_ = os.Remove(file)
|
||||
}
|
||||
}
|
||||
|
||||
if form, err := c.MultipartForm(); err == nil {
|
||||
files := form.File["attachments"]
|
||||
for _, file := range files {
|
||||
mimetype := lo.Ternary(len(c.Query("mimetype")) > 0, c.Query("mimetype"), file.Header["Content-Type"][0])
|
||||
switch mimetype {
|
||||
case "application/zip":
|
||||
dst := filepath.Join(os.TempDir(), uuid.NewString()+".zip")
|
||||
if err := c.SaveFile(file, dst); err != nil {
|
||||
return err
|
||||
} else {
|
||||
if ex, err := fastzip.NewExtractor(dst, workdir); err != nil {
|
||||
return err
|
||||
} else if err = ex.Extract(context.Background()); err != nil {
|
||||
defer ex.Close()
|
||||
return err
|
||||
}
|
||||
}
|
||||
_ = os.Remove(dst)
|
||||
default:
|
||||
dst := filepath.Join(workdir, file.Filename)
|
||||
if err := c.SaveFile(file, dst); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if postScript := c.FormValue("post-deploy-script", ""); len(postScript) > 0 {
|
||||
cmd := exec.Command("sh", "-c", postScript)
|
||||
cmd.Dir = filepath.Join(workdir)
|
||||
cmd.Env = append(cmd.Env, strings.Split(c.FormValue("post-deploy-environment", ""), "\n")...)
|
||||
if err := cmd.Run(); err != nil {
|
||||
return fiber.NewError(fiber.StatusInternalServerError, fmt.Sprintf("post-deploy script failed: %v", err))
|
||||
}
|
||||
}
|
||||
|
||||
if instance != nil {
|
||||
_ = instance.Wake()
|
||||
}
|
||||
|
||||
return c.SendStatus(fiber.StatusOK)
|
||||
}
|
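The publish webhook above is driven entirely by a multipart PUT: files arrive in the attachments field (a zip is extracted into the workdir when its MIME type resolves to application/zip, which the mimetype query parameter can force), post-deploy-script and post-deploy-environment are optional form fields, and the credential from security.credential is presented as the basic-auth password. A minimal client sketch under those assumptions — the host, port, region id and slug below are placeholders, not values from this repository:

package main

import (
	"bytes"
	"io"
	"log"
	"mime/multipart"
	"net/http"
	"os"
)

func main() {
	// Placeholders: adjust to your sideload port, region id and destination/application id.
	endpoint := "http://localhost:81/webhooks/publish/example-region/example-site?mimetype=application/zip"
	credential := os.Getenv("ROADSIGN_CREDENTIAL")

	body := &bytes.Buffer{}
	writer := multipart.NewWriter(body)

	// Attach the prebuilt archive; doPublish extracts zip uploads into the workdir.
	archive, err := os.Open("dist.zip")
	if err != nil {
		log.Fatal(err)
	}
	defer archive.Close()

	part, err := writer.CreateFormFile("attachments", "dist.zip")
	if err != nil {
		log.Fatal(err)
	}
	if _, err := io.Copy(part, archive); err != nil {
		log.Fatal(err)
	}

	// Optional: run a command inside the workdir once the upload has landed.
	_ = writer.WriteField("post-deploy-script", "echo deployed")
	_ = writer.Close()

	req, err := http.NewRequest(http.MethodPut, endpoint, body)
	if err != nil {
		log.Fatal(err)
	}
	req.Header.Set("Content-Type", writer.FormDataContentType())
	// The username is ignored; only the password is compared to security.credential.
	req.SetBasicAuth("roadsign", credential)

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	log.Println("publish status:", resp.Status)
}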
84 pkg/sideload/regions.go Normal file
@ -0,0 +1,84 @@
|
||||
package sideload
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"git.solsynth.dev/goatworks/roadsign/pkg/navi"
|
||||
"git.solsynth.dev/goatworks/roadsign/pkg/warden"
|
||||
"github.com/gofiber/fiber/v2"
|
||||
"github.com/pelletier/go-toml/v2"
|
||||
"github.com/samber/lo"
|
||||
"github.com/spf13/viper"
|
||||
)
|
||||
|
||||
func getRegions(c *fiber.Ctx) error {
|
||||
return c.JSON(navi.R.Regions)
|
||||
}
|
||||
|
||||
func getRegionConfig(c *fiber.Ctx) error {
|
||||
fp := filepath.Join(viper.GetString("paths.configs"), c.Params("id"))
|
||||
|
||||
var err error
|
||||
var data []byte
|
||||
if data, err = os.ReadFile(fp + ".toml"); err != nil {
|
||||
return fiber.NewError(fiber.StatusNotFound, err.Error())
|
||||
}
|
||||
|
||||
return c.Type("toml").SendString(string(data))
|
||||
}
|
||||
|
||||
func doSync(c *fiber.Ctx) error {
|
||||
req := string(c.Body())
|
||||
|
||||
id := c.Params("slug")
|
||||
path := filepath.Join(viper.GetString("paths.configs"), fmt.Sprintf("%s.toml", id))
|
||||
|
||||
if file, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0755); err != nil {
|
||||
return fiber.NewError(fiber.ErrInternalServerError.Code, err.Error())
|
||||
} else {
|
||||
var testOut map[string]any
|
||||
if err := toml.Unmarshal([]byte(req), &testOut); err != nil {
|
||||
return fiber.NewError(fiber.StatusBadRequest, fmt.Sprintf("invalid configuration: %v", err))
|
||||
}
|
||||
_, _ = file.Write([]byte(req))
|
||||
defer file.Close()
|
||||
}
|
||||
|
||||
var stopQueue, startQueue []*warden.AppInstance
|
||||
// Collect the instances that need to stop
|
||||
if region, ok := lo.Find(navi.R.Regions, func(item *navi.Region) bool {
|
||||
return item.ID == id
|
||||
}); ok {
|
||||
for _, application := range region.Applications {
|
||||
if instance := warden.GetFromPool(application.ID); instance != nil {
|
||||
stopQueue = append(stopQueue, instance)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Reload
|
||||
_ = navi.ReadInConfig(viper.GetString("paths.configs"))
|
||||
|
||||
// Collect the instances that need to start
|
||||
if region, ok := lo.Find(navi.R.Regions, func(item *navi.Region) bool {
|
||||
return item.ID == id
|
||||
}); ok {
|
||||
for _, application := range region.Applications {
|
||||
if instance := warden.GetFromPool(application.ID); instance != nil {
|
||||
startQueue = append(startQueue, instance)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Reboot
|
||||
for _, instance := range stopQueue {
|
||||
_ = instance.Stop()
|
||||
}
|
||||
for _, instance := range startQueue {
|
||||
_ = instance.Wake()
|
||||
}
|
||||
|
||||
return c.SendStatus(fiber.StatusOK)
|
||||
}
|
18 pkg/sideload/reload.go Normal file
@ -0,0 +1,18 @@
|
||||
package sideload
|
||||
|
||||
import (
|
||||
"git.solsynth.dev/goatworks/roadsign/pkg/navi"
|
||||
"github.com/gofiber/fiber/v2"
|
||||
"github.com/spf13/viper"
|
||||
)
|
||||
|
||||
func doReload(c *fiber.Ctx) error {
|
||||
if err := navi.ReadInConfig(viper.GetString("paths.configs")); err != nil {
|
||||
return fiber.NewError(fiber.StatusInternalServerError, err.Error())
|
||||
}
|
||||
if c.QueryBool("warden", false) {
|
||||
navi.InitializeWarden(navi.R.Regions)
|
||||
}
|
||||
|
||||
return c.SendStatus(fiber.StatusOK)
|
||||
}
|
66 pkg/sideload/server.go Normal file
@ -0,0 +1,66 @@
|
||||
package sideload
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
roadsign "git.solsynth.dev/goatworks/roadsign/pkg"
|
||||
"github.com/gofiber/fiber/v2"
|
||||
"github.com/gofiber/fiber/v2/middleware/basicauth"
|
||||
"github.com/gofiber/fiber/v2/middleware/logger"
|
||||
jsoniter "github.com/json-iterator/go"
|
||||
"github.com/rs/zerolog/log"
|
||||
"github.com/spf13/viper"
|
||||
)
|
||||
|
||||
func InitSideload() *fiber.App {
|
||||
app := fiber.New(fiber.Config{
|
||||
AppName: "RoadSign Sideload",
|
||||
ServerHeader: "RoadSign Sideload",
|
||||
DisableStartupMessage: true,
|
||||
EnableIPValidation: true,
|
||||
JSONDecoder: jsoniter.ConfigCompatibleWithStandardLibrary.Unmarshal,
|
||||
JSONEncoder: jsoniter.ConfigCompatibleWithStandardLibrary.Marshal,
|
||||
ProxyHeader: fiber.HeaderXForwardedFor,
|
||||
EnablePrintRoutes: viper.GetBool("debug.print_routes"),
|
||||
TrustedProxies: viper.GetStringSlice("sideload.trusted_proxies"),
|
||||
BodyLimit: viper.GetInt("hypertext.limitation.max_body_size"),
|
||||
})
|
||||
|
||||
if viper.GetBool("telemetry.request_logging") {
|
||||
app.Use(logger.New(logger.Config{
|
||||
Output: log.Logger,
|
||||
Format: "[Sideload] [${time}] ${status} - ${latency} ${method} ${path}\n",
|
||||
}))
|
||||
}
|
||||
|
||||
app.Use(basicauth.New(basicauth.Config{
|
||||
Realm: fmt.Sprintf("RoadSign v%s", roadsign.AppVersion),
|
||||
Authorizer: func(_, password string) bool {
|
||||
return password == viper.GetString("security.credential")
|
||||
},
|
||||
}))
|
||||
|
||||
cgi := app.Group("/cgi").Name("CGI")
|
||||
{
|
||||
cgi.Get("/metadata", getMetadata)
|
||||
cgi.Get("/traffic", getTraffic)
|
||||
cgi.Get("/traces", getTraces)
|
||||
cgi.Get("/stats", getStats)
|
||||
cgi.Get("/regions", getRegions)
|
||||
cgi.Get("/regions/cfg/:id", getRegionConfig)
|
||||
cgi.Get("/applications", getApplications)
|
||||
cgi.Get("/applications/:id/logs", getApplicationLogs)
|
||||
cgi.Post("/applications/:id/start", letApplicationStart)
|
||||
cgi.Post("/applications/:id/stop", letApplicationStop)
|
||||
cgi.Post("/applications/:id/restart", letApplicationRestart)
|
||||
|
||||
cgi.Post("/reload", doReload)
|
||||
}
|
||||
|
||||
webhooks := app.Group("/webhooks").Name("WebHooks")
|
||||
{
|
||||
webhooks.Put("/publish/:site/:slug", doPublish)
|
||||
webhooks.Put("/sync/:slug", doSync)
|
||||
}
|
||||
|
||||
return app
|
||||
}
|
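Every route in the /cgi group above sits behind the same basic-auth gate, where only the password is checked against security.credential. A minimal sketch of reading the aggregated statistics endpoint — the host and port are placeholder assumptions matching the default sideload port in settings.toml:

package main

import (
	"encoding/json"
	"log"
	"net/http"
	"os"
)

func main() {
	req, err := http.NewRequest(http.MethodGet, "http://localhost:81/cgi/stats", nil)
	if err != nil {
		log.Fatal(err)
	}
	// Any username is accepted; only the password is verified.
	req.SetBasicAuth("roadsign", os.Getenv("ROADSIGN_CREDENTIAL"))

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	var stats map[string]any
	if err := json.NewDecoder(resp.Body).Decode(&stats); err != nil {
		log.Fatal(err)
	}
	log.Printf("regions=%v uptime(ms)=%v", stats["regions"], stats["uptime"])
}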
33 pkg/sideload/statistics.go Normal file
@ -0,0 +1,33 @@
|
||||
package sideload
|
||||
|
||||
import (
|
||||
"git.solsynth.dev/goatworks/roadsign/pkg/navi"
|
||||
"git.solsynth.dev/goatworks/roadsign/pkg/warden"
|
||||
"github.com/gofiber/fiber/v2"
|
||||
"github.com/samber/lo"
|
||||
"time"
|
||||
)
|
||||
|
||||
func getStats(c *fiber.Ctx) error {
|
||||
locations := lo.FlatMap(navi.R.Regions, func(item *navi.Region, idx int) []navi.Location {
|
||||
return item.Locations
|
||||
})
|
||||
destinations := lo.FlatMap(locations, func(item navi.Location, idx int) []navi.Destination {
|
||||
return item.Destinations
|
||||
})
|
||||
applications := lo.FlatMap(navi.R.Regions, func(item *navi.Region, idx int) []warden.Application {
|
||||
return item.Applications
|
||||
})
|
||||
|
||||
return c.JSON(fiber.Map{
|
||||
"regions": len(navi.R.Regions),
|
||||
"locations": len(locations),
|
||||
"destinations": len(destinations),
|
||||
"applications": len(applications),
|
||||
"uptime": time.Since(navi.R.Metrics.StartupAt).Milliseconds(),
|
||||
"traffic": fiber.Map{
|
||||
"total": navi.R.Metrics.TotalTraffic,
|
||||
"unique_client": len(navi.R.Metrics.TrafficFrom),
|
||||
},
|
||||
})
|
||||
}
|
125 pkg/warden/executor.go Normal file
@ -0,0 +1,125 @@
|
||||
package warden
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/rs/zerolog/log"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/samber/lo"
|
||||
)
|
||||
|
||||
var InstancePool []*AppInstance
|
||||
|
||||
func GetFromPool(id string) *AppInstance {
|
||||
val, ok := lo.Find(InstancePool, func(item *AppInstance) bool {
|
||||
return item.Manifest.ID == id
|
||||
})
|
||||
return lo.Ternary(ok, val, nil)
|
||||
}
|
||||
|
||||
func StartPool() []error {
|
||||
var errors []error
|
||||
for _, instance := range InstancePool {
|
||||
if err := instance.Wake(); err != nil {
|
||||
errors = append(errors, err)
|
||||
}
|
||||
}
|
||||
return errors
|
||||
}
|
||||
|
||||
type AppStatus = int8
|
||||
|
||||
const (
|
||||
AppCreated = AppStatus(iota)
|
||||
AppStarting
|
||||
AppStarted
|
||||
AppExited
|
||||
AppFailure
|
||||
)
|
||||
|
||||
type AppInstance struct {
|
||||
Manifest Application `json:"manifest"`
|
||||
|
||||
Cmd *exec.Cmd `json:"-"`
|
||||
Logger strings.Builder `json:"-"`
|
||||
|
||||
Status AppStatus `json:"status"`
|
||||
}
|
||||
|
||||
func (v *AppInstance) Wake() error {
|
||||
if v.Cmd != nil {
|
||||
return nil
|
||||
}
|
||||
if v.Cmd == nil {
|
||||
return v.Start()
|
||||
}
|
||||
if v.Cmd.Process == nil || v.Cmd.ProcessState == nil {
|
||||
return v.Start()
|
||||
}
|
||||
if v.Cmd.ProcessState.Exited() {
|
||||
return v.Start()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (v *AppInstance) Start() error {
|
||||
manifest := v.Manifest
|
||||
|
||||
if len(manifest.Command) <= 0 {
|
||||
return fmt.Errorf("you need to set the command for %s to enable the process manager", manifest.ID)
|
||||
}
|
||||
|
||||
v.Cmd = exec.Command(manifest.Command[0], manifest.Command[1:]...)
|
||||
v.Cmd.Dir = filepath.Join(manifest.Workdir)
|
||||
v.Cmd.Env = append(v.Cmd.Env, manifest.Environment...)
|
||||
v.Cmd.Stdout = &v.Logger
|
||||
v.Cmd.Stderr = &v.Logger
|
||||
|
||||
// Monitor
|
||||
go func() {
|
||||
for {
|
||||
if v.Cmd != nil && v.Cmd.Process == nil {
|
||||
v.Status = AppStarting
|
||||
} else if v.Cmd != nil && v.Cmd.ProcessState == nil {
|
||||
v.Status = AppStarted
|
||||
} else {
|
||||
v.Status = AppFailure
|
||||
v.Cmd = nil
|
||||
return
|
||||
}
|
||||
time.Sleep(1000 * time.Millisecond)
|
||||
}
|
||||
}()
|
||||
|
||||
return v.Cmd.Start()
|
||||
}
|
||||
|
||||
func (v *AppInstance) Stop() error {
|
||||
if v.Cmd != nil && v.Cmd.Process != nil {
|
||||
if err := v.Cmd.Process.Signal(syscall.SIGTERM); err != nil {
|
||||
log.Warn().Int("pid", v.Cmd.Process.Pid).Err(err).Msgf("Failed to send SIGTERM to process...")
|
||||
if err = v.Cmd.Process.Kill(); err != nil {
|
||||
log.Error().Int("pid", v.Cmd.Process.Pid).Err(err).Msgf("Failed to kill process...")
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// We need to wait for the process to exit
|
||||
// The wait syscall will read the exit status of the process
|
||||
// So that we don't produce defunct processes
|
||||
// Refer to https://stackoverflow.com/questions/46293435/golang-exec-command-cause-a-lot-of-defunct-processes
|
||||
_ = v.Cmd.Wait()
|
||||
|
||||
v.Cmd = nil
|
||||
v.Status = AppExited
|
||||
return nil
|
||||
}
|
||||
|
||||
func (v *AppInstance) Logs() string {
|
||||
return v.Logger.String()
|
||||
}
|
13 pkg/warden/manifest.go Normal file
@ -0,0 +1,13 @@
package warden

type Application struct {
    ID          string   `json:"id" toml:"id"`
    Workdir     string   `json:"workdir" toml:"workdir"`
    Command     []string `json:"command" toml:"command"`
    Environment []string `json:"environment" toml:"environment"`
}

type ApplicationInfo struct {
    Application
    Status AppStatus `json:"status"`
}
@ -1,12 +0,0 @@
<!doctype html>
<html>
  <head>
    <meta charset="utf-8" />
    <meta name="viewport" content="width=device-width, initial-scale=1" />
    <title>Hello, World!</title>
  </head>
  <body>
    <p>Hello, there!</p>
    <p>Here's the roadsign benchmarking test data!</p>
  </body>
</html>
@ -1,21 +0,0 @@
id = "index"

[[locations]]
id = "root"
hosts = ["localhost"]
paths = ["/"]
[[locations.destinations]]
id = "websocket"
uri = "http://localhost:8765"
# [[locations.destinations]]
# id = "hypertext"
# uri = "https://example.com"
# [[locations.destinations]]
# id = "static"
# uri = "files://regions?index=index.html"


# [[applications]]
# id = "script"
# exe = "./script.sh"
# workdir = "regions"
@ -1 +0,0 @@
Ko Ko Da Yo~
@ -1,3 +0,0 @@
#!/bin/bash

echo "Good morning!" > ./kokodayo.txt
@ -1,15 +0,0 @@
<!doctype html>
<html>
  <head>
    <meta charset="utf-8" />
    <meta name="viewport" content="width=device-width, initial-scale=1" />
    <title>Hello, World!</title>
  </head>
  <body>
    <p>Hello, there!</p>
    <p>
      Here's the roadsign benchmarking test data! And you are in the subfolder
      now!
    </p>
  </body>
</html>
37 settings.toml Normal file
@ -0,0 +1,37 @@
id = "central"

[debug]
print_routes = false

[sideload]
ports = [":81"]
secured_ports = []
trusted_proxies = ["localhost"]

[hypertext]
ports = [":8000"]
secured_ports = []
force_https = false

# [[hypertext.certificate]]
# key = "./certs/privkey.pem"
# pem = "./certs/fullchain.pem"

[hypertext.limitation]
max_body_size = 549_755_813_888 # 512 GiB
max_qps = -1

[paths]
configs = "./config"

[telemetry]
request_logging = true
capture_traces = true

[performance]
low_memory = true
traces_limit = 256
prefork = false

[security]
credential = "e81f43f32d934271af6322e5376f5f59"
@ -1,10 +0,0 @@
use config::Config;

pub fn load_settings() -> Config {
    Config::builder()
        .add_source(config::File::with_name("Settings"))
        .add_source(config::File::with_name("/Settings"))
        .add_source(config::Environment::with_prefix("ROADSIGN"))
        .build()
        .unwrap()
}
@ -1,11 +0,0 @@
use config::Config;
use lazy_static::lazy_static;
use tokio::sync::RwLock;

use crate::config::loader::load_settings;

pub mod loader;

lazy_static! {
    pub static ref CFG: RwLock<Config> = RwLock::new(load_settings());
}
69 src/main.rs
@ -1,69 +0,0 @@
extern crate core;

mod config;
mod proxies;
mod sideload;
mod warden;
mod server;
pub mod tls;

use std::error;
use lazy_static::lazy_static;
use proxies::RoadInstance;
use tokio::sync::Mutex;
use tokio::task::JoinSet;
use tracing::{error, info, Level};
use crate::proxies::server::build_proxies;
use crate::sideload::server::build_sideload;

lazy_static! {
    static ref ROAD: Mutex<RoadInstance> = Mutex::new(RoadInstance::new());
}

#[tokio::main]
async fn main() -> Result<(), Box<dyn error::Error>> {
    // Setting up logging
    tracing_subscriber::fmt()
        .with_max_level(Level::DEBUG)
        .init();

    // Prepare all the stuff
    info!("Loading proxy regions...");
    match proxies::loader::scan_regions(
        config::CFG
            .read()
            .await
            .get_string("regions")?
    ) {
        Err(_) => error!("Loading proxy regions... failed"),
        Ok((regions, count)) => {
            ROAD.lock().await.regions = regions;
            info!(count, "Loading proxy regions... done")
        }
    };

    let mut server_set = JoinSet::new();

    // Proxies
    for server in build_proxies().await? {
        server_set.spawn(server);
    }

    // Sideload
    server_set.spawn(build_sideload().await?);

    // Process manager
    {
        let mut app = ROAD.lock().await;
        {
            let reg = app.regions.clone();
            app.warden.scan(reg);
        }
        app.warden.start().await;
    }

    // Wait for web servers
    server_set.join_next().await;

    Ok(())
}
@ -1,117 +0,0 @@
use std::collections::HashMap;

use queryst::parse;
use serde::{Deserialize, Serialize};
use serde_json::json;

use crate::warden::Application;

use super::responder::StaticResponderConfig;

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Region {
    pub id: String,
    pub locations: Vec<Location>,
    pub applications: Option<Vec<Application>>,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Location {
    pub id: String,
    pub hosts: Vec<String>,
    pub paths: Vec<String>,
    pub headers: Option<HashMap<String, String>>,
    pub queries: Option<Vec<String>>,
    pub methods: Option<Vec<String>>,
    pub destinations: Vec<Destination>,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Destination {
    pub id: String,
    pub uri: String,
    pub timeout: Option<u32>,
    pub weight: Option<u32>,
}

pub enum DestinationType {
    Hypertext,
    StaticFiles,
    Unknown,
}

impl Destination {
    pub fn get_type(&self) -> DestinationType {
        match self.get_protocol() {
            "http" | "https" => DestinationType::Hypertext,
            "file" | "files" => DestinationType::StaticFiles,
            _ => DestinationType::Unknown,
        }
    }

    pub fn get_protocol(&self) -> &str {
        self.uri.as_str().splitn(2, "://").collect::<Vec<_>>()[0]
    }

    pub fn get_queries(&self) -> &str {
        self.uri
            .as_str()
            .splitn(2, '?')
            .collect::<Vec<_>>()
            .get(1)
            .unwrap_or(&"")
    }

    pub fn get_host(&self) -> &str {
        self
            .uri
            .as_str()
            .splitn(2, "://")
            .collect::<Vec<_>>()
            .get(1)
            .unwrap_or(&"")
            .splitn(2, '?')
            .collect::<Vec<_>>()[0]
    }

    pub fn get_hypertext_uri(&self) -> Result<String, ()> {
        match self.get_protocol() {
            "http" => Ok("http://".to_string() + self.get_host()),
            "https" => Ok("https://".to_string() + self.get_host()),
            _ => Err(()),
        }
    }

    pub fn get_static_config(&self) -> Result<StaticResponderConfig, ()> {
        match self.get_protocol() {
            "file" | "files" => {
                let queries = parse(self.get_queries()).unwrap_or(json!({}));
                Ok(StaticResponderConfig {
                    uri: self.get_host().to_string(),
                    utf8: queries
                        .get("utf8")
                        .and_then(|val| val.as_bool())
                        .unwrap_or(false),
                    browse: queries
                        .get("browse")
                        .and_then(|val| val.as_bool())
                        .unwrap_or(false),
                    with_slash: queries
                        .get("slash")
                        .and_then(|val| val.as_bool())
                        .unwrap_or(false),
                    index: queries
                        .get("index")
                        .and_then(|val| val.as_str().map(str::to_string)),
                    fallback: queries
                        .get("fallback")
                        .and_then(|val| val.as_str().map(str::to_string)),
                    suffix: queries
                        .get("suffix")
                        .and_then(|val| val.as_str().map(str::to_string)),
                })
            }
            _ => Err(()),
        }
    }
}
@ -1,55 +0,0 @@
use std::ffi::OsStr;
use std::fs::{self, DirEntry};
use std::io;

use tracing::warn;

use crate::proxies::config;

pub fn scan_regions(basepath: String) -> io::Result<(Vec<config::Region>, u32)> {
    let mut count: u32 = 0;
    let mut result = vec![];
    for entry in fs::read_dir(basepath)? {
        if let Ok(val) = load_region(entry.unwrap()) {
            result.push(val);
            count += 1;
        };
    }

    Ok((result, count))
}

pub fn load_region(file: DirEntry) -> Result<config::Region, String> {
    if file.metadata().map(|val| val.is_dir()).unwrap()
        || file.path().extension().and_then(OsStr::to_str).unwrap() != "toml"
    {
        return Err("File entry wasn't toml file".to_string());
    }

    let fp = file.path();
    let content = match fs::read_to_string(fp.clone()) {
        Ok(val) => val,
        Err(err) => {
            warn!(
                err = format!("{:?}", err),
                filepath = fp.clone().to_str(),
                "An error occurred when loading region, skipped."
            );
            return Err("Failed to load file".to_string());
        }
    };

    let data: config::Region = match toml::from_str(&content) {
        Ok(val) => val,
        Err(err) => {
            warn!(
                err = format!("{:?}", err),
                filepath = fp.clone().to_str(),
                "An error occurred when parsing region, skipped."
            );
            return Err("Failed to parse file".to_string());
        }
    };

    Ok(data)
}
@ -1,111 +0,0 @@
use std::collections::VecDeque;

use serde::{Deserialize, Serialize};

use super::config::{Destination, Location, Region};

#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct RoadTrace {
    pub region: String,
    pub location: String,
    pub destination: String,
    pub ip_address: String,
    pub user_agent: String,
    pub error: Option<String>,
}

impl RoadTrace {
    pub fn from_structs(
        ip: String,
        ua: String,
        reg: Region,
        loc: Location,
        end: Destination,
    ) -> RoadTrace {
        RoadTrace {
            ip_address: ip,
            user_agent: ua,
            region: reg.id,
            location: loc.id,
            destination: end.id,
            error: None,
        }
    }

    pub fn from_structs_with_error(
        ip: String,
        ua: String,
        reg: Region,
        loc: Location,
        end: Destination,
        err: String,
    ) -> RoadTrace {
        let mut trace = Self::from_structs(ip, ua, reg, loc, end);
        trace.error = Some(err);
        trace
    }
}

#[derive(Debug, Clone)]
pub struct RoadMetrics {
    pub requests_count: u64,
    pub failures_count: u64,

    pub recent_successes: VecDeque<RoadTrace>,
    pub recent_errors: VecDeque<RoadTrace>,
}

const MAX_TRACE_COUNT: usize = 32;

impl RoadMetrics {
    pub fn new() -> RoadMetrics {
        RoadMetrics {
            requests_count: 0,
            failures_count: 0,
            recent_successes: VecDeque::new(),
            recent_errors: VecDeque::new(),
        }
    }

    pub fn get_success_rate(&self) -> f64 {
        if self.requests_count > 0 {
            (self.requests_count - self.failures_count) as f64 / self.requests_count as f64
        } else {
            0.0
        }
    }

    pub fn add_success_request(
        &mut self,
        ip: String,
        ua: String,
        reg: Region,
        loc: Location,
        end: Destination,
    ) {
        self.requests_count += 1;
        self.recent_successes
            .push_back(RoadTrace::from_structs(ip, ua, reg, loc, end));
        if self.recent_successes.len() > MAX_TRACE_COUNT {
            self.recent_successes.pop_front();
        }
    }

    pub fn add_failure_request(
        &mut self,
        ip: String,
        ua: String,
        reg: Region,
        loc: Location,
        end: Destination,
        err: String, // For some reason error is rarely cloneable, so we use preformatted message
    ) {
        self.requests_count += 1;
        self.failures_count += 1;
        self.recent_errors
            .push_back(RoadTrace::from_structs_with_error(ip, ua, reg, loc, end, err));
        if self.recent_errors.len() > MAX_TRACE_COUNT {
            self.recent_errors.pop_front();
        }
    }
}
@ -1,136 +0,0 @@
use actix_web::http::header::{ContentType, HeaderMap};
use actix_web::http::{Method, StatusCode, Uri};
use regex::Regex;
use wildmatch::WildMatch;
use actix_web::{error, HttpResponse};
use derive_more::{Display};

use crate::warden::WardenInstance;

use self::{
    config::{Location, Region},
    metrics::RoadMetrics,
};

pub mod config;
pub mod loader;
pub mod metrics;
pub mod responder;
pub mod route;
pub mod server;

#[derive(Debug, Display)]
pub enum ProxyError {
    #[display(fmt = "Upgrade required for this connection")]
    UpgradeRequired,

    #[display(fmt = "Remote gateway issue")]
    BadGateway,

    #[display(fmt = "No configured able to process this request")]
    NoGateway,

    #[display(fmt = "Not found")]
    NotFound,

    #[display(fmt = "Only accepts method GET")]
    MethodGetOnly,

    #[display(fmt = "Invalid request path")]
    InvalidRequestPath,

    #[display(fmt = "Upstream does not support protocol you used")]
    NotImplemented,
}

impl error::ResponseError for ProxyError {
    fn status_code(&self) -> StatusCode {
        match *self {
            ProxyError::UpgradeRequired => StatusCode::UPGRADE_REQUIRED,
            ProxyError::BadGateway => StatusCode::BAD_GATEWAY,
            ProxyError::NoGateway => StatusCode::NOT_FOUND,
            ProxyError::NotFound => StatusCode::NOT_FOUND,
            ProxyError::MethodGetOnly => StatusCode::METHOD_NOT_ALLOWED,
            ProxyError::InvalidRequestPath => StatusCode::BAD_REQUEST,
            ProxyError::NotImplemented => StatusCode::NOT_IMPLEMENTED,
        }
    }

    fn error_response(&self) -> HttpResponse {
        HttpResponse::build(self.status_code())
            .insert_header(ContentType::html())
            .body(self.to_string())
    }
}

#[derive(Debug, Clone)]
pub struct RoadInstance {
    pub regions: Vec<Region>,
    pub metrics: RoadMetrics,
    pub warden: WardenInstance,
}

impl RoadInstance {
    pub fn new() -> RoadInstance {
        RoadInstance {
            regions: vec![],
            warden: WardenInstance {
                applications: vec![],
            },
            metrics: RoadMetrics::new(),
        }
    }

    pub fn filter(
        &self,
        uri: &Uri,
        method: &Method,
        headers: &HeaderMap,
    ) -> Option<(&Region, &Location)> {
        self.regions.iter().find_map(|region| {
            let location = region.locations.iter().find(|location| {
                let mut hosts = location.hosts.iter();
                if !hosts.any(|item| {
                    WildMatch::new(item.as_str()).matches(uri.host().unwrap_or("localhost"))
                }) {
                    return false;
                }

                let mut paths = location.paths.iter();
                if !paths.any(|item| {
                    uri.path().starts_with(item)
                        || Regex::new(item.as_str()).unwrap().is_match(uri.path())
                }) {
                    return false;
                }

                if let Some(val) = location.methods.clone() {
                    if !val.iter().any(|item| *item == method.to_string()) {
                        return false;
                    }
                }

                if let Some(val) = location.headers.clone() {
                    match !val.keys().all(|item| {
                        headers.get(item).unwrap()
                            == location.headers.clone().unwrap().get(item).unwrap()
                    }) {
                        true => return false,
                        false => (),
                    }
                };

                if let Some(val) = location.queries.clone() {
                    let queries: Vec<&str> = uri.query().unwrap_or("").split('&').collect();
                    if !val.iter().all(|item| queries.contains(&item.as_str())) {
                        return false;
                    }
                }

                true
            });

            location.map(|location| (region, location))
        })
    }
}
@ -1,288 +0,0 @@
use crate::proxies::ProxyError;
use crate::proxies::ProxyError::{BadGateway, UpgradeRequired};
use actix_files::NamedFile;
use actix_web::http::{header, Method};
use actix_web::{web, HttpRequest, HttpResponse};
use awc::error::HeaderValue;
use awc::http::Uri;
use awc::Client;
use futures::Sink;
use futures::stream::StreamExt;
use std::str::FromStr;
use std::time::Duration;
use std::{
    ffi::OsStr,
    path::{Path, PathBuf},
};
use actix::io::{SinkWrite, WriteHandler};
use actix::{Actor, ActorContext, AsyncContext, StreamHandler};
use actix_web_actors::ws;
use actix_web_actors::ws::{CloseReason, handshake, ProtocolError, WebsocketContext};
use tracing::log::warn;

pub async fn respond_hypertext(
    uri: String,
    req: HttpRequest,
    payload: web::Payload,
    client: web::Data<Client>,
) -> Result<HttpResponse, ProxyError> {
    let mut append_part = req.uri().to_string();
    if let Some(stripped_uri) = append_part.strip_prefix('/') {
        append_part = stripped_uri.to_string();
    }

    let uri = Uri::from_str(uri.as_str()).expect("Invalid upstream");
    let target_url = format!("{}{}", uri, append_part);

    let forwarded_req = client
        .request_from(target_url.as_str(), req.head())
        .insert_header((header::HOST, uri.host().expect("Invalid upstream")));

    let forwarded_req = match req.connection_info().realip_remote_addr() {
        Some(addr) => forwarded_req
            .insert_header((header::X_FORWARDED_FOR, addr))
            .insert_header((header::X_FORWARDED_PROTO, req.connection_info().scheme()))
            .insert_header((header::X_FORWARDED_HOST, req.connection_info().host()))
            .insert_header((
                header::FORWARDED,
                format!(
                    "by={};for={};host={};proto={}",
                    addr,
                    addr,
                    req.connection_info().host(),
                    req.connection_info().scheme()
                ),
            )),
        None => forwarded_req,
    };

    if req
        .headers()
        .get(header::UPGRADE)
        .unwrap_or(&HeaderValue::from_static(""))
        .to_str()
        .unwrap_or("")
        .to_lowercase()
        == "websocket"
    {
        let uri = uri.to_string().replacen("http", "ws", 1);
        return respond_websocket(uri, req, payload).await;
    }

    let res = forwarded_req
        .timeout(Duration::from_secs(1800))
        .send_stream(payload)
        .await
        .map_err(|err| {
            warn!("Remote gateway issue... {}", err);
            BadGateway
        })?;

    let mut client_resp = HttpResponse::build(res.status());
    for (header_name, header_value) in res
        .headers()
        .iter()
        .filter(|(h, _)| *h != header::CONNECTION && *h != header::CONTENT_ENCODING)
    {
        client_resp.insert_header((header_name.clone(), header_value.clone()));
    }

    Ok(client_resp.streaming(res))
}

pub struct WebsocketProxy<S>
where
    S: Unpin + Sink<ws::Message>,
{
    send: SinkWrite<ws::Message, S>,
}

impl<S> WriteHandler<ProtocolError> for WebsocketProxy<S>
where
    S: Unpin + 'static + Sink<ws::Message>,
{
    fn error(&mut self, err: ProtocolError, ctx: &mut Self::Context) -> actix::Running {
        self.error(err, ctx);
        actix::Running::Stop
    }
}

impl<S> Actor for WebsocketProxy<S>
where
    S: Unpin + 'static + Sink<ws::Message>,
{
    type Context = WebsocketContext<Self>;
}

impl<S> StreamHandler<Result<ws::Frame, ProtocolError>> for WebsocketProxy<S>
where
    S: Unpin + Sink<ws::Message> + 'static,
{
    fn handle(&mut self, item: Result<ws::Frame, ProtocolError>, ctx: &mut Self::Context) {
        let frame = match item {
            Ok(frame) => frame,
            Err(err) => return self.error(err, ctx),
        };
        let msg = match frame {
            ws::Frame::Text(t) => match t.try_into() {
                Ok(t) => ws::Message::Text(t),
                Err(e) => {
                    self.error(e, ctx);
                    return;
                }
            },
            ws::Frame::Binary(b) => ws::Message::Binary(b),
            ws::Frame::Continuation(c) => ws::Message::Continuation(c),
            ws::Frame::Ping(p) => ws::Message::Ping(p),
            ws::Frame::Pong(p) => ws::Message::Pong(p),
            ws::Frame::Close(r) => ws::Message::Close(r),
        };

        ctx.write_raw(msg)
    }
}

impl<S> StreamHandler<Result<ws::Message, ProtocolError>> for WebsocketProxy<S>
where
    S: Unpin + Sink<ws::Message> + 'static,
{
    fn handle(&mut self, item: Result<ws::Message, ProtocolError>, ctx: &mut Self::Context) {
        let msg = match item {
            Ok(msg) => msg,
            Err(err) => return self.error(err, ctx),
        };

        let _ = self.send.write(msg);
    }
}

impl<S> WebsocketProxy<S>
where
    S: Unpin + Sink<ws::Message> + 'static,
{
    fn error<E>(&mut self, err: E, ctx: &mut <Self as Actor>::Context)
    where
        E: std::error::Error,
    {
        let reason = Some(CloseReason {
            code: ws::CloseCode::Error,
            description: Some(err.to_string()),
        });

        ctx.close(reason.clone());
        let _ = self.send.write(ws::Message::Close(reason));
        self.send.close();

        ctx.stop();
    }
}

pub async fn respond_websocket(
    uri: String,
    req: HttpRequest,
    payload: web::Payload,
) -> Result<HttpResponse, ProxyError> {
    let mut res = handshake(&req).map_err(|_| UpgradeRequired)?;

    let (_, conn) = awc::Client::new()
        .ws(uri)
        .connect()
        .await
        .map_err(|_| BadGateway)?;

    let (send, recv) = conn.split();

    let out = WebsocketContext::with_factory(payload, |ctx| {
        ctx.add_stream(recv);
        WebsocketProxy {
            send: SinkWrite::new(send, ctx),
        }
    });

    Ok(res.streaming(out))
}

pub struct StaticResponderConfig {
    pub uri: String,
    pub utf8: bool,
    pub browse: bool,
    pub with_slash: bool,
    pub index: Option<String>,
    pub fallback: Option<String>,
    pub suffix: Option<String>,
}

pub async fn respond_static(
    cfg: StaticResponderConfig,
    req: HttpRequest,
) -> Result<HttpResponse, ProxyError> {
    if req.method() != Method::GET {
        return Err(ProxyError::MethodGetOnly);
    }

    let path = req
        .uri()
        .path()
        .trim_start_matches('/')
        .trim_end_matches('/');

    let path = match percent_encoding::percent_decode_str(path).decode_utf8() {
        Ok(val) => val,
        Err(_) => {
            return Err(ProxyError::NotFound);
        }
    };

    let base_path = cfg.uri.parse::<PathBuf>().unwrap();
    let mut file_path = base_path.clone();
    for p in Path::new(&*path) {
        if p == OsStr::new(".") {
            continue;
        } else if p == OsStr::new("..") {
            file_path.pop();
        } else {
            file_path.push(p);
        }
    }

    if !file_path.starts_with(cfg.uri) {
        return Err(ProxyError::InvalidRequestPath);
    }

    if !file_path.exists() {
        if let Some(suffix) = cfg.suffix {
            let file_name = file_path
                .file_name()
                .and_then(OsStr::to_str)
                .unwrap()
                .to_string();
            file_path.pop();
            file_path.push((file_name + &suffix).as_str());
            if file_path.is_file() {
                return Ok(NamedFile::open(file_path).unwrap().into_response(&req));
            }
        }

        if let Some(file) = cfg.fallback {
            let fallback_path = base_path.join(file);
            if fallback_path.is_file() {
                return Ok(NamedFile::open(fallback_path).unwrap().into_response(&req));
            }
        }

        return Err(ProxyError::NotFound);
    }

    if file_path.is_file() {
        Ok(NamedFile::open(file_path).unwrap().into_response(&req))
    } else {
        if let Some(index_file) = &cfg.index {
            let index_path = file_path.join(index_file);
            if index_path.is_file() {
                return Ok(NamedFile::open(index_path).unwrap().into_response(&req));
            }
        }

        Err(ProxyError::NotFound)
    }
}
@ -1,87 +0,0 @@
use actix_web::{HttpRequest, HttpResponse, ResponseError, web};
use actix_web::http::header;
use awc::Client;
use rand::seq::SliceRandom;

use crate::{
    proxies::{
        config::{Destination, DestinationType},
        responder,
    },
    ROAD,
};
use crate::proxies::ProxyError;

pub async fn handle(req: HttpRequest, payload: web::Payload, client: web::Data<Client>) -> HttpResponse {
    let readable_app = ROAD.lock().await;
    let (region, location) = match readable_app.filter(req.uri(), req.method(), req.headers()) {
        Some(val) => val,
        None => {
            return ProxyError::NoGateway.error_response();
        }
    };

    let destination = location
        .destinations
        .choose_weighted(&mut rand::thread_rng(), |item| item.weight.unwrap_or(1))
        .unwrap();

    async fn forward(
        end: &Destination,
        req: HttpRequest,
        payload: web::Payload,
        client: web::Data<Client>,
    ) -> Result<HttpResponse, ProxyError> {
        // Handle normal web request
        match end.get_type() {
            DestinationType::Hypertext => {
                let Ok(uri) = end.get_hypertext_uri() else {
                    return Err(ProxyError::NotImplemented);
                };

                responder::respond_hypertext(uri, req, payload, client).await
            }
            DestinationType::StaticFiles => {
                let Ok(cfg) = end.get_static_config() else {
                    return Err(ProxyError::NotImplemented);
                };

                responder::respond_static(cfg, req).await
            }
            _ => Err(ProxyError::NotImplemented)
        }
    }

    let reg = region.clone();
    let loc = location.clone();
    let end = destination.clone();

    let ip = match req.connection_info().realip_remote_addr() {
        None => "unknown".to_string(),
        Some(val) => val.to_string(),
    };
    let ua = match req.headers().get(header::USER_AGENT) {
        None => "unknown".to_string(),
        Some(val) => val.to_str().unwrap().to_string(),
    };

    match forward(&end, req, payload, client).await {
        Ok(resp) => {
            tokio::spawn(async move {
                let writable_app = &mut ROAD.lock().await;
                writable_app.metrics.add_success_request(ip, ua, reg, loc, end);
            });
            resp
        }
        Err(resp) => {
            let message = resp.to_string();
            tokio::spawn(async move {
                let writable_app = &mut ROAD.lock().await;
                writable_app
                    .metrics
                    .add_failure_request(ip, ua, reg, loc, end, message);
            });
            resp.error_response()
        }
    }
}
@ -1,40 +0,0 @@
use std::error;
use actix_web::{App, HttpServer, web};
use actix_web::dev::Server;
use actix_web::middleware::{Compress, Logger};
use awc::Client;
use crate::config::CFG;
use crate::proxies::route;
use crate::server::ServerBindConfig;
use crate::tls::{load_certificates, use_rustls};

pub async fn build_proxies() -> Result<Vec<Server>, Box<dyn error::Error>> {
    load_certificates().await?;

    let cfg = CFG
        .read()
        .await
        .get::<Vec<ServerBindConfig>>("proxies.bind")?;

    let mut tasks = Vec::new();
    for item in cfg {
        tasks.push(build_single_proxy(item)?);
    }

    Ok(tasks)
}

pub fn build_single_proxy(cfg: ServerBindConfig) -> Result<Server, Box<dyn error::Error>> {
    let server = HttpServer::new(|| {
        App::new()
            .wrap(Logger::default())
            .wrap(Compress::default())
            .app_data(web::Data::new(Client::default()))
            .default_service(web::to(route::handle))
    });
    if cfg.tls {
        Ok(server.bind_rustls_0_22(cfg.addr, use_rustls()?)?.run())
    } else {
        Ok(server.bind(cfg.addr)?.run())
    }
}
@ -1,7 +0,0 @@
use serde::{Deserialize, Serialize};

#[derive(Serialize, Deserialize)]
pub struct ServerBindConfig {
    pub addr: String,
    pub tls: bool,
}
@ -1,15 +0,0 @@
use actix_web::{Scope, web};
use crate::sideload::overview::get_overview;
use crate::sideload::regions::list_region;

mod overview;
mod regions;
pub mod server;

static ROOT: &str = "";

pub fn service() -> Scope {
    web::scope("/cgi")
        .route(ROOT, web::get().to(get_overview))
        .route("/regions", web::get().to(list_region))
}
@ -1,52 +0,0 @@
use actix_web::web;
use serde::Serialize;
use crate::proxies::config::{Destination, Location};
use crate::proxies::metrics::RoadTrace;
use crate::ROAD;

#[derive(Debug, Clone, PartialEq, Serialize)]
pub struct OverviewData {
    regions: usize,
    locations: usize,
    destinations: usize,
    requests_count: u64,
    failures_count: u64,
    successes_count: u64,
    success_rate: f64,
    recent_successes: Vec<RoadTrace>,
    recent_errors: Vec<RoadTrace>,
}

pub async fn get_overview() -> web::Json<OverviewData> {
    let locked_app = ROAD.lock().await;
    let regions = locked_app.regions.clone();
    let locations = regions
        .iter()
        .flat_map(|item| item.locations.clone())
        .collect::<Vec<Location>>();
    let destinations = locations
        .iter()
        .flat_map(|item| item.destinations.clone())
        .collect::<Vec<Destination>>();
    web::Json(OverviewData {
        regions: regions.len(),
        locations: locations.len(),
        destinations: destinations.len(),
        requests_count: locked_app.metrics.requests_count,
        successes_count: locked_app.metrics.requests_count - locked_app.metrics.failures_count,
        failures_count: locked_app.metrics.failures_count,
        success_rate: locked_app.metrics.get_success_rate(),
        recent_successes: locked_app
            .metrics
            .recent_successes
            .clone()
            .into_iter()
            .collect::<Vec<_>>(),
        recent_errors: locked_app
            .metrics
            .recent_errors
            .clone()
            .into_iter()
            .collect::<Vec<_>>(),
    })
}
@ -1,9 +0,0 @@
use actix_web::web;
use crate::proxies::config::Region;
use crate::ROAD;

pub async fn list_region() -> web::Json<Vec<Region>> {
    let locked_app = ROAD.lock().await;

    web::Json(locked_app.regions.clone())
}
@ -1,35 +0,0 @@
use std::error;
use actix_web::dev::Server;
use actix_web::{App, HttpServer};
use actix_web_httpauth::extractors::AuthenticationError;
use actix_web_httpauth::headers::www_authenticate::basic::Basic;
use actix_web_httpauth::middleware::HttpAuthentication;
use crate::sideload;

pub async fn build_sideload() -> Result<Server, Box<dyn error::Error>> {
    Ok(
        HttpServer::new(|| {
            App::new()
                .wrap(HttpAuthentication::basic(|req, credentials| async move {
                    let password = match crate::config::CFG
                        .read()
                        .await
                        .get_string("secret") {
                        Ok(val) => val,
                        Err(_) => return Err((AuthenticationError::new(Basic::new()).into(), req))
                    };
                    if credentials.password().unwrap_or("") != password {
                        Err((AuthenticationError::new(Basic::new()).into(), req))
                    } else {
                        Ok(req)
                    }
                }))
                .service(sideload::service())
        }).bind(
            crate::config::CFG
                .read()
                .await
                .get_string("sideload.bind_addr")?
        )?.workers(1).run()
    )
}
76 src/tls.rs
@ -1,76 +0,0 @@
use std::fs::File;
use std::{error};
use std::io::BufReader;
use std::sync::Arc;
use config::ConfigError;
use lazy_static::lazy_static;
use rustls::crypto::ring::sign::RsaSigningKey;
use rustls::server::{ClientHello, ResolvesServerCert};
use rustls::sign::CertifiedKey;
use serde::{Deserialize, Serialize};
use std::sync::Mutex;
use wildmatch::WildMatch;

lazy_static! {
    static ref CERTS: Mutex<Vec<CertificateConfig>> = Mutex::new(Vec::new());
}

#[derive(Debug)]
struct ProxyCertResolver;

impl ResolvesServerCert for ProxyCertResolver {
    fn resolve(&self, handshake: ClientHello) -> Option<Arc<CertifiedKey>> {
        let domain = handshake.server_name()?;

        let certs = CERTS.lock().unwrap();
        for cert in certs.iter() {
            if WildMatch::new(cert.domain.as_str()).matches(domain) {
                return match cert.clone().load() {
                    Ok(val) => Some(val),
                    Err(_) => None
                };
            }
        }
        None
    }
}

#[derive(Clone, Serialize, Deserialize)]
struct CertificateConfig {
    pub domain: String,
    pub certs: String,
    pub key: String,
}

impl CertificateConfig {
    pub fn load(self) -> Result<Arc<CertifiedKey>, Box<dyn error::Error>> {
        let certs =
            rustls_pemfile::certs(&mut BufReader::new(&mut File::open(self.certs)?))
                .collect::<Result<Vec<_>, _>>()?;
        let key =
            rustls_pemfile::private_key(&mut BufReader::new(&mut File::open(self.key)?))?
                .unwrap();
        let sign = RsaSigningKey::new(&key)?;

        Ok(Arc::new(CertifiedKey::new(certs, Arc::new(sign))))
    }
}

pub async fn load_certificates() -> Result<(), ConfigError> {
    let certs = crate::config::CFG
        .read()
        .await
        .get::<Vec<CertificateConfig>>("certificates")?;

    CERTS.lock().unwrap().clone_from(&certs);

    Ok(())
}

pub fn use_rustls() -> Result<rustls::ServerConfig, ConfigError> {
    Ok(
        rustls::ServerConfig::builder()
            .with_no_client_auth()
            .with_cert_resolver(Arc::new(ProxyCertResolver))
    )
}
@ -1,72 +0,0 @@
pub mod runner;

use std::collections::HashMap;

use lazy_static::lazy_static;
use serde::{Deserialize, Serialize};
use tokio::sync::Mutex;
use tracing::{debug, warn};

use crate::proxies::config::Region;

use self::runner::AppInstance;

lazy_static! {
    static ref INSTANCES: Mutex<HashMap<String, AppInstance>> = Mutex::new(HashMap::new());
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct WardenInstance {
    pub applications: Vec<Application>,
}

impl WardenInstance {
    pub fn new() -> WardenInstance {
        WardenInstance {
            applications: vec![],
        }
    }

    pub fn scan(&mut self, regions: Vec<Region>) {
        self.applications = regions
            .iter()
            .flat_map(|item| item.applications.clone().unwrap_or_default())
            .collect::<Vec<Application>>();
        debug!(
            applications = format!("{:?}", self.applications),
            "Warden scan accomplished."
        )
    }

    pub async fn start(&self) {
        for item in self.applications.iter() {
            let mut instance = AppInstance::new();
            match instance.start(item.clone()).await {
                Ok(_) => {
                    debug!(id = item.id, "Warden successfully created instance for");
                    INSTANCES.lock().await.insert(item.clone().id, instance);
                }
                Err(err) => warn!(
                    id = item.id,
                    err = format!("{:?}", err),
                    "Warden failed to create an instance for"
                ),
            };
        }
    }
}

impl Default for WardenInstance {
    fn default() -> Self {
        Self::new()
    }
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Application {
    pub id: String,
    pub exe: String,
    pub args: Option<Vec<String>>,
    pub env: Option<HashMap<String, String>>,
    pub workdir: String,
}
@ -1,104 +0,0 @@
use std::{borrow::BorrowMut, collections::HashMap, io};

use super::Application;
use lazy_static::lazy_static;
use tokio::{
    io::{AsyncBufReadExt, BufReader},
    process::{Child, Command},
};
use tokio::sync::Mutex;

lazy_static! {
    static ref STDOUT: Mutex<HashMap<String, String>> = Mutex::new(HashMap::new());
    static ref STDERR: Mutex<HashMap<String, String>> = Mutex::new(HashMap::new());
}

#[derive(Debug)]
pub struct AppInstance {
    pub app: Option<Application>,
    pub program: Option<Child>,
}

impl AppInstance {
    pub fn new() -> AppInstance {
        AppInstance {
            app: None,
            program: None,
        }
    }

    pub async fn start(&mut self, app: Application) -> io::Result<()> {
        return match Command::new(app.exe.clone())
            .args(app.args.clone().unwrap_or_default())
            .envs(app.env.clone().unwrap_or_default())
            .current_dir(app.workdir.clone())
            .stdout(std::process::Stdio::piped())
            .stderr(std::process::Stdio::piped())
            .spawn()
        {
            Ok(mut child) => {
                let stderr_reader = BufReader::new(child.stderr.take().unwrap());
                let stdout_reader = BufReader::new(child.stdout.take().unwrap());

                tokio::spawn(read_stream_and_capture(stderr_reader, app.id.clone(), true));
                tokio::spawn(read_stream_and_capture(
                    stdout_reader,
                    app.id.clone(),
                    false,
                ));

                self.app = Some(app.clone());
                self.program = Some(child);

                Ok(())
            }
            Err(err) => Err(err),
        };
    }

    pub async fn stop(&mut self) -> Result<(), io::Error> {
        if let Some(child) = self.program.borrow_mut() {
            return child.kill().await;
        }
        Ok(())
    }

    pub async fn get_stdout(&self) -> Option<String> {
        if let Some(app) = self.app.clone() {
            STDOUT.lock().await.get(&app.id).cloned()
        } else {
            None
        }
    }

    pub async fn get_stderr(&self) -> Option<String> {
        if let Some(app) = self.app.clone() {
            STDERR.lock().await.get(&app.id).cloned()
        } else {
            None
        }
    }
}

impl Default for AppInstance {
    fn default() -> Self {
        Self::new()
    }
}

async fn read_stream_and_capture<R>(reader: R, id: String, is_err: bool) -> io::Result<()>
where
    R: tokio::io::AsyncBufRead + Unpin,
{
    let mut lines = reader.lines();
    while let Some(line) = lines.next_line().await? {
        if !is_err {
            if let Some(out) = STDOUT.lock().await.get_mut(&id) {
                out.push_str(&line);
            }
        } else if let Some(out) = STDERR.lock().await.get_mut(&id) {
            out.push_str(&line);
        }
    }
    Ok(())
}
85 test/README.md Normal file
@ -0,0 +1,85 @@
# Benchmark

These results are meant to measure the performance of RoadSign.
Contributions of tests for other reverse-proxy software are welcome!

## Platform

All tests were run on my workstation:

```text
..' littlesheep@LittleSheepdeMacBook-Pro
,xNMM. ------------------------------------
.OMMMMo OS: macOS Sonoma 14.1 23B2073 arm64
lMM" Host: MacBook Pro (14-inch, Nov 2023, Three Thunderbolt 4 ports)
.;loddo:. .olloddol;. Kernel: 23.1.0
cKMMMMMMMMMMNWMMMMMMMMMM0: Uptime: 2 days, 1 hour, 16 mins
.KMMMMMMMMMMMMMMMMMMMMMMMWd. Packages: 63 (brew), 4 (brew-cask)
XMMMMMMMMMMMMMMMMMMMMMMMX. Shell: zsh 5.9
;MMMMMMMMMMMMMMMMMMMMMMMM: Display (Color LCD): 3024x1964 @ 120Hz (as 1512x982) [Built-in]
:MMMMMMMMMMMMMMMMMMMMMMMM: DE: Aqua
.MMMMMMMMMMMMMMMMMMMMMMMMX. WM: Quartz Compositor
kMMMMMMMMMMMMMMMMMMMMMMMMWd. WM Theme: Multicolor (Dark)
'XMMMMMMMMMMMMMMMMMMMMMMMMMMk Font: .AppleSystemUIFont [System], Helvetica [User]
'XMMMMMMMMMMMMMMMMMMMMMMMMK. Cursor: Fill - Black, Outline - White (32px)
kMMMMMMMMMMMMMMMMMMMMMMd Terminal: iTerm 3.4.22
;KMMMMMMMWXXWMMMMMMMk. Terminal Font: MesloLGMNFM-Regular (12pt)
"cooc*" "*coo'" CPU: Apple M3 Max (14) @ 4.06 GHz
GPU: Apple M3 Max (30) [Integrated]
Memory: 18.45 GiB / 36.00 GiB (51%)
Swap: Disabled
Disk (/): 72.52 GiB / 926.35 GiB (8%) - apfs [Read-only]
Local IP (en0): 192.168.50.0/24 *
Battery: 100% [AC connected]
Power Adapter: 96W USB-C Power Adapter
Locale: zh_CN.UTF-8
```

## Results

The tests were run in the order `nginx -> roadsign without prefork -> roadsign with prefork`, so there is no reason to think nginx's numbers were hurt by hardware temperature; if thermal throttling affected anything, it would have been the later RoadSign runs.

### Nginx

```shell
go-wrk -c 60 -d 120 http://localhost:8001
# => Running 120s test @ http://localhost:8001
# => 60 goroutine(s) running concurrently
# => 515749 requests in 1m59.953302003s, 245.92MB read
# => Requests/sec: 4299.58
# => Transfer/sec: 2.05MB
# => Avg Req Time: 13.954846ms
# => Fastest Request: 0s
# => Slowest Request: 410.6972ms
# => Number of Errors: 0
```

### RoadSign

```shell
go-wrk -c 60 -d 120 http://localhost:8000
# => Running 120s test @ http://localhost:8000
# => 60 goroutine(s) running concurrently
# => 8905230 requests in 1m56.215762709s, 3.52GB read
# => Requests/sec: 76626.70
# => Transfer/sec: 30.98MB
# => Avg Req Time: 783.016µs
# => Fastest Request: 28.542µs
# => Slowest Request: 46.773083ms
# => Number of Errors: 0
```

### RoadSign w/ Prefork

```shell
go-wrk -c 60 -d 120 http://localhost:8000
# => Running 120s test @ http://localhost:8000
# => 60 goroutine(s) running concurrently
# => 4784308 requests in 1m59.100307178s, 1.89GB read
# => Requests/sec: 40170.41
# => Transfer/sec: 16.24MB
# => Avg Req Time: 1.493636ms
# => Fastest Request: 34.291µs
# => Slowest Request: 8.727666ms
# => Number of Errors: 0
```
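The README above invites benchmarks of other reverse-proxy software. A minimal sketch of such a run, reusing the exact go-wrk parameters from the results above; the target port (8002) and whatever proxy sits behind it are placeholders, not anything defined in this repository:

```shell
# Point any other reverse proxy at the same test/data directory,
# then apply the same load shape used for the nginx and RoadSign runs:
# 60 concurrent goroutines for 120 seconds (placeholder port 8002).
go-wrk -c 60 -d 120 http://localhost:8002
```

For scale, the numbers above put RoadSign at roughly 17.8x nginx's requests/sec without prefork (76,626.70 vs. 4,299.58) and about 9.3x with prefork (40,170.41 vs. 4,299.58) on this machine.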
3 test/data/.gitignore vendored Normal file
@ -0,0 +1,3 @@
/spa
/capital
/static-files
12 test/data/index.html Normal file
@ -0,0 +1,12 @@
<!DOCTYPE html>
<html>
  <head>
    <meta charset="utf-8">
    <meta name="viewport" content="width=device-width, initial-scale=1">
    <title>Hello, World!</title>
  </head>
  <body>
    <p>Hello, there!</p>
    <p>Here's the roadsign vs. nginx benchmarking test data!</p>
  </body>
</html>
Some files were not shown because too many files have changed in this diff.