Merge branch 'development' into development_tmux

Mahmoud-Emad
2025-08-24 23:15:34 +03:00
30 changed files with 3312 additions and 271 deletions

View File

@@ -1,6 +1,7 @@
module main
import freeflowuniverse.herolib.osal.sshagent
import freeflowuniverse.herolib.osal.linux
fn do1() ! {
mut agent := sshagent.new()!
@@ -20,6 +21,31 @@ fn do1() ! {
// println(agent)
}
fn test_user_mgmt() ! {
mut lf := linux.new()!
// Test user creation
lf.user_create(
name: 'testuser'
sshkey: 'ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIM3/2K7R8A/l0kM0/d'
)!
// Test ssh key creation
lf.sshkey_create(
username: 'testuser'
sshkey_name: 'testkey'
)!
// Test ssh key deletion
lf.sshkey_delete(
username: 'testuser'
sshkey_name: 'testkey'
)!
// Test user deletion
lf.user_delete(name: 'testuser')!
}
fn main() {
do1() or { panic(err) }
test_user_mgmt() or { panic(err) }
}

View File

@@ -18,7 +18,7 @@ You can configure the client using a HeroScript file:
Here's how to get the client and use its methods.
```vlang
```v
import freeflowuniverse.herolib.clients.giteaclient
import freeflowuniverse.herolib.core.base

View File

@@ -1,9 +1,37 @@
module livekit
// App struct with `livekit.Client`, API keys, and other shared data
pub struct Client {
pub:
url string @[required]
api_key string @[required]
api_secret string @[required]
import net.http
import json
import time
fn (mut c LivekitClient) post[T](path string, body T) !http.Response {
mut token := c.new_access_token('api', 'API User', 10 * 60)! // identity, name, ttl (10 minutes)
token.add_video_grant(VideoGrant{
room_create: true
room_admin: true
room_list: true
})
jwt := token.to_jwt()!
mut header := http.new_header()
header.add(.authorization, 'Bearer ' + jwt)
header.add(.content_type, 'application/json')
url := '${c.url}/${path}'
data := json.encode(body)
mut req := http.Request{
method: .post
url: url
header: header
data: data
}
resp := req.do()!
if resp.status_code != 200 {
return error('failed to execute request: ${resp.body}')
}
return resp
}

View File

@@ -0,0 +1,34 @@
module livekit
import freeflowuniverse.herolib.data.caching
import os
const caching_method = caching.CachingMethod.once_per_process
fn _init() ! {
if caching.is_set(key: 'livekit_clients') {
return
}
caching.set[map[string]LivekitClient](key: 'livekit_clients', val: map[string]LivekitClient{}, caching_method)!
}
fn _get() !map[string]LivekitClient {
_init()!
return caching.get[map[string]LivekitClient](key: 'livekit_clients')!
}
pub fn get(name string) !LivekitClient {
mut clients := _get()!
return clients[name] or { return error('livekit client ${name} not found') }
}
pub fn set(client LivekitClient) ! {
mut clients := _get()!
clients[client.name] = client
caching.set[map[string]LivekitClient](key: 'livekit_clients', val: clients, caching_method)!
}
pub fn exists(name string) !bool {
mut clients := _get()!
return name in clients
}

View File

@@ -0,0 +1,18 @@
module livekit
pub struct SendDataArgs {
pub mut:
room_name string
data []u8
kind DataPacket_Kind
destination_sids []string
}
pub enum DataPacket_Kind {
reliable
lossy
}
pub fn (mut c LivekitClient) send_data(args SendDataArgs) ! {
_ = c.post('twirp/livekit.RoomService/SendData', args)!
}

View File

@@ -0,0 +1,84 @@
module livekit
import json
pub struct EgressInfo {
pub mut:
egress_id string
room_id string
status string
started_at i64
ended_at i64
error string
}
pub struct StartRoomCompositeEgressArgs {
pub mut:
room_name string
layout string
audio_only bool
video_only bool
custom_base_url string
}
pub struct StartTrackCompositeEgressArgs {
pub mut:
room_name string
audio_track_id string
video_track_id string
}
pub struct StartWebEgressArgs {
pub mut:
url string
audio_only bool
video_only bool
}
pub struct UpdateStreamArgs {
pub mut:
egress_id string
add_output_urls []string
remove_output_urls []string
}
pub fn (mut c LivekitClient) start_room_composite_egress(args StartRoomCompositeEgressArgs) !EgressInfo {
mut resp := c.post('twirp/livekit.Egress/StartRoomCompositeEgress', args)!
egress_info := json.decode[EgressInfo](resp.body)!
return egress_info
}
pub fn (mut c LivekitClient) start_track_composite_egress(args StartTrackCompositeEgressArgs) !EgressInfo {
mut resp := c.post('twirp/livekit.Egress/StartTrackCompositeEgress', args)!
egress_info := json.decode[EgressInfo](resp.body)!
return egress_info
}
pub fn (mut c LivekitClient) start_web_egress(args StartWebEgressArgs) !EgressInfo {
mut resp := c.post('twirp/livekit.Egress/StartWebEgress', args)!
egress_info := json.decode[EgressInfo](resp.body)!
return egress_info
}
pub fn (mut c LivekitClient) update_layout(egress_id string, layout string) !EgressInfo {
mut resp := c.post('twirp/livekit.Egress/UpdateLayout', {'egress_id': egress_id, 'layout': layout})!
egress_info := json.decode[EgressInfo](resp.body)!
return egress_info
}
pub fn (mut c LivekitClient) update_stream(egress_id string, args_ UpdateStreamArgs) !EgressInfo {
mut args := args_
args.egress_id = egress_id
mut resp := c.post('twirp/livekit.Egress/UpdateStream', args)!
egress_info := json.decode[EgressInfo](resp.body)!
return egress_info
}
pub fn (mut c LivekitClient) list_egress(room_name string) ![]EgressInfo {
mut resp := c.post('twirp/livekit.Egress/ListEgress', {'room_name': room_name})!
egress_infos := json.decode[[]EgressInfo](resp.body)!
return egress_infos
}
pub fn (mut c LivekitClient) stop_egress(egress_id string) !EgressInfo {
mut resp := c.post('twirp/livekit.Egress/StopEgress', {'egress_id': egress_id})!
egress_info := json.decode[EgressInfo](resp.body)!
return egress_info
}

View File

@@ -0,0 +1,128 @@
module livekit
import json
pub struct IngressInfo {
pub mut:
ingress_id string
name string
stream_key string
url string
input_type IngressInput
audio IngressAudioOptions
video IngressVideoOptions
state IngressState
}
pub enum IngressInput {
rtmp_input
whip_input
}
pub struct IngressAudioOptions {
pub mut:
name string
source TrackSource
preset AudioPreset
}
pub struct IngressVideoOptions {
pub mut:
name string
source TrackSource
preset VideoPreset
}
pub enum TrackSource {
camera
microphone
screen_share
screen_share_audio
}
pub enum AudioPreset {
opus_stereo_96kbps
opus_mono_64kbps
}
pub enum VideoPreset {
h264_720p_30fps_3mbps
h264_1080p_30fps_4_5mbps
h264_540p_25fps_2mbps
}
pub struct IngressState {
pub mut:
status IngressStatus
error string
video InputVideoState
audio InputAudioState
room_id string
started_at i64
}
pub enum IngressStatus {
endpoint_inactive
endpoint_buffering
endpoint_publishing
}
pub struct InputVideoState {
pub mut:
mime_type string
width u32
height u32
framerate u32
}
pub struct InputAudioState {
pub mut:
mime_type string
channels u32
sample_rate u32
}
pub struct CreateIngressArgs {
pub mut:
name string
room_name string
participant_identity string
participant_name string
input_type IngressInput
audio IngressAudioOptions
video IngressVideoOptions
}
pub struct UpdateIngressArgs {
pub mut:
ingress_id string
name string
room_name string
participant_identity string
participant_name string
audio IngressAudioOptions
video IngressVideoOptions
}
pub fn (mut c LivekitClient) create_ingress(args CreateIngressArgs) !IngressInfo {
mut resp := c.post('twirp/livekit.Ingress/CreateIngress', args)!
ingress_info := json.decode[IngressInfo](resp.body)!
return ingress_info
}
pub fn (mut c LivekitClient) update_ingress(ingress_id string, args_ UpdateIngressArgs) !IngressInfo {
mut args := args_
args.ingress_id = ingress_id
mut resp := c.post('twirp/livekit.Ingress/UpdateIngress', args)!
ingress_info := json.decode[IngressInfo](resp.body)!
return ingress_info
}
pub fn (mut c LivekitClient) list_ingress(room_name string) ![]IngressInfo {
mut resp := c.post('twirp/livekit.Ingress/ListIngress', {'room_name': room_name})!
ingress_infos := json.decode[[]IngressInfo](resp.body)!
return ingress_infos
}
pub fn (mut c LivekitClient) delete_ingress(ingress_id string) !IngressInfo {
mut resp := c.post('twirp/livekit.Ingress/DeleteIngress', {'ingress_id': ingress_id})!
ingress_info := json.decode[IngressInfo](resp.body)!
return ingress_info
}

View File

@@ -8,24 +8,26 @@ pub const version = '0.0.0'
const singleton = false
const default = true
// THIS IS THE SOURCE OF THE INFORMATION OF THIS FILE, HERE WE HAVE THE CONFIG OBJECT CONFIGURED AND MODELLED
@[heap]
pub struct LivekitClient {
pub mut:
name string = 'default'
mail_from string
mail_password string @[secret]
mail_port int
mail_server string
mail_username string
url string @[required]
api_key string @[required]
api_secret string @[required; secret]
}
// your checking & initialization code if needed
fn obj_init(mycfg_ LivekitClient) !LivekitClient {
mut mycfg := mycfg_
if mycfg.password == '' && mycfg.secret == '' {
return error('password or secret needs to be filled in for ${mycfg.name}')
if mycfg.url == '' {
return error('url needs to be filled in for ${mycfg.name}')
}
if mycfg.api_key == '' {
return error('api_key needs to be filled in for ${mycfg.name}')
}
if mycfg.api_secret == '' {
return error('api_secret needs to be filled in for ${mycfg.name}')
}
return mycfg
}

View File

@@ -0,0 +1,57 @@
module livekit
import json
pub struct ParticipantInfo {
pub mut:
sid string
identity string
state string
metadata string
joined_at i64
name string
version u32
permission string
region string
publisher bool
}
pub struct UpdateParticipantArgs {
pub mut:
room_name string
identity string
metadata string
permission string
}
pub struct MutePublishedTrackArgs {
pub mut:
room_name string
identity string
track_sid string
muted bool
}
pub fn (mut c LivekitClient) list_participants(room_name string) ![]ParticipantInfo {
mut resp := c.post('twirp/livekit.RoomService/ListParticipants', {'room': room_name})!
participants := json.decode[[]ParticipantInfo](resp.body)!
return participants
}
pub fn (mut c LivekitClient) get_participant(room_name string, identity string) !ParticipantInfo {
mut resp := c.post('twirp/livekit.RoomService/GetParticipant', {'room': room_name, 'identity': identity})!
participant := json.decode[ParticipantInfo](resp.body)!
return participant
}
pub fn (mut c LivekitClient) remove_participant(room_name string, identity string) ! {
_ = c.post('twirp/livekit.RoomService/RemoveParticipant', {'room': room_name, 'identity': identity})!
}
pub fn (mut c LivekitClient) update_participant(args UpdateParticipantArgs) ! {
_ = c.post('twirp/livekit.RoomService/UpdateParticipant', args)!
}
pub fn (mut c LivekitClient) mute_published_track(args MutePublishedTrackArgs) ! {
_ = c.post('twirp/livekit.RoomService/MutePublishedTrack', args)!
}

167
lib/clients/livekit/play.v Normal file
View File

@@ -0,0 +1,167 @@
module livekit
import freeflowuniverse.herolib.core.playbook { PlayBook }
import freeflowuniverse.herolib.core.texttools
import freeflowuniverse.herolib.ui.console
pub fn play(mut plbook PlayBook) ! {
if !plbook.exists(filter: 'livekit.') {
return
}
// Handle livekit.init - configure the client
if plbook.exists_once(filter: 'livekit.init') {
mut action := plbook.get(filter: 'livekit.init')!
mut p := action.params
name := texttools.name_fix(p.get_default('name', 'default')!)
url := p.get('url')!
api_key := p.get('api_key')!
api_secret := p.get('api_secret')!
mut client := LivekitClient{
name: name
url: url
api_key: api_key
api_secret: api_secret
}
set(client)!
console.print_header('LiveKit client "${name}" configured')
action.done = true
}
// Handle room creation
mut room_create_actions := plbook.find(filter: 'livekit.room_create')!
for mut action in room_create_actions {
mut p := action.params
client_name := texttools.name_fix(p.get_default('client', 'default')!)
room_name := p.get('name')!
empty_timeout := p.get_u32_default('empty_timeout', 300)!
max_participants := p.get_u32_default('max_participants', 50)!
metadata := p.get_default('metadata', '')!
mut client := get(name: client_name)!
room := client.create_room(
name: room_name
empty_timeout: empty_timeout
max_participants: max_participants
metadata: metadata
)!
console.print_header('Room "${room_name}" created successfully')
action.done = true
}
// Handle room deletion
mut room_delete_actions := plbook.find(filter: 'livekit.room_delete')!
for mut action in room_delete_actions {
mut p := action.params
client_name := texttools.name_fix(p.get_default('client', 'default')!)
room_name := p.get('name')!
mut client := get(name: client_name)!
client.delete_room(room_name)!
console.print_header('Room "${room_name}" deleted successfully')
action.done = true
}
// Handle participant removal
mut participant_remove_actions := plbook.find(filter: 'livekit.participant_remove')!
for mut action in participant_remove_actions {
mut p := action.params
client_name := texttools.name_fix(p.get_default('client', 'default')!)
room_name := p.get('room')!
identity := p.get('identity')!
mut client := get(name: client_name)!
client.remove_participant(room_name, identity)!
console.print_header('Participant "${identity}" removed from room "${room_name}"')
action.done = true
}
// Handle participant mute/unmute
mut participant_mute_actions := plbook.find(filter: 'livekit.participant_mute')!
for mut action in participant_mute_actions {
mut p := action.params
client_name := texttools.name_fix(p.get_default('client', 'default')!)
room_name := p.get('room')!
identity := p.get('identity')!
track_sid := p.get('track_sid')!
muted := p.get_default_true('muted')
mut client := get(name: client_name)!
client.mute_published_track(
room_name: room_name
identity: identity
track_sid: track_sid
muted: muted
)!
status := if muted { 'muted' } else { 'unmuted' }
console.print_header('Track "${track_sid}" ${status} for participant "${identity}"')
action.done = true
}
// Handle room metadata update
mut room_update_actions := plbook.find(filter: 'livekit.room_update')!
for mut action in room_update_actions {
mut p := action.params
client_name := texttools.name_fix(p.get_default('client', 'default')!)
room_name := p.get('room')!
metadata := p.get('metadata')!
mut client := get(name: client_name)!
client.update_room_metadata(
room_name: room_name
metadata: metadata
)!
console.print_header('Room "${room_name}" metadata updated')
action.done = true
}
// Handle access token generation
mut token_create_actions := plbook.find(filter: 'livekit.token_create')!
for mut action in token_create_actions {
mut p := action.params
client_name := texttools.name_fix(p.get_default('client', 'default')!)
identity := p.get('identity')!
name := p.get_default('name', identity)!
room := p.get_default('room', '')!
ttl := p.get_int_default('ttl', 21600)!
can_publish := p.get_default_false('can_publish')
can_subscribe := p.get_default_true('can_subscribe')
can_publish_data := p.get_default_false('can_publish_data')
mut client := get(name: client_name)!
mut token := client.new_access_token(identity, name, ttl)!
token.add_video_grant(VideoGrant{
room: room
room_join: true
can_publish: can_publish
can_subscribe: can_subscribe
can_publish_data: can_publish_data
})
jwt := token.to_jwt()!
console.print_header('Access token generated for "${identity}"')
console.print_debug('Token: ${jwt}')
action.done = true
}
}
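For reference, heroscript along these lines would drive the play function above (action and parameter names are taken from the code; the URL, keys and room names are illustrative):
```heroscript
!!livekit.init
name:'default'
url:'https://livekit.example.com'
api_key:'APIKEY'
api_secret:'APISECRET'
!!livekit.room_create
name:'myroom'
empty_timeout:300
max_participants:50
!!livekit.token_create
identity:'alice'
room:'myroom'
can_publish:true
```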

View File

@@ -1,50 +1,47 @@
module livekit
import net.http
import json
import net.http
@[params]
pub struct ListRoomsParams {
names []string
pub struct Room {
pub mut:
sid string
name string
empty_timeout u32
max_participants u32
creation_time i64
turn_password string
enabled_codecs []string
metadata string
num_participants u32
num_connected_participants u32
active_recording bool
}
pub struct ListRoomsResponse {
pub:
rooms []Room
pub struct CreateRoomArgs {
pub mut:
name string
empty_timeout u32
max_participants u32
metadata string
}
pub fn (c Client) list_rooms(params ListRoomsParams) !ListRoomsResponse {
// Prepare request body
request := params
request_json := json.encode(request)
// create token and give grant to list rooms
mut token := c.new_access_token()!
token.grants.video.room_list = true
// make POST request
url := '${c.url}/twirp/livekit.RoomService/ListRooms'
// Configure HTTP request
mut headers := http.new_header_from_map({
http.CommonHeader.authorization: 'Bearer ${token.to_jwt()!}'
http.CommonHeader.content_type: 'application/json'
})
response := http.fetch(http.FetchConfig{
url: url
method: .post
header: headers
data: request_json
})!
if response.status_code != 200 {
return error('Failed to list rooms: ${response.status_code}')
}
// Parse response
rooms_response := json.decode(ListRoomsResponse, response.body) or {
return error('Failed to parse response: ${err}')
}
return rooms_response
pub struct UpdateRoomMetadataArgs {
pub mut:
room_name string
metadata string
}
pub fn (mut c LivekitClient) create_room(args CreateRoomArgs) !Room {
mut resp := c.post('twirp/livekit.RoomService/CreateRoom', args)!
room := json.decode[Room](resp.body)!
return room
}
pub fn (mut c LivekitClient) delete_room(room_name string) ! {
_ = c.post('twirp/livekit.RoomService/DeleteRoom', {'room': room_name})!
}
pub fn (mut c LivekitClient) update_room_metadata(args UpdateRoomMetadataArgs) ! {
_ = c.post('twirp/livekit.RoomService/UpdateRoomMetadata', args)!
}

View File

@@ -1,34 +1,52 @@
module livekit
import jwt
import time
import rand
import crypto.hmac
import crypto.sha256
import encoding.base64
import json
// Define AccessTokenOptions struct
@[params]
pub struct AccessTokenOptions {
pub struct AccessToken {
pub mut:
ttl int = 21600 // TTL in seconds
name string // Display name for the participant
identity string // Identity of the user
metadata string // Custom metadata to be passed to participants
api_key string
api_secret string
identity string
name string
ttl int
video_grant VideoGrant
}
// Constructor for AccessToken
pub fn (client Client) new_access_token(options AccessTokenOptions) !AccessToken {
pub struct VideoGrant {
pub mut:
room_create bool
room_admin bool
room_join bool
room_list bool
can_publish bool
can_subscribe bool
can_publish_data bool
room string
}
pub fn (mut c LivekitClient) new_access_token(identity string, name string, ttl int) !AccessToken {
return AccessToken{
api_key: client.api_key
api_secret: client.api_secret
identity: options.identity
ttl: options.ttl
grants: ClaimGrants{
exp: time.now().unix() + options.ttl
iss: client.api_key
sub: options.name
name: options.name
}
api_key: c.api_key
api_secret: c.api_secret
identity: identity
name: name
ttl: ttl
}
}
pub fn (mut t AccessToken) add_video_grant(grant VideoGrant) {
t.video_grant = grant
}
pub fn (t AccessToken) to_jwt() !string {
mut claims := jwt.new_claims()
claims.iss = t.api_key
claims.sub = t.identity
claims.exp = time.now().unix_time() + t.ttl
claims.nbf = time.now().unix_time()
claims.iat = time.now().unix_time()
claims.name = t.name
claims.video = t.video_grant
return jwt.encode(claims, t.api_secret, .hs256)
}

View File

@@ -0,0 +1,51 @@
module traefik
import freeflowuniverse.herolib.core.texttools
import freeflowuniverse.herolib.core.redisclient
import freeflowuniverse.herolib.osal.traefik as osal_traefik
__global (
traefik_managers map[string]&TraefikManager
)
@[params]
pub struct FactoryArgs {
pub mut:
name string = 'default'
redis_url string = '127.0.0.1:6379'
}
pub fn new(args FactoryArgs) !&TraefikManager {
name := texttools.name_fix(args.name)
if name in traefik_managers {
return traefik_managers[name] or { return error('traefik manager with name "${name}" does not exist') }
}
mut redis := redisclient.core_get(redisclient.get_redis_url(args.redis_url)!)!
mut manager := &TraefikManager{
name: name
redis: redis
config: osal_traefik.new_traefik_config()
}
// Set redis connection in config
manager.config.redis = redis
traefik_managers[name] = manager
return manager
}
pub fn get(args FactoryArgs) !&TraefikManager {
name := texttools.name_fix(args.name)
return traefik_managers[name] or {
return error('traefik manager with name "${name}" does not exist')
}
}
pub fn default() !&TraefikManager {
if traefik_managers.len == 0 {
return new(name: 'default')!
}
return get(name: 'default')!
}

View File

@@ -0,0 +1,154 @@
module traefik
import freeflowuniverse.herolib.core.redisclient
import freeflowuniverse.herolib.osal.traefik as osal_traefik
import freeflowuniverse.herolib.core.texttools
@[heap]
pub struct TraefikManager {
pub mut:
name string
redis &redisclient.Redis
config osal_traefik.TraefikConfig
entrypoints []EntryPointConfig
}
pub struct EntryPointConfig {
pub mut:
name string @[required]
address string @[required]
tls bool
}
@[params]
pub struct RouterAddArgs {
pub mut:
name string @[required]
rule string @[required]
service string @[required]
entrypoints []string
middlewares []string
tls bool
priority int
}
@[params]
pub struct ServiceAddArgs {
pub mut:
name string @[required]
servers []string @[required]
strategy string = 'wrr' // wrr or p2c
}
@[params]
pub struct MiddlewareAddArgs {
pub mut:
name string @[required]
typ string @[required]
settings map[string]string
}
@[params]
pub struct EntryPointAddArgs {
pub mut:
name string @[required]
address string @[required]
tls bool
}
// Add router configuration
pub fn (mut tm TraefikManager) router_add(args RouterAddArgs) ! {
tm.config.add_route(
name: texttools.name_fix(args.name)
rule: args.rule
service: texttools.name_fix(args.service)
middlewares: args.middlewares.map(texttools.name_fix(it))
priority: args.priority
tls: args.tls
)
}
// Add service configuration
pub fn (mut tm TraefikManager) service_add(args ServiceAddArgs) ! {
mut servers := []osal_traefik.ServerConfig{}
for server_url in args.servers {
servers << osal_traefik.ServerConfig{
url: server_url.trim_space()
}
}
tm.config.add_service(
name: texttools.name_fix(args.name)
load_balancer: osal_traefik.LoadBalancerConfig{
servers: servers
}
)
}
// Add middleware configuration
pub fn (mut tm TraefikManager) middleware_add(args MiddlewareAddArgs) ! {
tm.config.add_middleware(
name: texttools.name_fix(args.name)
typ: args.typ
settings: args.settings
)
}
// Add entrypoint configuration (stored separately as these are typically static config)
pub fn (mut tm TraefikManager) entrypoint_add(args EntryPointAddArgs) ! {
entrypoint := EntryPointConfig{
name: texttools.name_fix(args.name)
address: args.address
tls: args.tls
}
// Check if entrypoint already exists
for mut ep in tm.entrypoints {
if ep.name == entrypoint.name {
ep.address = entrypoint.address
ep.tls = entrypoint.tls
return
}
}
tm.entrypoints << entrypoint
}
// Apply all configurations to Redis
pub fn (mut tm TraefikManager) apply() ! {
// Apply dynamic configuration (routers, services, middlewares)
tm.config.set()!
// Store entrypoints separately (these would typically be in static config)
for ep in tm.entrypoints {
tm.redis.hset('traefik:entrypoints', ep.name, '${ep.address}|${ep.tls}')!
}
}
// Get all entrypoints
pub fn (mut tm TraefikManager) entrypoints_get() ![]EntryPointConfig {
return tm.entrypoints.clone()
}
// Clear all configurations
pub fn (mut tm TraefikManager) clear() ! {
tm.config = osal_traefik.new_traefik_config()
tm.config.redis = tm.redis
tm.entrypoints = []EntryPointConfig{}
// Clear Redis keys
keys := tm.redis.keys('traefik/*')!
for key in keys {
tm.redis.del(key)!
}
}
// Get configuration status
pub fn (mut tm TraefikManager) status() !map[string]int {
return {
'routers': tm.config.routers.len
'services': tm.config.services.len
'middlewares': tm.config.middlewares.len
'entrypoints': tm.entrypoints.len
}
}

168
lib/clients/traefik/play.v Normal file
View File

@@ -0,0 +1,168 @@
module traefik
import freeflowuniverse.herolib.core.playbook { PlayBook }
import freeflowuniverse.herolib.core.texttools
import freeflowuniverse.herolib.ui.console
pub fn play(mut plbook PlayBook) ! {
if !plbook.exists(filter: 'traefik.') {
return
}
// Get or create default traefik manager
mut manager := default()!
// Process entrypoints first
play_entrypoints(mut plbook, mut manager)!
// Process services (before routers that might reference them)
play_services(mut plbook, mut manager)!
// Process middlewares (before routers that might reference them)
play_middlewares(mut plbook, mut manager)!
// Process routers
play_routers(mut plbook, mut manager)!
// Apply all configurations to Redis
manager.apply()!
console.print_debug('Traefik configuration applied successfully')
}
fn play_entrypoints(mut plbook PlayBook, mut manager TraefikManager) ! {
entrypoint_actions := plbook.find(filter: 'traefik.entrypoint')!
for mut action in entrypoint_actions {
mut p := action.params
manager.entrypoint_add(
name: p.get('name')!
address: p.get('address')!
tls: p.get_default_false('tls')
)!
action.done = true
}
}
fn play_routers(mut plbook PlayBook, mut manager TraefikManager) ! {
router_actions := plbook.find(filter: 'traefik.router')!
for mut action in router_actions {
mut p := action.params
// Parse entrypoints list
mut entrypoints := []string{}
if entrypoints_str := p.get_default('entrypoints', '') {
if entrypoints_str.len > 0 {
entrypoints = entrypoints_str.split(',').map(it.trim_space())
}
}
// Parse middlewares list
mut middlewares := []string{}
if middlewares_str := p.get_default('middlewares', '') {
if middlewares_str.len > 0 {
middlewares = middlewares_str.split(',').map(it.trim_space())
}
}
manager.router_add(
name: p.get('name')!
rule: p.get('rule')!
service: p.get('service')!
entrypoints: entrypoints
middlewares: middlewares
tls: p.get_default_false('tls')
priority: p.get_int_default('priority', 0)!
)!
action.done = true
}
}
fn play_services(mut plbook PlayBook, mut manager TraefikManager) ! {
service_actions := plbook.find(filter: 'traefik.service')!
for mut action in service_actions {
mut p := action.params
// Parse servers list
servers_str := p.get('servers')!
servers := servers_str.split(',').map(it.trim_space())
manager.service_add(
name: p.get('name')!
servers: servers
strategy: p.get_default('strategy', 'wrr')!
)!
action.done = true
}
}
fn play_middlewares(mut plbook PlayBook, mut manager TraefikManager) ! {
middleware_actions := plbook.find(filter: 'traefik.middleware')!
for mut action in middleware_actions {
mut p := action.params
// Build settings map from remaining parameters
mut settings := map[string]string{}
middleware_type := p.get('type')!
// Handle common middleware types
match middleware_type {
'basicAuth' {
if users := p.get_default('users', '') {
settings['users'] = '["${users}"]'
}
}
'stripPrefix' {
if prefixes := p.get_default('prefixes', '') {
settings['prefixes'] = '["${prefixes}"]'
}
}
'addPrefix' {
if prefix := p.get_default('prefix', '') {
settings['prefix'] = prefix
}
}
'headers' {
if custom_headers := p.get_default('customRequestHeaders', '') {
settings['customRequestHeaders'] = custom_headers
}
if custom_response_headers := p.get_default('customResponseHeaders', '') {
settings['customResponseHeaders'] = custom_response_headers
}
}
'rateLimit' {
if rate := p.get_default('rate', '') {
settings['rate'] = rate
}
if burst := p.get_default('burst', '') {
settings['burst'] = burst
}
}
else {
// For other middleware types, get all parameters as settings
param_map := p.get_map()
for key, value in param_map {
if key !in ['name', 'type'] {
settings[key] = value
}
}
}
}
manager.middleware_add(
name: p.get('name')!
typ: middleware_type
settings: settings
)!
action.done = true
}
}
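A heroscript sketch of the actions handled above (action names and keys mirror the code; addresses and rules are illustrative):
```heroscript
!!traefik.entrypoint
name:'web'
address:':80'
!!traefik.service
name:'myapp'
servers:'http://10.0.0.10:8080,http://10.0.0.11:8080'
strategy:'wrr'
!!traefik.middleware
name:'apiprefix'
type:'stripPrefix'
prefixes:'/api'
!!traefik.router
name:'myrouter'
rule:'Host(`example.com`)'
service:'myapp'
entrypoints:'web'
middlewares:'apiprefix'
```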

View File

@@ -2,6 +2,80 @@
This module provides functionality for managing DNS records in Redis for use with CoreDNS. It supports various DNS record types and provides a simple interface for adding and managing DNS records.
## Heroscript Examples
The following examples demonstrate how to define DNS records using heroscript actions:
### A Record
```
!!dns.a_record
sub_domain: 'host1'
ip: '1.2.3.4'
ttl: 300
```
### AAAA Record
```
!!dns.aaaa_record
sub_domain: 'host1'
ip: '2001:db8::1'
ttl: 300
```
### MX Record
```
!!dns.mx_record
sub_domain: '*'
host: 'mail.example.com'
preference: 10
ttl: 300
```
### TXT Record
```
!!dns.txt_record
sub_domain: '*'
text: 'v=spf1 mx ~all'
ttl: 300
```
### SRV Record
```
!!dns.srv_record
service: 'ssh'
protocol: 'tcp'
host: 'host1'
target: 'sip.example.com'
port: 5060
priority: 10
weight: 100
ttl: 300
```
### NS Record
```
!!dns.ns_record
sub_domain: '@'
host: 'ns1.example.com'
ttl: 300
```
### SOA Record
```
!!dns.soa_record
mbox: 'hostmaster.example.com'
ns: 'ns1.example.com'
refresh: 44
retry: 55
expire: 66
minttl: 100
ttl: 300
```
## V Examples
```v
import freeflowuniverse.herolib.osal.core.coredns
@@ -93,3 +167,5 @@ SOARecord {
ttl int // Default: 300
}
```

94
lib/osal/linux/play.v Normal file
View File

@@ -0,0 +1,94 @@
module linux
import freeflowuniverse.herolib.core.playbook { PlayBook }
pub fn play(mut plbook PlayBook) ! {
if !plbook.exists(filter: 'usermgmt.') {
return
}
mut lf := new()!
// Process user_create actions
play_user_create(mut plbook, mut lf)!
// Process user_delete actions
play_user_delete(mut plbook, mut lf)!
// Process sshkey_create actions
play_sshkey_create(mut plbook, mut lf)!
// Process sshkey_delete actions
play_sshkey_delete(mut plbook, mut lf)!
}
fn play_user_create(mut plbook PlayBook, mut lf LinuxFactory) ! {
mut actions := plbook.find(filter: 'usermgmt.user_create')!
for mut action in actions {
mut p := action.params
mut args := UserCreateArgs{
name: p.get('name')!
giteakey: p.get_default('giteakey', '')!
giteaurl: p.get_default('giteaurl', '')!
passwd: p.get_default('passwd', '')!
description: p.get_default('description', '')!
email: p.get_default('email', '')!
tel: p.get_default('tel', '')!
sshkey: p.get_default('sshkey', '')! // SSH public key
}
lf.user_create(args)!
action.done = true
}
}
fn play_user_delete(mut plbook PlayBook, mut lf LinuxFactory) ! {
mut actions := plbook.find(filter: 'usermgmt.user_delete')!
for mut action in actions {
mut p := action.params
mut args := UserDeleteArgs{
name: p.get('name')!
}
lf.user_delete(args)!
action.done = true
}
}
fn play_sshkey_create(mut plbook PlayBook, mut lf LinuxFactory) ! {
mut actions := plbook.find(filter: 'usermgmt.sshkey_create')!
for mut action in actions {
mut p := action.params
mut args := SSHKeyCreateArgs{
username: p.get('username')!
sshkey_name: p.get('sshkey_name')!
sshkey_pub: p.get_default('sshkey_pub', '')!
sshkey_priv: p.get_default('sshkey_priv', '')!
}
lf.sshkey_create(args)!
action.done = true
}
}
fn play_sshkey_delete(mut plbook PlayBook, mut lf LinuxFactory) ! {
mut actions := plbook.find(filter: 'usermgmt.sshkey_delete')!
for mut action in actions {
mut p := action.params
mut args := SSHKeyDeleteArgs{
username: p.get('username')!
sshkey_name: p.get('sshkey_name')!
}
lf.sshkey_delete(args)!
action.done = true
}
}
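The usermgmt actions parsed above could be driven by heroscript such as the following (keys mirror the params read in the code; values are placeholders):
```heroscript
!!usermgmt.user_create
name:'testuser'
email:'testuser@example.com'
sshkey:'ssh-ed25519 AAAA... testuser@host'
!!usermgmt.sshkey_create
username:'testuser'
sshkey_name:'testkey'
!!usermgmt.sshkey_delete
username:'testuser'
sshkey_name:'testkey'
!!usermgmt.user_delete
name:'testuser'
```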

View File

@@ -57,51 +57,19 @@ chown root:ourworld /code
chmod 2775 /code # rwx for user+group, SGID bit so new files inherit group
echo "✅ /code prepared (group=ourworld, rwx for group, SGID bit set)"
# --- create login helper script for gpg-agent ---
PROFILE_SCRIPT="$USERHOME/.profile_gpgagent"
# --- create login helper script for ssh-agent ---
PROFILE_SCRIPT="$USERHOME/.profile_sshagent"
cat > "$PROFILE_SCRIPT" <<'EOF'
# Auto-start gpg-agent with SSH support if not running
mkdir -p "$HOME/.gnupg"
chmod 700 "$HOME/.gnupg"
# Always overwrite gpg-agent.conf with required config
cat > "$HOME/.gnupg/gpg-agent.conf" <<CONF
enable-ssh-support
default-cache-ttl 7200
max-cache-ttl 7200
CONF
# Kill old agent if any (so config is applied)
gpgconf --kill gpg-agent 2>/dev/null || true
# Launch gpg-agent
gpgconf --launch gpg-agent
# Export socket path so ssh-add works
export SSH_AUTH_SOCK="$(gpgconf --list-dirs agent-ssh-socket)"
# Load all private keys found in ~/.ssh
if [ -d "$HOME/.ssh" ]; then
for KEY in "$HOME"/.ssh/*; do
if [ -f "$KEY" ] && grep -q "PRIVATE KEY" "$KEY" 2>/dev/null; then
ssh-add "$KEY" >/dev/null 2>&1 && echo "🔑 Loaded key: $KEY"
fi
done
fi
# For interactive shells
if [[ $- == *i* ]]; then
echo "🔑 GPG Agent ready at \$SSH_AUTH_SOCK"
fi
EOF
# Auto-start ssh-agent if not running
SSH_AGENT_PID_FILE="$HOME/.ssh/agent.pid"
SSH_AUTH_SOCK_FILE="$HOME/.ssh/agent.sock"
chown "$NEWUSER":"$NEWUSER" "$PROFILE_SCRIPT"
chmod 644 "$PROFILE_SCRIPT"
# --- source it on login ---
if ! grep -q ".profile_gpgagent" "$USERHOME/.bashrc"; then
echo "[ -f ~/.profile_gpgagent ] && source ~/.profile_gpgagent" >> "$USERHOME/.bashrc"
if ! grep -q ".profile_sshagent" "$USERHOME/.bashrc"; then
echo "[ -f ~/.profile_sshagent ] && source ~/.profile_sshagent" >> "$USERHOME/.bashrc"
fi
echo "🎉 Setup complete for user $NEWUSER"

View File

@@ -1,29 +1,354 @@
module linux
// import freeflowuniverse.herolib.osal.core as osal
import freeflowuniverse.herolib.core.texttools
// import freeflowuniverse.herolib.screen
import os
import time
// import freeflowuniverse.herolib.ui.console
import json
import freeflowuniverse.herolib.core.pathlib
import freeflowuniverse.herolib.osal.core as osal
import freeflowuniverse.herolib.ui.console
@[heap]
pub struct LinuxFactory {
@[params]
pub struct UserCreateArgs {
pub mut:
username string
name string @[required]
giteakey string
giteaurl string
passwd string
description string
email string
tel string
sshkey string // SSH public key
}
@[params]
pub struct LinuxNewArgs {
pub:
username string
pub struct UserDeleteArgs {
pub mut:
name string @[required]
}
// return screen instance
pub fn new(args LinuxNewArgs) !LinuxFactory {
mut t := LinuxFactory{
username: args.username
}
return t
@[params]
pub struct SSHKeyCreateArgs {
pub mut:
username string @[required]
sshkey_name string @[required]
sshkey_pub string
sshkey_priv string
}
@[params]
pub struct SSHKeyDeleteArgs {
pub mut:
username string @[required]
sshkey_name string @[required]
}
struct UserConfig {
pub mut:
name string
giteakey string
giteaurl string
email string
description string
tel string
}
// Check if running as root
pub fn (mut lf LinuxFactory) check_root() ! {
if os.getuid() != 0 {
return error(' Must be run as root')
}
}
// Create a new user with all the configuration
pub fn (mut lf LinuxFactory) user_create(args UserCreateArgs) ! {
lf.check_root()!
console.print_header('Creating user: ${args.name}')
// Save config to ~/hero/cfg/myconfig.json
lf.save_user_config(args)!
// Create user using system commands
lf.create_user_system(args)!
}
// Delete a user
pub fn (mut lf LinuxFactory) user_delete(args UserDeleteArgs) ! {
lf.check_root()!
console.print_header('Deleting user: ${args.name}')
// Check if user exists
if !osal.user_exists(args.name) {
return error('User ${args.name} does not exist')
}
// Delete user and home directory
osal.exec(cmd: 'userdel -r ${args.name}')!
console.print_green(' User ${args.name} deleted')
// Remove from config
lf.remove_user_config(args.name)!
}
// Create SSH key for user
pub fn (mut lf LinuxFactory) sshkey_create(args SSHKeyCreateArgs) ! {
lf.check_root()!
console.print_header('Creating SSH key for user: ${args.username}')
user_home := '/home/${args.username}'
ssh_dir := '${user_home}/.ssh'
// Ensure SSH directory exists
osal.dir_ensure(ssh_dir)!
osal.exec(cmd: 'chmod 700 ${ssh_dir}')!
if args.sshkey_priv != '' && args.sshkey_pub != '' {
// Both private and public keys provided
priv_path := '${ssh_dir}/${args.sshkey_name}'
pub_path := '${ssh_dir}/${args.sshkey_name}.pub'
osal.file_write(priv_path, args.sshkey_priv)!
osal.file_write(pub_path, args.sshkey_pub)!
// Set permissions
osal.exec(cmd: 'chmod 600 ${priv_path}')!
osal.exec(cmd: 'chmod 644 ${pub_path}')!
console.print_green(' SSH keys installed for ${args.username}')
} else {
// Generate new SSH key (modern ed25519)
key_path := '${ssh_dir}/${args.sshkey_name}'
osal.exec(cmd: 'ssh-keygen -t ed25519 -f ${key_path} -N "" -C "${args.username}@\$(hostname)"')!
console.print_green(' New SSH key generated for ${args.username}')
}
// Set ownership
osal.exec(cmd: 'chown -R ${args.username}:${args.username} ${ssh_dir}')!
}
// Delete SSH key for user
pub fn (mut lf LinuxFactory) sshkey_delete(args SSHKeyDeleteArgs) ! {
lf.check_root()!
console.print_header('Deleting SSH key for user: ${args.username}')
user_home := '/home/${args.username}'
ssh_dir := '${user_home}/.ssh'
priv_path := '${ssh_dir}/${args.sshkey_name}'
pub_path := '${ssh_dir}/${args.sshkey_name}.pub'
// Remove keys if they exist
if os.exists(priv_path) {
os.rm(priv_path)!
console.print_green(' Removed private key: ${priv_path}')
}
if os.exists(pub_path) {
os.rm(pub_path)!
console.print_green(' Removed public key: ${pub_path}')
}
}
// Save user configuration to JSON file
fn (mut lf LinuxFactory) save_user_config(args UserCreateArgs) ! {
config_dir := '${os.home_dir()}/hero/cfg'
osal.dir_ensure(config_dir)!
config_path := '${config_dir}/myconfig.json'
mut configs := []UserConfig{}
// Load existing configs if file exists
if os.exists(config_path) {
content := osal.file_read(config_path)!
configs = json.decode([]UserConfig, content) or { []UserConfig{} }
}
// Check if user already exists in config
mut found_idx := -1
for i, config in configs {
if config.name == args.name {
found_idx = i
break
}
}
new_config := UserConfig{
name: args.name
giteakey: args.giteakey
giteaurl: args.giteaurl
email: args.email
description: args.description
tel: args.tel
}
if found_idx >= 0 {
configs[found_idx] = new_config
} else {
configs << new_config
}
// Save updated configs
content := json.encode_pretty(configs)
osal.file_write(config_path, content)!
console.print_green(' User config saved to ${config_path}')
}
// Remove user from configuration
fn (mut lf LinuxFactory) remove_user_config(username string) ! {
config_dir := '${os.home_dir()}/hero/cfg'
config_path := '${config_dir}/myconfig.json'
if !os.exists(config_path) {
return // Nothing to remove
}
content := osal.file_read(config_path)!
mut configs := json.decode([]UserConfig, content) or { return }
// Filter out the user
configs = configs.filter(it.name != username)
// Save updated configs
updated_content := json.encode_pretty(configs)
osal.file_write(config_path, updated_content)!
console.print_green(' User config removed for ${username}')
}
// Create user in the system
fn (mut lf LinuxFactory) create_user_system(args UserCreateArgs) ! {
// Check if user exists
if osal.user_exists(args.name) {
console.print_green(' User ${args.name} already exists')
} else {
console.print_item(' Creating user ${args.name}')
osal.exec(cmd: 'useradd -m -s /bin/bash ${args.name}')!
}
user_home := '/home/${args.name}'
// Setup SSH if key provided
if args.sshkey != '' {
ssh_dir := '${user_home}/.ssh'
osal.dir_ensure(ssh_dir)!
osal.exec(cmd: 'chmod 700 ${ssh_dir}')!
authorized_keys := '${ssh_dir}/authorized_keys'
osal.file_write(authorized_keys, args.sshkey)!
osal.exec(cmd: 'chmod 600 ${authorized_keys}')!
osal.exec(cmd: 'chown -R ${args.name}:${args.name} ${ssh_dir}')!
console.print_green(' SSH key installed for ${args.name}')
}
// Ensure ourworld group exists
group_check := osal.exec(cmd: 'getent group ourworld', raise_error: false) or {
osal.Job{ exit_code: 1 }
}
if group_check.exit_code != 0 {
console.print_item(' Creating group ourworld')
osal.exec(cmd: 'groupadd ourworld')!
} else {
console.print_green(' Group ourworld exists')
}
// Add user to group
user_groups := osal.exec(cmd: 'id -nG ${args.name}', stdout: false)!
if !user_groups.output.contains('ourworld') {
osal.exec(cmd: 'usermod -aG ourworld ${args.name}')!
console.print_green(' Added ${args.name} to ourworld group')
} else {
console.print_green(' ${args.name} already in ourworld')
}
// Setup /code directory
osal.dir_ensure('/code')!
osal.exec(cmd: 'chown root:ourworld /code')!
osal.exec(cmd: 'chmod 2775 /code')! // rwx for user+group, SGID bit
console.print_green(' /code prepared (group=ourworld, rwx for group, SGID bit set)')
// Create SSH agent profile script
lf.create_ssh_agent_profile(args.name)!
// Set password if provided
if args.passwd != '' {
osal.exec(cmd: 'echo "${args.name}:${args.passwd}" | chpasswd')!
console.print_green(' Password set for ${args.name}')
}
console.print_header('🎉 Setup complete for user ${args.name}')
}
// Create SSH agent profile script
fn (mut lf LinuxFactory) create_ssh_agent_profile(username string) ! {
user_home := '/home/${username}'
profile_script := '${user_home}/.profile_sshagent'
script_content := '# Auto-start ssh-agent if not running
SSH_AGENT_PID_FILE="\$HOME/.ssh/agent.pid"
SSH_AUTH_SOCK_FILE="\$HOME/.ssh/agent.sock"
# Function to start ssh-agent
start_ssh_agent() {
mkdir -p "\$HOME/.ssh"
chmod 700 "\$HOME/.ssh"
# Start ssh-agent and save connection info
ssh-agent -s > "\$SSH_AGENT_PID_FILE"
source "\$SSH_AGENT_PID_FILE"
# Save socket path for future sessions
echo "\$SSH_AUTH_SOCK" > "\$SSH_AUTH_SOCK_FILE"
# Load all private keys found in ~/.ssh
if [ -d "\$HOME/.ssh" ]; then
for KEY in "\$HOME"/.ssh/*; do
if [ -f "\$KEY" ] && [ ! "\${KEY##*.}" = "pub" ] && grep -q "PRIVATE KEY" "\$KEY" 2>/dev/null; then
ssh-add "\$KEY" >/dev/null 2>&1 && echo "🔑 Loaded key: \$(basename \$KEY)"
fi
done
fi
}
# Check if ssh-agent is running
if [ -f "\$SSH_AGENT_PID_FILE" ]; then
source "\$SSH_AGENT_PID_FILE" >/dev/null 2>&1
# Test if agent is responsive
if ! ssh-add -l >/dev/null 2>&1; then
start_ssh_agent
else
# Agent is running, restore socket path
if [ -f "\$SSH_AUTH_SOCK_FILE" ]; then
export SSH_AUTH_SOCK=\$(cat "\$SSH_AUTH_SOCK_FILE")
fi
fi
else
start_ssh_agent
fi
# For interactive shells
if [[ \$- == *i* ]]; then
echo "🔑 SSH Agent ready at \$SSH_AUTH_SOCK"
# Show loaded keys
KEY_COUNT=\$(ssh-add -l 2>/dev/null | wc -l)
if [ "\$KEY_COUNT" -gt 0 ]; then
echo "🔑 \$KEY_COUNT SSH key(s) loaded"
fi
fi
'
osal.file_write(profile_script, script_content)!
osal.exec(cmd: 'chown ${username}:${username} ${profile_script}')!
osal.exec(cmd: 'chmod 644 ${profile_script}')!
// Source it on login
bashrc := '${user_home}/.bashrc'
bashrc_content := if os.exists(bashrc) { osal.file_read(bashrc)! } else { '' }
if !bashrc_content.contains('.profile_sshagent') {
source_line := '[ -f ~/.profile_sshagent ] && source ~/.profile_sshagent\n'
osal.file_write(bashrc, bashrc_content + source_line)!
}
console.print_green(' SSH agent profile created for ${username}')
}

211
lib/osal/sshagent/agent.v Normal file
View File

@@ -0,0 +1,211 @@
module sshagent
import freeflowuniverse.herolib.ui.console
import freeflowuniverse.herolib.builder
// Check if SSH agent is properly configured and all is good
fn agent_check(mut agent SSHAgent) ! {
console.print_header('SSH Agent Check')
// Ensure single agent is running
agent.ensure_single_agent()!
// Get diagnostics
diag := agent.diagnostics()
for key, value in diag {
console.print_item('${key}: ${value}')
}
// Verify agent is responsive
if !agent.is_agent_responsive() {
return error('SSH agent is not responsive')
}
// Load all existing keys from ~/.ssh that aren't loaded yet
agent.init()!
console.print_green(' SSH Agent is properly configured and running')
// Show loaded keys
loaded_keys := agent.keys_loaded()!
console.print_item('Loaded keys: ${loaded_keys.len}')
for key in loaded_keys {
console.print_item(' - ${key.name} (${key.cat})')
}
}
// Create a new SSH key
fn sshkey_create(mut agent SSHAgent, name string, passphrase string) ! {
console.print_header('Creating SSH key: ${name}')
// Check if key already exists
if agent.exists(name: name) {
console.print_debug('SSH key "${name}" already exists')
return
}
// Generate new key
mut key := agent.generate(name, passphrase)!
console.print_green(' SSH key "${name}" created successfully')
// Automatically load the key
key.load()!
console.print_green(' SSH key "${name}" loaded into agent')
}
// Delete an SSH key
fn sshkey_delete(mut agent SSHAgent, name string) ! {
console.print_header('Deleting SSH key: ${name}')
// Check if key exists
mut key := agent.get(name: name) or {
console.print_debug('SSH key "${name}" does not exist')
return
}
// Get key paths before deletion
key_path := key.keypath() or {
console.print_debug('Private key path not available for "${name}"')
key.keypath_pub() or { return } // Just to trigger the path lookup
}
key_pub_path := key.keypath_pub() or {
console.print_debug('Public key path not available for "${name}"')
return
}
// Remove from agent if loaded (temporarily disabled due to reset_ssh panic)
// if key.loaded {
// key.forget()!
// }
// Delete key files
if key_path.exists() {
key_path.delete()!
console.print_debug('Deleted private key: ${key_path.path}')
}
if key_pub_path.exists() {
key_pub_path.delete()!
console.print_debug('Deleted public key: ${key_pub_path.path}')
}
// Reinitialize agent to update key list
agent.init()!
console.print_green(' SSH key "${name}" deleted successfully')
}
// Load SSH key into agent
fn sshkey_load(mut agent SSHAgent, name string) ! {
console.print_header('Loading SSH key: ${name}')
mut key := agent.get(name: name) or {
return error('SSH key "${name}" not found')
}
if key.loaded {
console.print_debug('SSH key "${name}" is already loaded')
return
}
key.load()!
console.print_green(' SSH key "${name}" loaded into agent')
}
// Check if SSH key is valid
fn sshkey_check(mut agent SSHAgent, name string) ! {
console.print_header('Checking SSH key: ${name}')
mut key := agent.get(name: name) or {
return error('SSH key "${name}" not found')
}
// Check if key files exist
key_path := key.keypath() or {
return error('Private key file not found for "${name}"')
}
key_pub_path := key.keypath_pub() or {
return error('Public key file not found for "${name}"')
}
if !key_path.exists() {
return error('Private key file does not exist: ${key_path.path}')
}
if !key_pub_path.exists() {
return error('Public key file does not exist: ${key_pub_path.path}')
}
// Verify key can be loaded (if not already loaded)
if !key.loaded {
// Test load without actually loading (since forget is disabled)
key_content := key_path.read()!
if !key_content.contains('PRIVATE KEY') {
return error('Invalid private key format in "${name}"')
}
}
console.print_item('Key type: ${key.cat}')
console.print_item('Loaded: ${key.loaded}')
console.print_item('Email: ${key.email}')
console.print_item('Private key: ${key_path.path}')
console.print_item('Public key: ${key_pub_path.path}')
console.print_green(' SSH key "${name}" is valid')
}
// Copy private key to remote node
fn remote_copy(mut agent SSHAgent, node_addr string, key_name string) ! {
console.print_header('Copying SSH key "${key_name}" to ${node_addr}')
// Get the key
mut key := agent.get(name: key_name) or {
return error('SSH key "${key_name}" not found')
}
// Create builder node
mut b := builder.new()!
mut node := b.node_new(ipaddr: node_addr)!
// Get private key content
key_path := key.keypath()!
if !key_path.exists() {
return error('Private key file not found: ${key_path.path}')
}
private_key_content := key_path.read()!
// Get home directory on remote
home_dir := node.environ_get()!['HOME'] or {
return error('Could not determine HOME directory on remote node')
}
remote_ssh_dir := '${home_dir}/.ssh'
remote_key_path := '${remote_ssh_dir}/${key_name}'
// Ensure .ssh directory exists with correct permissions
node.exec_silent('mkdir -p ${remote_ssh_dir}')!
node.exec_silent('chmod 700 ${remote_ssh_dir}')!
// Copy private key to remote
node.file_write(remote_key_path, private_key_content)!
node.exec_silent('chmod 600 ${remote_key_path}')!
// Generate public key on remote
node.exec_silent('ssh-keygen -y -f ${remote_key_path} > ${remote_key_path}.pub')!
node.exec_silent('chmod 644 ${remote_key_path}.pub')!
console.print_green(' SSH key "${key_name}" copied to ${node_addr}')
}
// Add public key to authorized_keys on remote node
fn remote_auth(mut agent SSHAgent, node_addr string, key_name string) ! {
console.print_header('Adding SSH key "${key_name}" to authorized_keys on ${node_addr}')
// Create builder node
mut b := builder.new()!
mut node := b.node_new(ipaddr: node_addr)!
// Use existing builder integration
agent.push_key_to_node(mut node, key_name)!
console.print_green(' SSH key "${key_name}" added to authorized_keys on ${node_addr}')
}

View File

@@ -1,128 +0,0 @@
module sshagent
// import freeflowuniverse.herolib.ui.console
// will see if there is one ssh key in sshagent
// or if not, if there is 1 ssh key in ${agent.homepath.path}/ if yes will load
// if we were able to define the key to use, it will be returned here
// will return the key which will be used
// pub fn load_interactive() ! {
// mut pubkeys := pubkeys_get()
// mut c := console.UIConsole{}
// pubkeys.map(listsplit)
// if pubkeys.len == 1 {
// c.ask_yesno(
// description: 'We found sshkey ${pubkeys[0]} in sshagent, want to use this one?'
// )!
// {
// key_load(pubkeys[0])!
// return pubkeys[0]
// }
// }
// if pubkeys.len > 1 {
// if c.ask_yesno(
// description: 'We found more than 1 sshkey in sshagent, want to use one of those!'
// )!
// {
// // keytouse := console.ask_dropdown(
// // items: pubkeys
// // description: 'Please choose the ssh key you want to use'
// // )
// // key_load(keytouse)!
// // return keytouse
// }
// }
// // now means nothing in ssh-agent, lets see if we find 1 key in .ssh directory
// mut sshdirpath := pathlib.get_dir(path: '${os.home_dir()}/.ssh', create: true)!
// mut pubkeys := []string{}
// pl := sshdirpath.list(recursive: false)!
// for p in pl.paths {
// if p.path.ends_with('.pub') {
// pubkeys << p.path.replace('.pub', '')
// }
// }
// // console.print_debug(keypaths)
// if pubkeys.len == 1 {
// if c.ask_yesno(
// description: 'We found sshkey ${pubkeys[0]} in ${agent.homepath.path} dir, want to use this one?'
// )!
// {
// key_load(pubkeys[0])!
// return pubkeys[0]
// }
// }
// if pubkeys.len > 1 {
// if c.ask_yesno(
// description: 'We found more than 1 sshkey in ${agent.homepath.path} dir, want to use one of those?'
// )!
// {
// // keytouse := console.ask_dropdown(
// // items: pubkeys
// // description: 'Please choose the ssh key you want to use'
// // )
// // key_load(keytouse)!
// // return keytouse
// }
// }
// will see if there is one ssh key in sshagent
// or if not, if there is 1 ssh key in ${agent.homepath.path}/ if yes will return
// if we were able to define the key to use, it will be returned here
// pub fn pubkey_guess() !string {
// pubkeys := pubkeys_get()
// if pubkeys.len == 1 {
// return pubkeys[0]
// }
// if pubkeys.len > 1 {
// return error('There is more than 1 ssh-key loaded in ssh-agent, cannot identify which one to use.')
// }
// // now means nothing in ssh-agent, lets see if we find 1 key in .ssh directory
// mut sshdirpath := pathlib.get_dir(path: '${os.home_dir()}/.ssh', create: true)!
// // todo: use ourregex field to nly list .pub files
// mut fl := sshdirpath.list()!
// mut sshfiles := fl.paths
// mut keypaths := sshfiles.filter(it.path.ends_with('.pub'))
// // console.print_debug(keypaths)
// if keypaths.len == 1 {
// keycontent := keypaths[0].read()!
// privkeypath := keypaths[0].path.replace('.pub', '')
// key_load(privkeypath)!
// return keycontent
// }
// if keypaths.len > 1 {
// return error('There is more than 1 ssh-key in your ${agent.homepath.path} dir, could not automatically load.')
// }
// return error('Could not find sshkey in your ssh-agent as well as in your ${agent.homepath.path} dir, please generate an ssh-key')
// }
// if c.ask_yesno(description: 'Would you like to generate a new key?') {
// // name := console.ask_question(question: 'name', minlen: 3)
// // passphrase := console.ask_question(question: 'passphrase', minlen: 5)
// // keytouse := key_generate(name, passphrase)!
// // if console.ask_yesno(description:"Please acknowledge you will remember your passphrase for ever (-: ?"){
// // key_load(keytouse)?
// // return keytouse
// // }else{
// // return error("Cannot continue, did not find sshkey to use")
// // }
// // key_load_with_passphrase(keytouse, passphrase)!
// }!
// return error('Cannot continue, did not find sshkey to use')
// // url_github_add := "https://library.threefold.me/info/publishtools/#/sshkey_github"
// // osal.execute_interactive("open $url_github_add")?
// // if console.ask_yesno(description:"Did you manage to add the github key to this repo ?"){
// // console.print_debug(" - CONGRATS: your sshkey is now loaded.")
// // }
// // return keytouse
// }

84
lib/osal/sshagent/play.v Normal file
View File

@@ -0,0 +1,84 @@
module sshagent
import freeflowuniverse.herolib.core.playbook { PlayBook }
import freeflowuniverse.herolib.ui.console
import freeflowuniverse.herolib.builder
pub fn play(mut plbook PlayBook) ! {
if !plbook.exists(filter: 'sshagent.') {
return
}
// Get or create a single SSH agent instance
mut agent := new_single()!
// Process sshagent.check actions
mut check_actions := plbook.find(filter: 'sshagent.check')!
for mut action in check_actions {
agent_check(mut agent)!
action.done = true
}
// Process sshagent.sshkey_create actions
mut create_actions := plbook.find(filter: 'sshagent.sshkey_create')!
for mut action in create_actions {
mut p := action.params
name := p.get('name')!
passphrase := p.get_default('passphrase', '')!
sshkey_create(mut agent, name, passphrase)!
action.done = true
}
// Process sshagent.sshkey_delete actions
mut delete_actions := plbook.find(filter: 'sshagent.sshkey_delete')!
for mut action in delete_actions {
mut p := action.params
name := p.get('name')!
sshkey_delete(mut agent, name)!
action.done = true
}
// Process sshagent.sshkey_load actions
mut load_actions := plbook.find(filter: 'sshagent.sshkey_load')!
for mut action in load_actions {
mut p := action.params
name := p.get('name')!
sshkey_load(mut agent, name)!
action.done = true
}
// Process sshagent.sshkey_check actions
mut check_key_actions := plbook.find(filter: 'sshagent.sshkey_check')!
for mut action in check_key_actions {
mut p := action.params
name := p.get('name')!
sshkey_check(mut agent, name)!
action.done = true
}
// Process sshagent.remote_copy actions
mut remote_copy_actions := plbook.find(filter: 'sshagent.remote_copy')!
for mut action in remote_copy_actions {
mut p := action.params
node_addr := p.get('node')!
key_name := p.get('name')!
remote_copy(mut agent, node_addr, key_name)!
action.done = true
}
// Process sshagent.remote_auth actions
mut remote_auth_actions := plbook.find(filter: 'sshagent.remote_auth')!
for mut action in remote_auth_actions {
mut p := action.params
node_addr := p.get('node')!
key_name := p.get('name')!
remote_auth(mut agent, node_addr, key_name)!
action.done = true
}
}
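An illustrative heroscript for these sshagent actions (action and key names follow the code above; the node address and key name are examples):
```heroscript
!!sshagent.check
!!sshagent.sshkey_create
name:'mykey'
!!sshagent.sshkey_load
name:'mykey'
!!sshagent.sshkey_check
name:'mykey'
!!sshagent.remote_copy
node:'192.168.1.10'
name:'mykey'
!!sshagent.remote_auth
node:'192.168.1.10'
name:'mykey'
```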

View File

@@ -15,7 +15,6 @@ FbJDzBkCJ5TDec1zGwOJAAAABWJvb2tz
-----END OPENSSH PRIVATE KEY-----
'
// make sure the name chosen is the same as the original name of the key
mut sshkey := agent.add('mykey', privkey)!

194
lib/osal/tmux/play.v Normal file
View File

@@ -0,0 +1,194 @@
module tmux
import freeflowuniverse.herolib.core.playbook { PlayBook }
import freeflowuniverse.herolib.core.texttools
import freeflowuniverse.herolib.osal.core as osal
pub fn play(mut plbook PlayBook) ! {
if !plbook.exists(filter: 'tmux.') {
return
}
// Create tmux instance
mut tmux_instance := new()!
// Start tmux if not running
if !tmux_instance.is_running()! {
tmux_instance.start()!
}
play_session_create(mut plbook, mut tmux_instance)!
play_session_delete(mut plbook, mut tmux_instance)!
play_window_create(mut plbook, mut tmux_instance)!
play_window_delete(mut plbook, mut tmux_instance)!
play_pane_execute(mut plbook, mut tmux_instance)!
play_pane_kill(mut plbook, mut tmux_instance)!
// TODO: Implement pane_create, pane_delete, pane_split when pane API is extended
}
struct ParsedWindowName {
session string
window string
}
struct ParsedPaneName {
session string
window string
pane string
}
fn parse_window_name(name string) !ParsedWindowName {
parts := name.split('|')
if parts.len != 2 {
return error('Window name must be in format "session|window", got: ${name}')
}
return ParsedWindowName{
session: texttools.name_fix(parts[0])
window: texttools.name_fix(parts[1])
}
}
fn parse_pane_name(name string) !ParsedPaneName {
parts := name.split('|')
if parts.len != 3 {
return error('Pane name must be in format "session|window|pane", got: ${name}')
}
return ParsedPaneName{
session: texttools.name_fix(parts[0])
window: texttools.name_fix(parts[1])
pane: texttools.name_fix(parts[2])
}
}
fn play_session_create(mut plbook PlayBook, mut tmux_instance Tmux) ! {
mut actions := plbook.find(filter: 'tmux.session_create')!
for mut action in actions {
mut p := action.params
session_name := p.get('name')!
reset := p.get_default_false('reset')
tmux_instance.session_create(
name: session_name
reset: reset
)!
action.done = true
}
}
fn play_session_delete(mut plbook PlayBook, mut tmux_instance Tmux) ! {
mut actions := plbook.find(filter: 'tmux.session_delete')!
for mut action in actions {
mut p := action.params
session_name := p.get('name')!
tmux_instance.session_delete(session_name)!
action.done = true
}
}
fn play_window_create(mut plbook PlayBook, mut tmux_instance Tmux) ! {
mut actions := plbook.find(filter: 'tmux.window_create')!
for mut action in actions {
mut p := action.params
name := p.get('name')!
parsed := parse_window_name(name)!
cmd := p.get_default('cmd', '')!
reset := p.get_default_false('reset')
// Parse environment variables if provided
mut env := map[string]string{}
if env_str := p.get_default('env', '') {
// Parse env as comma-separated key=value pairs
env_pairs := env_str.split(',')
for pair in env_pairs {
kv := pair.split('=')
if kv.len == 2 {
env[kv[0].trim_space()] = kv[1].trim_space()
}
}
}
// Get or create session
mut session := if tmux_instance.session_exist(parsed.session) {
tmux_instance.session_get(parsed.session)!
} else {
tmux_instance.session_create(name: parsed.session)!
}
session.window_new(
name: parsed.window
cmd: cmd
env: env
reset: reset
)!
action.done = true
}
}
fn play_window_delete(mut plbook PlayBook, mut tmux_instance Tmux) ! {
mut actions := plbook.find(filter: 'tmux.window_delete')!
for mut action in actions {
mut p := action.params
name := p.get('name')!
parsed := parse_window_name(name)!
if tmux_instance.session_exist(parsed.session) {
mut session := tmux_instance.session_get(parsed.session)!
session.window_delete(name: parsed.window)!
}
action.done = true
}
}
fn play_pane_execute(mut plbook PlayBook, mut tmux_instance Tmux) ! {
mut actions := plbook.find(filter: 'tmux.pane_execute')!
for mut action in actions {
mut p := action.params
name := p.get('name')!
cmd := p.get('cmd')!
parsed := parse_pane_name(name)!
// Find the session and window
if tmux_instance.session_exist(parsed.session) {
mut session := tmux_instance.session_get(parsed.session)!
if session.window_exist(name: parsed.window) {
mut window := session.window_get(name: parsed.window)!
// Send command to the window (goes to active pane by default)
tmux_cmd := 'tmux send-keys -t ${session.name}:@${window.id} "${cmd}" Enter'
osal.exec(cmd: tmux_cmd, stdout: false, name: 'tmux_pane_execute')!
}
}
action.done = true
}
}
fn play_pane_kill(mut plbook PlayBook, mut tmux_instance Tmux) ! {
mut actions := plbook.find(filter: 'tmux.pane_kill')!
for mut action in actions {
mut p := action.params
name := p.get('name')!
parsed := parse_pane_name(name)!
// Find the session and window, then kill the active pane
if tmux_instance.session_exist(parsed.session) {
mut session := tmux_instance.session_get(parsed.session)!
if session.window_exist(name: parsed.window) {
mut window := session.window_get(name: parsed.window)!
// Kill the active pane in the window
if pane := window.pane_active() {
tmux_cmd := 'tmux kill-pane -t ${session.name}:@${window.id}.%${pane.id}'
osal.exec(cmd: tmux_cmd, stdout: false, name: 'tmux_pane_kill', ignore_error: true)!
}
}
}
action.done = true
}
}

View File

@@ -3,6 +3,8 @@
TMUX is a very capable process manager.
> TODO: TTYD, need to integrate with TMUX for exposing TMUX over http
### Concepts
- tmux = the factory; it represents the tmux process manager, linked to a node
@@ -22,3 +24,29 @@ tmux library provides functions for managing tmux sessions
## to attach to a tmux session
> TODO:
## HeroScript Usage Examples
```heroscript
!!tmux.session_create
name:'mysession'
reset:true
!!tmux.session_delete
name:'mysession'
!!tmux.window_create
name:"mysession|mywindow"
cmd:'htop'
env:'VAR1=value1,VAR2=value2'
reset:true
!!tmux.window_delete
name:"mysession|mywindow"
!!tmux.pane_execute
name:"mysession|mywindow|mypane"
cmd:'ls -la'
!!tmux.pane_kill
name:"mysession|mywindow|mypane"
```

View File

@@ -0,0 +1,424 @@
# Traefik EntryPoints — Concise Guide (v3)
> Source docs: Traefik “Routing & Load Balancing → EntryPoints” and “Reference → Install Configuration → EntryPoints”.
## What are EntryPoints
EntryPoints are the network entry points into Traefik. They define **which port and protocol (TCP/UDP)** Traefik listens on for incoming traffic. An entryPoint can be referenced by routers (HTTP/TCP/UDP).
---
## Quick Configuration Examples
### Port 80 only
```yaml
# Static configuration
entryPoints:
web:
address: ":80"
```
```toml
[entryPoints]
[entryPoints.web]
address = ":80"
```
```bash
# CLI
--entryPoints.web.address=:80
```
### Ports 80 & 443
```yaml
entryPoints:
web:
address: ":80"
websecure:
address: ":443"
```
```toml
[entryPoints]
[entryPoints.web]
address = ":80"
[entryPoints.websecure]
address = ":443"
```
```bash
--entryPoints.web.address=:80
--entryPoints.websecure.address=:443
```
### UDP on port 1704
```yaml
entryPoints:
streaming:
address: ":1704/udp"
```
```toml
[entryPoints]
[entryPoints.streaming]
address = ":1704/udp"
```
```bash
--entryPoints.streaming.address=:1704/udp
```
### TCP **and** UDP on the same port (3179)
```yaml
entryPoints:
tcpep:
address: ":3179" # TCP
udpep:
address: ":3179/udp" # UDP
```
```toml
[entryPoints]
[entryPoints.tcpep]
address = ":3179"
[entryPoints.udpep]
address = ":3179/udp"
```
```bash
--entryPoints.tcpep.address=:3179
--entryPoints.udpep.address=:3179/udp
```
### Listen on specific IPs only
```yaml
entryPoints:
specificIPv4:
address: "192.168.2.7:8888"
specificIPv6:
address: "[2001:db8::1]:8888"
```
```toml
[entryPoints.specificIPv4]
address = "192.168.2.7:8888"
[entryPoints.specificIPv6]
address = "[2001:db8::1]:8888"
```
```bash
--entryPoints.specificIPv4.address=192.168.2.7:8888
--entryPoints.specificIPv6.address=[2001:db8::1]:8888
```
---
## General Structure (Static Configuration)
```yaml
entryPoints:
<name>:
address: ":8888" # or ":8888/tcp" or ":8888/udp"
http2:
maxConcurrentStreams: 250
http3:
advertisedPort: 443 # requires TLS; see notes
transport:
lifeCycle:
requestAcceptGraceTimeout: 42s
graceTimeOut: 42s
respondingTimeouts:
readTimeout: 60s
writeTimeout: 0s
idleTimeout: 180s
proxyProtocol:
insecure: true # trust all (testing only)
trustedIPs:
- "127.0.0.1"
- "192.168.0.1"
forwardedHeaders:
insecure: true # trust all (testing only)
trustedIPs:
- "127.0.0.1/32"
- "192.168.1.7"
connection:
- "foobar"
```
```toml
[entryPoints]
[entryPoints.name]
address = ":8888"
[entryPoints.name.http2]
maxConcurrentStreams = 250
[entryPoints.name.http3]
advertisedPort = 443
[entryPoints.name.transport]
[entryPoints.name.transport.lifeCycle]
requestAcceptGraceTimeout = "42s"
graceTimeOut = "42s"
[entryPoints.name.transport.respondingTimeouts]
readTimeout = "60s"
writeTimeout = "0s"
idleTimeout = "180s"
[entryPoints.name.proxyProtocol]
insecure = true
trustedIPs = ["127.0.0.1", "192.168.0.1"]
[entryPoints.name.forwardedHeaders]
insecure = true
trustedIPs = ["127.0.0.1/32", "192.168.1.7"]
connection = ["foobar"]
```
```bash
--entryPoints.name.address=:8888
--entryPoints.name.http2.maxConcurrentStreams=250
--entryPoints.name.http3.advertisedport=443
--entryPoints.name.transport.lifeCycle.requestAcceptGraceTimeout=42s
--entryPoints.name.transport.lifeCycle.graceTimeOut=42s
--entryPoints.name.transport.respondingTimeouts.readTimeout=60s
--entryPoints.name.transport.respondingTimeouts.writeTimeout=0s
--entryPoints.name.transport.respondingTimeouts.idleTimeout=180s
--entryPoints.name.proxyProtocol.insecure=true
--entryPoints.name.proxyProtocol.trustedIPs=127.0.0.1,192.168.0.1
--entryPoints.name.forwardedHeaders.insecure=true
--entryPoints.name.forwardedHeaders.trustedIPs=127.0.0.1/32,192.168.1.7
--entryPoints.name.forwardedHeaders.connection=foobar
```
---
## Key Options (Explained)
### `address`
- Format: `[host]:port[/tcp|/udp]`. If protocol omitted ⇒ **TCP**.
- To use **both TCP & UDP** on the same port, define **two** entryPoints (one per protocol).
### `allowACMEByPass` (bool, default **false**)
- Allow user-defined routers to handle **ACME HTTP/TLS challenges** instead of Traefik's built-in handlers (useful if services also run their own ACME).
```yaml
entryPoints:
foo:
allowACMEByPass: true
```
### `reusePort` (bool, default **false**)
- Enables the OS `SO_REUSEPORT` option: multiple Traefik processes (or entryPoints) can **listen on the same TCP/UDP port**; the kernel load-balances incoming connections.
- Supported on **Linux, FreeBSD, OpenBSD, Darwin**.
- Example (same port, different hosts/IPs):
```yaml
entryPoints:
web:
address: ":80"
reusePort: true
privateWeb:
address: "192.168.1.2:80"
reusePort: true
```
### `asDefault` (bool, default **false**)
- Marks this entryPoint as **default** for HTTP/TCP routers **that don't specify** `entryPoints`.
```yaml
entryPoints:
web:
address: ":80"
websecure:
address: ":443"
asDefault: true
```
- UDP entryPoints are **never** part of the default list.
- Built-in `traefik` entryPoint is **always excluded**.
### HTTP/2
- `http2.maxConcurrentStreams` (default **250**): max concurrent streams per connection.
### HTTP/3
- Enable by adding `http3: {}` (on a **TCP** entryPoint with **TLS**).
- When enabled on port **N**, Traefik also opens **UDP N** for HTTP/3.
- `http3.advertisedPort`: override the UDP port advertised via `alt-svc` (useful behind a different public port).
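A minimal sketch of enabling HTTP/3 on a TLS-terminating entryPoint (TLS itself still has to be configured on the routers or via `http.tls`; the `advertisedPort` line is only needed when the public UDP port differs from the listener):
```yaml
entryPoints:
  websecure:
    address: ":443"
    http3:
      advertisedPort: 443
```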
### Forwarded Headers
- Trust `X-Forwarded-*` only from `forwardedHeaders.trustedIPs`, or set `forwardedHeaders.insecure: true` (testing only).
- `forwardedHeaders.connection`: headers listed here are allowed to pass through the middleware chain before Traefik drops `Connection`-listed headers per RFC 7230.
### Transport Timeouts
- `transport.respondingTimeouts.readTimeout` (default **60s**): max duration to read the entire request (incl. body).
- `transport.respondingTimeouts.writeTimeout` (default **0s**): max duration for writing the response (0 = disabled).
- `transport.respondingTimeouts.idleTimeout` (default **180s**): max keep-alive idle time.
### Transport LifeCycle (graceful shutdown)
- `transport.lifeCycle.requestAcceptGraceTimeout` (default **0s**): keep accepting requests **before** starting graceful termination.
- `transport.lifeCycle.graceTimeOut` (default **10s**): time to let in-flight requests finish **after** Traefik stops accepting new ones.
### ProxyProtocol
- Enable accepting the **HAProxy PROXY** header and/or trust only from specific IPs.
```yaml
entryPoints:
name:
proxyProtocol:
insecure: true # trust all (testing only)
trustedIPs:
- "127.0.0.1"
- "192.168.0.1"
```
---
## HTTP Options (per entryPoint)
### Redirection → `http.redirections.entryPoint`
Redirect everything on one entryPoint to another (often `web` → `websecure`), and optionally change scheme.
```yaml
entryPoints:
web:
address: ":80"
http:
redirections:
entryPoint:
to: websecure # or ":443"
scheme: https # default is https
permanent: true # 308/301
```
```toml
[entryPoints.web.http.redirections]
entryPoint = "websecure"
scheme = "https"
permanent = true
```
- `http.redirections.entryPoint.priority`: default priority for routers bound to the entryPoint (default `2147483646`).
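A sketch of overriding that default priority (the value is illustrative):
```yaml
entryPoints:
  web:
    address: ":80"
    http:
      redirections:
        entryPoint:
          to: websecure
          scheme: https
          priority: 10
```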
### Encode Query Semicolons → `http.encodeQuerySemicolons` (bool, default **false**)
- If `true`, non-encoded semicolons in the query string are **encoded** before forwarding (prevents interpreting `;` as query parameter separators).
### SanitizePath → `http.sanitizePath` (bool, default **false**)
- Enable request **path sanitization/normalization** before routing.
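Both options live under the entryPoint's `http` section; a minimal sketch combining them:
```yaml
entryPoints:
  web:
    address: ":80"
    http:
      encodeQuerySemicolons: true
      sanitizePath: true
```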
### Middlewares → `http.middlewares`
Apply middlewares by name (with provider suffix) **to all routers attached to this entryPoint**.
```yaml
entryPoints:
  websecure:
    address: ":443"
    http:
      tls: {}
      middlewares:
        - auth@kubernetescrd
        - strip@kubernetescrd
```
### TLS → `http.tls`
Attach TLS options/resolvers and SNI domains at the entryPoint level (common for `websecure`).
```yaml
# YAML
entryPoints:
websecure:
address: ":443"
http:
tls:
options: foobar
certResolver: leresolver
domains:
- main: example.com
sans:
- foo.example.com
- bar.example.com
- main: test.com
sans:
- foo.test.com
- bar.test.com
```
```bash
--entryPoints.websecure.http.tls.options=foobar
--entryPoints.websecure.http.tls.certResolver=leresolver
--entryPoints.websecure.http.tls.domains[0].main=example.com
--entryPoints.websecure.http.tls.domains[0].sans=foo.example.com,bar.example.com
--entryPoints.websecure.http.tls.domains[1].main=test.com
--entryPoints.websecure.http.tls.domains[1].sans=foo.test.com,bar.test.com
```
---
## UDP Options
### `udp.timeout` (default **3s**)
Release idle UDP session resources after this duration.
```yaml
entryPoints:
foo:
address: ":8000/udp"
udp:
timeout: 10s
```
```toml
[entryPoints.foo]
address = ":8000/udp"
[entryPoints.foo.udp]
timeout = "10s"
```
```bash
--entryPoints.foo.address=:8000/udp
--entryPoints.foo.udp.timeout=10s
```
---
## Systemd Socket Activation
- Traefik supports **systemd socket activation**. If an fd name matches an entryPoint name, Traefik uses that fd as the listener.
```bash
systemd-socket-activate -l 80 -l 443 --fdname web:websecure ./traefik --entrypoints.web --entrypoints.websecure
```
- If using UDP with socket activation, the entryPoint address must include `/udp` (e.g., `--entrypoints.my-udp-entrypoint.address=/udp`).
- **Docker** does not support socket activation; **Podman** does.
- Each systemd socket file should define a **single** Listen directive, **except** for HTTP/3 which needs **both** `ListenStream` and `ListenDatagram` (same port). To run TCP **and** UDP on the same port, use **separate** socket files bound to different entryPoint names.
---
## Observability Options (per entryPoint)
> These control **defaults**; a routers own observability config can opt out.
```yaml
entryPoints:
foo:
address: ":8000"
observability:
accessLogs: false # default true
metrics: false # default true
tracing: false # default true
```
```toml
[entryPoints.foo]
address = ":8000"
[entryPoints.foo.observability]
accessLogs = false
metrics = false
tracing = false
```
```bash
--entryPoints.foo.observability.accessLogs=false
--entryPoints.foo.observability.metrics=false
--entryPoints.foo.observability.tracing=false
```
---
## Helm Chart Note
The Helm chart creates these entryPoints by default: `web` (80), `websecure` (443), `traefik` (8080), `metrics` (9100). `web` and `websecure` are exposed by default via a Service. You can override everything via values or `additionalArguments`.
---
## Quick Reference (selected fields)
| Field | Description | Default |
|---|---|---|
| `address` | Listener address & protocol `[host]:port[/tcp\|/udp]` | — |
| `asDefault` | Include in default entryPoints list for HTTP/TCP routers | `false` |
| `allowACMEByPass` | Let custom routers handle ACME challenges | `false` |
| `reusePort` | Enable `SO_REUSEPORT` to share the same port across processes | `false` |
| `http2.maxConcurrentStreams` | Max concurrent HTTP/2 streams per connection | `250` |
| `http3.advertisedPort` | UDP port advertised for HTTP/3 `alt-svc` | (entryPoint port) |
| `forwardedHeaders.trustedIPs` | IPs/CIDRs trusted for `X-Forwarded-*` | — |
| `forwardedHeaders.insecure` | Always trust forwarded headers | `false` |
| `transport.respondingTimeouts.readTimeout` | Max duration to read the request | `60s` |
| `transport.respondingTimeouts.writeTimeout` | Max duration to write the response | `0s` |
| `transport.respondingTimeouts.idleTimeout` | Keep-alive idle timeout | `180s` |
| `transport.lifeCycle.requestAcceptGraceTimeout` | Accept requests before graceful stop | `0s` |
| `transport.lifeCycle.graceTimeOut` | Time to finish in-flight requests | `10s` |
| `proxyProtocol.{insecure,trustedIPs}` | Accept PROXY headers (globally or from list) | — |
| `http.redirections.entryPoint.{to,scheme,permanent,priority}` | Redirect all requests on this entryPoint | `scheme=https`, `permanent=false`, `priority=2147483646` |
| `http.encodeQuerySemicolons` | Encode unescaped `;` in query string | `false` |
| `http.sanitizePath` | Normalize/sanitize request paths | `false` |
| `http.middlewares` | Middlewares applied to routers on this entryPoint | — |
| `http.tls` | TLS options/resolver/SNI domains at entryPoint level | — |
| `udp.timeout` | Idle session timeout for UDP routing | `3s` |
| `observability.{accessLogs,metrics,tracing}` | Defaults for router observability | `true` |
---
_This cheat sheet aggregates the salient bits from the official docs for quick use in config files._

View File

@@ -0,0 +1,183 @@
# Traefik Proxy — Middlewares (Overview)
Middlewares are components you attach to **routers** to tweak requests before they reach a **service** (or to tweak responses before they reach clients). They can modify paths and headers, handle redirections, add authentication, rate-limit, and more. Multiple middlewares using the same protocol can be **chained** to fit complex scenarios. ([Overview page](https://doc.traefik.io/traefik/middlewares/overview/)) ([Traefik Docs][1], [Traefik Docs][2])
> **Note — Provider Namespace**
> The “Providers Namespace” concept from Configuration Discovery also applies to middlewares (e.g., `foo@docker`, `bar@file`). ([Traefik Docs][1], [Traefik Docs][3])
---
## Configuration Examples
Examples showing how to **define** a middleware and **attach** it to a router across different providers. ([Traefik Docs][2])
<details>
<summary>Docker & Swarm (labels)</summary>
```yaml
whoami:
image: traefik/whoami
labels:
- "traefik.http.middlewares.foo-add-prefix.addprefix.prefix=/foo"
- "traefik.http.routers.router1.middlewares=foo-add-prefix@docker"
```
</details>
<details>
<summary>Kubernetes CRD (IngressRoute)</summary>
```yaml
---
apiVersion: traefik.io/v1alpha1
kind: Middleware
metadata:
name: stripprefix
spec:
stripPrefix:
prefixes:
- /stripit
---
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: ingressroute
spec:
routes:
- match: Host(`example.com`)
kind: Rule
services:
- name: my-svc
port: 80
middlewares:
- name: stripprefix
```
</details>
<details>
<summary>Consul Catalog (labels)</summary>
```text
"traefik.http.middlewares.foo-add-prefix.addprefix.prefix=/foo"
"traefik.http.routers.router1.middlewares=foo-add-prefix@consulcatalog"
```
</details>
<details>
<summary>File Provider (YAML)</summary>
```yaml
http:
routers:
router1:
rule: "Host(`example.com`)"
service: myService
middlewares:
- "foo-add-prefix"
middlewares:
foo-add-prefix:
addPrefix:
prefix: "/foo"
services:
myService:
loadBalancer:
servers:
- url: "http://127.0.0.1:80"
```
</details>
<details>
<summary>File Provider (TOML)</summary>
```toml
[http.routers.router1]
rule = "Host(`example.com`)"
service = "myService"
middlewares = ["foo-add-prefix"]
[http.middlewares.foo-add-prefix.addPrefix]
prefix = "/foo"
[http.services.myService.loadBalancer.servers]
url = "http://127.0.0.1:80"
```
</details>
---
## Available Middlewares
**HTTP Middlewares** — the complete list is detailed in the HTTP middlewares section:
AddPrefix, BasicAuth, Buffering, Chain, CircuitBreaker, Compress, ContentType, DigestAuth, Errors, ForwardAuth, GrpcWeb, Headers, IPAllowList / IPWhiteList, InFlightReq, PassTLSClientCert, RateLimit, RedirectRegex, RedirectScheme, ReplacePath, ReplacePathRegex, Retry, StripPrefix, StripPrefixRegex. ([Traefik Docs][4])
**TCP Middlewares** — covered in the TCP middlewares section:
InFlightConn, IPAllowList / IPWhiteList. ([Traefik Docs][5])
---
## Middleware Reference Links
Below are direct links to documentation for some of the most commonly used middlewares:
* **[AddPrefix](https://doc.traefik.io/traefik/middlewares/http/addprefix/)** — prepends a path segment to requests ([Traefik Docs][6], [Traefik Docs][7])
* **[BasicAuth](https://doc.traefik.io/traefik/middlewares/http/basicauth/)** — adds basic HTTP authentication ([Traefik Docs][8])
* **[IPAllowList (HTTP)](https://doc.traefik.io/traefik/middlewares/http/ipallowlist/)** — allows access only from specified IPs ([Traefik Docs][9])
* **[IPWhiteList (TCP)](https://doc.traefik.io/traefik/middlewares/tcp/ipwhitelist/)** — deprecated way to white-list TCP client IPs; prefer IPAllowList ([Traefik Docs][5])
---
### Optional: Full Document Outline
If you'd like the full reference structure in Markdown, here's a possible outline to expand further:
```
# Traefik Middlewares Reference
## Overview (link)
- Overview of Middlewares
## Configuration Examples
- Docker / Swarm
- Kubernetes CRD
- Consul Catalog
- File (YAML & TOML)
## HTTP Middlewares
- AddPrefix — [AddPrefix link]
- BasicAuth — [BasicAuth link]
- Buffering — [Buffering link]
- Chain — [Chain link]
- ... (and so on)
## TCP Middlewares
- IPAllowList (TCP) — [IPAllowList TCP link]
- (Any other TCP middleware)
## Additional Resources
- Kubernetes CRD Middleware — [CRD link]
- Routers and middleware chaining — [Routers link]
- Dynamic configuration via File provider — [File provider link]
```
[1]: https://doc.traefik.io/traefik/v2.2/middlewares/overview/?utm_source=chatgpt.com "Middlewares"
[2]: https://doc.traefik.io/traefik/middlewares/overview/?utm_source=chatgpt.com "Traefik Proxy Middleware Overview"
[3]: https://doc.traefik.io/traefik/reference/dynamic-configuration/file/?utm_source=chatgpt.com "Traefik File Dynamic Configuration"
[4]: https://doc.traefik.io/traefik/middlewares/http/overview/?utm_source=chatgpt.com "Traefik Proxy HTTP Middleware Overview"
[5]: https://doc.traefik.io/traefik/middlewares/tcp/ipwhitelist/?utm_source=chatgpt.com "Traefik TCP Middlewares IPWhiteList"
[6]: https://doc.traefik.io/traefik/routing/routers/?utm_source=chatgpt.com "Traefik Routers Documentation"
[7]: https://doc.traefik.io/traefik/middlewares/http/addprefix/?utm_source=chatgpt.com "Traefik AddPrefix Documentation"
[8]: https://doc.traefik.io/traefik/middlewares/http/basicauth/?utm_source=chatgpt.com "Traefik BasicAuth Documentation"
[9]: https://doc.traefik.io/traefik/middlewares/http/ipallowlist/?utm_source=chatgpt.com "Traefik HTTP Middlewares IPAllowList"

View File

@@ -0,0 +1,159 @@
# Traefik + Redis (KV provider): how to use it, where keys go, and how to notify Traefik
## 1) Enable the Redis provider (static config)
Add the Redis provider to Traefik's **install/static** configuration (YAML example):
```yaml
providers:
redis:
endpoints: # one or more Redis endpoints
- "127.0.0.1:6379"
rootKey: "traefik" # KV root/prefix (default: traefik)
db: 0 # optional
username: "" # optional
password: "" # optional
tls: # optional (use if Redis is TLS-enabled)
ca: /path/to/ca.crt
cert: /path/to/client.crt
key: /path/to/client.key
insecureSkipVerify: false
sentinel: # optional (if using Redis Sentinel)
masterName: my-master
# username/password/latencyStrategy/randomStrategy/replicaStrategy/useDisconnectedReplicas available
```
CLI equivalents (examples):
`--providers.redis.endpoints=127.0.0.1:6379 --providers.redis.rootkey=traefik --providers.redis.db=0` (see docs for all flags). ([Traefik Docs][1])
> **Important:** Traefik only *reads/watches* dynamic (routing) configuration from Redis. It doesn't store anything there automatically. You populate keys yourself (see §3). ([Traefik Docs][1])
---
## 2) “Notifying” Traefik about changes (Redis keyspace notifications)
To have Traefik react to updates **without restart**, Redis must have **keyspace notifications** enabled. A safe, common setting is:
```bash
# temporary (runtime):
redis-cli CONFIG SET notify-keyspace-events AKE
# verify:
redis-cli CONFIG GET notify-keyspace-events
```
Or set `notify-keyspace-events AKE` in `redis.conf`, or via your cloud provider's parameter group (e.g., ElastiCache / Memorystore). ([Traefik Docs][1], [Redis][2], [Traefik Labs Community Forum][3])
> Notes
>
> * Managed Redis services often **disable** these notifications by default for performance reasons—enable them explicitly. ([Traefik Docs][1])
> * `AKE` means “all” (`A`) generic/string/list/set/zset/stream + keyspace (`K`) + keyevent (`E`) messages. ([TECHCOMMUNITY.MICROSOFT.COM][4])
---
## 3) Where values must live in Redis (key layout)
Traefik expects a **hierarchical path** under `rootKey` (default `traefik`). You set **one string value per path**. Examples below show minimal keys for an HTTP route + service.
### 3.1 Minimal HTTP router + service
```
traefik/http/routers/myrouter/rule = Host(`kv.example.com`)
traefik/http/routers/myrouter/entryPoints/0 = web
traefik/http/routers/myrouter/entryPoints/1 = websecure
traefik/http/routers/myrouter/service = myservice
traefik/http/services/myservice/loadBalancer/servers/0/url = http://10.0.10.5:8080
traefik/http/services/myservice/loadBalancer/servers/1/url = http://10.0.10.6:8080
```
(Write these with `redis-cli SET <key> "<value>"`.) ([Traefik Docs][5])
### 3.2 Add middlewares and TLS (optional)
```
traefik/http/routers/myrouter/middlewares/0 = auth
traefik/http/routers/myrouter/middlewares/1 = prefix
traefik/http/routers/myrouter/tls = true
traefik/http/routers/myrouter/tls/certResolver = myresolver
traefik/http/routers/myrouter/tls/domains/0/main = example.org
traefik/http/routers/myrouter/tls/domains/0/sans/0 = dev.example.org
```
([Traefik Docs][5])
### 3.3 TCP example (e.g., pass-through services)
```
traefik/tcp/routers/mytcprouter/rule = HostSNI(`*`)
traefik/tcp/routers/mytcprouter/entryPoints/0 = redis-tcp
traefik/tcp/routers/mytcprouter/service = mytcpservice
traefik/tcp/routers/mytcprouter/tls/passthrough = true
traefik/tcp/services/mytcpservice/loadBalancer/servers/0/address = 10.0.10.7:6379
```
([Traefik Docs][6])
> The full KV reference (all keys for routers/services/middlewares/TLS/options/observability) is here and shows many more fields you can set. ([Traefik Docs][6])
---
## 4) End-to-end quickstart (commands you can paste)
```bash
# 1) Enable keyspace notifications (see §2)
redis-cli CONFIG SET notify-keyspace-events AKE
# 2) Create minimal HTTP route + service (see §3.1)
redis-cli SET traefik/http/routers/myrouter/rule "Host(`kv.example.com`)"
redis-cli SET traefik/http/routers/myrouter/entryPoints/0 "web"
redis-cli SET traefik/http/routers/myrouter/entryPoints/1 "websecure"
redis-cli SET traefik/http/routers/myrouter/service "myservice"
redis-cli SET traefik/http/services/myservice/loadBalancer/servers/0/url "http://10.0.10.5:8080"
redis-cli SET traefik/http/services/myservice/loadBalancer/servers/1/url "http://10.0.10.6:8080"
```
Traefik will pick these up automatically (no restart) once keyspace notifications are on. ([Traefik Docs][1])
---
## 5) Operational tips / gotchas
* **Managed Redis**: enable `notify-keyspace-events` (e.g., ElastiCache parameter group; Memorystore config). Without it, Traefik won't react to updates. ([Traefik Docs][1], [Traefik Labs Community Forum][3])
* **Persistence**: if you want the config to survive Redis restarts, enable AOF or snapshots per your ops policy. (General Redis ops guidance.) ([JupyterHub Traefik Proxy][7])
* **Sentinel / TLS**: configure the provider fields accordingly (see §1). ([Traefik Docs][1])
* **Deletions**: some users reported recent Traefik versions not always propagating *deletions* from Redis the same way as updates—test your workflow; if needed, set empty values or overwrite keys. Track open issues. ([GitHub][8], [Traefik Labs Community Forum][9])
---
## 6) Checklist
1. Traefik started with `providers.redis` pointing at your Redis. ([Traefik Docs][1])
2. `notify-keyspace-events` enabled (e.g., `AKE`). ([Traefik Docs][1], [Redis][2])
3. Keys created under `rootKey` (default `traefik`) following the **KV path schema** shown above. ([Traefik Docs][5])
4. Verify in Traefik dashboard/API that routers/services appear. (General provider behavior.) ([Traefik Docs][10])
---
### Sources
* Traefik Redis provider docs (static options & keyspace note). ([Traefik Docs][1])
* KV dynamic configuration reference (full key paths). ([Traefik Docs][6])
* KV provider routing examples (HTTP services/routers). ([Traefik Docs][5])
* Example KV layout (Hub ref, same model). ([Traefik Docs][11])
* Redis keyspace notifications (what `AKE` means). ([Redis][2], [TECHCOMMUNITY.MICROSOFT.COM][4])
[1]: https://doc.traefik.io/traefik/providers/redis/ "Traefik Redis Documentation - Traefik"
[2]: https://redis.io/docs/latest/develop/pubsub/keyspace-notifications/?utm_source=chatgpt.com "Redis keyspace notifications | Docs"
[3]: https://community.traefik.io/t/traefik-not-re-configuring-using-aws-elasticicache-redis-on-change/5227?utm_source=chatgpt.com "Traefik not re-configuring using AWS Elasticicache Redis ..."
[4]: https://techcommunity.microsoft.com/blog/azurepaasblog/redis-keyspace-events-notifications/1551134?utm_source=chatgpt.com "Redis Keyspace Events Notifications"
[5]: https://doc.traefik.io/traefik/routing/providers/kv/ "Traefik Routing Configuration with KV stores - Traefik"
[6]: https://doc.traefik.io/traefik/reference/dynamic-configuration/kv/ "Traefik Dynamic Configuration with KV stores - Traefik"
[7]: https://jupyterhub-traefik-proxy.readthedocs.io/en/stable/redis.html?utm_source=chatgpt.com "Using TraefikRedisProxy - JupyterHub Traefik Proxy"
[8]: https://github.com/traefik/traefik/issues/11864?utm_source=chatgpt.com "Traefik does not handle rules deletion from redis kv #11864"
[9]: https://community.traefik.io/t/traefik-does-not-prune-deleted-rules-from-redis-kv/27789?utm_source=chatgpt.com "Traefik does not prune deleted rules from redis KV"
[10]: https://doc.traefik.io/traefik/providers/overview/?utm_source=chatgpt.com "Traefik Configuration Discovery Overview"
[11]: https://doc.traefik.io/traefik-hub/api-gateway/reference/ref-overview?utm_source=chatgpt.com "Install vs Routing Configuration | Traefik Hub Documentation"

View File

@@ -0,0 +1,229 @@
# Traefik Routers — Practical Guide
A **router** connects incoming traffic to a target **service**. It matches requests (or connections), optionally runs **middlewares**, and forwards to the chosen **service**. ([Traefik Docs][1])
---
## Quick examples
```yaml
# Dynamic (file provider) — HTTP: /foo -> service-foo
http:
routers:
my-router:
rule: Path(`/foo`)
service: service-foo
```
```toml
# Dynamic (file provider) — HTTP: /foo -> service-foo
[http.routers.my-router]
rule = "Path(`/foo`)"
service = "service-foo"
```
```yaml
# Dynamic — TCP: all non-TLS on :3306 -> database
tcp:
routers:
to-database:
entryPoints: ["mysql"]
rule: HostSNI(`*`)
service: database
```
```yaml
# Static — define entrypoints
entryPoints:
web: { address: ":80" }
mysql: { address: ":3306" }
```
([Traefik Docs][1])
---
## HTTP Routers
### EntryPoints
* If omitted, an HTTP router listens on all default entry points; set `entryPoints` to scope it. ([Traefik Docs][1])
```yaml
http:
routers:
r1:
rule: Host(`example.com`)
service: s1
entryPoints: ["web","websecure"]
```
### Rule (matchers)
A **rule** activates the router when it matches; then middlewares run, then the request is sent to the service. Common matchers (v3 syntax):
* `Host(...)`, `HostRegexp(...)`
* `Path(...)`, `PathPrefix(...)`, `PathRegexp(...)`
* `Header(...)`, `HeaderRegexp(...)`
* `Method(...)`
* `Query(...)`, `QueryRegexp(...)`
* `ClientIP(...)`
See the full table in the official page. ([Traefik Docs][1])
### Priority
Routers sort by **rule length** (desc) when `priority` is unset. Set `priority` to override (Max: `MaxInt32-1000` on 32-bit, `MaxInt64-1000` on 64-bit). ([Traefik Docs][1])
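A sketch of using `priority` so a broad rule still wins over a longer, more specific one (file provider; names and values are illustrative):
```yaml
http:
  routers:
    maintenance:
      rule: PathPrefix(`/`)
      priority: 100 # wins despite the shorter rule
      service: maintenance-svc
    api:
      rule: Host(`example.com`) && PathPrefix(`/api`)
      service: api-svc
```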
### Rule Syntax (`ruleSyntax`)
* Traefik v3 introduces a new rule syntax; you can set per-router `ruleSyntax: v2|v3`.
* Default inherits from static `defaultRuleSyntax` (defaults to `v3`). ([Traefik Docs][1])
### Middlewares
Attach a **list** in order; names cannot contain `@`. Applied only if the rule matches. ([Traefik Docs][1])
```yaml
http:
routers:
r-auth:
rule: Path(`/foo`)
middlewares: [authentication]
service: service-foo
```
### Service
Every HTTP router must target an **HTTP service** (not TCP). Some label-based providers auto-create defaults. ([Traefik Docs][1])
### TLS (HTTPS termination)
* Adding a `tls` section makes the router **HTTPS-only** and **terminates TLS** by default.
* To serve **both HTTP and HTTPS**, define **two routers**: one with `tls: {}` and one without.
* `tls.options`, `tls.certResolver`, and `tls.domains` follow the HTTP TLS reference. ([Traefik Docs][1])
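A sketch of the two-router pattern for serving the same host over HTTP and HTTPS (file provider; names are illustrative):
```yaml
http:
  routers:
    my-app-http:
      rule: Host(`example.com`)
      entryPoints: ["web"]
      service: my-app
    my-app-https:
      rule: Host(`example.com`)
      entryPoints: ["websecure"]
      service: my-app
      tls: {}
```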
### Observability (per-router)
Per-router toggles for `accessLogs`, `metrics`, `tracing`. Router-level settings override entrypoint defaults, but require the global features enabled first. Internal resources obey `AddInternals` guards. ([Traefik Docs][1])
```yaml
http:
routers:
r:
rule: Path(`/foo`)
service: s
observability:
accessLogs: false
metrics: false
tracing: false
```
---
## TCP Routers
### General
* If HTTP and TCP routers listen on the **same entry point**, **TCP routers apply first**; if none matches, HTTP routers take over.
* Names cannot contain `@`. ([Traefik Docs][1])
### EntryPoints & “server-first” protocols
* Omit `entryPoints` → listens on all default.
* For **server-first** protocols (e.g., SMTP), ensure **no TLS routers** exist on that entry point and have **at least one non-TLS TCP router** to avoid deadlocks (both sides waiting). ([Traefik Docs][1])
### Rule (matchers)
* `HostSNI(...)`, `HostSNIRegexp(...)` (for TLS SNI)
* `ClientIP(...)`
* `ALPN(...)`
Same flow: match → middlewares → service. ([Traefik Docs][1])
### Priority & Rule Syntax
* Same priority model as HTTP; set `priority` to override.
* `ruleSyntax: v2|v3` supported per router (example below). ([Traefik Docs][1])
```yaml
tcp:
routers:
r-v3:
rule: ClientIP(`192.168.0.11`) || ClientIP(`192.168.0.12`)
ruleSyntax: v3
service: s1
r-v2:
rule: ClientIP(`192.168.0.11`, `192.168.0.12`)
ruleSyntax: v2
service: s2
```
### Middlewares
Order matters; names cannot contain `@`. ([Traefik Docs][1])
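For example, attaching a connection-limiting TCP middleware to a router (an illustrative sketch using `InFlightConn`):
```yaml
tcp:
  middlewares:
    limit-conn:
      inFlightConn:
        amount: 10
  routers:
    to-database:
      entryPoints: ["mysql"]
      rule: HostSNI(`*`)
      middlewares: ["limit-conn"]
      service: database
```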
### Services
TCP routers **must** target **TCP services** (not HTTP). ([Traefik Docs][1])
### TLS
* Adding `tls` makes the router **TLS-only**.
* Default is **TLS termination**; set `tls.passthrough: true` to forward encrypted bytes unchanged.
* `tls.options` (cipher suites, versions), `tls.certResolver`, `tls.domains` are supported when `HostSNI` is defined. ([Traefik Docs][1])
```yaml
tcp:
routers:
r-pass:
rule: HostSNI(`db.example.com`)
service: db
tls:
passthrough: true
```
**Postgres STARTTLS:** Traefik can detect Postgres STARTTLS negotiation and proceed with TLS routing; prefer client `sslmode=require`. Be careful with TLS passthrough and certain `sslmode` values. ([Traefik Docs][1])
---
## UDP Routers
### General
* UDP has no URL or SNI to match; UDP “routers” are effectively **load-balancers** with no rule criteria.
* Traefik maintains **sessions** (with a **timeout**) to map backend responses to clients. Configure timeout via `entryPoints.<name>.udp.timeout`. Names cannot contain `@`. ([Traefik Docs][1])
### EntryPoints
* Omit `entryPoints` → listens on all **UDP** entry points; specify to scope. ([Traefik Docs][1])
```yaml
udp:
routers:
r:
entryPoints: ["streaming"]
service: s1
```
### Services
UDP routers **must** target **UDP services** (not HTTP/TCP). ([Traefik Docs][1])
---
## Tips & gotchas
* `@` is **not allowed** in router, middleware, or service names. ([Traefik Docs][1])
* To serve the **same route on HTTP and HTTPS**, create **two routers** (with and without `tls`). ([Traefik Docs][1])
* Priority defaults to **rule length**; explicit `priority` wins and is often needed when a specific case should beat a broader matcher. ([Traefik Docs][1])
* **TCP vs HTTP precedence** on the same entry point: **TCP first**. ([Traefik Docs][1])
---
### Sources
Official Traefik docs — **Routers** (HTTP/TCP/UDP), examples, TLS, observability. ([Traefik Docs][1])
[1]: https://doc.traefik.io/traefik/routing/routers/ "Traefik Routers Documentation - Traefik"

View File

@@ -0,0 +1,263 @@
# Traefik Services (HTTP/TCP/UDP)
Services define **how Traefik reaches your backends** and how requests are **load-balanced** across them. Every service has a load balancer—even with a single server. ([Traefik Docs][1])
---
## Quick examples
```yaml
# Dynamic config (file provider)
http:
services:
web:
loadBalancer:
servers:
- url: "http://10.0.0.11:8080/"
- url: "http://10.0.0.12:8080/"
tcp:
services:
db:
loadBalancer:
servers:
- address: "10.0.0.21:5432"
- address: "10.0.0.22:5432"
udp:
services:
dns:
loadBalancer:
servers:
- address: "10.0.0.31:53"
- address: "10.0.0.32:53"
```
([Traefik Docs][1])
---
## HTTP services
### Servers Load Balancer
* **servers\[].url**: each backend instance.
* **preservePath**: keep the path segment of the URL when forwarding (note: not preserved for health-check requests). ([Traefik Docs][1])
```yaml
http:
services:
api:
loadBalancer:
servers:
- url: "http://10.0.0.10/base"
preservePath: true
```
#### Load-balancing strategy
* **WRR (default)**: optional **weight** per server.
* **P2C**: “power of two choices”; picks two random servers, chooses the one with fewer active requests. ([Traefik Docs][1])
```yaml
# WRR with weights
http:
services:
api:
loadBalancer:
servers:
          - url: "http://10.0.0.10/"
            weight: 2
          - url: "http://10.0.0.11/"
            weight: 1
# P2C
http:
services:
api:
loadBalancer:
strategy: p2c
servers:
- url: "http://10.0.0.10/"
- url: "http://10.0.0.11/"
- url: "http://10.0.0.12/"
```
([Traefik Docs][1])
#### Sticky sessions
Adds an affinity cookie so subsequent requests hit the same server.
* Works across nested LBs if stickiness is enabled at **each** level.
* If the chosen server becomes unhealthy, Traefik selects a new one and updates the cookie.
* Cookie options: `name`, `secure`, `httpOnly`, `sameSite`, `domain`, `maxAge`. ([Traefik Docs][1])
```yaml
http:
services:
web:
loadBalancer:
sticky:
cookie:
name: app_affinity
secure: true
httpOnly: true
sameSite: lax
domain: example.com
```
#### Health check
Periodically probes backends and **removes unhealthy servers** from rotation.
* HTTP(S): healthy if status is 2xx/3xx (or a configured status).
* gRPC: healthy if it returns `SERVING` (gRPC health v1).
* Options include `path`, `interval`, `timeout`, `scheme`, `hostname`, `port`. ([Traefik Docs][1])
```yaml
http:
services:
web:
loadBalancer:
healthCheck:
path: /health
interval: 10s
timeout: 3s
```
#### Pass Host Header
Controls forwarding of the original `Host` header. **Default: true**. ([Traefik Docs][1])
```yaml
http:
services:
web:
loadBalancer:
passHostHeader: false
```
#### ServersTransport (HTTP)
Fine-tunes the connection from Traefik to your upstreams.
* TLS: `serverName`, `certificates`, `insecureSkipVerify`, `rootCAs`, `peerCertURI`, SPIFFE (`spiffe.ids`, `spiffe.trustDomain`)
* HTTP/2 toggle: `disableHTTP2`
* Pooling: `maxIdleConnsPerHost`
* Timeouts (`forwardingTimeouts`): `dialTimeout`, `responseHeaderTimeout`, `idleConnTimeout`, `readIdleTimeout`, `pingTimeout`
Attach by name via `loadBalancer.serversTransport`. ([Traefik Docs][1])
```yaml
http:
serversTransports:
mtls:
rootCAs:
- /etc/ssl/my-ca.pem
serverName: backend.internal
insecureSkipVerify: false
forwardingTimeouts:
responseHeaderTimeout: "1s"
http:
services:
web:
loadBalancer:
serversTransport: mtls
servers:
- url: "https://10.0.0.10:8443/"
```
#### Response forwarding
Control how Traefik flushes response bytes to clients.
* `flushInterval` (ms): default **100**; negative = flush after each write; streaming responses are auto-flushed. ([Traefik Docs][1])
```yaml
http:
services:
streamy:
loadBalancer:
responseForwarding:
flushInterval: 50
```
---
## Composite HTTP services
### Weighted Round Robin (service)
Combine **services** (not just servers) with weights; health status propagates upward if enabled. ([Traefik Docs][1])
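A sketch of a weighted service that spreads traffic across two child services (file provider; names and weights are illustrative):
```yaml
http:
  services:
    app:
      weighted:
        services:
          - name: appv1
            weight: 3
          - name: appv2
            weight: 1
    appv1:
      loadBalancer:
        servers:
          - url: "http://10.0.0.40/"
    appv2:
      loadBalancer:
        servers:
          - url: "http://10.0.0.41/"
```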
### Mirroring (service)
Send requests to a **main service** and mirror a percentage to others.
* Defaults: `percent` = 0 (no traffic), `mirrorBody` = true, `maxBodySize` = -1 (unlimited).
* Providers: File, CRD IngressRoute.
* Health status can propagate upward (File provider). ([Traefik Docs][1])
```yaml
http:
services:
mirrored-api:
mirroring:
service: appv1
mirrorBody: false
maxBodySize: 1024
mirrors:
- name: appv2
percent: 10
```
### Failover (service)
Route to **fallback** only when **main** is unreachable (relies on HealthCheck).
* Currently available with the **File** provider.
* HealthCheck on a Failover service requires all descendants to also enable it. ([Traefik Docs][1])
```yaml
http:
services:
app:
failover:
service: main
fallback: backup
main:
loadBalancer:
healthCheck: { path: /status, interval: 10s, timeout: 3s }
servers: [{ url: "http://10.0.0.50/" }]
backup:
loadBalancer:
servers: [{ url: "http://10.0.0.60/" }]
```
---
## TCP services (summary)
* **servers\[].address** (`host:port`), optional **tls** to the upstream, and an attachable **ServersTransport** (TCP) with `dialTimeout`, `dialKeepAlive`, `terminationDelay`, TLS/SPIFFE options, and optional **PROXY Protocol** forwarding. ([Traefik Docs][1])
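A minimal sketch of a TCP service with its own ServersTransport (values are illustrative):
```yaml
tcp:
  serversTransports:
    slow-backend:
      dialTimeout: 30s
      dialKeepAlive: 15s
      terminationDelay: 100ms
  services:
    db:
      loadBalancer:
        serversTransport: slow-backend
        servers:
          - address: "10.0.0.21:5432"
```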
---
## UDP services (summary)
* **servers\[].address** (`host:port`). Weighted round robin supported. ([Traefik Docs][1])
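A sketch of weighted round robin across two UDP services (names and weights are illustrative):
```yaml
udp:
  services:
    dns:
      weighted:
        services:
          - name: dns-a
            weight: 3
          - name: dns-b
            weight: 1
    dns-a:
      loadBalancer:
        servers:
          - address: "10.0.0.31:53"
    dns-b:
      loadBalancer:
        servers:
          - address: "10.0.0.32:53"
```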
---
## Notes & gotchas
* Stickiness across nested load balancers requires enabling sticky at **each** level, and clients will carry **multiple key/value pairs** in the cookie. ([Traefik Docs][1])
* Health checks: enabling at a parent requires **all descendants** to support/enable it; otherwise service creation fails (applies to Mirroring/Failover health-check sections). ([Traefik Docs][1])
---
**Source:** Traefik “Routing & Load Balancing → Services” (current docs). ([Traefik Docs][1])
[1]: https://doc.traefik.io/traefik/routing/services/ "Traefik Services Documentation - Traefik"