Compare commits
53 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
| e2941cf2c0 | |||
| ea755113a8 | |||
| 1c199ed923 | |||
| 5dc5c95aa1 | |||
| a0cc698a39 | |||
| 70008e0ead | |||
| ec2b30c1e7 | |||
| 3de7fcb605 | |||
| d0b0698c10 | |||
| 2a2e6c121e | |||
| 24059dc9ee | |||
| bedf62977f | |||
| 5abb4c2f92 | |||
| 046c9a508b | |||
|
f6766547f9
|
|||
| 6aec17d079 | |||
| ab8ffdd507 | |||
| 422e119f88 | |||
| 204f3a49a1 | |||
|
d1198328f6
|
|||
| 7795610caf | |||
| 32046fc1ec | |||
| 2016b27707 | |||
| c038ad4431 | |||
| 159ccf59ac | |||
| 6a071ba5f0 | |||
| 3e90e3ab1e | |||
| 90dd0c1239 | |||
| 033ca2f20e | |||
| 8c32b88e24 | |||
| 1bca1f7da1 | |||
| 5e5958d22e | |||
| 45447275a7 | |||
| e03e5e0d05 | |||
| a655603343 | |||
| c291b218a4 | |||
| 3911a51ccf | |||
| 1fe1cf940d | |||
| f10ea1dc22 | |||
| ac87757b06 | |||
| fe081a4297 | |||
| cea8362fe6 | |||
| b772ec0a3d | |||
| 77ad016e99 | |||
| bf4bfeb3fc | |||
| e3ab931eb1 | |||
| 05e04c758b | |||
| 94011a3a03 | |||
| 025b9072b4 | |||
| 0fb8c1d44b | |||
| a8849b747c | |||
| 879d2b4d1c | |||
| ff4bb97599 |
Vendored
+9
@@ -0,0 +1,9 @@
|
||||
{
|
||||
"cSpell.words": [
|
||||
"ceph",
|
||||
"darvaza",
|
||||
"gofrs",
|
||||
"jpictl",
|
||||
"zerolog"
|
||||
]
|
||||
}
|
||||
+44
-6
@@ -1,21 +1,59 @@
|
||||
package main
|
||||
|
||||
import "git.jpi.io/amery/jpictl/pkg/zones"
|
||||
import (
|
||||
"os"
|
||||
|
||||
"darvaza.org/core"
|
||||
|
||||
"git.jpi.io/amery/jpictl/pkg/cluster"
|
||||
)
|
||||
|
||||
const (
|
||||
// DefaultConfigFile is read if -f/--config-file isn't specified.
|
||||
// If it doesn't exist, m/ will be scanned
|
||||
DefaultConfigFile = "cloud.yaml"
|
||||
)
|
||||
|
||||
// Config describes the repository
|
||||
type Config struct {
|
||||
Base string
|
||||
Domain string
|
||||
|
||||
ConfigFile string
|
||||
}
|
||||
|
||||
var cfg = &Config{
|
||||
Base: "./m",
|
||||
Domain: "m.jpi.cloud",
|
||||
Base: "m",
|
||||
Domain: "jpi.cloud",
|
||||
}
|
||||
|
||||
// LoadZones loads all zones and machines in the config directory
|
||||
func (cfg *Config) LoadZones(resolve bool) (*zones.Zones, error) {
|
||||
return zones.New(cfg.Base, cfg.Domain,
|
||||
zones.ResolvePublicAddresses(resolve),
|
||||
// or file
|
||||
func (cfg *Config) LoadZones(resolve bool) (*cluster.Cluster, error) {
|
||||
// try config file first
|
||||
zones, err := cluster.NewFromConfig(cfg.ConfigFile,
|
||||
cluster.ResolvePublicAddresses(resolve),
|
||||
cluster.WithLogger(log),
|
||||
)
|
||||
|
||||
switch {
|
||||
case err == nil:
|
||||
// file was good
|
||||
return zones, nil
|
||||
case !os.IsNotExist(err) || cfg.ConfigFile != DefaultConfigFile:
|
||||
// file was bad
|
||||
return nil, core.Wrapf(err, "NewFromConfig(%q)", cfg.ConfigFile)
|
||||
}
|
||||
|
||||
// default file doesn't exist. scan instead.
|
||||
return cluster.NewFromDirectory(cfg.Base, cfg.Domain,
|
||||
cluster.ResolvePublicAddresses(resolve),
|
||||
cluster.WithLogger(log),
|
||||
)
|
||||
}
|
||||
|
||||
func init() {
|
||||
rootCmd.PersistentFlags().
|
||||
StringVarP(&cfg.ConfigFile, "config-file", "f",
|
||||
DefaultConfigFile, "config file (JSON or YAML)")
|
||||
}
|
||||
|
||||
+6
-14
@@ -6,7 +6,6 @@ import (
|
||||
"io"
|
||||
"os"
|
||||
|
||||
"github.com/burntSushi/toml"
|
||||
"github.com/spf13/cobra"
|
||||
"gopkg.in/yaml.v3"
|
||||
)
|
||||
@@ -20,8 +19,8 @@ type Encoder interface {
|
||||
type Encoding int
|
||||
|
||||
const (
|
||||
// TOMLEncoding represents TOML encoding
|
||||
TOMLEncoding Encoding = iota
|
||||
// UndefinedEncoding implies the default encoding
|
||||
UndefinedEncoding Encoding = iota
|
||||
// JSONEncoding represents JSON encoding
|
||||
JSONEncoding
|
||||
// YAMLEncoding represents YAML encoding
|
||||
@@ -42,18 +41,13 @@ func NewYAMLEncoder(w io.Writer) Encoder {
|
||||
return enc
|
||||
}
|
||||
|
||||
// NewTOMLEncoder returns a TOML [Encoder] to work on the given [io.Writer]
|
||||
func NewTOMLEncoder(w io.Writer) Encoder {
|
||||
enc := toml.NewEncoder(w)
|
||||
return enc
|
||||
}
|
||||
|
||||
const encoding = YAMLEncoding
|
||||
|
||||
// Command
|
||||
var dumpCmd = &cobra.Command{
|
||||
Use: "dump",
|
||||
Short: "generates a text representation of the config",
|
||||
Use: "dump",
|
||||
Short: "generates a text representation of the config",
|
||||
PreRun: setVerbosity,
|
||||
RunE: func(_ *cobra.Command, _ []string) error {
|
||||
var buf bytes.Buffer
|
||||
var enc Encoder
|
||||
@@ -66,10 +60,8 @@ var dumpCmd = &cobra.Command{
|
||||
switch encoding {
|
||||
case JSONEncoding:
|
||||
enc = NewJSONEncoder(&buf)
|
||||
case YAMLEncoding:
|
||||
enc = NewYAMLEncoder(&buf)
|
||||
default:
|
||||
enc = NewTOMLEncoder(&buf)
|
||||
enc = NewYAMLEncoder(&buf)
|
||||
}
|
||||
|
||||
if err = enc.Encode(m); err != nil {
|
||||
|
||||
+8
-3
@@ -8,15 +8,20 @@ import (
|
||||
|
||||
// Command
|
||||
var envCmd = &cobra.Command{
|
||||
Use: "env",
|
||||
Short: "generates environment variables for shell scripts",
|
||||
Use: "env",
|
||||
Short: "generates environment variables for shell scripts",
|
||||
PreRun: setVerbosity,
|
||||
RunE: func(_ *cobra.Command, _ []string) error {
|
||||
m, err := cfg.LoadZones(false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = m.Env(*envExport).WriteTo(os.Stdout)
|
||||
env, err := m.Env(*envExport)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = env.WriteTo(os.Stdout)
|
||||
return err
|
||||
},
|
||||
}
|
||||
|
||||
+13
-11
@@ -7,8 +7,9 @@ import (
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"git.jpi.io/amery/jpictl/pkg/zones"
|
||||
"github.com/spf13/cobra"
|
||||
|
||||
"git.jpi.io/amery/jpictl/pkg/cluster"
|
||||
)
|
||||
|
||||
// Command
|
||||
@@ -19,8 +20,9 @@ var gatewayCmd = &cobra.Command{
|
||||
|
||||
// gateway set
|
||||
var gatewaySetCmd = &cobra.Command{
|
||||
Use: "set",
|
||||
Short: "gateway set sets machines as gateways",
|
||||
Use: "set",
|
||||
Short: "gateway set sets machines as gateways",
|
||||
PreRun: setVerbosity,
|
||||
RunE: func(_ *cobra.Command, args []string) error {
|
||||
m, err := cfg.LoadZones(false)
|
||||
if err != nil {
|
||||
@@ -37,9 +39,9 @@ var gatewaySetCmd = &cobra.Command{
|
||||
},
|
||||
}
|
||||
|
||||
func gatewaySet(zi zones.ZoneIterator, gw string) error {
|
||||
func gatewaySet(zi cluster.ZoneIterator, gw string) error {
|
||||
var err error
|
||||
zi.ForEachZone(func(z *zones.Zone) bool {
|
||||
zi.ForEachZone(func(z *cluster.Zone) bool {
|
||||
for _, m := range z.Machines {
|
||||
if m.Name == gw {
|
||||
z.SetGateway(m.ID, true)
|
||||
@@ -73,9 +75,9 @@ var gatewayUnsetCmd = &cobra.Command{
|
||||
},
|
||||
}
|
||||
|
||||
func gatewayUnset(zi zones.ZoneIterator, ngw string) error {
|
||||
func gatewayUnset(zi cluster.ZoneIterator, ngw string) error {
|
||||
var err error
|
||||
zi.ForEachZone(func(z *zones.Zone) bool {
|
||||
zi.ForEachZone(func(z *cluster.Zone) bool {
|
||||
for _, m := range z.Machines {
|
||||
if m.Name == ngw && m.IsGateway() {
|
||||
z.SetGateway(m.ID, false)
|
||||
@@ -114,10 +116,10 @@ var gatewayListCmd = &cobra.Command{
|
||||
},
|
||||
}
|
||||
|
||||
func gatewayListAll(zi zones.ZoneIterator) error {
|
||||
func gatewayListAll(zi cluster.ZoneIterator) error {
|
||||
var b bytes.Buffer
|
||||
var err error
|
||||
zi.ForEachZone(func(z *zones.Zone) bool {
|
||||
zi.ForEachZone(func(z *cluster.Zone) bool {
|
||||
b.WriteString(z.Name + ":")
|
||||
var sIDs []string
|
||||
ids, num := z.GatewayIDs()
|
||||
@@ -136,10 +138,10 @@ func gatewayListAll(zi zones.ZoneIterator) error {
|
||||
return err
|
||||
}
|
||||
|
||||
func gatewayList(zi zones.ZoneIterator, m string) error {
|
||||
func gatewayList(zi cluster.ZoneIterator, m string) error {
|
||||
var b bytes.Buffer
|
||||
var err error
|
||||
zi.ForEachZone(func(z *zones.Zone) bool {
|
||||
zi.ForEachZone(func(z *cluster.Zone) bool {
|
||||
if z.Name == m {
|
||||
b.WriteString(z.Name + ":")
|
||||
ids, num := z.GatewayIDs()
|
||||
|
||||
@@ -3,14 +3,9 @@ package main
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"darvaza.org/sidecar/pkg/logger/zerolog"
|
||||
"darvaza.org/slog"
|
||||
)
|
||||
|
||||
var (
|
||||
log = zerolog.New(nil, slog.Debug)
|
||||
)
|
||||
|
||||
// fatal is a convenience wrapper for slog.Logger.Fatal().Print()
|
||||
func fatal(err error, msg string, args ...any) {
|
||||
l := log.Fatal()
|
||||
|
||||
+23
-4
@@ -2,6 +2,8 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"darvaza.org/sidecar/pkg/logger/zerolog"
|
||||
"darvaza.org/slog"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
@@ -10,13 +12,30 @@ const (
|
||||
CmdName = "jpictl"
|
||||
)
|
||||
|
||||
var rootCmd = &cobra.Command{
|
||||
Use: CmdName,
|
||||
Short: "control tool for jpi.cloud",
|
||||
}
|
||||
var (
|
||||
log = zerolog.New(nil, slog.Error)
|
||||
verbosity int
|
||||
rootCmd = &cobra.Command{
|
||||
Use: CmdName,
|
||||
Short: "control tool for jpi.cloud",
|
||||
}
|
||||
)
|
||||
|
||||
func main() {
|
||||
if err := rootCmd.Execute(); err != nil {
|
||||
fatal(err, "")
|
||||
}
|
||||
}
|
||||
|
||||
func init() {
|
||||
rootCmd.PersistentFlags().CountVarP(&verbosity, "verbosity", "v",
|
||||
"increase the verbosity level to Warn, Info or Debug")
|
||||
}
|
||||
|
||||
func setVerbosity(_ *cobra.Command, _ []string) {
|
||||
desired := int8(slog.Error) + int8(verbosity)
|
||||
if desired > 6 {
|
||||
desired = 6
|
||||
}
|
||||
log = log.WithLevel(slog.LogLevel(desired))
|
||||
}
|
||||
|
||||
+3
-2
@@ -6,8 +6,9 @@ import (
|
||||
|
||||
// Command
|
||||
var writeCmd = &cobra.Command{
|
||||
Use: "write",
|
||||
Short: "rewrites all config files",
|
||||
Use: "write",
|
||||
Short: "rewrites all config files",
|
||||
PreRun: setVerbosity,
|
||||
RunE: func(_ *cobra.Command, _ []string) error {
|
||||
m, err := cfg.LoadZones(false)
|
||||
if err != nil {
|
||||
|
||||
@@ -3,11 +3,13 @@ module git.jpi.io/amery/jpictl
|
||||
go 1.19
|
||||
|
||||
require (
|
||||
darvaza.org/core v0.9.5
|
||||
darvaza.org/resolver v0.5.2
|
||||
darvaza.org/sidecar v0.0.0-20230721122716-b9c54b8adbaf
|
||||
darvaza.org/slog v0.5.2
|
||||
github.com/burntSushi/toml v0.3.1
|
||||
asciigoat.org/ini v0.2.5
|
||||
darvaza.org/core v0.9.8
|
||||
darvaza.org/resolver v0.5.4
|
||||
darvaza.org/sidecar v0.0.2
|
||||
darvaza.org/slog v0.5.3
|
||||
darvaza.org/slog/handlers/discard v0.4.5
|
||||
github.com/gofrs/uuid/v5 v5.0.0
|
||||
github.com/hack-pad/hackpadfs v0.2.1
|
||||
github.com/mgechev/revive v1.3.3
|
||||
github.com/spf13/cobra v1.7.0
|
||||
@@ -17,8 +19,9 @@ require (
|
||||
)
|
||||
|
||||
require (
|
||||
darvaza.org/slog/handlers/filter v0.4.4 // indirect
|
||||
darvaza.org/slog/handlers/zerolog v0.4.4 // indirect
|
||||
asciigoat.org/core v0.3.9 // indirect
|
||||
darvaza.org/slog/handlers/filter v0.4.5 // indirect
|
||||
darvaza.org/slog/handlers/zerolog v0.4.5 // indirect
|
||||
github.com/BurntSushi/toml v1.3.2 // indirect
|
||||
github.com/chavacava/garif v0.1.0 // indirect
|
||||
github.com/fatih/color v1.15.0 // indirect
|
||||
@@ -37,7 +40,7 @@ require (
|
||||
github.com/spf13/pflag v1.0.5 // indirect
|
||||
golang.org/x/mod v0.12.0 // indirect
|
||||
golang.org/x/net v0.14.0 // indirect
|
||||
golang.org/x/sys v0.11.0 // indirect
|
||||
golang.org/x/sys v0.12.0 // indirect
|
||||
golang.org/x/text v0.13.0 // indirect
|
||||
golang.org/x/tools v0.12.0 // indirect
|
||||
gopkg.in/warnings.v0 v0.1.2 // indirect
|
||||
|
||||
@@ -1,19 +1,23 @@
|
||||
darvaza.org/core v0.9.5 h1:sS5pZFwicaxJIQixEiqkMr9GknVHYL+EbKDMkR/4jDM=
|
||||
darvaza.org/core v0.9.5/go.mod h1:O3tHBMlw+xB47uGh5CUx7dXAujBAMmD8BCRFPZmIw54=
|
||||
darvaza.org/resolver v0.5.2 h1:VjHhEr/MJBszeDb7tYlXQ9Bsyh4xrDR7Sd10WAmPD6k=
|
||||
darvaza.org/resolver v0.5.2/go.mod h1:fFvsVPEFeMzUIWlLG47Go/6uJYtRLb9R8HIgYg3uaxE=
|
||||
darvaza.org/sidecar v0.0.0-20230721122716-b9c54b8adbaf h1:ya5ZQicBb/GWll3rlqra8No7oJXks7y1m/cJGYBypv4=
|
||||
darvaza.org/sidecar v0.0.0-20230721122716-b9c54b8adbaf/go.mod h1:by+bPsMa7Rxc/ZYG1qBunrtKocv/DkrPBmyFlmq/j2Q=
|
||||
darvaza.org/slog v0.5.2 h1:8TG1WyHjOyh2vW6t3pjzZVaWzpko5MIIpeI7LWqHFvs=
|
||||
darvaza.org/slog v0.5.2/go.mod h1:HAkEpxTA/mkiLNUXJo5qsCh8EVCtA3evje8GAaCDWHI=
|
||||
darvaza.org/slog/handlers/filter v0.4.4 h1:b2e2T9fQzMdJ0ia+f6b7kw9/T9GFwhFCKob/2tqhGGU=
|
||||
darvaza.org/slog/handlers/filter v0.4.4/go.mod h1:cQlJWuolB6guLug09sX/8Zrzct++M6SPCGvXR37E7Cc=
|
||||
darvaza.org/slog/handlers/zerolog v0.4.4 h1:OR1ASvH1fBCq3t85t4OU6oJPPuqMB1tsDoSpsh6HVJU=
|
||||
darvaza.org/slog/handlers/zerolog v0.4.4/go.mod h1:t60TeEbFcMLo74CkXC2S0rKlnwF4ixZyBR4fqIJV1GE=
|
||||
asciigoat.org/core v0.3.9 h1:hgDDz4ecm3ZvehX++m8A/IzAt+B5oDPiRtxatzfUHPQ=
|
||||
asciigoat.org/core v0.3.9/go.mod h1:CAaHwyw8MpAq4a1MYtN2dxJrsK+hmIdW50OndaQZYPI=
|
||||
asciigoat.org/ini v0.2.5 h1:4gRIp9rU+XQt8+HMqZO5R7GavMv9Yl2+N+je6djDIAE=
|
||||
asciigoat.org/ini v0.2.5/go.mod h1:gmXzJ9XFqf1NLk5nQkj04USQ4tMtdRJHNQX6vp3DzjU=
|
||||
darvaza.org/core v0.9.8 h1:luLxgfUc2pzuusYPo/Z/dC/qr9XZPKpSQw8/kS7zNUM=
|
||||
darvaza.org/core v0.9.8/go.mod h1:Dbme64naxeshQfxcVJX9ZT7AiGyIY8kldfuELVtf8mw=
|
||||
darvaza.org/resolver v0.5.4 h1:dlSBNV14yYsp7Kg7ipwYOMNsLbrpeXa8Z0HBTa0Ryxs=
|
||||
darvaza.org/resolver v0.5.4/go.mod h1:vHMkQUmHjaetFqG2ZLZJiQHsXEMGoTOFGm+NXwfndhE=
|
||||
darvaza.org/sidecar v0.0.2 h1:4H8FUxc43kkLjxdShN1CoxLTcoHQsZjDVwm7kt6eIK0=
|
||||
darvaza.org/sidecar v0.0.2/go.mod h1:yFC3Qt3j+uS7n9CMpLxwrA68z+FNJhENoenBc9zBJJo=
|
||||
darvaza.org/slog v0.5.3 h1:sQzmZXgqRh9oFMKBwEYrEpucLvKJVZxaxa2bHIA6GJ0=
|
||||
darvaza.org/slog v0.5.3/go.mod h1:59d+yi+C7gn4pDDuwbbOKawERpdXthFFk1Yc+Sv6XB0=
|
||||
darvaza.org/slog/handlers/discard v0.4.5 h1:RRykOItNolHyiUav57lG/GFBL33rcljoa0nWTpY+T0g=
|
||||
darvaza.org/slog/handlers/discard v0.4.5/go.mod h1:HYHfISQjMqcPbPoPZ92ib/u7s9JcXvF6OaygpPFwdF8=
|
||||
darvaza.org/slog/handlers/filter v0.4.5 h1:CX1bMzldd67e3y3s3Sh4jK8Lyo0WMvTGBB2lD315jhc=
|
||||
darvaza.org/slog/handlers/filter v0.4.5/go.mod h1:OuH9rHYg9CIErTJCZliMnFexBfP/HJ9PZ1V1VwSCZ1g=
|
||||
darvaza.org/slog/handlers/zerolog v0.4.5 h1:W4cgGORx4wImr+RL96CWSQGTdkZzKX6YHXPSYJvdoB4=
|
||||
darvaza.org/slog/handlers/zerolog v0.4.5/go.mod h1:mCoh/mIl8Nsa6Yu1Um7d7cos6RuEJzgaTXaX5LDRUao=
|
||||
github.com/BurntSushi/toml v1.3.2 h1:o7IhLm0Msx3BaB+n3Ag7L8EVlByGnpq14C4YWiu/gL8=
|
||||
github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
|
||||
github.com/burntSushi/toml v0.3.1 h1:Hu1cOEC2qtKULZJCzym5tyA35bZr3HREuolgiAzMlhY=
|
||||
github.com/burntSushi/toml v0.3.1/go.mod h1:sGTquCpRYr9McuHdv0m6YKIhx8DJGJa4t04/Y9pfSio=
|
||||
github.com/chavacava/garif v0.1.0 h1:2JHa3hbYf5D9dsgseMKAmc/MZ109otzgNFk5s87H9Pc=
|
||||
github.com/chavacava/garif v0.1.0/go.mod h1:XMyYCkEL58DF0oyW4qDjjnPWONs2HBqYKI+UIPD+Gww=
|
||||
github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
|
||||
@@ -26,6 +30,8 @@ github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBD
|
||||
github.com/fatih/structtag v1.2.0 h1:/OdNE99OxoI/PqaW/SuSK9uxxT3f/tcSZgon/ssNSx4=
|
||||
github.com/fatih/structtag v1.2.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94=
|
||||
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
|
||||
github.com/gofrs/uuid/v5 v5.0.0 h1:p544++a97kEL+svbcFbCQVM9KFu0Yo25UoISXGNNH9M=
|
||||
github.com/gofrs/uuid/v5 v5.0.0/go.mod h1:CDOjlDMVAtN56jqyRUZh58JT31Tiw7/oQyEXZV+9bD8=
|
||||
github.com/hack-pad/hackpadfs v0.2.1 h1:FelFhIhv26gyjujoA/yeFO+6YGlqzmc9la/6iKMIxMw=
|
||||
github.com/hack-pad/hackpadfs v0.2.1/go.mod h1:khQBuCEwGXWakkmq8ZiFUvUZz84ZkJ2KNwKvChs4OrU=
|
||||
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
|
||||
@@ -84,8 +90,8 @@ golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBc
|
||||
golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM=
|
||||
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.12.0 h1:CM0HF96J0hcLAwsHPJZjfdNzs0gftsLfgKt57wWHJ0o=
|
||||
golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k=
|
||||
golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
|
||||
golang.org/x/tools v0.12.0 h1:YW6HUoUmYBpwSgyaGaZq1fHjrBjX1rlpZ54T6mu2kss=
|
||||
|
||||
@@ -0,0 +1,2 @@
|
||||
// Package ceph deals with ceph config
|
||||
package ceph
|
||||
@@ -0,0 +1,67 @@
|
||||
package ceph
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/netip"
|
||||
"strings"
|
||||
|
||||
"github.com/gofrs/uuid/v5"
|
||||
|
||||
"asciigoat.org/ini/basic"
|
||||
)
|
||||
|
||||
// Config represents a ceph.conf file
|
||||
type Config struct {
|
||||
Global GlobalConfig `ini:"global"`
|
||||
}
|
||||
|
||||
// GlobalConfig represents the [global] section of a ceph.conf file
|
||||
type GlobalConfig struct {
|
||||
FSID uuid.UUID `ini:"fsid"`
|
||||
Monitors []string `ini:"mon_initial_members,comma"`
|
||||
MonitorsAddr []netip.Addr `ini:"mon_host,comma"`
|
||||
ClusterNetwork netip.Prefix `ini:"cluster_network"`
|
||||
}
|
||||
|
||||
// WriteTo writes a Wireguard [Config] onto the provided [io.Writer]
|
||||
func (cfg *Config) WriteTo(w io.Writer) (int64, error) {
|
||||
var buf bytes.Buffer
|
||||
|
||||
writeGlobalToBuffer(&buf, &cfg.Global)
|
||||
return buf.WriteTo(w)
|
||||
}
|
||||
|
||||
func writeGlobalToBuffer(w *bytes.Buffer, c *GlobalConfig) {
|
||||
_, _ = w.WriteString("[global]\n")
|
||||
_, _ = fmt.Fprintf(w, "%s = %s\n", "fsid", c.FSID.String())
|
||||
_, _ = fmt.Fprintf(w, "%s = %s\n", "mon_initial_members", strings.Join(c.Monitors, ", "))
|
||||
_, _ = fmt.Fprintf(w, "%s = %s\n", "mon_host", joinAddrs(c.MonitorsAddr, ", "))
|
||||
_, _ = fmt.Fprintf(w, "%s = %s\n", "cluster_network", c.ClusterNetwork.String())
|
||||
}
|
||||
|
||||
func joinAddrs(addrs []netip.Addr, sep string) string {
|
||||
s := make([]string, len(addrs))
|
||||
|
||||
for i, addr := range addrs {
|
||||
s[i] = addr.String()
|
||||
}
|
||||
|
||||
return strings.Join(s, sep)
|
||||
}
|
||||
|
||||
// NewConfigFromReader parses the ceph.conf file
|
||||
func NewConfigFromReader(r io.Reader) (*Config, error) {
|
||||
doc, err := basic.Decode(r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
cfg, err := newConfigFromDocument(doc)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return cfg, nil
|
||||
}
|
||||
@@ -0,0 +1,110 @@
|
||||
package ceph
|
||||
|
||||
import (
|
||||
"io/fs"
|
||||
"net/netip"
|
||||
|
||||
"asciigoat.org/ini/basic"
|
||||
"asciigoat.org/ini/parser"
|
||||
|
||||
"darvaza.org/core"
|
||||
)
|
||||
|
||||
var sectionMap = map[string]func(*Config, *basic.Section) error{
|
||||
"global": loadGlobalConfSection,
|
||||
}
|
||||
|
||||
func loadConfSection(out *Config, src *basic.Section) error {
|
||||
h, ok := sectionMap[src.Key]
|
||||
if !ok {
|
||||
return core.Wrapf(fs.ErrInvalid, "unknown section %q", src.Key)
|
||||
}
|
||||
|
||||
return h(out, src)
|
||||
}
|
||||
|
||||
func loadGlobalConfSection(out *Config, src *basic.Section) error {
|
||||
var cfg GlobalConfig
|
||||
|
||||
for _, field := range src.Fields {
|
||||
if err := loadGlobalConfField(&cfg, field); err != nil {
|
||||
return core.Wrap(err, "global")
|
||||
}
|
||||
}
|
||||
|
||||
out.Global = cfg
|
||||
return nil
|
||||
}
|
||||
|
||||
// revive:disable:cyclomatic
|
||||
// revive:disable:cognitive-complexity
|
||||
|
||||
func loadGlobalConfField(cfg *GlobalConfig, field basic.Field) error {
|
||||
// revive:enable:cyclomatic
|
||||
// revive:enable:cognitive-complexity
|
||||
|
||||
// TODO: refactor when asciigoat's ini parser learns to do reflection
|
||||
|
||||
switch field.Key {
|
||||
case "fsid":
|
||||
if !core.IsZero(cfg.FSID) {
|
||||
return core.Wrapf(fs.ErrInvalid, "duplicate field %q", field.Key)
|
||||
}
|
||||
|
||||
err := cfg.FSID.UnmarshalText([]byte(field.Value))
|
||||
switch {
|
||||
case err != nil:
|
||||
return core.Wrap(err, field.Key)
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
case "mon_host":
|
||||
entries, _ := parser.SplitCommaArray(field.Value)
|
||||
for _, s := range entries {
|
||||
var addr netip.Addr
|
||||
|
||||
if err := addr.UnmarshalText([]byte(s)); err != nil {
|
||||
return core.Wrap(err, field.Key)
|
||||
}
|
||||
|
||||
cfg.MonitorsAddr = append(cfg.MonitorsAddr, addr)
|
||||
}
|
||||
return nil
|
||||
case "mon_initial_members":
|
||||
entries, _ := parser.SplitCommaArray(field.Value)
|
||||
cfg.Monitors = append(cfg.Monitors, entries...)
|
||||
return nil
|
||||
case "cluster_network":
|
||||
if !core.IsZero(cfg.ClusterNetwork) {
|
||||
err := core.Wrap(fs.ErrInvalid, "fields before the first section")
|
||||
return err
|
||||
}
|
||||
|
||||
err := cfg.ClusterNetwork.UnmarshalText([]byte(field.Value))
|
||||
switch {
|
||||
case err != nil:
|
||||
return core.Wrap(err, field.Key)
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func newConfigFromDocument(doc *basic.Document) (*Config, error) {
|
||||
var out Config
|
||||
|
||||
if len(doc.Global) > 0 {
|
||||
err := core.Wrap(fs.ErrInvalid, "fields before the first section")
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for i := range doc.Sections {
|
||||
src := &doc.Sections[i]
|
||||
if err := loadConfSection(&out, src); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return &out, nil
|
||||
}
|
||||
@@ -0,0 +1,122 @@
|
||||
package cluster
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"net/netip"
|
||||
"sort"
|
||||
|
||||
"darvaza.org/core"
|
||||
"github.com/gofrs/uuid/v5"
|
||||
|
||||
"git.jpi.io/amery/jpictl/pkg/ceph"
|
||||
)
|
||||
|
||||
// GetCephFSID returns our Ceph's FSID
|
||||
func (m *Cluster) GetCephFSID() (uuid.UUID, error) {
|
||||
if core.IsZero(m.CephFSID) {
|
||||
// generate one
|
||||
v, err := uuid.NewV4()
|
||||
if err != nil {
|
||||
return uuid.Nil, err
|
||||
}
|
||||
m.CephFSID = v
|
||||
}
|
||||
return m.CephFSID, nil
|
||||
}
|
||||
|
||||
// GetCephConfig reads the ceph.conf file
|
||||
func (m *Cluster) GetCephConfig() (*ceph.Config, error) {
|
||||
data, err := m.ReadFile("ceph.conf")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
r := bytes.NewReader(data)
|
||||
return ceph.NewConfigFromReader(r)
|
||||
}
|
||||
|
||||
// WriteCephConfig writes the ceph.conf file
|
||||
func (m *Cluster) WriteCephConfig(cfg *ceph.Config) error {
|
||||
f, err := m.CreateTruncFile("ceph.conf")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
_, err = cfg.WriteTo(f)
|
||||
return err
|
||||
}
|
||||
|
||||
// GenCephConfig prepares a ceph.Config using the cluster information
|
||||
func (m *Cluster) GenCephConfig() (*ceph.Config, error) {
|
||||
fsid, err := m.GetCephFSID()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
cfg := &ceph.Config{
|
||||
Global: ceph.GlobalConfig{
|
||||
FSID: fsid,
|
||||
ClusterNetwork: netip.PrefixFrom(
|
||||
netip.AddrFrom4([4]byte{10, 0, 0, 0}),
|
||||
8,
|
||||
),
|
||||
},
|
||||
}
|
||||
|
||||
m.ForEachZone(func(z *Zone) bool {
|
||||
for _, p := range z.GetCephMonitors() {
|
||||
addr, _ := RingOneAddress(z.ID, p.ID)
|
||||
|
||||
cfg.Global.Monitors = append(cfg.Global.Monitors, p.Name)
|
||||
cfg.Global.MonitorsAddr = append(cfg.Global.MonitorsAddr, addr)
|
||||
}
|
||||
return false
|
||||
})
|
||||
|
||||
return cfg, nil
|
||||
}
|
||||
|
||||
// GetCephMonitors returns the set of Ceph monitors on
|
||||
// the zone
|
||||
func (z *Zone) GetCephMonitors() Machines {
|
||||
var mons Machines
|
||||
var first, second *Machine
|
||||
|
||||
z.ForEachMachine(func(p *Machine) bool {
|
||||
switch {
|
||||
case p.CephMonitor:
|
||||
// it is a monitor
|
||||
mons = append(mons, p)
|
||||
case len(mons) > 0:
|
||||
// zone has a monitor
|
||||
case first == nil && !p.IsGateway():
|
||||
// first option for monitor
|
||||
first = p
|
||||
case second == nil:
|
||||
// second option for monitor
|
||||
second = p
|
||||
}
|
||||
|
||||
return false
|
||||
})
|
||||
|
||||
switch {
|
||||
case len(mons) > 0:
|
||||
// ready
|
||||
case first != nil:
|
||||
// make first option our monitor
|
||||
first.CephMonitor = true
|
||||
mons = append(mons, first)
|
||||
case second != nil:
|
||||
// make second option our monitor
|
||||
second.CephMonitor = true
|
||||
mons = append(mons, second)
|
||||
default:
|
||||
// zone without machines??
|
||||
panic("unreachable")
|
||||
}
|
||||
|
||||
sort.Sort(mons)
|
||||
return mons
|
||||
}
|
||||
@@ -0,0 +1,103 @@
|
||||
package cluster
|
||||
|
||||
import (
|
||||
"os"
|
||||
|
||||
"darvaza.org/slog"
|
||||
"git.jpi.io/amery/jpictl/pkg/ceph"
|
||||
)
|
||||
|
||||
type cephScanTODO struct {
|
||||
names map[string]bool
|
||||
addrs map[string]bool
|
||||
}
|
||||
|
||||
func (todo *cephScanTODO) checkMachine(p *Machine) bool {
|
||||
// on ceph all addresses are ring1
|
||||
ring1, _ := RingOneAddress(p.Zone(), p.ID)
|
||||
addr := ring1.String()
|
||||
|
||||
if _, found := todo.names[p.Name]; found {
|
||||
// found on the TODO by name
|
||||
todo.names[p.Name] = true
|
||||
todo.addrs[addr] = true
|
||||
return true
|
||||
}
|
||||
|
||||
if _, found := todo.addrs[addr]; found {
|
||||
// found on the TODO by address
|
||||
todo.names[p.Name] = true
|
||||
todo.addrs[addr] = true
|
||||
return true
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
func (todo *cephScanTODO) LogMissing(log slog.Logger) {
|
||||
for name, found := range todo.names {
|
||||
if !found {
|
||||
log.Warn().
|
||||
WithField("subsystem", "ceph").
|
||||
WithField("monitor", name).
|
||||
Print("unknown monitor")
|
||||
}
|
||||
}
|
||||
|
||||
for addr, found := range todo.addrs {
|
||||
if !found {
|
||||
log.Warn().
|
||||
WithField("subsystem", "ceph").
|
||||
WithField("monitor", addr).
|
||||
Print("unknown monitor")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func newCephScanTODO(cfg *ceph.Config) *cephScanTODO {
|
||||
todo := &cephScanTODO{
|
||||
names: make(map[string]bool),
|
||||
addrs: make(map[string]bool),
|
||||
}
|
||||
|
||||
for _, name := range cfg.Global.Monitors {
|
||||
todo.names[name] = false
|
||||
}
|
||||
|
||||
for _, addr := range cfg.Global.MonitorsAddr {
|
||||
todo.addrs[addr.String()] = false
|
||||
}
|
||||
|
||||
return todo
|
||||
}
|
||||
|
||||
func (m *Cluster) scanCephMonitors(_ *ScanOptions) error {
|
||||
cfg, err := m.GetCephConfig()
|
||||
switch {
|
||||
case os.IsNotExist(err):
|
||||
err = nil
|
||||
case err != nil:
|
||||
return err
|
||||
}
|
||||
|
||||
if cfg != nil {
|
||||
// store FSID
|
||||
m.CephFSID = cfg.Global.FSID
|
||||
|
||||
// flag monitors based on config
|
||||
todo := newCephScanTODO(cfg)
|
||||
m.ForEachMachine(func(p *Machine) bool {
|
||||
p.CephMonitor = todo.checkMachine(p)
|
||||
return false
|
||||
})
|
||||
|
||||
todo.LogMissing(m.log)
|
||||
}
|
||||
|
||||
// make sure every zone has one
|
||||
m.ForEachZone(func(z *Zone) bool {
|
||||
_ = z.GetCephMonitors()
|
||||
return false
|
||||
})
|
||||
return nil
|
||||
}
|
||||
@@ -0,0 +1,77 @@
|
||||
// Package cluster contains information about the cluster
|
||||
package cluster
|
||||
|
||||
import (
|
||||
"io/fs"
|
||||
|
||||
"darvaza.org/resolver"
|
||||
"darvaza.org/slog"
|
||||
"github.com/gofrs/uuid/v5"
|
||||
)
|
||||
|
||||
var (
|
||||
_ MachineIterator = (*Cluster)(nil)
|
||||
_ ZoneIterator = (*Cluster)(nil)
|
||||
)
|
||||
|
||||
// revive:disable:line-length-limit
|
||||
|
||||
// Cluster represents all zones in a cluster
|
||||
type Cluster struct {
|
||||
dir fs.FS
|
||||
log slog.Logger
|
||||
resolver resolver.Resolver
|
||||
|
||||
BaseDir string `json:"dir,omitempty" yaml:"dir,omitempty"`
|
||||
Name string `json:"name,omitempty" yaml:"name,omitempty"`
|
||||
Domain string `json:"domain,omitempty" yaml:"domain,omitempty"`
|
||||
|
||||
CephFSID uuid.UUID `json:"ceph_fsid,omitempty" yaml:"ceph_fsid,omitempty"`
|
||||
Zones []*Zone `json:"zones,omitempty" yaml:"zones,omitempty"`
|
||||
}
|
||||
|
||||
// revive:enable:line-length-limit
|
||||
|
||||
// ForEachMachine calls a function for each Machine in the cluster
|
||||
// until instructed to terminate the loop
|
||||
func (m *Cluster) ForEachMachine(fn func(*Machine) bool) {
|
||||
m.ForEachZone(func(z *Zone) bool {
|
||||
var term bool
|
||||
|
||||
z.ForEachMachine(func(p *Machine) bool {
|
||||
term = fn(p)
|
||||
return term
|
||||
})
|
||||
|
||||
return term
|
||||
})
|
||||
}
|
||||
|
||||
// ForEachZone calls a function for each Zone in the cluster
|
||||
// until instructed to terminate the loop
|
||||
func (m *Cluster) ForEachZone(fn func(*Zone) bool) {
|
||||
for _, p := range m.Zones {
|
||||
if fn(p) {
|
||||
// terminate
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// GetMachineByName looks for a machine with the specified
|
||||
// name on any zone
|
||||
func (m *Cluster) GetMachineByName(name string) (*Machine, bool) {
|
||||
var out *Machine
|
||||
|
||||
if name != "" {
|
||||
m.ForEachMachine(func(p *Machine) bool {
|
||||
if p.Name == name {
|
||||
out = p
|
||||
}
|
||||
|
||||
return out != nil
|
||||
})
|
||||
}
|
||||
|
||||
return out, out != nil
|
||||
}
|
||||
@@ -1,4 +1,4 @@
|
||||
package zones
|
||||
package cluster
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
@@ -9,7 +9,7 @@ import (
|
||||
)
|
||||
|
||||
// OpenFile opens a file on the cluster's config directory with the specified flags
|
||||
func (m *Zones) OpenFile(name string, flags int, args ...any) (fs.File, error) {
|
||||
func (m *Cluster) OpenFile(name string, flags int, args ...any) (fs.File, error) {
|
||||
if len(args) > 0 {
|
||||
name = fmt.Sprintf(name, args...)
|
||||
}
|
||||
@@ -18,16 +18,16 @@ func (m *Zones) OpenFile(name string, flags int, args ...any) (fs.File, error) {
|
||||
}
|
||||
|
||||
// CreateTruncFile creates or truncates a file on the cluster's config directory
|
||||
func (m *Zones) CreateTruncFile(name string, args ...any) (io.WriteCloser, error) {
|
||||
func (m *Cluster) CreateTruncFile(name string, args ...any) (io.WriteCloser, error) {
|
||||
return m.openWriter(name, os.O_CREATE|os.O_TRUNC, args...)
|
||||
}
|
||||
|
||||
// CreateFile creates a file on the cluster's config directory
|
||||
func (m *Zones) CreateFile(name string, args ...any) (io.WriteCloser, error) {
|
||||
func (m *Cluster) CreateFile(name string, args ...any) (io.WriteCloser, error) {
|
||||
return m.openWriter(name, os.O_CREATE, args...)
|
||||
}
|
||||
|
||||
func (m *Zones) openWriter(name string, flags int, args ...any) (io.WriteCloser, error) {
|
||||
func (m *Cluster) openWriter(name string, flags int, args ...any) (io.WriteCloser, error) {
|
||||
f, err := m.OpenFile(name, os.O_WRONLY|flags, args...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -37,7 +37,7 @@ func (m *Zones) openWriter(name string, flags int, args ...any) (io.WriteCloser,
|
||||
}
|
||||
|
||||
// ReadFile reads a file from the cluster's config directory
|
||||
func (m *Zones) ReadFile(name string, args ...any) ([]byte, error) {
|
||||
func (m *Cluster) ReadFile(name string, args ...any) ([]byte, error) {
|
||||
if len(args) > 0 {
|
||||
name = fmt.Sprintf(name, args...)
|
||||
}
|
||||
@@ -0,0 +1,25 @@
|
||||
package cluster
|
||||
|
||||
import (
|
||||
"io/fs"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/hack-pad/hackpadfs/os"
|
||||
)
|
||||
|
||||
// DirFS returns a file system (an [fs.FS]) for the tree
|
||||
// of files rooted at the directory dir.
|
||||
func DirFS(dir string) (fs.FS, error) {
|
||||
dir = filepath.Clean(dir)
|
||||
fullPath, err := filepath.Abs(dir)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
sub, err := os.NewFS().Sub(fullPath[1:])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return sub, nil
|
||||
}
|
||||
@@ -0,0 +1,137 @@
|
||||
package cluster
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"gopkg.in/yaml.v3"
|
||||
)
|
||||
|
||||
func (m *Cluster) init(opts *ScanOptions) error {
|
||||
for _, fn := range []func(*ScanOptions) error{
|
||||
m.initZones,
|
||||
m.scanZoneIDs,
|
||||
m.scanSort,
|
||||
m.scanGateways,
|
||||
} {
|
||||
if err := fn(opts); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *Cluster) initZones(opts *ScanOptions) error {
|
||||
var err error
|
||||
|
||||
sub, err := DirFS(m.BaseDir)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
m.dir = sub
|
||||
|
||||
m.ForEachZone(func(z *Zone) bool {
|
||||
err = m.initZone(z, opts)
|
||||
return err != nil
|
||||
})
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func (m *Cluster) initZone(z *Zone, _ *ScanOptions) error {
|
||||
var hasMissing bool
|
||||
var lastMachineID int
|
||||
|
||||
z.zones = m
|
||||
z.logger = m
|
||||
|
||||
z.ForEachMachine(func(p *Machine) bool {
|
||||
p.zone = z
|
||||
p.logger = z
|
||||
|
||||
switch {
|
||||
case p.ID == 0:
|
||||
hasMissing = true
|
||||
case p.ID > lastMachineID:
|
||||
lastMachineID = z.ID
|
||||
}
|
||||
|
||||
return false
|
||||
})
|
||||
|
||||
if hasMissing {
|
||||
next := lastMachineID + 1
|
||||
|
||||
z.ForEachMachine(func(p *Machine) bool {
|
||||
if p.ID == 0 {
|
||||
p.ID, next = next, next+1
|
||||
}
|
||||
|
||||
return false
|
||||
})
|
||||
}
|
||||
|
||||
z.ForEachMachine(func(p *Machine) bool {
|
||||
p.Name = fmt.Sprintf("%s-%v", z.Name, p.ID)
|
||||
return false
|
||||
})
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func decodeConfigData(data []byte) (out *Cluster, err error) {
|
||||
// try JSON first
|
||||
out = new(Cluster)
|
||||
err = json.Unmarshal(data, out)
|
||||
if err == nil {
|
||||
// good json
|
||||
return out, nil
|
||||
} else if _, ok := err.(*json.SyntaxError); !ok {
|
||||
// bad json
|
||||
return nil, err
|
||||
}
|
||||
|
||||
out = new(Cluster)
|
||||
err = yaml.Unmarshal(data, out)
|
||||
if err != nil {
|
||||
// bad yaml too
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// good yaml
|
||||
return out, nil
|
||||
}
|
||||
|
||||
// NewFromConfig loads the cluster data from the given file
|
||||
func NewFromConfig(filename string, opts ...ScanOption) (*Cluster, error) {
|
||||
var scanOptions ScanOptions
|
||||
|
||||
data, err := os.ReadFile(filename)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
m, err := decodeConfigData(data)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, opt := range opts {
|
||||
if err = opt(m, &scanOptions); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
if err = m.setScanDefaults(&scanOptions); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := m.init(&scanOptions); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return m, nil
|
||||
}
|
||||
@@ -1,17 +1,20 @@
|
||||
package zones
|
||||
package cluster
|
||||
|
||||
import (
|
||||
"io/fs"
|
||||
"sort"
|
||||
|
||||
"darvaza.org/core"
|
||||
)
|
||||
|
||||
func (m *Zones) scan(opts *ScanOptions) error {
|
||||
func (m *Cluster) scan(opts *ScanOptions) error {
|
||||
for _, fn := range []func(*ScanOptions) error{
|
||||
m.scanDirectory,
|
||||
m.scanMachines,
|
||||
m.scanZoneIDs,
|
||||
m.scanSort,
|
||||
m.scanGateways,
|
||||
m.scanCephMonitors,
|
||||
} {
|
||||
if err := fn(opts); err != nil {
|
||||
return err
|
||||
@@ -21,7 +24,7 @@ func (m *Zones) scan(opts *ScanOptions) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *Zones) scanDirectory(_ *ScanOptions) error {
|
||||
func (m *Cluster) scanDirectory(_ *ScanOptions) error {
|
||||
// each directory is a zone
|
||||
entries, err := fs.ReadDir(m.dir, ".")
|
||||
if err != nil {
|
||||
@@ -30,23 +33,41 @@ func (m *Zones) scanDirectory(_ *ScanOptions) error {
|
||||
|
||||
for _, e := range entries {
|
||||
if e.IsDir() {
|
||||
z := &Zone{
|
||||
zones: m,
|
||||
Name: e.Name(),
|
||||
z, err := m.newZone(e.Name())
|
||||
switch {
|
||||
case err != nil:
|
||||
return core.Wrap(err, e.Name())
|
||||
case z.Machines.Len() == 0:
|
||||
z.warn(nil).
|
||||
WithField("zone", z.Name).
|
||||
Print("empty")
|
||||
default:
|
||||
m.Zones = append(m.Zones, z)
|
||||
}
|
||||
|
||||
if err := z.scan(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
m.Zones = append(m.Zones, z)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *Zones) scanMachines(opts *ScanOptions) error {
|
||||
func (m *Cluster) newZone(name string) (*Zone, error) {
|
||||
z := &Zone{
|
||||
zones: m,
|
||||
logger: m,
|
||||
Name: name,
|
||||
}
|
||||
|
||||
z.debug().
|
||||
WithField("zone", z.Name).
|
||||
Print("found")
|
||||
|
||||
if err := z.scan(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return z, nil
|
||||
}
|
||||
|
||||
func (m *Cluster) scanMachines(opts *ScanOptions) error {
|
||||
var err error
|
||||
m.ForEachMachine(func(p *Machine) bool {
|
||||
err = p.scan(opts)
|
||||
@@ -55,7 +76,7 @@ func (m *Zones) scanMachines(opts *ScanOptions) error {
|
||||
return err
|
||||
}
|
||||
|
||||
func (m *Zones) scanZoneIDs(_ *ScanOptions) error {
|
||||
func (m *Cluster) scanZoneIDs(_ *ScanOptions) error {
|
||||
var hasMissing bool
|
||||
var lastZoneID int
|
||||
|
||||
@@ -85,7 +106,7 @@ func (m *Zones) scanZoneIDs(_ *ScanOptions) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *Zones) scanSort(_ *ScanOptions) error {
|
||||
func (m *Cluster) scanSort(_ *ScanOptions) error {
|
||||
sort.SliceStable(m.Zones, func(i, j int) bool {
|
||||
id1 := m.Zones[i].ID
|
||||
id2 := m.Zones[j].ID
|
||||
@@ -111,7 +132,7 @@ func (m *Zones) scanSort(_ *ScanOptions) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *Zones) scanGateways(_ *ScanOptions) error {
|
||||
func (m *Cluster) scanGateways(_ *ScanOptions) error {
|
||||
var err error
|
||||
|
||||
m.ForEachZone(func(z *Zone) bool {
|
||||
@@ -131,11 +152,22 @@ func (z *Zone) scan() error {
|
||||
for _, e := range entries {
|
||||
if e.IsDir() {
|
||||
m := &Machine{
|
||||
zone: z,
|
||||
Name: e.Name(),
|
||||
zone: z,
|
||||
logger: z,
|
||||
Name: e.Name(),
|
||||
}
|
||||
|
||||
m.debug().
|
||||
WithField("node", m.Name).
|
||||
WithField("zone", z.Name).
|
||||
Print("found")
|
||||
|
||||
if err := m.init(); err != nil {
|
||||
m.error(err).
|
||||
WithField("node", m.Name).
|
||||
WithField("zone", z.Name).
|
||||
Print()
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -1,15 +1,15 @@
|
||||
package zones
|
||||
package cluster
|
||||
|
||||
import (
|
||||
"io/fs"
|
||||
"path/filepath"
|
||||
|
||||
"darvaza.org/resolver"
|
||||
"github.com/hack-pad/hackpadfs/os"
|
||||
"darvaza.org/slog"
|
||||
)
|
||||
|
||||
// A ScanOption preconfigures the Zones before scanning
|
||||
type ScanOption func(*Zones, *ScanOptions) error
|
||||
// A ScanOption pre-configures the Zones before scanning
|
||||
type ScanOption func(*Cluster, *ScanOptions) error
|
||||
|
||||
// ScanOptions contains flags used by the initial scan
|
||||
type ScanOptions struct {
|
||||
@@ -17,13 +17,17 @@ type ScanOptions struct {
|
||||
// pre-populate Machine.PublicAddresses during the
|
||||
// initial scan
|
||||
DontResolvePublicAddresses bool
|
||||
|
||||
// Logger specifies the logger to be used. otherwise
|
||||
// the scanner will be mute
|
||||
slog.Logger
|
||||
}
|
||||
|
||||
// ResolvePublicAddresses instructs the scanner to use
|
||||
// the DNS resolver to get PublicAddresses of nodes.
|
||||
// Default is true
|
||||
func ResolvePublicAddresses(resolve bool) ScanOption {
|
||||
return func(m *Zones, opt *ScanOptions) error {
|
||||
return func(m *Cluster, opt *ScanOptions) error {
|
||||
opt.DontResolvePublicAddresses = !resolve
|
||||
return nil
|
||||
}
|
||||
@@ -32,7 +36,7 @@ func ResolvePublicAddresses(resolve bool) ScanOption {
|
||||
// WithLookuper specifies what resolver.Lookuper to use to
|
||||
// find public addresses
|
||||
func WithLookuper(h resolver.Lookuper) ScanOption {
|
||||
return func(m *Zones, opt *ScanOptions) error {
|
||||
return func(m *Cluster, opt *ScanOptions) error {
|
||||
if h == nil {
|
||||
return fs.ErrInvalid
|
||||
}
|
||||
@@ -45,7 +49,7 @@ func WithLookuper(h resolver.Lookuper) ScanOption {
|
||||
// public addresses. if nil is passed, the [net.Resolver] will be used.
|
||||
// The default is using Cloudflare's 1.1.1.1.
|
||||
func WithResolver(h resolver.Resolver) ScanOption {
|
||||
return func(m *Zones, opt *ScanOptions) error {
|
||||
return func(m *Cluster, opt *ScanOptions) error {
|
||||
if h == nil {
|
||||
h = resolver.SystemResolver(true)
|
||||
}
|
||||
@@ -55,55 +59,72 @@ func WithResolver(h resolver.Resolver) ScanOption {
|
||||
}
|
||||
}
|
||||
|
||||
func (m *Zones) setDefaults(opt *ScanOptions) error {
|
||||
// WithLogger specifies what to use for logging
|
||||
func WithLogger(log slog.Logger) ScanOption {
|
||||
return func(m *Cluster, opt *ScanOptions) error {
|
||||
if log == nil {
|
||||
log = DefaultLogger()
|
||||
}
|
||||
|
||||
opt.Logger = log
|
||||
m.log = log
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func (m *Cluster) setScanDefaults(opt *ScanOptions) error {
|
||||
if m.resolver == nil {
|
||||
h := resolver.NewCloudflareLookuper()
|
||||
h := DefaultLookuper()
|
||||
|
||||
if err := WithLookuper(h)(m, opt); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if opt.Logger == nil {
|
||||
if err := WithLogger(nil)(m, opt); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// NewFS builds a [Zones] tree using the given directory
|
||||
func NewFS(dir fs.FS, domain string, opts ...ScanOption) (*Zones, error) {
|
||||
// NewFromDirectory builds a [Cluster] tree using the given directory
|
||||
func NewFromDirectory(dir, domain string, opts ...ScanOption) (*Cluster, error) {
|
||||
var scanOptions ScanOptions
|
||||
|
||||
z := &Zones{
|
||||
dir: dir,
|
||||
domain: domain,
|
||||
dir = filepath.Clean(dir)
|
||||
fullPath, err := filepath.Abs(dir)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
sub, err := DirFS(dir)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
m := &Cluster{
|
||||
dir: sub,
|
||||
BaseDir: dir,
|
||||
Name: filepath.Base(fullPath),
|
||||
Domain: domain,
|
||||
}
|
||||
|
||||
for _, opt := range opts {
|
||||
if err := opt(z, &scanOptions); err != nil {
|
||||
if err := opt(m, &scanOptions); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
if err := z.setDefaults(&scanOptions); err != nil {
|
||||
if err := m.setScanDefaults(&scanOptions); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := z.scan(&scanOptions); err != nil {
|
||||
if err := m.scan(&scanOptions); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return z, nil
|
||||
}
|
||||
|
||||
// New builds a [Zones] tree using the given directory
|
||||
func New(dir, domain string, opts ...ScanOption) (*Zones, error) {
|
||||
dir, err := filepath.Abs(dir)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
base, err := os.NewFS().Sub(dir[1:])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return NewFS(base, domain, opts...)
|
||||
return m, nil
|
||||
}
|
||||
@@ -0,0 +1,17 @@
|
||||
package cluster
|
||||
|
||||
import (
|
||||
"darvaza.org/resolver"
|
||||
"darvaza.org/slog"
|
||||
"darvaza.org/slog/handlers/discard"
|
||||
)
|
||||
|
||||
// DefaultLogger returns a logger that doesn't log anything
|
||||
func DefaultLogger() slog.Logger {
|
||||
return discard.New()
|
||||
}
|
||||
|
||||
// DefaultLookuper returns a [resolver.Lookuper] using Cloudflare's 1.1.1.1
|
||||
func DefaultLookuper() resolver.Lookuper {
|
||||
return resolver.NewCloudflareLookuper()
|
||||
}
|
||||
@@ -1,4 +1,4 @@
|
||||
package zones
|
||||
package cluster
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
@@ -11,15 +11,24 @@ import (
|
||||
type Env struct {
|
||||
ZoneIterator
|
||||
|
||||
export bool
|
||||
cephFSID string
|
||||
export bool
|
||||
}
|
||||
|
||||
// Env returns a shell environment factory
|
||||
func (m *Zones) Env(export bool) *Env {
|
||||
return &Env{
|
||||
func (m *Cluster) Env(export bool) (*Env, error) {
|
||||
fsid, err := m.GetCephFSID()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
env := &Env{
|
||||
ZoneIterator: m,
|
||||
cephFSID: fsid.String(),
|
||||
export: export,
|
||||
}
|
||||
|
||||
return env, nil
|
||||
}
|
||||
|
||||
// Zones returns the list of Zone IDs
|
||||
@@ -38,7 +47,12 @@ func (m *Env) Zones() []int {
|
||||
func (m *Env) WriteTo(w io.Writer) (int64, error) {
|
||||
var buf bytes.Buffer
|
||||
|
||||
if m.cephFSID != "" {
|
||||
m.writeEnvVar(&buf, m.cephFSID, "FSID")
|
||||
}
|
||||
|
||||
m.writeEnvVarInts(&buf, m.Zones(), "ZONES")
|
||||
|
||||
m.ForEachZone(func(z *Zone) bool {
|
||||
m.writeEnvZone(&buf, z)
|
||||
return false
|
||||
@@ -59,6 +73,15 @@ func (m *Env) writeEnvZone(w io.Writer, z *Zone) {
|
||||
// ZONE{zoneID}_GW
|
||||
gateways, _ := z.GatewayIDs()
|
||||
m.writeEnvVarInts(w, gateways, "ZONE%v_%s", zoneID, "GW")
|
||||
|
||||
// Ceph
|
||||
monitors := z.GetCephMonitors()
|
||||
// MON{zoneID}_NAME
|
||||
m.writeEnvVar(w, genEnvZoneCephMonNames(monitors), "MON%v_%s", zoneID, "NAME")
|
||||
// MON{zoneID}_IP
|
||||
m.writeEnvVar(w, genEnvZoneCephMonIPs(monitors), "MON%v_%s", zoneID, "IP")
|
||||
// MON{zoneID}_ID
|
||||
m.writeEnvVar(w, genEnvZoneCephMonIDs(monitors), "MON%v_%s", zoneID, "ID")
|
||||
}
|
||||
|
||||
func (m *Env) writeEnvVarInts(w io.Writer, value []int, name string, args ...any) {
|
||||
@@ -111,3 +134,44 @@ func genEnvZoneNodes(z *Zone) string {
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func genEnvZoneCephMonNames(m Machines) string {
|
||||
var buf strings.Builder
|
||||
m.ForEachMachine(func(p *Machine) bool {
|
||||
if buf.Len() > 0 {
|
||||
_, _ = buf.WriteRune(' ')
|
||||
}
|
||||
_, _ = buf.WriteString(p.Name)
|
||||
|
||||
return false
|
||||
})
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
func genEnvZoneCephMonIPs(m Machines) string {
|
||||
var buf strings.Builder
|
||||
m.ForEachMachine(func(p *Machine) bool {
|
||||
addr, _ := RingOneAddress(p.Zone(), p.ID)
|
||||
|
||||
if buf.Len() > 0 {
|
||||
_, _ = buf.WriteRune(' ')
|
||||
}
|
||||
_, _ = buf.WriteString(addr.String())
|
||||
|
||||
return false
|
||||
})
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
func genEnvZoneCephMonIDs(m Machines) string {
|
||||
var buf strings.Builder
|
||||
m.ForEachMachine(func(p *Machine) bool {
|
||||
if buf.Len() > 0 {
|
||||
_, _ = buf.WriteRune(' ')
|
||||
}
|
||||
_, _ = fmt.Fprintf(&buf, "%v", p.ID)
|
||||
|
||||
return false
|
||||
})
|
||||
return buf.String()
|
||||
}
|
||||
@@ -0,0 +1,16 @@
|
||||
package cluster
|
||||
|
||||
import "errors"
|
||||
|
||||
var (
|
||||
// ErrInvalidName indicates the name isn't valid
|
||||
ErrInvalidName = errors.New("invalid name")
|
||||
|
||||
// ErrUnknownNode indicates there is a reference to a node
|
||||
// we don't have on the tree
|
||||
ErrUnknownNode = errors.New("node does not exist")
|
||||
|
||||
// ErrInvalidNode indicates the nodes can't be used for
|
||||
// the intended purpose
|
||||
ErrInvalidNode = errors.New("invalid node")
|
||||
)
|
||||
@@ -0,0 +1,128 @@
|
||||
package cluster
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"strings"
|
||||
"text/template"
|
||||
)
|
||||
|
||||
type hostsFile struct {
|
||||
Ring0 []hostsEntry
|
||||
Ring1 []hostsEntry
|
||||
}
|
||||
|
||||
type hostsEntry struct {
|
||||
Addr string
|
||||
Names []string
|
||||
}
|
||||
|
||||
var hostsTemplate = template.Must(template.New("hosts").Funcs(template.FuncMap{
|
||||
"StringsJoin": strings.Join,
|
||||
}).Parse(`127.0.0.1 localhost
|
||||
|
||||
# The following lines are desirable for IPv6 capable hosts
|
||||
::1 ip6-localhost ip6-loopback
|
||||
fe00::0 ip6-localnet
|
||||
ff00::0 ip6-mcastprefix
|
||||
ff02::1 ip6-allnodes
|
||||
ff02::2 ip6-allrouters
|
||||
ff02::3 ip6-allhosts
|
||||
|
||||
{{range .Ring1 -}}
|
||||
{{.Addr}} {{StringsJoin .Names " "}}
|
||||
{{end}}
|
||||
{{range .Ring0 -}}
|
||||
{{.Addr}} {{StringsJoin .Names " "}}
|
||||
{{end -}}
|
||||
`))
|
||||
|
||||
// WriteHosts rewrites all hosts files on the tree
|
||||
func (m *Cluster) WriteHosts() error {
|
||||
var err error
|
||||
|
||||
m.ForEachZone(func(z *Zone) bool {
|
||||
err = z.WriteHosts()
|
||||
return err != nil
|
||||
})
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// WriteHosts rewrites all hosts files in the zone
|
||||
func (z *Zone) WriteHosts() error {
|
||||
var err error
|
||||
|
||||
s := z.Hosts()
|
||||
z.ForEachMachine(func(p *Machine) bool {
|
||||
err = p.WriteStringFile(s, "hosts")
|
||||
return err != nil
|
||||
})
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// WriteHosts rewrites the hosts file
|
||||
func (p *Machine) WriteHosts() error {
|
||||
s := p.zone.Hosts()
|
||||
return p.WriteStringFile(s, "hosts")
|
||||
}
|
||||
|
||||
func (z *Zone) genHosts(out *hostsFile, p *Machine) {
|
||||
var names []string
|
||||
|
||||
ip, _ := RingOneAddress(p.zone.ID, p.ID)
|
||||
names = append(names, p.Name)
|
||||
|
||||
if p.CephMonitor {
|
||||
names = append(names, fmt.Sprintf("%s-%s", p.zone.Name, "ceph"))
|
||||
names = append(names, fmt.Sprintf("%s-%s", p.zone.Name, "k3s"))
|
||||
|
||||
if z.ID == p.zone.ID {
|
||||
names = append(names, "ceph")
|
||||
names = append(names, "k3s")
|
||||
}
|
||||
}
|
||||
|
||||
entry := hostsEntry{
|
||||
Addr: ip.String(),
|
||||
Names: names,
|
||||
}
|
||||
|
||||
out.Ring1 = append(out.Ring1, entry)
|
||||
|
||||
if p.IsGateway() {
|
||||
var s string
|
||||
|
||||
ip, _ = RingZeroAddress(p.zone.ID, p.ID)
|
||||
s = fmt.Sprintf("%s-%v", p.Name, 0)
|
||||
|
||||
entry = hostsEntry{
|
||||
Addr: ip.String(),
|
||||
Names: []string{s},
|
||||
}
|
||||
|
||||
out.Ring0 = append(out.Ring0, entry)
|
||||
}
|
||||
}
|
||||
|
||||
// Hosts renders the /etc/hosts to be used on this zone
|
||||
func (z *Zone) Hosts() string {
|
||||
var buf bytes.Buffer
|
||||
var out hostsFile
|
||||
|
||||
z.zones.ForEachZone(func(z2 *Zone) bool {
|
||||
z2.ForEachMachine(func(p *Machine) bool {
|
||||
z.genHosts(&out, p)
|
||||
|
||||
return false
|
||||
})
|
||||
return false
|
||||
})
|
||||
|
||||
if err := hostsTemplate.Execute(&buf, &out); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
return buf.String()
|
||||
}
|
||||
@@ -0,0 +1,49 @@
|
||||
package cluster
|
||||
|
||||
import "darvaza.org/slog"
|
||||
|
||||
type logger interface {
|
||||
withDebug() (slog.Logger, bool)
|
||||
withInfo() (slog.Logger, bool)
|
||||
|
||||
debug() slog.Logger
|
||||
info() slog.Logger
|
||||
warn(error) slog.Logger
|
||||
error(error) slog.Logger
|
||||
}
|
||||
|
||||
var (
|
||||
_ logger = (*Cluster)(nil)
|
||||
)
|
||||
|
||||
func (z *Cluster) withDebug() (slog.Logger, bool) {
|
||||
return z.debug().WithEnabled()
|
||||
}
|
||||
|
||||
func (z *Cluster) withInfo() (slog.Logger, bool) {
|
||||
return z.debug().WithEnabled()
|
||||
}
|
||||
|
||||
func (z *Cluster) debug() slog.Logger {
|
||||
return z.log.Debug()
|
||||
}
|
||||
|
||||
func (z *Cluster) info() slog.Logger {
|
||||
return z.log.Info()
|
||||
}
|
||||
|
||||
func (z *Cluster) warn(err error) slog.Logger {
|
||||
l := z.log.Warn()
|
||||
if err != nil {
|
||||
l = l.WithField(slog.ErrorFieldName, err)
|
||||
}
|
||||
return l
|
||||
}
|
||||
|
||||
func (z *Cluster) error(err error) slog.Logger {
|
||||
l := z.log.Error()
|
||||
if err != nil {
|
||||
l = l.WithField(slog.ErrorFieldName, err)
|
||||
}
|
||||
return l
|
||||
}
|
||||
@@ -1,4 +1,4 @@
|
||||
package zones
|
||||
package cluster
|
||||
|
||||
import (
|
||||
"net/netip"
|
||||
@@ -9,12 +9,15 @@ import (
|
||||
|
||||
// A Machine is a machine on a Zone
|
||||
type Machine struct {
|
||||
zone *Zone
|
||||
ID int `toml:"id"`
|
||||
Name string `toml:"-" json:"-" yaml:"-"`
|
||||
zone *Zone
|
||||
logger `json:"-" yaml:"-"`
|
||||
|
||||
PublicAddresses []netip.Addr `toml:"public,omitempty" json:"public,omitempty" yaml:"public,omitempty"`
|
||||
Rings []*RingInfo `toml:"rings,omitempty" json:"rings,omitempty" yaml:"rings,omitempty"`
|
||||
ID int
|
||||
Name string `json:"-" yaml:"-"`
|
||||
|
||||
CephMonitor bool `json:"ceph_monitor,omitempty" yaml:"ceph_monitor,omitempty"`
|
||||
PublicAddresses []netip.Addr `json:"public,omitempty" yaml:"public,omitempty"`
|
||||
Rings []*RingInfo `json:"rings,omitempty" yaml:"rings,omitempty"`
|
||||
}
|
||||
|
||||
// revive:enable:line-length-limit
|
||||
@@ -25,16 +28,19 @@ func (m *Machine) String() string {
|
||||
|
||||
// FullName returns the Name of the machine including domain name
|
||||
func (m *Machine) FullName() string {
|
||||
if domain := m.zone.zones.domain; domain != "" {
|
||||
var s = []string{
|
||||
m.Name,
|
||||
domain,
|
||||
}
|
||||
var name []string
|
||||
|
||||
return strings.Join(s, ".")
|
||||
for _, s := range []string{
|
||||
m.Name,
|
||||
m.zone.zones.Name,
|
||||
m.zone.zones.Domain,
|
||||
} {
|
||||
if s != "" {
|
||||
name = append(name, s)
|
||||
}
|
||||
}
|
||||
|
||||
return m.Name
|
||||
return strings.Join(name, ".")
|
||||
}
|
||||
|
||||
// IsGateway tells if the Machine is a ring0 gateway
|
||||
@@ -1,4 +1,4 @@
|
||||
package zones
|
||||
package cluster
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
@@ -1,7 +1,8 @@
|
||||
package zones
|
||||
package cluster
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
@@ -121,23 +122,30 @@ func (m *Machine) applyWireguardConfig(ring int, wg *wireguard.Config) error {
|
||||
addr := wg.GetAddress()
|
||||
zoneID, nodeID, ok := Rings[ring].Decode(addr)
|
||||
if !ok {
|
||||
return fmt.Errorf("%s: invalid wg%v address: %s", m.Name, ring, addr)
|
||||
return fmt.Errorf("%s: invalid address", addr)
|
||||
}
|
||||
|
||||
if err := m.applyZoneNodeID(zoneID, nodeID); err != nil {
|
||||
err = core.Wrapf(err, "%s: wg%v:%s", m.Name, ring, addr)
|
||||
return err
|
||||
return core.Wrapf(err, "%s: invalid address", addr)
|
||||
}
|
||||
|
||||
if err := m.applyWireguardInterfaceConfig(ring, wg.Interface); err != nil {
|
||||
err = core.Wrapf(err, "%s: wg%v:%s", m.Name, ring, addr)
|
||||
return err
|
||||
return core.Wrap(err, "interface")
|
||||
}
|
||||
|
||||
for _, peer := range wg.Peer {
|
||||
if err := m.applyWireguardPeerConfig(ring, peer); err != nil {
|
||||
err = core.Wrapf(err, "%s: wg%v:%s", m.Name, ring, addr)
|
||||
return err
|
||||
err := m.applyWireguardPeerConfig(ring, peer)
|
||||
switch {
|
||||
case errors.Is(err, ErrUnknownNode):
|
||||
// ignore unknown peers
|
||||
m.warn(nil).
|
||||
WithField("subsystem", "wireguard").
|
||||
WithField("node", m.Name).
|
||||
WithField("peer", peer.Endpoint.Host).
|
||||
WithField("ring", ring).
|
||||
Print("ignoring unknown endpoint")
|
||||
case err != nil:
|
||||
return core.Wrap(err, "peer")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -158,6 +166,10 @@ func (m *Machine) applyRingInfo(ring int, new *RingInfo) error {
|
||||
cur, _ := m.getRingInfo(ring)
|
||||
if cur == nil {
|
||||
// first, append
|
||||
m.debug().
|
||||
WithField("node", m.Name).
|
||||
WithField("ring", ring).
|
||||
Print("found")
|
||||
m.Rings = append(m.Rings, new)
|
||||
return nil
|
||||
}
|
||||
@@ -183,8 +195,10 @@ func (m *Machine) applyWireguardPeerConfig(ring int, pc wireguard.PeerConfig) er
|
||||
switch {
|
||||
case !found:
|
||||
// unknown
|
||||
return core.Wrap(ErrUnknownNode, pc.Endpoint.Host)
|
||||
case ring == 1 && m.zone != peer.zone:
|
||||
// invalid zone
|
||||
return core.Wrap(ErrInvalidNode, peer.Name)
|
||||
default:
|
||||
// apply RingInfo
|
||||
ri := &RingInfo{
|
||||
@@ -197,8 +211,6 @@ func (m *Machine) applyWireguardPeerConfig(ring int, pc wireguard.PeerConfig) er
|
||||
|
||||
return peer.applyRingInfo(ring, ri)
|
||||
}
|
||||
|
||||
return fmt.Errorf("%q: invalid peer endpoint", pc.Endpoint.Host)
|
||||
}
|
||||
|
||||
func (m *Machine) applyZoneNodeID(zoneID, nodeID int) error {
|
||||
@@ -1,10 +1,13 @@
|
||||
package zones
|
||||
package cluster
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/netip"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"darvaza.org/core"
|
||||
)
|
||||
|
||||
// LookupNetIP uses the DNS Resolver to get the public addresses associated
|
||||
@@ -30,21 +33,32 @@ func (m *Machine) UpdatePublicAddresses() error {
|
||||
|
||||
func (m *Machine) init() error {
|
||||
if err := m.setID(); err != nil {
|
||||
return err
|
||||
return core.Wrap(err, m.Name)
|
||||
}
|
||||
|
||||
for i := 0; i < RingsCount; i++ {
|
||||
if err := m.tryReadWireguardKeys(i); err != nil {
|
||||
return err
|
||||
return core.Wrap(err, m.Name)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *Machine) setID() error {
|
||||
zoneName := m.zone.Name
|
||||
suffix := m.Name[len(zoneName)+1:]
|
||||
|
||||
l := len(zoneName)
|
||||
switch {
|
||||
case len(m.Name) < l+2:
|
||||
return ErrInvalidName
|
||||
case !strings.HasPrefix(m.Name, zoneName):
|
||||
return ErrInvalidName
|
||||
case m.Name[l] != '-':
|
||||
return ErrInvalidName
|
||||
}
|
||||
|
||||
suffix := m.Name[l+1:]
|
||||
id, err := strconv.ParseInt(suffix, 10, 8)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -57,6 +71,11 @@ func (m *Machine) setID() error {
|
||||
func (m *Machine) scan(opts *ScanOptions) error {
|
||||
for i := 0; i < RingsCount; i++ {
|
||||
if err := m.tryApplyWireguardConfig(i); err != nil {
|
||||
m.error(err).
|
||||
WithField("subsystem", "wireguard").
|
||||
WithField("node", m.Name).
|
||||
WithField("ring", i).
|
||||
Print()
|
||||
return err
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,69 @@
|
||||
package cluster
|
||||
|
||||
import "sort"
|
||||
|
||||
var (
|
||||
_ MachineIterator = Machines(nil)
|
||||
_ sort.Interface = Machines(nil)
|
||||
)
|
||||
|
||||
// A MachineIterator is a set of Machines we can iterate on
|
||||
type MachineIterator interface {
|
||||
ForEachMachine(func(*Machine) bool)
|
||||
}
|
||||
|
||||
// Machines is a list of Machine objects
|
||||
type Machines []*Machine
|
||||
|
||||
// ForEachMachine calls a function for each Machine in the list
|
||||
// until instructed to terminate the loop
|
||||
func (m Machines) ForEachMachine(fn func(*Machine) bool) {
|
||||
for _, p := range m {
|
||||
if fn(p) {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Len returns the number of machines in the list
|
||||
func (m Machines) Len() int {
|
||||
return len(m)
|
||||
}
|
||||
|
||||
// Less implements sort.Interface to sort the list
|
||||
func (m Machines) Less(i, j int) bool {
|
||||
a, b := m[i], m[j]
|
||||
za, zb := a.Zone(), b.Zone()
|
||||
|
||||
switch {
|
||||
case za == zb:
|
||||
return a.ID < b.ID
|
||||
default:
|
||||
return za < zb
|
||||
}
|
||||
}
|
||||
|
||||
// Swap implements sort.Interface to sort the list
|
||||
func (m Machines) Swap(i, j int) {
|
||||
m[i], m[j] = m[j], m[i]
|
||||
}
|
||||
|
||||
// FilterMachines produces a subset of the machines offered by the given
|
||||
// iterator fulfilling a condition
|
||||
func FilterMachines(m MachineIterator, cond func(*Machine) bool) (Machines, int) {
|
||||
var out []*Machine
|
||||
|
||||
if cond == nil {
|
||||
// unconditional
|
||||
cond = func(*Machine) bool { return true }
|
||||
}
|
||||
|
||||
m.ForEachMachine(func(p *Machine) bool {
|
||||
if cond(p) {
|
||||
out = append(out, p)
|
||||
}
|
||||
return false
|
||||
})
|
||||
|
||||
return out, len(out)
|
||||
}
|
||||
@@ -1,4 +1,4 @@
|
||||
package zones
|
||||
package cluster
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
@@ -24,9 +24,9 @@ const (
|
||||
// RingInfo contains represents the Wireguard endpoint details
|
||||
// for a Machine on a particular ring
|
||||
type RingInfo struct {
|
||||
Ring int `toml:"ring"`
|
||||
Enabled bool `toml:"enabled,omitempty"`
|
||||
Keys wireguard.KeyPair `toml:"keys,omitempty"`
|
||||
Ring int
|
||||
Enabled bool
|
||||
Keys wireguard.KeyPair
|
||||
}
|
||||
|
||||
// Merge attempts to combine two RingInfo structs
|
||||
@@ -1,9 +1,11 @@
|
||||
package zones
|
||||
package cluster
|
||||
|
||||
// SyncAll updates all config files
|
||||
func (m *Zones) SyncAll() error {
|
||||
func (m *Cluster) SyncAll() error {
|
||||
for _, fn := range []func() error{
|
||||
m.SyncAllWireguard,
|
||||
m.SyncAllCeph,
|
||||
m.WriteHosts,
|
||||
} {
|
||||
if err := fn(); err != nil {
|
||||
return err
|
||||
@@ -14,7 +16,7 @@ func (m *Zones) SyncAll() error {
|
||||
}
|
||||
|
||||
// SyncAllWireguard updates all wireguard config files
|
||||
func (m *Zones) SyncAllWireguard() error {
|
||||
func (m *Cluster) SyncAllWireguard() error {
|
||||
var err error
|
||||
|
||||
for ring := 0; ring < RingsCount; ring++ {
|
||||
@@ -31,3 +33,13 @@ func (m *Zones) SyncAllWireguard() error {
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// SyncAllCeph updates the ceph.conf file
|
||||
func (m *Cluster) SyncAllCeph() error {
|
||||
cfg, err := m.GenCephConfig()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return m.WriteCephConfig(cfg)
|
||||
}
|
||||
@@ -1,4 +1,4 @@
|
||||
package zones
|
||||
package cluster
|
||||
|
||||
import (
|
||||
"io/fs"
|
||||
@@ -6,19 +6,19 @@ import (
|
||||
)
|
||||
|
||||
var (
|
||||
_ WireguardConfigPruner = (*Zones)(nil)
|
||||
_ WireguardConfigPruner = (*Cluster)(nil)
|
||||
_ WireguardConfigPruner = (*Zone)(nil)
|
||||
_ WireguardConfigPruner = (*Machine)(nil)
|
||||
|
||||
_ WireguardConfigWriter = (*Zones)(nil)
|
||||
_ WireguardConfigWriter = (*Cluster)(nil)
|
||||
_ WireguardConfigWriter = (*Zone)(nil)
|
||||
_ WireguardConfigWriter = (*Machine)(nil)
|
||||
|
||||
_ WireguardConfigSyncer = (*Zones)(nil)
|
||||
_ WireguardConfigSyncer = (*Cluster)(nil)
|
||||
_ WireguardConfigSyncer = (*Zone)(nil)
|
||||
_ WireguardConfigSyncer = (*Machine)(nil)
|
||||
|
||||
_ WireguardKeysWriter = (*Zones)(nil)
|
||||
_ WireguardKeysWriter = (*Cluster)(nil)
|
||||
_ WireguardKeysWriter = (*Zone)(nil)
|
||||
_ WireguardKeysWriter = (*Machine)(nil)
|
||||
)
|
||||
@@ -31,7 +31,7 @@ type WireguardConfigPruner interface {
|
||||
|
||||
// PruneWireguardConfig removes wgN.conf files of machines with
|
||||
// the corresponding ring disabled on all zones
|
||||
func (m *Zones) PruneWireguardConfig(ring int) error {
|
||||
func (m *Cluster) PruneWireguardConfig(ring int) error {
|
||||
return pruneWireguardConfig(m, ring)
|
||||
}
|
||||
|
||||
@@ -76,7 +76,7 @@ type WireguardConfigWriter interface {
|
||||
|
||||
// WriteWireguardConfig rewrites all wgN.conf on all machines
|
||||
// attached to that ring
|
||||
func (m *Zones) WriteWireguardConfig(ring int) error {
|
||||
func (m *Cluster) WriteWireguardConfig(ring int) error {
|
||||
switch ring {
|
||||
case 0:
|
||||
return writeWireguardConfig(m, m, ring)
|
||||
@@ -154,7 +154,7 @@ type WireguardConfigSyncer interface {
|
||||
|
||||
// SyncWireguardConfig updates all wgN.conf files for the specified
|
||||
// ring
|
||||
func (m *Zones) SyncWireguardConfig(ring int) error {
|
||||
func (m *Cluster) SyncWireguardConfig(ring int) error {
|
||||
switch ring {
|
||||
case 0:
|
||||
return syncWireguardConfig(m, m, ring)
|
||||
@@ -214,7 +214,7 @@ type WireguardKeysWriter interface {
|
||||
}
|
||||
|
||||
// WriteWireguardKeys rewrites all wgN.{key,pub} files
|
||||
func (m *Zones) WriteWireguardKeys(ring int) error {
|
||||
func (m *Cluster) WriteWireguardKeys(ring int) error {
|
||||
return writeWireguardKeys(m, ring)
|
||||
}
|
||||
|
||||
@@ -0,0 +1,68 @@
|
||||
package cluster
|
||||
|
||||
import (
|
||||
"io/fs"
|
||||
)
|
||||
|
||||
var (
|
||||
_ MachineIterator = (*Zone)(nil)
|
||||
)
|
||||
|
||||
// A ZoneIterator is a set of Zones we can iterate on.
// ForEachZone invokes the callback once per Zone until the
// callback returns true, which terminates the iteration early.
type ZoneIterator interface {
	ForEachZone(func(*Zone) bool)
}
|
||||
|
||||
// A Zone is a set of machines in close proximity and strong
// affinity.
type Zone struct {
	zones *Cluster // back-reference to the owning Cluster

	// embedded logger, excluded from JSON/YAML serialization
	logger `json:"-" yaml:"-"`

	ID   int    // numeric zone identifier
	Name string // human-readable zone name

	Machines // machines belonging to this zone
}
|
||||
|
||||
func (z *Zone) String() string {
|
||||
return z.Name
|
||||
}
|
||||
|
||||
// SetGateway configures a machine to be the zone's ring0 gateway
|
||||
func (z *Zone) SetGateway(gatewayID int, enabled bool) error {
|
||||
var err error
|
||||
var found bool
|
||||
|
||||
z.ForEachMachine(func(p *Machine) bool {
|
||||
if p.ID == gatewayID {
|
||||
found = true
|
||||
err = p.SetGateway(enabled)
|
||||
|
||||
return true
|
||||
}
|
||||
return false
|
||||
})
|
||||
|
||||
switch {
|
||||
case err != nil:
|
||||
return err
|
||||
case !found:
|
||||
return fs.ErrNotExist
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// GatewayIDs returns the list of IDs of machines that act as ring0 gateways
|
||||
func (z *Zone) GatewayIDs() ([]int, int) {
|
||||
var out []int
|
||||
z.ForEachMachine(func(p *Machine) bool {
|
||||
if p.IsGateway() {
|
||||
out = append(out, p.ID)
|
||||
}
|
||||
return false
|
||||
})
|
||||
|
||||
return out, len(out)
|
||||
}
|
||||
@@ -1,189 +0,0 @@
|
||||
// Package zones contains information about the cluster
|
||||
package zones
|
||||
|
||||
import (
|
||||
"io/fs"
|
||||
"sort"
|
||||
|
||||
"darvaza.org/resolver"
|
||||
)
|
||||
|
||||
var (
|
||||
_ MachineIterator = Machines(nil)
|
||||
_ sort.Interface = Machines(nil)
|
||||
|
||||
_ MachineIterator = (*Zone)(nil)
|
||||
_ MachineIterator = (*Zones)(nil)
|
||||
_ ZoneIterator = (*Zones)(nil)
|
||||
)
|
||||
|
||||
// A MachineIterator is a set of Machines we can iterate on.
// ForEachMachine invokes the callback once per Machine until the
// callback returns true, which terminates the iteration early.
type MachineIterator interface {
	ForEachMachine(func(*Machine) bool)
}
|
||||
|
||||
// A ZoneIterator is a set of Zones we can iterate on.
// ForEachZone invokes the callback once per Zone until the
// callback returns true, which terminates the iteration early.
type ZoneIterator interface {
	ForEachZone(func(*Zone) bool)
}
|
||||
|
||||
// Machines is a list of Machine objects. It implements both
// MachineIterator and sort.Interface (ordered by zone, then by ID).
type Machines []*Machine
|
||||
|
||||
// ForEachMachine calls a function for each Machine in the list
|
||||
// until instructed to terminate the loop
|
||||
func (m Machines) ForEachMachine(fn func(*Machine) bool) {
|
||||
for _, p := range m {
|
||||
if fn(p) {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Len returns the number of machines in the list
|
||||
func (m Machines) Len() int {
|
||||
return len(m)
|
||||
}
|
||||
|
||||
// Less implements sort.Interface to sort the list
|
||||
func (m Machines) Less(i, j int) bool {
|
||||
a, b := m[i], m[j]
|
||||
za, zb := a.Zone(), b.Zone()
|
||||
|
||||
switch {
|
||||
case za == zb:
|
||||
return a.ID < b.ID
|
||||
default:
|
||||
return za < zb
|
||||
}
|
||||
}
|
||||
|
||||
// Swap implements sort.Interface to sort the list, exchanging
// the machines at positions i and j in place.
func (m Machines) Swap(i, j int) {
	m[i], m[j] = m[j], m[i]
}
|
||||
|
||||
// FilterMachines produces a subset of the machines offered by the given
|
||||
// iterator fulfilling a condition
|
||||
func FilterMachines(m MachineIterator, cond func(*Machine) bool) (Machines, int) {
|
||||
var out []*Machine
|
||||
|
||||
if cond == nil {
|
||||
// unconditional
|
||||
cond = func(*Machine) bool { return true }
|
||||
}
|
||||
|
||||
m.ForEachMachine(func(p *Machine) bool {
|
||||
if cond(p) {
|
||||
out = append(out, p)
|
||||
}
|
||||
return false
|
||||
})
|
||||
|
||||
return out, len(out)
|
||||
}
|
||||
|
||||
// Zone represents one zone in a cluster
type Zone struct {
	zones *Zones // back-reference to the owning Zones collection

	ID   int    `toml:"id"`   // numeric zone identifier
	Name string `toml:"name"` // human-readable zone name

	Machines `toml:"machines"` // machines belonging to this zone
}
|
||||
|
||||
// String implements fmt.Stringer, identifying the zone by its Name.
func (z *Zone) String() string {
	return z.Name
}
|
||||
|
||||
// SetGateway configures a machine to be the zone's ring0 gateway
// (or clears the flag when enabled is false). The target machine is
// located by ID; fs.ErrNotExist is returned when no machine in this
// zone has the given ID.
func (z *Zone) SetGateway(gatewayID int, enabled bool) error {
	var err error
	var found bool

	z.ForEachMachine(func(p *Machine) bool {
		if p.ID == gatewayID {
			found = true
			err = p.SetGateway(enabled)

			// target handled; stop iterating
			return true
		}
		return false
	})

	switch {
	case err != nil:
		// the machine rejected the change
		return err
	case !found:
		// no machine with that ID in this zone
		return fs.ErrNotExist
	default:
		return nil
	}
}
|
||||
|
||||
// GatewayIDs returns the list of IDs of machines that act as ring0 gateways,
// together with the number of entries in that list.
func (z *Zone) GatewayIDs() ([]int, int) {
	var out []int
	z.ForEachMachine(func(p *Machine) bool {
		if p.IsGateway() {
			out = append(out, p.ID)
		}
		// always return false so every machine is visited
		return false
	})

	return out, len(out)
}
|
||||
|
||||
// Zones represents all zones in a cluster
type Zones struct {
	dir      fs.FS             // backing filesystem — presumably the config directory; TODO confirm against loader
	resolver resolver.Resolver // resolver used for name lookups
	domain   string            // cluster DNS domain — NOTE(review): likely e.g. "jpi.cloud"; confirm against callers

	Zones []*Zone `toml:"zones"` // all zones, as declared in the configuration
}
|
||||
|
||||
// ForEachMachine calls a function for each Machine in the cluster
|
||||
// until instructed to terminate the loop
|
||||
func (m *Zones) ForEachMachine(fn func(*Machine) bool) {
|
||||
m.ForEachZone(func(z *Zone) bool {
|
||||
var term bool
|
||||
|
||||
z.ForEachMachine(func(p *Machine) bool {
|
||||
term = fn(p)
|
||||
return term
|
||||
})
|
||||
|
||||
return term
|
||||
})
|
||||
}
|
||||
|
||||
// ForEachZone calls a function for each Zone in the cluster
|
||||
// until instructed to terminate the loop
|
||||
func (m *Zones) ForEachZone(fn func(*Zone) bool) {
|
||||
for _, p := range m.Zones {
|
||||
if fn(p) {
|
||||
// terminate
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// GetMachineByName looks for a machine with the specified
|
||||
// name on any zone
|
||||
func (m *Zones) GetMachineByName(name string) (*Machine, bool) {
|
||||
var out *Machine
|
||||
|
||||
if name != "" {
|
||||
m.ForEachMachine(func(p *Machine) bool {
|
||||
if p.Name == name {
|
||||
out = p
|
||||
}
|
||||
|
||||
return out != nil
|
||||
})
|
||||
}
|
||||
|
||||
return out, out != nil
|
||||
}
|
||||
Reference in New Issue
Block a user