Compare commits


16 Commits

Author SHA1 Message Date
amery 1fe1cf940d Merge pull request 'ceph: add initial ceph support. reading and writing m/ceph.conf' (#9)
Reviewed-on: #9
2023-09-05 21:35:52 +02:00
amery f10ea1dc22 jpictl: write m/ceph.conf on sync
Signed-off-by: Alejandro Mery <amery@jpi.io>
2023-09-05 16:48:17 +00:00
amery ac87757b06 ceph: zones.Zones.WriteCephConfig() and ceph.Config.WriteTo()
Signed-off-by: Alejandro Mery <amery@jpi.io>
2023-09-05 16:48:17 +00:00
amery fe081a4297 env: set ceph monitors variables
They indicate the ceph monitors in the specified zone:

* MON{zoneID}_NAME
* MON{zoneID}_ID
* MON{zoneID}_IP

Signed-off-by: Alejandro Mery <amery@jpi.io>
2023-09-05 16:48:17 +00:00
amery cea8362fe6 zones: extend scan to ensure every zone has a ceph monitor
Signed-off-by: Alejandro Mery <amery@jpi.io>
2023-09-05 16:48:17 +00:00
amery b772ec0a3d zones: store ceph FSID on scan
Signed-off-by: Alejandro Mery <amery@jpi.io>
2023-09-05 16:48:17 +00:00
amery 77ad016e99 zones: set Machine.CephMonitor if it's referenced as a monitor in ceph.conf
Signed-off-by: Alejandro Mery <amery@jpi.io>
2023-09-05 16:48:17 +00:00
amery bf4bfeb3fc zones: introduce GenCephConfig()
Signed-off-by: Alejandro Mery <amery@jpi.io>
2023-09-05 16:48:17 +00:00
amery e3ab931eb1 zones: introduce Zone.GetCephMonitors()
Returns the local ceph monitors, assigning one if there
is none. Non-gateway nodes are preferred when a monitor
is set automatically.

Signed-off-by: Alejandro Mery <amery@jpi.io>
2023-09-05 16:48:17 +00:00
amery 05e04c758b zones: introduce Zones.GetCephConfig() accessor for m/ceph.conf
Signed-off-by: Alejandro Mery <amery@jpi.io>
2023-09-05 16:48:17 +00:00
amery 94011a3a03 ceph: add NewConfigFromReader() and initial ceph.conf parser
Signed-off-by: Alejandro Mery <amery@jpi.io>
2023-09-05 16:48:17 +00:00
amery 025b9072b4 zones: introduce Machine.CephMonitor field
Signed-off-by: Alejandro Mery <amery@jpi.io>
2023-09-05 12:24:55 +00:00
amery 0fb8c1d44b zones: introduce Zones.CephFSID and Zones.GetCephFSID()
the accessor doesn't yet generate one when needed

Signed-off-by: Alejandro Mery <amery@jpi.io>
2023-09-05 12:24:55 +00:00
amery a8849b747c vscode: add ceph to the dictionary
Signed-off-by: Alejandro Mery <amery@jpi.io>
2023-09-05 12:24:55 +00:00
amery 879d2b4d1c chore: update dependencies
Signed-off-by: Alejandro Mery <amery@jpi.io>
2023-09-05 12:24:38 +00:00
amery ff4bb97599 vscode: add jpictl, zerolog and darvaza to the dictionary
Signed-off-by: Alejandro Mery <amery@jpi.io>
2023-09-05 12:19:51 +00:00
13 changed files with 589 additions and 21 deletions
+8
@@ -0,0 +1,8 @@
{
"cSpell.words": [
"ceph",
"darvaza",
"jpictl",
"zerolog"
]
}
+10 -7
@@ -3,11 +3,13 @@ module git.jpi.io/amery/jpictl
go 1.19
require (
darvaza.org/core v0.9.5
darvaza.org/resolver v0.5.2
darvaza.org/sidecar v0.0.0-20230721122716-b9c54b8adbaf
darvaza.org/slog v0.5.2
asciigoat.org/ini v0.2.5
darvaza.org/core v0.9.8
darvaza.org/resolver v0.5.4
darvaza.org/sidecar v0.0.2
darvaza.org/slog v0.5.3
github.com/burntSushi/toml v0.3.1
github.com/gofrs/uuid/v5 v5.0.0
github.com/hack-pad/hackpadfs v0.2.1
github.com/mgechev/revive v1.3.3
github.com/spf13/cobra v1.7.0
@@ -17,8 +19,9 @@ require (
)
require (
darvaza.org/slog/handlers/filter v0.4.4 // indirect
darvaza.org/slog/handlers/zerolog v0.4.4 // indirect
asciigoat.org/core v0.3.9 // indirect
darvaza.org/slog/handlers/filter v0.4.5 // indirect
darvaza.org/slog/handlers/zerolog v0.4.5 // indirect
github.com/BurntSushi/toml v1.3.2 // indirect
github.com/chavacava/garif v0.1.0 // indirect
github.com/fatih/color v1.15.0 // indirect
@@ -37,7 +40,7 @@ require (
github.com/spf13/pflag v1.0.5 // indirect
golang.org/x/mod v0.12.0 // indirect
golang.org/x/net v0.14.0 // indirect
golang.org/x/sys v0.11.0 // indirect
golang.org/x/sys v0.12.0 // indirect
golang.org/x/text v0.13.0 // indirect
golang.org/x/tools v0.12.0 // indirect
gopkg.in/warnings.v0 v0.1.2 // indirect
+20 -14
@@ -1,15 +1,19 @@
darvaza.org/core v0.9.5 h1:sS5pZFwicaxJIQixEiqkMr9GknVHYL+EbKDMkR/4jDM=
darvaza.org/core v0.9.5/go.mod h1:O3tHBMlw+xB47uGh5CUx7dXAujBAMmD8BCRFPZmIw54=
darvaza.org/resolver v0.5.2 h1:VjHhEr/MJBszeDb7tYlXQ9Bsyh4xrDR7Sd10WAmPD6k=
darvaza.org/resolver v0.5.2/go.mod h1:fFvsVPEFeMzUIWlLG47Go/6uJYtRLb9R8HIgYg3uaxE=
darvaza.org/sidecar v0.0.0-20230721122716-b9c54b8adbaf h1:ya5ZQicBb/GWll3rlqra8No7oJXks7y1m/cJGYBypv4=
darvaza.org/sidecar v0.0.0-20230721122716-b9c54b8adbaf/go.mod h1:by+bPsMa7Rxc/ZYG1qBunrtKocv/DkrPBmyFlmq/j2Q=
darvaza.org/slog v0.5.2 h1:8TG1WyHjOyh2vW6t3pjzZVaWzpko5MIIpeI7LWqHFvs=
darvaza.org/slog v0.5.2/go.mod h1:HAkEpxTA/mkiLNUXJo5qsCh8EVCtA3evje8GAaCDWHI=
darvaza.org/slog/handlers/filter v0.4.4 h1:b2e2T9fQzMdJ0ia+f6b7kw9/T9GFwhFCKob/2tqhGGU=
darvaza.org/slog/handlers/filter v0.4.4/go.mod h1:cQlJWuolB6guLug09sX/8Zrzct++M6SPCGvXR37E7Cc=
darvaza.org/slog/handlers/zerolog v0.4.4 h1:OR1ASvH1fBCq3t85t4OU6oJPPuqMB1tsDoSpsh6HVJU=
darvaza.org/slog/handlers/zerolog v0.4.4/go.mod h1:t60TeEbFcMLo74CkXC2S0rKlnwF4ixZyBR4fqIJV1GE=
asciigoat.org/core v0.3.9 h1:hgDDz4ecm3ZvehX++m8A/IzAt+B5oDPiRtxatzfUHPQ=
asciigoat.org/core v0.3.9/go.mod h1:CAaHwyw8MpAq4a1MYtN2dxJrsK+hmIdW50OndaQZYPI=
asciigoat.org/ini v0.2.5 h1:4gRIp9rU+XQt8+HMqZO5R7GavMv9Yl2+N+je6djDIAE=
asciigoat.org/ini v0.2.5/go.mod h1:gmXzJ9XFqf1NLk5nQkj04USQ4tMtdRJHNQX6vp3DzjU=
darvaza.org/core v0.9.8 h1:luLxgfUc2pzuusYPo/Z/dC/qr9XZPKpSQw8/kS7zNUM=
darvaza.org/core v0.9.8/go.mod h1:Dbme64naxeshQfxcVJX9ZT7AiGyIY8kldfuELVtf8mw=
darvaza.org/resolver v0.5.4 h1:dlSBNV14yYsp7Kg7ipwYOMNsLbrpeXa8Z0HBTa0Ryxs=
darvaza.org/resolver v0.5.4/go.mod h1:vHMkQUmHjaetFqG2ZLZJiQHsXEMGoTOFGm+NXwfndhE=
darvaza.org/sidecar v0.0.2 h1:4H8FUxc43kkLjxdShN1CoxLTcoHQsZjDVwm7kt6eIK0=
darvaza.org/sidecar v0.0.2/go.mod h1:yFC3Qt3j+uS7n9CMpLxwrA68z+FNJhENoenBc9zBJJo=
darvaza.org/slog v0.5.3 h1:sQzmZXgqRh9oFMKBwEYrEpucLvKJVZxaxa2bHIA6GJ0=
darvaza.org/slog v0.5.3/go.mod h1:59d+yi+C7gn4pDDuwbbOKawERpdXthFFk1Yc+Sv6XB0=
darvaza.org/slog/handlers/filter v0.4.5 h1:CX1bMzldd67e3y3s3Sh4jK8Lyo0WMvTGBB2lD315jhc=
darvaza.org/slog/handlers/filter v0.4.5/go.mod h1:OuH9rHYg9CIErTJCZliMnFexBfP/HJ9PZ1V1VwSCZ1g=
darvaza.org/slog/handlers/zerolog v0.4.5 h1:W4cgGORx4wImr+RL96CWSQGTdkZzKX6YHXPSYJvdoB4=
darvaza.org/slog/handlers/zerolog v0.4.5/go.mod h1:mCoh/mIl8Nsa6Yu1Um7d7cos6RuEJzgaTXaX5LDRUao=
github.com/BurntSushi/toml v1.3.2 h1:o7IhLm0Msx3BaB+n3Ag7L8EVlByGnpq14C4YWiu/gL8=
github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
github.com/burntSushi/toml v0.3.1 h1:Hu1cOEC2qtKULZJCzym5tyA35bZr3HREuolgiAzMlhY=
@@ -26,6 +30,8 @@ github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBD
github.com/fatih/structtag v1.2.0 h1:/OdNE99OxoI/PqaW/SuSK9uxxT3f/tcSZgon/ssNSx4=
github.com/fatih/structtag v1.2.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/gofrs/uuid/v5 v5.0.0 h1:p544++a97kEL+svbcFbCQVM9KFu0Yo25UoISXGNNH9M=
github.com/gofrs/uuid/v5 v5.0.0/go.mod h1:CDOjlDMVAtN56jqyRUZh58JT31Tiw7/oQyEXZV+9bD8=
github.com/hack-pad/hackpadfs v0.2.1 h1:FelFhIhv26gyjujoA/yeFO+6YGlqzmc9la/6iKMIxMw=
github.com/hack-pad/hackpadfs v0.2.1/go.mod h1:khQBuCEwGXWakkmq8ZiFUvUZz84ZkJ2KNwKvChs4OrU=
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
@@ -84,8 +90,8 @@ golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM=
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.12.0 h1:CM0HF96J0hcLAwsHPJZjfdNzs0gftsLfgKt57wWHJ0o=
golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k=
golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/tools v0.12.0 h1:YW6HUoUmYBpwSgyaGaZq1fHjrBjX1rlpZ54T6mu2kss=
+2
@@ -0,0 +1,2 @@
// Package ceph deals with ceph config
package ceph
+67
@@ -0,0 +1,67 @@
package ceph
import (
"bytes"
"fmt"
"io"
"net/netip"
"strings"
"github.com/gofrs/uuid/v5"
"asciigoat.org/ini/basic"
)
// Config represents a ceph.conf file
type Config struct {
Global GlobalConfig `ini:"global"`
}
// GlobalConfig represents the [global] section of a ceph.conf file
type GlobalConfig struct {
FSID uuid.UUID `ini:"fsid"`
Monitors []string `ini:"mon_initial_members,comma"`
MonitorsAddr []netip.Addr `ini:"mon_host,comma"`
ClusterNetwork netip.Prefix `ini:"cluster_network"`
}
// WriteTo writes the ceph [Config] onto the provided [io.Writer]
func (cfg *Config) WriteTo(w io.Writer) (int64, error) {
var buf bytes.Buffer
writeGlobalToBuffer(&buf, &cfg.Global)
return buf.WriteTo(w)
}
func writeGlobalToBuffer(w *bytes.Buffer, c *GlobalConfig) {
_, _ = w.WriteString("[global]\n")
_, _ = fmt.Fprintf(w, "%s = %s\n", "fsid", c.FSID.String())
_, _ = fmt.Fprintf(w, "%s = %s\n", "mon_initial_members", strings.Join(c.Monitors, ", "))
_, _ = fmt.Fprintf(w, "%s = %s\n", "mon_host", joinAddrs(c.MonitorsAddr, ", "))
_, _ = fmt.Fprintf(w, "%s = %s\n", "cluster_network", c.ClusterNetwork.String())
}
func joinAddrs(addrs []netip.Addr, sep string) string {
s := make([]string, len(addrs))
for i, addr := range addrs {
s[i] = addr.String()
}
return strings.Join(s, sep)
}
// NewConfigFromReader parses the ceph.conf file
func NewConfigFromReader(r io.Reader) (*Config, error) {
doc, err := basic.Decode(r)
if err != nil {
return nil, err
}
cfg, err := newConfigFromDocument(doc)
if err != nil {
return nil, err
}
return cfg, nil
}
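
A usage sketch of the writer shown above; the FSID, monitor names and addresses are illustrative:

package main

import (
	"net/netip"
	"os"

	"github.com/gofrs/uuid/v5"

	"git.jpi.io/amery/jpictl/pkg/ceph"
)

func main() {
	cfg := &ceph.Config{
		Global: ceph.GlobalConfig{
			// illustrative values only
			FSID:     uuid.FromStringOrNil("6bdfac9a-53e4-4d23-9f5b-7a1d42a4d2d1"),
			Monitors: []string{"alpha", "beta"},
			MonitorsAddr: []netip.Addr{
				netip.MustParseAddr("10.1.1.1"),
				netip.MustParseAddr("10.2.1.1"),
			},
			ClusterNetwork: netip.MustParsePrefix("10.0.0.0/8"),
		},
	}

	// expected output, per writeGlobalToBuffer:
	// [global]
	// fsid = 6bdfac9a-53e4-4d23-9f5b-7a1d42a4d2d1
	// mon_initial_members = alpha, beta
	// mon_host = 10.1.1.1, 10.2.1.1
	// cluster_network = 10.0.0.0/8
	_, _ = cfg.WriteTo(os.Stdout)
}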
+110
@@ -0,0 +1,110 @@
package ceph
import (
"io/fs"
"net/netip"
"asciigoat.org/ini/basic"
"asciigoat.org/ini/parser"
"darvaza.org/core"
)
var sectionMap = map[string]func(*Config, *basic.Section) error{
"global": loadGlobalConfSection,
}
func loadConfSection(out *Config, src *basic.Section) error {
h, ok := sectionMap[src.Key]
if !ok {
return core.Wrapf(fs.ErrInvalid, "unknown section %q", src.Key)
}
return h(out, src)
}
func loadGlobalConfSection(out *Config, src *basic.Section) error {
var cfg GlobalConfig
for _, field := range src.Fields {
if err := loadGlobalConfField(&cfg, field); err != nil {
return core.Wrap(err, "global")
}
}
out.Global = cfg
return nil
}
// revive:disable:cyclomatic
// revive:disable:cognitive-complexity
func loadGlobalConfField(cfg *GlobalConfig, field basic.Field) error {
// revive:enable:cyclomatic
// revive:enable:cognitive-complexity
// TODO: refactor when asciigoat's ini parser learns to do reflection
switch field.Key {
case "fsid":
if !core.IsZero(cfg.FSID) {
return core.Wrapf(fs.ErrInvalid, "duplicate field %q", field.Key)
}
err := cfg.FSID.UnmarshalText([]byte(field.Value))
switch {
case err != nil:
return core.Wrap(err, field.Key)
default:
return nil
}
case "mon_host":
entries, _ := parser.SplitCommaArray(field.Value)
for _, s := range entries {
var addr netip.Addr
if err := addr.UnmarshalText([]byte(s)); err != nil {
return core.Wrap(err, field.Key)
}
cfg.MonitorsAddr = append(cfg.MonitorsAddr, addr)
}
return nil
case "mon_initial_members":
entries, _ := parser.SplitCommaArray(field.Value)
cfg.Monitors = append(cfg.Monitors, entries...)
return nil
case "cluster_network":
if !core.IsZero(cfg.ClusterNetwork) {
return core.Wrapf(fs.ErrInvalid, "duplicate field %q", field.Key)
}
err := cfg.ClusterNetwork.UnmarshalText([]byte(field.Value))
switch {
case err != nil:
return core.Wrap(err, field.Key)
default:
return nil
}
}
return nil
}
func newConfigFromDocument(doc *basic.Document) (*Config, error) {
var out Config
if len(doc.Global) > 0 {
err := core.Wrap(fs.ErrInvalid, "fields before the first section")
return nil, err
}
for i := range doc.Sections {
src := &doc.Sections[i]
if err := loadConfSection(&out, src); err != nil {
return nil, err
}
}
return &out, nil
}
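
A sketch exercising the parser entry point against these field handlers; all sample values are illustrative:

package main

import (
	"fmt"
	"strings"

	"git.jpi.io/amery/jpictl/pkg/ceph"
)

const sample = `[global]
fsid = 6bdfac9a-53e4-4d23-9f5b-7a1d42a4d2d1
mon_initial_members = alpha, beta
mon_host = 10.1.1.1, 10.2.1.1
cluster_network = 10.0.0.0/8
`

func main() {
	cfg, err := ceph.NewConfigFromReader(strings.NewReader(sample))
	if err != nil {
		panic(err)
	}
	fmt.Println(cfg.Global.Monitors)     // [alpha beta]
	fmt.Println(cfg.Global.MonitorsAddr) // [10.1.1.1 10.2.1.1]
}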
+118
@@ -0,0 +1,118 @@
package zones
import (
"bytes"
"net/netip"
"sort"
"darvaza.org/core"
"github.com/gofrs/uuid/v5"
"git.jpi.io/amery/jpictl/pkg/ceph"
)
// GetCephFSID returns the cluster's Ceph FSID
func (m *Zones) GetCephFSID() (uuid.UUID, error) {
if core.IsZero(m.CephFSID) {
// TODO: generate one
return uuid.Nil, nil
}
return m.CephFSID, nil
}
// GetCephConfig reads the ceph.conf file
func (m *Zones) GetCephConfig() (*ceph.Config, error) {
data, err := m.ReadFile("ceph.conf")
if err != nil {
return nil, err
}
r := bytes.NewReader(data)
return ceph.NewConfigFromReader(r)
}
// WriteCephConfig writes the ceph.conf file
func (m *Zones) WriteCephConfig(cfg *ceph.Config) error {
f, err := m.CreateTruncFile("ceph.conf")
if err != nil {
return err
}
defer f.Close()
_, err = cfg.WriteTo(f)
return err
}
// GenCephConfig prepares a ceph.Config using the cluster information
func (m *Zones) GenCephConfig() (*ceph.Config, error) {
fsid, err := m.GetCephFSID()
if err != nil {
return nil, err
}
cfg := &ceph.Config{
Global: ceph.GlobalConfig{
FSID: fsid,
ClusterNetwork: netip.PrefixFrom(
netip.AddrFrom4([4]byte{10, 0, 0, 0}),
8,
),
},
}
m.ForEachZone(func(z *Zone) bool {
for _, p := range z.GetCephMonitors() {
addr, _ := RingOneAddress(z.ID, p.ID)
cfg.Global.Monitors = append(cfg.Global.Monitors, p.Name)
cfg.Global.MonitorsAddr = append(cfg.Global.MonitorsAddr, addr)
}
return false
})
return cfg, nil
}
// GetCephMonitors returns the set of Ceph monitors in the zone
func (z *Zone) GetCephMonitors() Machines {
var mons Machines
var first, second *Machine
z.ForEachMachine(func(p *Machine) bool {
switch {
case p.CephMonitor:
// it is a monitor
mons = append(mons, p)
case len(mons) > 0:
// zone has a monitor
case first == nil && !p.IsGateway():
// first option for monitor
first = p
case second == nil:
// second option for monitor
second = p
}
return false
})
switch {
case len(mons) > 0:
// ready
case first != nil:
// make first option our monitor
first.CephMonitor = true
mons = append(mons, first)
case second != nil:
// make second option our monitor
second.CephMonitor = true
mons = append(mons, second)
default:
// zone without machines??
panic("unreachable")
}
sort.Sort(mons)
return mons
}
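
GetCephMonitors keeps existing monitors, and only when a zone has none does it promote a machine, preferring non-gateway nodes. A simplified standalone restatement of that precedence over plain values (it returns a single pick, where the real method collects every monitor); the node type and names are illustrative:

package main

import "fmt"

type node struct {
	name             string
	monitor, gateway bool
}

// pickMonitor mirrors the precedence: an existing monitor wins,
// otherwise the first non-gateway, otherwise any remaining node.
func pickMonitor(nodes []node) string {
	var first, second string
	for _, n := range nodes {
		switch {
		case n.monitor:
			return n.name
		case first == "" && !n.gateway:
			first = n.name
		case second == "":
			second = n.name
		}
	}
	if first != "" {
		return first
	}
	return second
}

func main() {
	fmt.Println(pickMonitor([]node{
		{name: "gw1", gateway: true},
		{name: "node1"},
	})) // node1: the non-gateway is preferred over the gateway
}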
+183
@@ -0,0 +1,183 @@
package zones
import (
"net/netip"
"os"
"strings"
"darvaza.org/core"
"git.jpi.io/amery/jpictl/pkg/ceph"
)
// CephMissingMonitorError is an error that contains ceph
// monitors present in ceph.conf but not found on the cluster
type CephMissingMonitorError struct {
Names []string
Addrs []netip.Addr
}
func (err *CephMissingMonitorError) appendName(name string) {
err.Names = append(err.Names, name)
}
func (err *CephMissingMonitorError) appendAddr(addr netip.Addr) {
err.Addrs = append(err.Addrs, addr)
}
// OK reports whether this instance should not be treated as an error
func (err CephMissingMonitorError) OK() bool {
switch {
case len(err.Names) > 0:
return false
case len(err.Addrs) > 0:
return false
default:
return true
}
}
func (err CephMissingMonitorError) Error() string {
if !err.OK() {
var buf strings.Builder
_, _ = buf.WriteString("missing:")
err.writeNames(&buf)
err.writeAddrs(&buf)
return buf.String()
}
// no error
return ""
}
func (err *CephMissingMonitorError) writeNames(w *strings.Builder) {
if len(err.Names) > 0 {
_, _ = w.WriteString(" mon_initial_members:")
for i, name := range err.Names {
if i != 0 {
_, _ = w.WriteRune(',')
}
_, _ = w.WriteString(name)
}
}
}
func (err *CephMissingMonitorError) writeAddrs(w *strings.Builder) {
if len(err.Addrs) > 0 {
_, _ = w.WriteString(" mon_host:")
for i, addr := range err.Addrs {
if i != 0 {
_, _ = w.WriteRune(',')
}
_, _ = w.WriteString(addr.String())
}
}
}
// AsError returns nil if the instance is actually OK
func (err *CephMissingMonitorError) AsError() error {
if err == nil || err.OK() {
return nil
}
return err
}
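
CephMissingMonitorError is built up incrementally during a scan, so an empty instance must not count as a failure; OK and AsError exist to avoid handing callers a non-nil error interface wrapping an otherwise empty value. A test-style sketch, assuming it lives in a zones package test file with fmt imported:

func ExampleCephMissingMonitorError() {
	var check CephMissingMonitorError
	fmt.Println(check.AsError()) // <nil>: an empty instance is not an error

	check.appendName("gamma")    // "gamma" is an illustrative monitor name
	fmt.Println(check.AsError()) // missing: mon_initial_members:gamma
}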
type cephScanTODO struct {
names map[string]bool
addrs map[string]bool
}
func (todo *cephScanTODO) checkMachine(p *Machine) bool {
// on ceph all addresses are ring1
ring1, _ := RingOneAddress(p.Zone(), p.ID)
addr := ring1.String()
if _, found := todo.names[p.Name]; found {
// found on the TODO by name
todo.names[p.Name] = true
todo.addrs[addr] = true
return true
}
if _, found := todo.addrs[addr]; found {
// found on the TODO by address
todo.names[p.Name] = true
todo.addrs[addr] = true
return true
}
return false
}
func (todo *cephScanTODO) Missing() error {
var check CephMissingMonitorError
for name, found := range todo.names {
if !found {
check.appendName(name)
}
}
for addr, found := range todo.addrs {
if !found {
var a netip.Addr
// it wouldn't be on the map if it wasn't valid
_ = a.UnmarshalText([]byte(addr))
check.appendAddr(a)
}
}
return check.AsError()
}
func newCephScanTODO(cfg *ceph.Config) *cephScanTODO {
todo := &cephScanTODO{
names: make(map[string]bool),
addrs: make(map[string]bool),
}
for _, name := range cfg.Global.Monitors {
todo.names[name] = false
}
for _, addr := range cfg.Global.MonitorsAddr {
todo.addrs[addr.String()] = false
}
return todo
}
func (m *Zones) scanCephMonitors(_ *ScanOptions) error {
cfg, err := m.GetCephConfig()
switch {
case os.IsNotExist(err):
err = nil
case err != nil:
return err
}
if cfg != nil {
// store FSID
m.CephFSID = cfg.Global.FSID
// flag monitors based on config
todo := newCephScanTODO(cfg)
m.ForEachMachine(func(p *Machine) bool {
p.CephMonitor = todo.checkMachine(p)
return false
})
if err := todo.Missing(); err != nil {
return core.Wrap(err, "ceph")
}
}
// make sure every zone has one
m.ForEachZone(func(z *Zone) bool {
_ = z.GetCephMonitors()
return false
})
return nil
}
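
scanCephMonitors treats a missing m/ceph.conf as a fresh cluster instead of a failure, while any other read error still aborts the scan. The same idiom standalone, using errors.Is(err, fs.ErrNotExist) as the modern spelling of the os.IsNotExist check above; the path is illustrative:

package main

import (
	"errors"
	"fmt"
	"io/fs"
	"os"
)

// loadOptional mirrors the scanCephMonitors idiom: a missing file is
// not an error, anything else is.
func loadOptional(path string) ([]byte, error) {
	data, err := os.ReadFile(path)
	switch {
	case errors.Is(err, fs.ErrNotExist):
		return nil, nil
	case err != nil:
		return nil, err
	}
	return data, nil
}

func main() {
	data, err := loadOptional("m/ceph.conf")
	if err != nil {
		panic(err)
	}
	fmt.Printf("read %d bytes\n", len(data)) // 0 if the file doesn't exist yet
}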
+50
@@ -59,6 +59,15 @@ func (m *Env) writeEnvZone(w io.Writer, z *Zone) {
// ZONE{zoneID}_GW
gateways, _ := z.GatewayIDs()
m.writeEnvVarInts(w, gateways, "ZONE%v_%s", zoneID, "GW")
// Ceph
monitors := z.GetCephMonitors()
// MON{zoneID}_NAME
m.writeEnvVar(w, genEnvZoneCephMonNames(monitors), "MON%v_%s", zoneID, "NAME")
// MON{zoneID}_IP
m.writeEnvVar(w, genEnvZoneCephMonIPs(monitors), "MON%v_%s", zoneID, "IP")
// MON{zoneID}_ID
m.writeEnvVar(w, genEnvZoneCephMonIDs(monitors), "MON%v_%s", zoneID, "ID")
}
func (m *Env) writeEnvVarInts(w io.Writer, value []int, name string, args ...any) {
@@ -111,3 +120,44 @@ func genEnvZoneNodes(z *Zone) string {
}
return ""
}
func genEnvZoneCephMonNames(m Machines) string {
var buf strings.Builder
m.ForEachMachine(func(p *Machine) bool {
if buf.Len() > 0 {
_, _ = buf.WriteRune(' ')
}
_, _ = buf.WriteString(p.Name)
return false
})
return buf.String()
}
func genEnvZoneCephMonIPs(m Machines) string {
var buf strings.Builder
m.ForEachMachine(func(p *Machine) bool {
addr, _ := RingOneAddress(p.Zone(), p.ID)
if buf.Len() > 0 {
_, _ = buf.WriteRune(' ')
}
_, _ = buf.WriteString(addr.String())
return false
})
return buf.String()
}
func genEnvZoneCephMonIDs(m Machines) string {
var buf strings.Builder
m.ForEachMachine(func(p *Machine) bool {
if buf.Len() > 0 {
_, _ = buf.WriteRune(' ')
}
_, _ = fmt.Fprintf(&buf, "%v", p.ID)
return false
})
return buf.String()
}
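
The three MON{zoneID}_* helpers all space-join one attribute per monitor. A standalone sketch of the resulting values; the names, IDs and the exact quoting applied by writeEnvVar are assumptions:

package main

import (
	"fmt"
	"strings"
)

func main() {
	names := []string{"alpha", "beta"}
	ips := []string{"10.1.1.1", "10.2.1.1"}
	ids := []string{"1", "2"}

	// e.g. for zone 1, assuming writeEnvVar emits NAME=value lines:
	fmt.Printf("MON1_NAME=%s\n", strings.Join(names, " ")) // MON1_NAME=alpha beta
	fmt.Printf("MON1_IP=%s\n", strings.Join(ips, " "))     // MON1_IP=10.1.1.1 10.2.1.1
	fmt.Printf("MON1_ID=%s\n", strings.Join(ids, " "))     // MON1_ID=1 2
}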
+2
@@ -15,6 +15,8 @@ type Machine struct {
PublicAddresses []netip.Addr `toml:"public,omitempty" json:"public,omitempty" yaml:"public,omitempty"`
Rings []*RingInfo `toml:"rings,omitempty" json:"rings,omitempty" yaml:"rings,omitempty"`
CephMonitor bool `toml:"ceph_monitor,omitempty" json:"ceph_monitor,omitempty" yaml:"ceph_monitor,omitempty"`
}
// revive:enable:line-length-limit
+1
@@ -12,6 +12,7 @@ func (m *Zones) scan(opts *ScanOptions) error {
m.scanZoneIDs,
m.scanSort,
m.scanGateways,
m.scanCephMonitors,
} {
if err := fn(opts); err != nil {
return err
+11
@@ -4,6 +4,7 @@ package zones
func (m *Zones) SyncAll() error {
for _, fn := range []func() error{
m.SyncAllWireguard,
m.SyncAllCeph,
} {
if err := fn(); err != nil {
return err
@@ -31,3 +32,13 @@ func (m *Zones) SyncAllWireguard() error {
return nil
}
// SyncAllCeph updates the ceph.conf file
func (m *Zones) SyncAllCeph() error {
cfg, err := m.GenCephConfig()
if err != nil {
return err
}
return m.WriteCephConfig(cfg)
}
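
With SyncAllCeph added to the SyncAll pipeline, a single call now refreshes both the wireguard and ceph configuration. A minimal sketch, assuming a *zones.Zones value m obtained from the package's usual load path:

// a minimal sketch; m is a *zones.Zones obtained elsewhere
if err := m.SyncAll(); err != nil {
	log.Fatal(err) // runs SyncAllWireguard then SyncAllCeph, in order
}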
+7
@@ -6,6 +6,7 @@ import (
"sort"
"darvaza.org/resolver"
"github.com/gofrs/uuid/v5"
)
var (
@@ -135,15 +136,21 @@ func (z *Zone) GatewayIDs() ([]int, int) {
return out, len(out)
}
// revive:disable:line-length-limit
// Zones represents all zones in a cluster
type Zones struct {
dir fs.FS
resolver resolver.Resolver
domain string
CephFSID uuid.UUID `toml:"ceph_fsid,omitempty" json:"ceph_fsid,omitempty" yaml:"ceph_fsid,omitempty"`
Zones []*Zone `toml:"zones"`
}
// revive:enable:line-length-limit
// ForEachMachine calls a function for each Machine in the cluster
// until instructed to terminate the loop
func (m *Zones) ForEachMachine(fn func(*Machine) bool) {
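
The ForEachMachine doc-comment above describes the callback contract; every call site in this change returns false to visit all machines. A short sketch counting monitors, assuming (as those call sites suggest) that returning true stops the loop early:

// counting ceph monitors across the cluster; illustrative only
var monitors int
m.ForEachMachine(func(p *Machine) bool {
	if p.CephMonitor {
		monitors++
	}
	return false // false keeps iterating; true would stop early
})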