ceph: add initial ceph support. reading and writing m/ceph.conf #9
Merged
amery
merged 13 commits from pr-amery-ceph
into main
1 year ago
13 changed files with 561 additions and 0 deletions
@ -0,0 +1,2 @@
|
||||
// Package ceph handles reading and writing ceph.conf configuration files.
|
||||
package ceph |
@ -0,0 +1,67 @@
|
||||
package ceph |
||||
|
||||
import ( |
||||
"bytes" |
||||
"fmt" |
||||
"io" |
||||
"net/netip" |
||||
"strings" |
||||
|
||||
"github.com/gofrs/uuid/v5" |
||||
|
||||
"asciigoat.org/ini/basic" |
||||
) |
||||
|
||||
// Config represents a ceph.conf file.
type Config struct {
	// Global holds the settings of the [global] section.
	Global GlobalConfig `ini:"global"`
}
||||
|
||||
// GlobalConfig represents the [global] section of a ceph.conf file.
type GlobalConfig struct {
	// FSID is the cluster's fsid value.
	FSID uuid.UUID `ini:"fsid"`
	// Monitors holds the mon_initial_members entries (monitor names).
	Monitors []string `ini:"mon_initial_members,comma"`
	// MonitorsAddr holds the mon_host entries (monitor addresses).
	MonitorsAddr []netip.Addr `ini:"mon_host,comma"`
	// ClusterNetwork is the cluster_network prefix.
	ClusterNetwork netip.Prefix `ini:"cluster_network"`
}
||||
|
||||
// WriteTo writes a Wireguard [Config] onto the provided [io.Writer]
|
||||
func (cfg *Config) WriteTo(w io.Writer) (int64, error) { |
||||
var buf bytes.Buffer |
||||
|
||||
writeGlobalToBuffer(&buf, &cfg.Global) |
||||
return buf.WriteTo(w) |
||||
} |
||||
|
||||
func writeGlobalToBuffer(w *bytes.Buffer, c *GlobalConfig) { |
||||
_, _ = w.WriteString("[global]\n") |
||||
_, _ = fmt.Fprintf(w, "%s = %s\n", "fsid", c.FSID.String()) |
||||
_, _ = fmt.Fprintf(w, "%s = %s\n", "mon_initial_members", strings.Join(c.Monitors, ", ")) |
||||
_, _ = fmt.Fprintf(w, "%s = %s\n", "mon_host", joinAddrs(c.MonitorsAddr, ", ")) |
||||
_, _ = fmt.Fprintf(w, "%s = %s\n", "cluster_network", c.ClusterNetwork.String()) |
||||
} |
||||
|
||||
func joinAddrs(addrs []netip.Addr, sep string) string { |
||||
s := make([]string, len(addrs)) |
||||
|
||||
for i, addr := range addrs { |
||||
s[i] = addr.String() |
||||
} |
||||
|
||||
return strings.Join(s, sep) |
||||
} |
||||
|
||||
// NewConfigFromReader parses the ceph.conf file
|
||||
func NewConfigFromReader(r io.Reader) (*Config, error) { |
||||
doc, err := basic.Decode(r) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
|
||||
cfg, err := newConfigFromDocument(doc) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
|
||||
return cfg, nil |
||||
} |
@ -0,0 +1,110 @@
|
||||
package ceph |
||||
|
||||
import ( |
||||
"io/fs" |
||||
"net/netip" |
||||
|
||||
"asciigoat.org/ini/basic" |
||||
"asciigoat.org/ini/parser" |
||||
|
||||
"darvaza.org/core" |
||||
) |
||||
|
||||
// sectionMap routes each known ceph.conf section name to the
// function that loads it into a Config.
var sectionMap = map[string]func(*Config, *basic.Section) error{
	"global": loadGlobalConfSection,
}
||||
|
||||
func loadConfSection(out *Config, src *basic.Section) error { |
||||
h, ok := sectionMap[src.Key] |
||||
if !ok { |
||||
return core.Wrapf(fs.ErrInvalid, "unknown section %q", src.Key) |
||||
} |
||||
|
||||
return h(out, src) |
||||
} |
||||
|
||||
func loadGlobalConfSection(out *Config, src *basic.Section) error { |
||||
var cfg GlobalConfig |
||||
|
||||
for _, field := range src.Fields { |
||||
if err := loadGlobalConfField(&cfg, field); err != nil { |
||||
return core.Wrap(err, "global") |
||||
} |
||||
} |
||||
|
||||
out.Global = cfg |
||||
return nil |
||||
} |
||||
|
||||
// revive:disable:cyclomatic
|
||||
// revive:disable:cognitive-complexity
|
||||
|
||||
func loadGlobalConfField(cfg *GlobalConfig, field basic.Field) error { |
||||
// revive:enable:cyclomatic
|
||||
// revive:enable:cognitive-complexity
|
||||
|
||||
// TODO: refactor when asciigoat's ini parser learns to do reflection
|
||||
|
||||
switch field.Key { |
||||
case "fsid": |
||||
if !core.IsZero(cfg.FSID) { |
||||
return core.Wrapf(fs.ErrInvalid, "duplicate field %q", field.Key) |
||||
} |
||||
|
||||
err := cfg.FSID.UnmarshalText([]byte(field.Value)) |
||||
switch { |
||||
case err != nil: |
||||
return core.Wrap(err, field.Key) |
||||
default: |
||||
return nil |
||||
} |
||||
case "mon_host": |
||||
entries, _ := parser.SplitCommaArray(field.Value) |
||||
for _, s := range entries { |
||||
var addr netip.Addr |
||||
|
||||
if err := addr.UnmarshalText([]byte(s)); err != nil { |
||||
return core.Wrap(err, field.Key) |
||||
} |
||||
|
||||
cfg.MonitorsAddr = append(cfg.MonitorsAddr, addr) |
||||
} |
||||
return nil |
||||
case "mon_initial_members": |
||||
entries, _ := parser.SplitCommaArray(field.Value) |
||||
cfg.Monitors = append(cfg.Monitors, entries...) |
||||
return nil |
||||
case "cluster_network": |
||||
if !core.IsZero(cfg.ClusterNetwork) { |
||||
err := core.Wrap(fs.ErrInvalid, "fields before the first section") |
||||
return err |
||||
} |
||||
|
||||
err := cfg.ClusterNetwork.UnmarshalText([]byte(field.Value)) |
||||
switch { |
||||
case err != nil: |
||||
return core.Wrap(err, field.Key) |
||||
default: |
||||
return nil |
||||
} |
||||
} |
||||
return nil |
||||
} |
||||
|
||||
func newConfigFromDocument(doc *basic.Document) (*Config, error) { |
||||
var out Config |
||||
|
||||
if len(doc.Global) > 0 { |
||||
err := core.Wrap(fs.ErrInvalid, "fields before the first section") |
||||
return nil, err |
||||
} |
||||
|
||||
for i := range doc.Sections { |
||||
src := &doc.Sections[i] |
||||
if err := loadConfSection(&out, src); err != nil { |
||||
return nil, err |
||||
} |
||||
} |
||||
|
||||
return &out, nil |
||||
} |
@ -0,0 +1,118 @@
|
||||
package zones |
||||
|
||||
import ( |
||||
"bytes" |
||||
"net/netip" |
||||
"sort" |
||||
|
||||
"darvaza.org/core" |
||||
"github.com/gofrs/uuid/v5" |
||||
|
||||
"git.jpi.io/amery/jpictl/pkg/ceph" |
||||
) |
||||
|
||||
// GetCephFSID returns our Ceph's FSID
|
||||
func (m *Zones) GetCephFSID() (uuid.UUID, error) { |
||||
if core.IsZero(m.CephFSID) { |
||||
// TODO: generate one
|
||||
return uuid.Nil, nil |
||||
} |
||||
return m.CephFSID, nil |
||||
} |
||||
|
||||
// GetCephConfig reads the ceph.conf file
|
||||
func (m *Zones) GetCephConfig() (*ceph.Config, error) { |
||||
data, err := m.ReadFile("ceph.conf") |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
|
||||
r := bytes.NewReader(data) |
||||
return ceph.NewConfigFromReader(r) |
||||
} |
||||
|
||||
// WriteCephConfig writes the ceph.conf file
|
||||
func (m *Zones) WriteCephConfig(cfg *ceph.Config) error { |
||||
f, err := m.CreateTruncFile("ceph.conf") |
||||
if err != nil { |
||||
return err |
||||
} |
||||
defer f.Close() |
||||
|
||||
_, err = cfg.WriteTo(f) |
||||
return err |
||||
} |
||||
|
||||
// GenCephConfig prepares a ceph.Config using the cluster information:
// the stored FSID, a hard-coded 10.0.0.0/8 cluster network, and the
// name plus ring-1 address of every Ceph monitor in every zone.
func (m *Zones) GenCephConfig() (*ceph.Config, error) {
	fsid, err := m.GetCephFSID()
	if err != nil {
		return nil, err
	}

	cfg := &ceph.Config{
		Global: ceph.GlobalConfig{
			FSID: fsid,
			// cluster_network is fixed to 10.0.0.0/8
			ClusterNetwork: netip.PrefixFrom(
				netip.AddrFrom4([4]byte{10, 0, 0, 0}),
				8,
			),
		},
	}

	m.ForEachZone(func(z *Zone) bool {
		for _, p := range z.GetCephMonitors() {
			// NOTE(review): the RingOneAddress error is discarded, so a
			// zero address would be appended on failure — confirm that
			// can't happen for valid zone/machine IDs.
			addr, _ := RingOneAddress(z.ID, p.ID)

			cfg.Global.Monitors = append(cfg.Global.Monitors, p.Name)
			cfg.Global.MonitorsAddr = append(cfg.Global.MonitorsAddr, addr)
		}
		// returning false keeps iterating over the remaining zones
		return false
	})

	return cfg, nil
}
||||
|
||||
// GetCephMonitors returns the set of Ceph monitors on
// the zone, sorted.
//
// When the zone has no monitor yet, one machine is promoted as a
// side effect (its CephMonitor flag is set): preferably the first
// non-gateway machine seen, otherwise the second candidate scanned.
// A zone with no machines at all is considered a bug and panics.
func (z *Zone) GetCephMonitors() Machines {
	var mons Machines
	var first, second *Machine

	z.ForEachMachine(func(p *Machine) bool {
		switch {
		case p.CephMonitor:
			// it is a monitor
			mons = append(mons, p)
		case len(mons) > 0:
			// zone has a monitor, no candidates needed
		case first == nil && !p.IsGateway():
			// first option for monitor
			first = p
		case second == nil:
			// second option for monitor (may be a gateway)
			second = p
		}

		// returning false keeps iterating over all machines
		return false
	})

	switch {
	case len(mons) > 0:
		// ready
	case first != nil:
		// make first option our monitor
		first.CephMonitor = true
		mons = append(mons, first)
	case second != nil:
		// make second option our monitor
		second.CephMonitor = true
		mons = append(mons, second)
	default:
		// zone without machines??
		panic("unreachable")
	}

	sort.Sort(mons)
	return mons
}
@ -0,0 +1,183 @@
|
||||
package zones |
||||
|
||||
import ( |
||||
"net/netip" |
||||
"os" |
||||
"strings" |
||||
|
||||
"darvaza.org/core" |
||||
"git.jpi.io/amery/jpictl/pkg/ceph" |
||||
) |
||||
|
||||
// CephMissingMonitorError is an error that contains ceph
// monitors present in ceph.conf but not found on the cluster.
type CephMissingMonitorError struct {
	// Names holds mon_initial_members entries without a matching machine.
	Names []string
	// Addrs holds mon_host entries without a matching machine.
	Addrs []netip.Addr
}
||||
|
||||
// appendName records a monitor name that wasn't found on the cluster.
func (err *CephMissingMonitorError) appendName(name string) {
	err.Names = append(err.Names, name)
}
||||
|
||||
// appendAddr records a monitor address that wasn't found on the cluster.
func (err *CephMissingMonitorError) appendAddr(addr netip.Addr) {
	err.Addrs = append(err.Addrs, addr)
}
||||
|
||||
// OK tells if this instance actual shouldn't be treated as an error
|
||||
func (err CephMissingMonitorError) OK() bool { |
||||
switch { |
||||
case len(err.Names) > 0: |
||||
return false |
||||
case len(err.Addrs) > 0: |
||||
return false |
||||
default: |
||||
return true |
||||
} |
||||
} |
||||
|
||||
func (err CephMissingMonitorError) Error() string { |
||||
if !err.OK() { |
||||
var buf strings.Builder |
||||
|
||||
_, _ = buf.WriteString("missing:") |
||||
err.writeNames(&buf) |
||||
err.writeAddrs(&buf) |
||||
|
||||
return buf.String() |
||||
} |
||||
|
||||
// no error
|
||||
return "" |
||||
} |
||||
|
||||
func (err *CephMissingMonitorError) writeNames(w *strings.Builder) { |
||||
if len(err.Names) > 0 { |
||||
_, _ = w.WriteString(" mon_initial_members:") |
||||
for i, name := range err.Names { |
||||
if i != 0 { |
||||
_, _ = w.WriteRune(',') |
||||
} |
||||
_, _ = w.WriteString(name) |
||||
} |
||||
} |
||||
} |
||||
|
||||
func (err *CephMissingMonitorError) writeAddrs(w *strings.Builder) { |
||||
if len(err.Addrs) > 0 { |
||||
_, _ = w.WriteString(" mon_host:") |
||||
for i, addr := range err.Addrs { |
||||
if i != 0 { |
||||
_, _ = w.WriteRune(',') |
||||
} |
||||
_, _ = w.WriteString(addr.String()) |
||||
} |
||||
} |
||||
} |
||||
|
||||
// AsError returns nil if the instance is actually OK
|
||||
func (err *CephMissingMonitorError) AsError() error { |
||||
if err == nil || err.OK() { |
||||
return nil |
||||
} |
||||
|
||||
return err |
||||
} |
||||
|
||||
// cephScanTODO tracks which monitors listed in ceph.conf have been
// matched against a cluster machine. Map values start false and
// flip to true once the entry has been found.
type cephScanTODO struct {
	// names is keyed by mon_initial_members entry.
	names map[string]bool
	// addrs is keyed by the string form of each mon_host entry.
	addrs map[string]bool
}
||||
|
||||
func (todo *cephScanTODO) checkMachine(p *Machine) bool { |
||||
// on ceph all addresses are ring1
|
||||
ring1, _ := RingOneAddress(p.Zone(), p.ID) |
||||
addr := ring1.String() |
||||
|
||||
if _, found := todo.names[p.Name]; found { |
||||
// found on the TODO by name
|
||||
todo.names[p.Name] = true |
||||
todo.addrs[addr] = true |
||||
return true |
||||
} |
||||
|
||||
if _, found := todo.addrs[addr]; found { |
||||
// found on the TODO by address
|
||||
todo.names[p.Name] = true |
||||
todo.addrs[addr] = true |
||||
return true |
||||
} |
||||
|
||||
return false |
||||
} |
||||
|
||||
func (todo *cephScanTODO) Missing() error { |
||||
var check CephMissingMonitorError |
||||
|
||||
for name, found := range todo.names { |
||||
if !found { |
||||
check.appendName(name) |
||||
} |
||||
} |
||||
|
||||
for addr, found := range todo.addrs { |
||||
if !found { |
||||
var a netip.Addr |
||||
// it wouldn't be on the map if it wasn't valid
|
||||
_ = a.UnmarshalText([]byte(addr)) |
||||
|
||||
check.appendAddr(a) |
||||
} |
||||
} |
||||
|
||||
return check.AsError() |
||||
} |
||||
|
||||
func newCephScanTODO(cfg *ceph.Config) *cephScanTODO { |
||||
todo := &cephScanTODO{ |
||||
names: make(map[string]bool), |
||||
addrs: make(map[string]bool), |
||||
} |
||||
|
||||
for _, name := range cfg.Global.Monitors { |
||||
todo.names[name] = false |
||||
} |
||||
|
||||
for _, addr := range cfg.Global.MonitorsAddr { |
||||
todo.addrs[addr.String()] = false |
||||
} |
||||
|
||||
return todo |
||||
} |
||||
|
||||
// scanCephMonitors loads ceph.conf (when present), stores its FSID,
// flags the machines it lists as monitors, and fails if any
// configured monitor has no matching machine. Finally it ensures
// every zone ends up with at least one monitor.
func (m *Zones) scanCephMonitors(_ *ScanOptions) error {
	cfg, err := m.GetCephConfig()
	switch {
	case os.IsNotExist(err):
		// a missing ceph.conf is fine; start from scratch.
		// NOTE(review): os.IsNotExist doesn't unwrap wrapped errors;
		// errors.Is(err, fs.ErrNotExist) would be more robust —
		// confirm what m.ReadFile returns.
		err = nil
	case err != nil:
		return err
	}

	if cfg != nil {
		// store FSID
		m.CephFSID = cfg.Global.FSID

		// flag monitors based on config
		todo := newCephScanTODO(cfg)
		m.ForEachMachine(func(p *Machine) bool {
			p.CephMonitor = todo.checkMachine(p)
			// returning false keeps iterating over all machines
			return false
		})
		if err := todo.Missing(); err != nil {
			return core.Wrap(err, "ceph")
		}
	}

	// make sure every zone has one
	m.ForEachZone(func(z *Zone) bool {
		_ = z.GetCephMonitors()
		return false
	})
	return nil
}
Loading…
Reference in new issue