diff --git a/cmd/updateMonitoringMixins.go b/cmd/updateMonitoringMixins.go
new file mode 100644
index 0000000..a76c9b9
--- /dev/null
+++ b/cmd/updateMonitoringMixins.go
@@ -0,0 +1,86 @@
+package cmd
+
+import (
+	"fmt"
+	"github.com/rancher/ob-charts-tool/internal/cmd/updatemonitoringmixins"
+	"github.com/rancher/ob-charts-tool/internal/cmd/updatemonitoringmixins/config"
+	"github.com/rancher/ob-charts-tool/internal/util"
+	"github.com/sirupsen/logrus"
+	"github.com/spf13/cobra"
+)
+
+var (
+	debugMode    = false
+	useCache     = true
+	disableCache = false
+	cacheDir     string
+	workingDir   string
+	pathMode     = updatemonitoringmixins.BasePathModeOBTeam
+)
+
+var updateMonitoringMixinsCmd = &cobra.Command{
+	Use:   "updateMonitoringMixins",
+	Short: "Update all the monitoring chart mixins",
+	PreRun: func(cmd *cobra.Command, args []string) {
+		// Flags are parsed by the time PreRun fires, so resolve the cache opt-out here.
+		if disableCache {
+			useCache = false
+		}
+		ctx := &config.AppContext{
+			DebugMode: false,
+		}
+		config.SetContext(ctx)
+	},
+	Args: func(_ *cobra.Command, args []string) error {
+
+		if len(args) == 0 && workingDir != "" {
+			pathMode = updatemonitoringmixins.BasePathModeCWD
+		}
+
+		// Check if there's one argument provided
+		if len(args) == 1 || workingDir != "" {
+			return nil
+		}
+
+		return fmt.Errorf("you must provide a target monitoring chart version")
+	},
+	Run: updateMonitoringMixinsHandler,
+}
+
+func init() {
+	updateMonitoringMixinsCmd.PersistentFlags().BoolVarP(&disableCache, "disableCache", "C", false, "disable the use of caching")
+	maybeCacheDir, err := util.GetCacheDir("ob-charts-tool")
+	if err == nil {
+		cacheDir = maybeCacheDir
+	} else {
+		logrus.Warn("Cache dir setup failed, cache will not work.")
+		logrus.Warnf("attempted using cached directory: %s", maybeCacheDir)
+		useCache = false
+	}
+	updateMonitoringMixinsCmd.PersistentFlags().StringVarP(&workingDir, "working-dir", "w", "", "Specify the working directory to use")
+	updateMonitoringMixinsCmd.PersistentFlags().BoolVarP(&debugMode, "debug", "D", false, "enable debug mode")
+	rootCmd.AddCommand(updateMonitoringMixinsCmd)
+}
+
+func updateMonitoringMixinsHandler(_ *cobra.Command, args []string) {
+
+	updatemonitoringmixins.PrepareGitCache(useCache, cacheDir)
+	chartTargetRoot := updatemonitoringmixins.DetermineTargetRoot(args, pathMode, workingDir)
+
+	ctx := config.GetContext()
+	ctx.ChartRootDir = chartTargetRoot
+	ctx.DebugMode = debugMode
+
+	err := updatemonitoringmixins.VerifySystemDependencies()
+	if err != nil {
+		logrus.Fatal(err)
+		return
+	}
+
+	mixinErr := updatemonitoringmixins.UpdateMonitoringMixins(useCache)
+	if mixinErr != nil {
+		// Report the mixin error itself; err is nil at this point.
+		logrus.Fatal(mixinErr)
+		return
+	}
+}
diff --git a/cmd/verifyChartImages.go b/cmd/verifyChartImages.go index 3c93903..ff8eb35 100644 --- a/cmd/verifyChartImages.go +++ b/cmd/verifyChartImages.go @@ -3,6 +3,7 @@ package cmd import ( "bytes" "fmt" + "github.com/rancher/ob-charts-tool/internal/charts" "io" "os" "os/exec" @@ -69,7 +70,7 @@ func verifyChartImagesHandler(_ *cobra.Command, args []string) { panic(err) } - chartTargetRoot := fmt.Sprintf("%s/charts/rancher-monitoring/%s", cwd, targetVersion) + chartTargetRoot := charts.BaseMonitoringVersionDir(cwd, targetVersion) if _, err := os.Stat(chartTargetRoot); os.IsNotExist(err) { panic(fmt.Sprintf("Cannot find a monitoring chart with the provided version (%s)", targetVersion)) } diff --git a/go.mod b/go.mod index 67594b6..3c40caf 100644 --- a/go.mod +++ b/go.mod @@ -4,7 +4,7 @@ go 1.23.6 require ( github.com/Masterminds/semver/v3 v3.3.1 - github.com/go-git/go-git/v5 v5.16.0 + github.com/go-git/go-git/v5 v5.16.2 github.com/jedib0t/go-pretty v4.3.0+incompatible github.com/sirupsen/logrus v1.9.3 github.com/spf13/cobra v1.9.1 @@ -13,19 +13,19 @@ require ( - dario.cat/mergo v1.0.0 // indirect + dario.cat/mergo v1.0.2 // indirect github.com/Microsoft/go-winio v0.6.2 // indirect - github.com/ProtonMail/go-crypto v1.1.6 // indirect + github.com/ProtonMail/go-crypto v1.3.0 // indirect github.com/asaskevich/govalidator
v0.0.0-20230301143203-a9d515a09cc2 // indirect github.com/cloudflare/circl v1.6.1 // indirect github.com/cyphar/filepath-securejoin v0.4.1 // indirect github.com/emirpasic/gods v1.18.1 // indirect - github.com/fsnotify/fsnotify v1.8.0 // indirect + github.com/fsnotify/fsnotify v1.9.0 // indirect github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 // indirect github.com/go-git/go-billy/v5 v5.6.2 // indirect github.com/go-openapi/errors v0.22.0 // indirect github.com/go-openapi/strfmt v0.23.0 // indirect - github.com/go-viper/mapstructure/v2 v2.2.1 // indirect + github.com/go-viper/mapstructure/v2 v2.3.0 // indirect github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 // indirect github.com/google/uuid v1.6.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect @@ -34,24 +34,24 @@ require ( github.com/mattn/go-runewidth v0.0.16 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/oklog/ulid v1.3.1 // indirect - github.com/pelletier/go-toml/v2 v2.2.3 // indirect + github.com/pelletier/go-toml/v2 v2.2.4 // indirect github.com/pjbgf/sha1cd v0.3.2 // indirect github.com/rivo/uniseg v0.2.0 // indirect - github.com/sagikazarmark/locafero v0.7.0 // indirect - github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 // indirect + github.com/sagikazarmark/locafero v0.9.0 // indirect + github.com/sergi/go-diff v1.4.0 // indirect github.com/skeema/knownhosts v1.3.1 // indirect github.com/sourcegraph/conc v0.3.0 // indirect - github.com/spf13/afero v1.12.0 // indirect - github.com/spf13/cast v1.7.1 // indirect + github.com/spf13/afero v1.14.0 // indirect + github.com/spf13/cast v1.9.2 // indirect github.com/spf13/pflag v1.0.6 // indirect github.com/subosito/gotenv v1.6.0 // indirect github.com/xanzy/ssh-agent v0.3.3 // indirect go.mongodb.org/mongo-driver v1.14.0 // indirect - go.uber.org/atomic v1.9.0 // indirect - go.uber.org/multierr v1.9.0 // indirect - golang.org/x/crypto v0.37.0 // indirect - golang.org/x/net 
v0.39.0 // indirect - golang.org/x/sys v0.32.0 // indirect - golang.org/x/text v0.24.0 // indirect + go.uber.org/atomic v1.11.0 // indirect + go.uber.org/multierr v1.11.0 // indirect + golang.org/x/crypto v0.39.0 // indirect + golang.org/x/net v0.41.0 // indirect + golang.org/x/sys v0.33.0 // indirect + golang.org/x/text v0.26.0 // indirect gopkg.in/warnings.v0 v0.1.2 // indirect ) diff --git a/go.sum b/go.sum index d8d3e7c..9b820bd 100644 --- a/go.sum +++ b/go.sum @@ -1,5 +1,7 @@ dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk= dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= +dario.cat/mergo v1.0.2 h1:85+piFYR1tMbRrLcDwR18y4UKJ3aH1Tbzi24VRW1TK8= +dario.cat/mergo v1.0.2/go.mod h1:E/hbnu0NxMFBjpMIE34DRGLWqDy0g5FuKDhCb31ngxA= github.com/Masterminds/semver/v3 v3.3.1 h1:QtNSWtVZ3nBfk8mAOu/B6v7FMJ+NHTIgUPi7rj+4nv4= github.com/Masterminds/semver/v3 v3.3.1/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM= github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY= @@ -7,6 +9,8 @@ github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERo github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= github.com/ProtonMail/go-crypto v1.1.6 h1:ZcV+Ropw6Qn0AX9brlQLAUXfqLBc7Bl+f/DmNxpLfdw= github.com/ProtonMail/go-crypto v1.1.6/go.mod h1:rA3QumHc/FZ8pAHreoekgiAbzpNsfQAosU5td4SnOrE= +github.com/ProtonMail/go-crypto v1.3.0 h1:ILq8+Sf5If5DCpHQp4PbZdS1J7HDFRXz/+xKBiRGFrw= +github.com/ProtonMail/go-crypto v1.3.0/go.mod h1:9whxjD8Rbs29b4XWbB8irEcE8KHMqaR2e7GWU1R+/PE= github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFIImctFaOjnTIavg87rW78vTPkQqLI8= github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= @@ -29,6 +33,8 @@ 
github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHk github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/fsnotify/fsnotify v1.8.0 h1:dAwr6QBTBZIkG8roQaJjGof0pp0EeF+tNV7YBP3F/8M= github.com/fsnotify/fsnotify v1.8.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= +github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k= +github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= github.com/gliderlabs/ssh v0.3.8 h1:a4YXD1V7xMF9g5nTkdfnja3Sxy1PVDCj1Zg4Wb8vY6c= github.com/gliderlabs/ssh v0.3.8/go.mod h1:xYoytBv1sV0aL3CavoDuJIQNURXkkfPA/wxQ1pL1fAU= github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 h1:+zs/tPmkDkHx3U66DAb0lQFJrpS6731Oaa12ikc+DiI= @@ -39,12 +45,16 @@ github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399 h1:eMj github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399/go.mod h1:1OCfN199q1Jm3HZlxleg+Dw/mwps2Wbk9frAWm+4FII= github.com/go-git/go-git/v5 v5.16.0 h1:k3kuOEpkc0DeY7xlL6NaaNg39xdgQbtH5mwCafHO9AQ= github.com/go-git/go-git/v5 v5.16.0/go.mod h1:4Ge4alE/5gPs30F2H1esi2gPd69R0C39lolkucHBOp8= +github.com/go-git/go-git/v5 v5.16.2 h1:fT6ZIOjE5iEnkzKyxTHK1W4HGAsPhqEqiSAssSO77hM= +github.com/go-git/go-git/v5 v5.16.2/go.mod h1:4Ge4alE/5gPs30F2H1esi2gPd69R0C39lolkucHBOp8= github.com/go-openapi/errors v0.22.0 h1:c4xY/OLxUBSTiepAg3j/MHuAv5mJhnf53LLMWFB+u/w= github.com/go-openapi/errors v0.22.0/go.mod h1:J3DmZScxCDufmIMsdOuDHxJbdOGC0xtUynjIx092vXE= github.com/go-openapi/strfmt v0.23.0 h1:nlUS6BCqcnAk0pyhi9Y+kdDVZdZMHfEKQiS4HaMgO/c= github.com/go-openapi/strfmt v0.23.0/go.mod h1:NrtIpfKtWIygRkKVsxh7XQMDQW5HKQl6S5ik2elW+K4= github.com/go-viper/mapstructure/v2 v2.2.1 h1:ZAaOCxANMuZx5RCeg0mBdEZk7DZasvvZIxtHqx8aGss= github.com/go-viper/mapstructure/v2 v2.2.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= +github.com/go-viper/mapstructure/v2 v2.3.0 
h1:27XbWsHIqhbdR5TIC911OfYvgSaW93HM+dX7970Q7jk= +github.com/go-viper/mapstructure/v2 v2.3.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 h1:f+oWsMOmNPc8JmEHVZIycC7hBoQxHH9pNKQORJNozsQ= github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8/go.mod h1:wcDNUvekVysuuOpQKo3191zZyTpiI6se1N1ULghS0sw= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= @@ -76,6 +86,8 @@ github.com/onsi/gomega v1.34.1 h1:EUMJIKUjM8sKjYbtxQI9A4z2o+rruxnzNvpknOXie6k= github.com/onsi/gomega v1.34.1/go.mod h1:kU1QgUvBDLXBJq618Xvm2LUX6rSAfRaFRTcdOeDLwwY= github.com/pelletier/go-toml/v2 v2.2.3 h1:YmeHyLY8mFWbdkNWwpr+qIL2bEqT0o95WSdkNHvL12M= github.com/pelletier/go-toml/v2 v2.2.3/go.mod h1:MfCQTFTvCcUyyvvwm1+G6H/jORL20Xlb6rzQu9GuUkc= +github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0t5Ec4= +github.com/pelletier/go-toml/v2 v2.2.4/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY= github.com/pjbgf/sha1cd v0.3.2 h1:a9wb0bp1oC2TGwStyn0Umc/IGKQnEgF0vVaZ8QF8eo4= github.com/pjbgf/sha1cd v0.3.2/go.mod h1:zQWigSxVmsHEZow5qaLtPYxpcKMMQpa09ixqBxuCS6A= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= @@ -89,8 +101,12 @@ github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7 github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/sagikazarmark/locafero v0.7.0 h1:5MqpDsTGNDhY8sGp0Aowyf0qKsPrhewaLSsFaodPcyo= github.com/sagikazarmark/locafero v0.7.0/go.mod h1:2za3Cg5rMaTMoG/2Ulr9AwtFaIppKXTRYnozin4aB5k= +github.com/sagikazarmark/locafero v0.9.0 h1:GbgQGNtTrEmddYDSAH9QLRyfAHY12md+8YFTqyMTC9k= +github.com/sagikazarmark/locafero v0.9.0/go.mod h1:UBUyz37V+EdMS3hDF3QWIiVr/2dPrx49OMO0Bn0hJqk= github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 h1:n661drycOFuPLCN3Uc8sB6B/s6Z4t2xvBgU1htSHuq8= github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3/go.mod 
h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4= +github.com/sergi/go-diff v1.4.0 h1:n/SP9D5ad1fORl+llWyN+D6qoUETXNZARKjyY2/KVCw= +github.com/sergi/go-diff v1.4.0/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= @@ -100,8 +116,12 @@ github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9yS github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0= github.com/spf13/afero v1.12.0 h1:UcOPyRBYczmFn6yvphxkn9ZEOY65cpwGKb5mL36mrqs= github.com/spf13/afero v1.12.0/go.mod h1:ZTlWwG4/ahT8W7T0WQ5uYmjI9duaLQGy3Q2OAl4sk/4= +github.com/spf13/afero v1.14.0 h1:9tH6MapGnn/j0eb0yIXiLjERO8RB6xIVZRDCX7PtqWA= +github.com/spf13/afero v1.14.0/go.mod h1:acJQ8t0ohCGuMN3O+Pv0V0hgMxNYDlvdk+VTfyZmbYo= github.com/spf13/cast v1.7.1 h1:cuNEagBQEHWN1FnbGEjCXL2szYEXqfJPbP2HNUaca9Y= github.com/spf13/cast v1.7.1/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= +github.com/spf13/cast v1.9.2 h1:SsGfm7M8QOFtEzumm7UZrZdLLquNdzFYfIbEXntcFbE= +github.com/spf13/cast v1.9.2/go.mod h1:jNfB8QC9IA6ZuY2ZjDp0KtFO2LZZlg4S/7bzP6qqeHo= github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo= github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0= github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o= @@ -123,16 +143,24 @@ go.mongodb.org/mongo-driver v1.14.0 h1:P98w8egYRjYe3XDjxhYJagTokP/H6HzlsnojRgZRd go.mongodb.org/mongo-driver v1.14.0/go.mod h1:Vzb0Mk/pa7e6cWw85R4F/endUC3u0U9jGcNU603k65c= go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= 
+go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= go.uber.org/multierr v1.9.0 h1:7fIwc/ZtS0q++VgcfqFDxSBZVv/Xo49/SYnDFupUwlI= go.uber.org/multierr v1.9.0/go.mod h1:X2jQV1h+kxSjClGpnseKVIxpmcjrj7MNnI0bnlfKTVQ= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.37.0 h1:kJNSjF/Xp7kU0iB2Z+9viTPMW4EqqsrywMXLJOOsXSE= golang.org/x/crypto v0.37.0/go.mod h1:vg+k43peMZ0pUMhYmVAWysMK35e6ioLh3wB8ZCAfbVc= +golang.org/x/crypto v0.39.0 h1:SHs+kF4LP+f+p14esP5jAoDpHU8Gu/v9lFRK6IT5imM= +golang.org/x/crypto v0.39.0/go.mod h1:L+Xg3Wf6HoL4Bn4238Z6ft6KfEpN0tJGo53AAPC632U= golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8= golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.39.0 h1:ZCu7HMWDxpXpaiKdhzIfaltL9Lp31x/3fCP11bc6/fY= golang.org/x/net v0.39.0/go.mod h1:X7NRbYVEA+ewNkCNyJ513WmMdQ3BineSwVtN2zD/d+E= +golang.org/x/net v0.41.0 h1:vBTly1HeNPEn3wtREYfy4GZ/NECgw2Cnl+nK6Nz3uvw= +golang.org/x/net v0.41.0/go.mod h1:B/K4NNqkfmg07DQYrbwvSluqCJOOXwUjeb/5lOisjbA= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -141,12 +169,16 @@ golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 
golang.org/x/sys v0.32.0 h1:s77OFDvIQeibCmezSnk/q6iAfkdiQaJi4VzroCFrN20= golang.org/x/sys v0.32.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw= +golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.31.0 h1:erwDkOK1Msy6offm1mOgvspSkslFnIGsFnxOKoufg3o= golang.org/x/term v0.31.0/go.mod h1:R4BeIy7D95HzImkxGkTW1UQTtP54tio2RyHz7PwK0aw= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.24.0 h1:dd5Bzh4yt5KYA8f9CJHCP4FB4D51c2c6JvN37xJJkJ0= golang.org/x/text v0.24.0/go.mod h1:L8rBsPeo2pSS+xqN0d5u2ikmjtmoJbDBT1b7nHvFCdU= +golang.org/x/text v0.26.0 h1:P42AVeLghgTYr4+xUnTRKDMqpar+PtX7KWuNQL21L8M= +golang.org/x/text v0.26.0/go.mod h1:QK15LZJUUQVJxhz7wXgxSy/CJaTFjd0G+YLonydOVQA= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/internal/charts/monitoring.go b/internal/charts/monitoring.go new file mode 100644 index 0000000..1249a42 --- /dev/null +++ b/internal/charts/monitoring.go @@ -0,0 +1,13 @@ +package charts + +import ( + "fmt" +) + +func BaseMonitoringDir(cwd string) string { + return fmt.Sprintf("%s/charts/rancher-monitoring", cwd) +} + +func BaseMonitoringVersionDir(cwd string, version string) string { + return fmt.Sprintf("%s/charts/rancher-monitoring/%s", cwd, version) +} diff --git a/internal/cmd/updatemonitoringmixins/common/main.go b/internal/cmd/updatemonitoringmixins/common/main.go new file mode 100644 index 0000000..d0efed6 --- /dev/null +++ b/internal/cmd/updatemonitoringmixins/common/main.go @@ 
-0,0 +1,53 @@ +package common + +import ( + "github.com/rancher/ob-charts-tool/internal/cmd/updatemonitoringmixins/types" + "gopkg.in/yaml.v3" + "strings" +) + +// LiteralStr is a custom type to represent a literal block string +type LiteralStr string + +func (l LiteralStr) MarshalYAML() (interface{}, error) { + return yaml.Node{ + Kind: yaml.ScalarNode, + Value: string(l), + Style: yaml.LiteralStyle, // This is the key part to make it a |- block + Tag: "!!str", + }, nil +} + +func YamlStrRepr(v interface{}, indent int, escape bool) (string, error) { + var b strings.Builder + encoder := yaml.NewEncoder(&b) + encoder.SetIndent(indent) + err := encoder.Encode(v) + if err != nil { + return "", err + } + + yamlStr := b.String() + if escape { + yamlStr = escapeHelm(yamlStr) + } + + return yamlStr, nil +} + +func escapeHelm(s string) string { + s = strings.ReplaceAll(s, "{{", "{{`{{") + s = strings.ReplaceAll(s, "}}", "}}`}}") + s = strings.ReplaceAll(s, "{{`{{", "{{`{{`}}") + s = strings.ReplaceAll(s, "}}`}}", "{{`}}`}}") + return s +} + +func SetDefaultMaxK8s(kv types.KatesVersions) types.KatesVersions { + if kv.GetMaxKubernetes() == "" { + // Equal to: https://github.com/prometheus-community/helm-charts/blob/0b60795bb66a21cd368b657f0665d67de3e49da9/charts/kube-prometheus-stack/hack/sync_grafana_dashboards.py#L326 + kv.SetMaxKubernetes("9.9.9-9") + } + + return kv +} diff --git a/internal/cmd/updatemonitoringmixins/config/context.go b/internal/cmd/updatemonitoringmixins/config/context.go new file mode 100644 index 0000000..d8e7607 --- /dev/null +++ b/internal/cmd/updatemonitoringmixins/config/context.go @@ -0,0 +1,19 @@ +package config + +type AppContext struct { + ChartRootDir string + DebugMode bool +} + +var appCtx *AppContext + +func SetContext(ctx *AppContext) { + appCtx = ctx +} + +func GetContext() *AppContext { + if appCtx == nil { + panic("AppContext not initialized") + } + return appCtx +} diff --git a/internal/cmd/updatemonitoringmixins/constants/common.go 
b/internal/cmd/updatemonitoringmixins/constants/common.go new file mode 100644 index 0000000..cb2b4c9 --- /dev/null +++ b/internal/cmd/updatemonitoringmixins/constants/common.go @@ -0,0 +1,25 @@ +package constants + +import "github.com/rancher/ob-charts-tool/internal/cmd/updatemonitoringmixins/git" + +var Repos = map[string]git.RepoConfigStatus{ + "etcd": { + Name: "etcd", + RepoURL: "https://github.com/etcd-io/etcd.git", + Branch: "main", + // HeadSha: "7351ab86c054aad7d31d6639b2e841f2c37cd296", // If HeadSha is set we will use that specific SHA over branch + }, + "kube-prometheus": { + Name: "kube-prometheus", + RepoURL: "https://github.com/prometheus-operator/kube-prometheus.git", + Branch: "main", + // HeadSha: "685008710cbb881cd8fce9db1e2f890c9e249903", // If HeadSha is set we will use that specific SHA over branch + }, + "kubernetes-mixin": { + Name: "kubernetes-mixin", + RepoURL: "https://github.com/kubernetes-monitoring/kubernetes-mixin.git", + Branch: "master", + // HeadSha: "834daaa30905d5832c68b7ef8ab41fbedcd9dd4b", // If HeadSha is set we will use that specific SHA over branch + }, + // TODO: in the future we'll maybe add a Rancher source +} diff --git a/internal/cmd/updatemonitoringmixins/constants/dashboards.go b/internal/cmd/updatemonitoringmixins/constants/dashboards.go new file mode 100644 index 0000000..3ccf542 --- /dev/null +++ b/internal/cmd/updatemonitoringmixins/constants/dashboards.go @@ -0,0 +1,121 @@ +package constants + +import ( + "bytes" + "fmt" + "github.com/rancher/ob-charts-tool/internal/cmd/updatemonitoringmixins/config" + "github.com/rancher/ob-charts-tool/internal/cmd/updatemonitoringmixins/pythonish" + "github.com/rancher/ob-charts-tool/internal/cmd/updatemonitoringmixins/types" +) + +func DashboardsSourceCharts() types.DashboardsConfig { + return types.DashboardsConfig{ + types.DashboardFileSource{ + Source: "/files/dashboards/k8s-coredns.json", + DashboardSourceBase: types.DashboardSourceBase{ + Destination: 
"/templates/grafana/dashboards-1.14", + Type: types.DashboardJson, + MinKubernetes: "1.14.0-0", + MulticlusterKey: ".Values.grafana.sidecar.dashboards.multicluster.global.enabled", + }, + }, + types.DashboardURLSource{ + Source: fmt.Sprintf("https://raw.githubusercontent.com/prometheus-operator/kube-prometheus/%s/manifests/grafana-dashboardDefinitions.yaml", Repos["kube-prometheus"].HeadSha), + DashboardSourceBase: types.DashboardSourceBase{ + Destination: "/templates/grafana/dashboards-1.14", + Type: types.DashboardKatesYaml, + MinKubernetes: "1.14.0-0", + MulticlusterKey: ".Values.grafana.sidecar.dashboards.multicluster.global.enabled", + }, + }, + types.DashboardGitSource{ + Repository: Repos["kubernetes-mixin"], + Content: "(import 'dashboards/windows.libsonnet') + (import 'config.libsonnet') + { _config+:: { windowsExporterSelector: 'job=\"windows-exporter\"', }}", + Cwd: ".", + Source: "_mixin.jsonnet", + DashboardSourceBase: types.DashboardSourceBase{ + Destination: "/templates/grafana/dashboards-1.14", + MinKubernetes: "1.14.0-0", + Type: types.DashboardJsonnetMixin, + MulticlusterKey: ".Values.grafana.sidecar.dashboards.multicluster.global.enabled", + }, + MixinVars: map[string]interface{}{}, + }, + types.DashboardGitSource{ + Repository: Repos["etcd"], + Source: "mixin.libsonnet", + Cwd: "contrib/mixin", + DashboardSourceBase: types.DashboardSourceBase{ + Destination: "/templates/grafana/dashboards-1.14", + MinKubernetes: "1.14.0-0", + Type: types.DashboardJsonnetMixin, + MulticlusterKey: "(or .Values.grafana.sidecar.dashboards.multicluster.global.enabled .Values.grafana.sidecar.dashboards.multicluster.etcd.enabled)", + }, + MixinVars: map[string]interface{}{ + "_config+": map[string]interface{}{}, + }, + }, + } +} + +var DashboardsConditionMap = map[string]string{ + "alertmanager-overview": " (or .Values.alertmanager.enabled .Values.alertmanager.forceDeployDashboards)", + "grafana-coredns-k8s": " .Values.coreDns.enabled", + "etcd": " 
.Values.kubeEtcd.enabled", + "apiserver": " .Values.kubeApiServer.enabled", + "controller-manager": " .Values.kubeControllerManager.enabled", + "kubelet": " .Values.kubelet.enabled", + "proxy": " .Values.kubeProxy.enabled", + "scheduler": " .Values.kubeScheduler.enabled", + "node-rsrc-use": " (or .Values.nodeExporter.enabled .Values.nodeExporter.forceDeployDashboards)", + "node-cluster-rsrc-use": " (or .Values.nodeExporter.enabled .Values.nodeExporter.forceDeployDashboards)", + "nodes": " (and (or .Values.nodeExporter.enabled .Values.nodeExporter.forceDeployDashboards) .Values.nodeExporter.operatingSystems.linux.enabled)", + "nodes-aix": " (and (or .Values.nodeExporter.enabled .Values.nodeExporter.forceDeployDashboards) .Values.nodeExporter.operatingSystems.aix.enabled)", + "nodes-darwin": " (and (or .Values.nodeExporter.enabled .Values.nodeExporter.forceDeployDashboards) .Values.nodeExporter.operatingSystems.darwin.enabled)", + "prometheus-remote-write": " .Values.prometheus.prometheusSpec.remoteWriteDashboards", + "k8s-coredns": " .Values.coreDns.enabled", + "k8s-windows-cluster-rsrc-use": " .Values.windowsMonitoring.enabled", + "k8s-windows-node-rsrc-use": " .Values.windowsMonitoring.enabled", + "k8s-resources-windows-cluster": " .Values.windowsMonitoring.enabled", + "k8s-resources-windows-namespace": " .Values.windowsMonitoring.enabled", + "k8s-resources-windows-pod": " .Values.windowsMonitoring.enabled", +} + +const DashboardHeader = `{{- /* +Generated from '%(.Name)s' from %(.URL)s%(.ByLine)s +Do not change in-place! 
In order to change this file first read following link: +https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack +*/ -}} +{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} +{{- if and (or .Values.grafana.enabled .Values.grafana.forceDeployDashboards) (semverCompare ">=%(.MinKubeVersion)s" $kubeTargetVersion) (semverCompare "<%(.MaxKubeVersion)s" $kubeTargetVersion) .Values.grafana.defaultDashboardsEnabled%(.Condition)s }} +apiVersion: v1 +kind: ConfigMap +metadata: + namespace: {{ template "kube-prometheus-stack-grafana.namespace" . }} + name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" $) "%(.Name)s" | trunc 63 | trimSuffix "-" }} + annotations: +{{ toYaml .Values.grafana.sidecar.dashboards.annotations | indent 4 }} + labels: + {{- if $.Values.grafana.sidecar.dashboards.label }} + {{ $.Values.grafana.sidecar.dashboards.label }}: {{ ternary $.Values.grafana.sidecar.dashboards.labelValue "1" (not (empty $.Values.grafana.sidecar.dashboards.labelValue)) | quote }} + {{- end }} + app: {{ template "kube-prometheus-stack.name" $ }}-grafana +{{ include "kube-prometheus-stack.labels" $ | indent 4 }} +data: +` + +func NewDashboardHeader(headerData types.HeaderData) (string, error) { + if config.GetContext().DebugMode { + headerData.ByLine = ` by 'ob-charts-tool'` + } + + templateRenderer := pythonish.NewRenderer() + tmpl, err := templateRenderer.Parse(DashboardHeader) + if err != nil { + return "ERROR", err + } + + var buffer bytes.Buffer + err = tmpl.Execute(&buffer, headerData) + return buffer.String(), err +} diff --git a/internal/cmd/updatemonitoringmixins/constants/rules.go b/internal/cmd/updatemonitoringmixins/constants/rules.go new file mode 100644 index 0000000..4a5aa55 --- /dev/null +++ b/internal/cmd/updatemonitoringmixins/constants/rules.go @@ -0,0 +1,188 @@ +package constants + +import ( + "bytes" + 
"github.com/rancher/ob-charts-tool/internal/cmd/updatemonitoringmixins/config" + "github.com/rancher/ob-charts-tool/internal/cmd/updatemonitoringmixins/pythonish" + "github.com/rancher/ob-charts-tool/internal/cmd/updatemonitoringmixins/types" +) + +func RulesSourceCharts() types.RulesConfig { + return types.RulesConfig{ + { + Repository: Repos["kube-prometheus"], + Source: "main.libsonnet", + Cwd: "", + Destination: "/templates/prometheus/rules-1.14", + MinKubernetes: "1.14.0-0", + Mixin: `local kp = +(import 'jsonnet/kube-prometheus/main.libsonnet') + { +values+:: { + nodeExporter+: { + mixin+: { + _config+: { + fsSelector: '$.Values.defaultRules.node.fsSelector', + }, + }, + }, + common+: { + namespace: 'monitoring', + }, + kubernetesControlPlane+: { + kubeProxy: true, + }, +}, +grafana: {}, +}; + +{ +groups: std.flattenArrays([ +kp[component][resource].spec.groups +for component in std.objectFields(kp) +for resource in std.filter( + function(resource) + kp[component][resource].kind == 'PrometheusRule', + std.objectFields(kp[component]) +) +]), +} +`, + }, + { + Repository: Repos["kubernetes-mixin"], + Source: "windows.libsonnet", + Cwd: "rules", + Destination: "/templates/prometheus/rules-1.14", + MinKubernetes: "1.14.0-0", + Mixin: `local kp = + { prometheusAlerts+:: {}, prometheusRules+:: {}} + + (import "windows.libsonnet") + + {'_config': { + 'clusterLabel': 'cluster', + 'windowsExporterSelector': 'job="windows-exporter"', + 'kubeStateMetricsSelector': 'job="kube-state-metrics"', + }}; + +kp.prometheusAlerts + kp.prometheusRules`, + }, + { + Repository: Repos["etcd"], + Source: "mixin.libsonnet", + Cwd: "contrib/mixin", + Destination: "/templates/prometheus/rules-1.14", + MinKubernetes: "1.14.0-0", + // Override the default etcd_instance_labels to get proper aggregation for etcd instances in k8s clusters (#2720) + // see https://github.com/etcd-io/etcd/blob/1c22e7b36bc5d8543f1646212f2960f9fe503b8c/contrib/mixin/config.libsonnet#L13 + Mixin: `local kp = + { 
prometheusAlerts+:: {}, prometheusRules+:: {}} + + (import "mixin.libsonnet") + + {'_config': { + 'etcd_selector': 'job=~".*etcd.*"', + 'etcd_instance_labels': 'instance, pod', + 'scrape_interval_seconds': 30, + 'clusterLabel': 'job', + }}; + +kp.prometheusAlerts + kp.prometheusRules`, + }, + } +} + +var RulesConditionMap = map[string]string{ + "alertmanager.rules": " .Values.defaultRules.rules.alertmanager", + "config-reloaders": " .Values.defaultRules.rules.configReloaders", + "etcd": " .Values.kubeEtcd.enabled .Values.defaultRules.rules.etcd", + "general.rules": " .Values.defaultRules.rules.general", + "k8s.rules.container_cpu_limits": " .Values.defaultRules.rules.k8sContainerCpuLimits", + "k8s.rules.container_cpu_requests": " .Values.defaultRules.rules.k8sContainerCpuRequests", + "k8s.rules.container_cpu_usage_seconds_total": " .Values.defaultRules.rules.k8sContainerCpuUsageSecondsTotal", + "k8s.rules.container_memory_cache": " .Values.defaultRules.rules.k8sContainerMemoryCache", + "k8s.rules.container_memory_limits": " .Values.defaultRules.rules.k8sContainerMemoryLimits", + "k8s.rules.container_memory_requests": " .Values.defaultRules.rules.k8sContainerMemoryRequests", + "k8s.rules.container_memory_rss": " .Values.defaultRules.rules.k8sContainerMemoryRss", + "k8s.rules.container_memory_swap": " .Values.defaultRules.rules.k8sContainerMemorySwap", + "k8s.rules.container_memory_working_set_bytes": " .Values.defaultRules.rules.k8sContainerMemoryWorkingSetBytes", + "k8s.rules.container_resource": " .Values.defaultRules.rules.k8sContainerResource", + "k8s.rules.pod_owner": " .Values.defaultRules.rules.k8sPodOwner", + "kube-apiserver-availability.rules": " .Values.kubeApiServer.enabled .Values.defaultRules.rules.kubeApiserverAvailability", + "kube-apiserver-burnrate.rules": " .Values.kubeApiServer.enabled .Values.defaultRules.rules.kubeApiserverBurnrate", + "kube-apiserver-histogram.rules": " .Values.kubeApiServer.enabled 
.Values.defaultRules.rules.kubeApiserverHistogram", + "kube-apiserver-slos": " .Values.kubeApiServer.enabled .Values.defaultRules.rules.kubeApiserverSlos", + "kube-prometheus-general.rules": " .Values.defaultRules.rules.kubePrometheusGeneral", + "kube-prometheus-node-recording.rules": " .Values.defaultRules.rules.kubePrometheusNodeRecording", + "kube-scheduler.rules": " .Values.kubeScheduler.enabled .Values.defaultRules.rules.kubeSchedulerRecording", + "kube-state-metrics": " .Values.defaultRules.rules.kubeStateMetrics", + "kubelet.rules": " .Values.kubelet.enabled .Values.defaultRules.rules.kubelet", + "kubernetes-apps": " .Values.defaultRules.rules.kubernetesApps", + "kubernetes-resources": " .Values.defaultRules.rules.kubernetesResources", + "kubernetes-storage": " .Values.defaultRules.rules.kubernetesStorage", + "kubernetes-system": " .Values.defaultRules.rules.kubernetesSystem", + "kubernetes-system-kube-proxy": " .Values.kubeProxy.enabled .Values.defaultRules.rules.kubeProxy", + "kubernetes-system-apiserver": " .Values.defaultRules.rules.kubernetesSystem", + "kubernetes-system-kubelet": " .Values.defaultRules.rules.kubernetesSystem", + "kubernetes-system-controller-manager": " .Values.kubeControllerManager.enabled .Values.defaultRules.rules.kubeControllerManager", + "kubernetes-system-scheduler": " .Values.kubeScheduler.enabled .Values.defaultRules.rules.kubeSchedulerAlerting", + "node-exporter.rules": " .Values.defaultRules.rules.nodeExporterRecording", + "node-exporter": " .Values.defaultRules.rules.nodeExporterAlerting", + "node.rules": " .Values.defaultRules.rules.node", + "node-network": " .Values.defaultRules.rules.network", + "prometheus-operator": " .Values.defaultRules.rules.prometheusOperator", + "prometheus": " .Values.defaultRules.rules.prometheus", + "windows.node.rules": " .Values.windowsMonitoring.enabled .Values.defaultRules.rules.windows", + "windows.pod.rules": " .Values.windowsMonitoring.enabled .Values.defaultRules.rules.windows", +} + 
+var AlertConditionMap = map[string]string{ + "AggregatedAPIDown": `semverCompare ">=1.18.0-0" $kubeTargetVersion`, + "AlertmanagerDown": ".Values.alertmanager.enabled", + "CoreDNSDown": ".Values.kubeDns.enabled", + "KubeAPIDown": ".Values.kubeApiServer.enabled", + "KubeControllerManagerDown": ".Values.kubeControllerManager.enabled", + "KubeletDown": ".Values.prometheusOperator.kubeletService.enabled", + "KubeSchedulerDown": ".Values.kubeScheduler.enabled", + "KubeStateMetricsDown": ".Values.kubeStateMetrics.enabled", + "NodeExporterDown": ".Values.nodeExporter.enabled", + "PrometheusOperatorDown": ".Values.prometheusOperator.enabled", +} + +const RuleHeader = `{{- /* +Generated from '%(.Name)s' group from %(.URL)s%(.ByLine)s +Do not change in-place! In order to change this file first read following link: +https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack +*/ -}} +{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} +{{- if and (semverCompare ">=%(.MinKubeVersion)s" $kubeTargetVersion) (semverCompare "<%(.MaxKubeVersion)s" $kubeTargetVersion) .Values.defaultRules.create%(.Condition)s }}%(.InitLine)s +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" .) "%(.Name)s" | trunc 63 | trimSuffix "-" }} + namespace: {{ template "kube-prometheus-stack.namespace" . }} + labels: + app: {{ template "kube-prometheus-stack.name" . }} +{{ include "kube-prometheus-stack.labels" . 
| indent 4 }} +{{- if .Values.defaultRules.labels }} +{{ toYaml .Values.defaultRules.labels | indent 4 }} +{{- end }} +{{- if .Values.defaultRules.annotations }} + annotations: +{{ toYaml .Values.defaultRules.annotations | indent 4 }} +{{- end }} +spec: + groups: +` + +func NewRuleHeader(headerData types.HeaderData) (string, error) { + if config.GetContext().DebugMode { + headerData.ByLine = ` by 'ob-charts-tool'` + } + + templateRenderer := pythonish.NewRenderer() + tmpl, err := templateRenderer.Parse(RuleHeader) + if err != nil { + return "ERROR", err + } + + var buffer bytes.Buffer + err = tmpl.Execute(&buffer, headerData) + return buffer.String(), err +} diff --git a/internal/cmd/updatemonitoringmixins/git/main.go b/internal/cmd/updatemonitoringmixins/git/main.go new file mode 100644 index 0000000..74123a8 --- /dev/null +++ b/internal/cmd/updatemonitoringmixins/git/main.go @@ -0,0 +1,23 @@ +package git + +import "github.com/go-git/go-git/v5/plumbing" + +type RepoConfigStatus struct { + Name string `json:"name"` + RepoURL string `json:"repo_url"` + Branch string `json:"branch"` + HeadSha string `json:"head_sha"` +} + +func (repoConfig *RepoConfigStatus) HeadHash() plumbing.Hash { + return plumbing.NewHash(repoConfig.HeadSha) +} + +func RepoSHAs(repos map[string]RepoConfigStatus) map[string]string { + refs := make(map[string]string, len(repos)) + for _, repo := range repos { + refs[repo.Name] = repo.HeadSha + } + + return refs +} diff --git a/internal/cmd/updatemonitoringmixins/jsonnet/main.go b/internal/cmd/updatemonitoringmixins/jsonnet/main.go new file mode 100644 index 0000000..073cde4 --- /dev/null +++ b/internal/cmd/updatemonitoringmixins/jsonnet/main.go @@ -0,0 +1,127 @@ +package jsonnet + +import ( + "fmt" + "github.com/rancher/ob-charts-tool/internal/util" + "github.com/sirupsen/logrus" + "os" + "os/exec" + "path/filepath" + "strings" + + "github.com/google/go-jsonnet" +) + +type fsCacheEntry struct { + contents jsonnet.Contents + abspath string + exists 
bool +} + +type MyImporter struct { + fsCache map[string]*fsCacheEntry + MixinDir string +} + +func (mi *MyImporter) tryPath(dir, importedPath string) (found bool, contents jsonnet.Contents, foundHere string, err error) { + if mi.fsCache == nil { + mi.fsCache = make(map[string]*fsCacheEntry) + } + var absPath string + if filepath.IsAbs(importedPath) { + absPath = importedPath + } else { + absPath = filepath.Join(dir, importedPath) + } + var entry *fsCacheEntry + if cacheEntry, isCached := mi.fsCache[absPath]; isCached { + entry = cacheEntry + } else { + contentBytes, err := os.ReadFile(absPath) + if err != nil { + if os.IsNotExist(err) { + entry = &fsCacheEntry{ + exists: false, + } + } else { + return false, jsonnet.Contents{}, "", err + } + } else { + entry = &fsCacheEntry{ + exists: true, + contents: jsonnet.MakeContentsRaw(contentBytes), + } + } + mi.fsCache[absPath] = entry + } + return entry.exists, entry.contents, absPath, nil +} + +func (mi *MyImporter) Import(importedFrom, importedPath string) (contents jsonnet.Contents, foundAt string, err error) { + if strings.Contains(importedPath, "github.com") { + found, content, foundHere, err := mi.tryPath(mi.MixinDir+"/vendor/", importedPath) + if err != nil { + return jsonnet.Contents{}, "", err + } + + if found { + return content, foundHere, nil + } + } + + dir, _ := filepath.Split(importedFrom) + found, content, foundHere, err := mi.tryPath(dir, importedPath) + if err != nil { + return jsonnet.Contents{}, "", err + } + + if !found { + found, content, foundHere, err = mi.tryPath(mi.MixinDir, importedPath) + if err != nil { + return jsonnet.Contents{}, "", err + } + } + + if found { + return content, foundHere, nil + } + + return jsonnet.Contents{}, "", fmt.Errorf("couldn't open import %#v: no match locally or in the Jsonnet library paths", importedPath) +} + +func NewVm(mixinDir string) *jsonnet.VM { + vm := jsonnet.MakeVM() + abs, err := filepath.Abs(mixinDir) + if err != nil { + vm.Importer(&MyImporter{ + 
fsCache: make(map[string]*fsCacheEntry), + MixinDir: mixinDir, + }) + return vm // Abs failed: keep this relative-path importer; falling through would overwrite it with an empty MixinDir + } + vm.Importer(&MyImporter{ + fsCache: make(map[string]*fsCacheEntry), + MixinDir: abs, + }) + + return vm +} + +// InitJsonnetBuilder runs `jb install` in dir when a jsonnetfile.json is present, so vendored jsonnet dependencies exist before evaluation. +func InitJsonnetBuilder(dir string) error { + if !strings.HasSuffix(dir, "/") { + dir = dir + "/" + } + jsonnetFile := filepath.Join(dir, "jsonnetfile.json") + if util.IsFile(jsonnetFile) { + logrus.Info("Running jsonnet-bundler, because jsonnetfile.json exists") + + cmd := exec.Command("jb", "install") + cmd.Dir = dir // Set the working directory + err := cmd.Run() + if err != nil { + logrus.Errorf("Error running jsonnet-bundler: %v", err) + return err + } + } + + return nil +} diff --git a/internal/cmd/updatemonitoringmixins/k8s/main.go b/internal/cmd/updatemonitoringmixins/k8s/main.go new file mode 100644 index 0000000..5390358 --- /dev/null +++ b/internal/cmd/updatemonitoringmixins/k8s/main.go @@ -0,0 +1,15 @@ +package k8s + +import ( + "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/util/yaml" +) + +func ParseConfigMapList(input string) v1.ConfigMapList { + var configMapList v1.ConfigMapList + err := yaml.Unmarshal([]byte(input), &configMapList) + if err != nil { + panic(err) + } + return configMapList +} diff --git a/internal/cmd/updatemonitoringmixins/main.go b/internal/cmd/updatemonitoringmixins/main.go new file mode 100644 index 0000000..bd105a6 --- /dev/null +++ b/internal/cmd/updatemonitoringmixins/main.go @@ -0,0 +1,123 @@ +package updatemonitoringmixins + +import ( + "fmt" + "github.com/jedib0t/go-pretty/text" + "github.com/rancher/ob-charts-tool/internal/charts" + "github.com/rancher/ob-charts-tool/internal/cmd/updatemonitoringmixins/constants" + "github.com/rancher/ob-charts-tool/internal/cmd/updatemonitoringmixins/syncgrafana" + "github.com/rancher/ob-charts-tool/internal/cmd/updatemonitoringmixins/syncprom" + "github.com/rancher/ob-charts-tool/internal/git" + "github.com/rancher/ob-charts-tool/internal/git/cache" + "github.com/rancher/ob-charts-tool/internal/util" + 
"github.com/sirupsen/logrus" + "os" +) + +type ChartPathMode int + +const ( + BasePathModeUnknown ChartPathMode = iota + BasePathModeOBTeam + BasePathModeCWD +) + +func PrepareGitCache(useCache bool, cacheDir string) { + if useCache { + // Called to set root dir + cache.SetupGitCacheManager(cacheDir) + } +} + +func DetermineTargetRoot(args []string, pathMode ChartPathMode, workingDir string) string { + var chartTargetRoot string + if pathMode == BasePathModeOBTeam { + targetVersion := args[0] + fmt.Println( + text.AlignCenter.Apply( + text.Color.Sprintf(text.FgBlue, "Looking for `rancher-monitoring` chart with version `%s`...", targetVersion), + 125, + ), + ) + cwd, err := os.Getwd() + if err != nil { + panic(err) + } + + chartTargetRoot = charts.BaseMonitoringVersionDir(cwd, targetVersion) + } else if pathMode == BasePathModeCWD { + chartTargetRoot = workingDir + } + + return chartTargetRoot +} + +func VerifySystemDependencies() error { + // TODO: verify user has jb command installed (jsonnet-bundler) + return nil +} + +// PrepareTempDir creates the temp dir, but callers must be responsible for `defer os.RemoveAll` of those dirs +func PrepareTempDir() (string, error) { + tempDir, err := os.MkdirTemp("", "ob-charts-tool-mixins-") + if err != nil { + logrus.Error("Error creating temporary directory:", err) + return "", err + } + + return tempDir, nil +} + +// UpdateMonitoringMixins is essentially equivalent to `update_mixins.sh` +func UpdateMonitoringMixins(useCache bool) error { + // First prepare a temp dir + tempDir, err := PrepareTempDir() + defer os.RemoveAll(tempDir) // Ensure cleanup when the function exits + if err != nil { + logrus.Error("Error creating temporary directory:", err) + return err + } + + // When the HeadSHA is empty we'll find the head + // TODO add a force flag later? 
+ for key, repoConfig := range constants.Repos { + if repoConfig.HeadSha != "" { + continue + } + headSha, err := git.FindRepoHeadSha(repoConfig.RepoURL) + if err != nil { + return err + } + repoConfig.HeadSha = headSha + constants.Repos[key] = repoConfig + } + + // Clone git repos + for _, repoConfig := range constants.Repos { + var cloneErr error + repoConfigParams := git.RepoConfigParams{Name: repoConfig.Name, URL: repoConfig.RepoURL, Head: repoConfig.HeadHash()} + if useCache { + destDir := util.GetRepoCloneDir(tempDir, repoConfig.Name, repoConfig.HeadSha) + "/shallow" + _, cloneErr = git.CachedShallowCloneRepository(repoConfigParams, destDir) + } else { + destDir := util.GetRepoCloneDir(tempDir, repoConfig.Name, repoConfig.HeadSha) + "/shallow" + _, cloneErr = git.ShallowCloneRepository(repoConfigParams, destDir) + } + if cloneErr != nil { + logrus.Error("Error shallow cloning repository:", cloneErr) + return cloneErr // was os.Exit(1), which skips the deferred temp-dir cleanup + } + } + + dashboardsErr := syncgrafana.DashboardsSync(tempDir) + if dashboardsErr != nil { + return dashboardsErr + } + + rulesErr := syncprom.PrometheusRulesSync(tempDir) + if rulesErr != nil { + return rulesErr + } + + return nil +} diff --git a/internal/cmd/updatemonitoringmixins/pythonish/main.go b/internal/cmd/updatemonitoringmixins/pythonish/main.go new file mode 100644 index 0000000..b357d77 --- /dev/null +++ b/internal/cmd/updatemonitoringmixins/pythonish/main.go @@ -0,0 +1,9 @@ +package pythonish + +import ( + "text/template" +) + +func NewRenderer() *template.Template { + return template.New("pythonish").Delims("%(", ")s") +} diff --git a/internal/cmd/updatemonitoringmixins/pythonish/re/main.go b/internal/cmd/updatemonitoringmixins/pythonish/re/main.go new file mode 100644 index 0000000..a5b3757 --- /dev/null +++ b/internal/cmd/updatemonitoringmixins/pythonish/re/main.go @@ -0,0 +1,13 @@ +package re + +import ( + "regexp" +) + +// FindIter returns the start and end +// indices of each match of the 
regular expression in the input string. +// This is similar to Python's re.finditer. +func FindIter(pattern, s string) [][]int { + re := regexp.MustCompile(pattern) + return re.FindAllStringIndex(s, -1) +} diff --git a/internal/cmd/updatemonitoringmixins/pythonish/textwrap/main.go b/internal/cmd/updatemonitoringmixins/pythonish/textwrap/main.go new file mode 100644 index 0000000..748fe61 --- /dev/null +++ b/internal/cmd/updatemonitoringmixins/pythonish/textwrap/main.go @@ -0,0 +1,31 @@ +package textwrap + +import "strings" + +// Indent adds 'prefix' to the beginning of selected lines in 'text'. +// +// If 'predicate' is provided, 'prefix' will only be added to the lines +// where 'predicate(line)' returns true. If 'predicate' is nil, it will +// default to adding 'prefix' to all non-empty lines that do not +// consist solely of whitespace characters. +func Indent(text, prefix string, predicate ...func(string) bool) string { + lines := strings.SplitAfter(text, "\n") + var builder strings.Builder + + var predFunc func(string) bool + if len(predicate) > 0 && predicate[0] != nil { + predFunc = predicate[0] + } else { + predFunc = func(line string) bool { + return strings.TrimSpace(line) != "" + } + } + + for _, line := range lines { + if predFunc(line) { + builder.WriteString(prefix) + } + builder.WriteString(line) + } + return builder.String() +} diff --git a/internal/cmd/updatemonitoringmixins/syncgrafana/main.go b/internal/cmd/updatemonitoringmixins/syncgrafana/main.go new file mode 100644 index 0000000..778009a --- /dev/null +++ b/internal/cmd/updatemonitoringmixins/syncgrafana/main.go @@ -0,0 +1,192 @@ +package syncgrafana + +import ( + "bytes" + "encoding/json" + "fmt" + "github.com/rancher/ob-charts-tool/internal/cmd/updatemonitoringmixins/common" + "github.com/rancher/ob-charts-tool/internal/cmd/updatemonitoringmixins/config" + "github.com/rancher/ob-charts-tool/internal/cmd/updatemonitoringmixins/constants" + 
"github.com/rancher/ob-charts-tool/internal/cmd/updatemonitoringmixins/git" + "github.com/rancher/ob-charts-tool/internal/cmd/updatemonitoringmixins/types" + "github.com/sirupsen/logrus" + "reflect" + "regexp" + "strings" +) + +var ReplacementMap = []types.DashboardReplacementRule{ + { + "var-namespace=$__cell_1", + "var-namespace=`}}{{ if .Values.grafana.sidecar.dashboards.enableNewTablePanelSyntax }}${__data.fields.namespace}{{ else }}$__cell_1{{ end }}{{`", + }, + { + "var-type=$__cell_2", + "var-type=`}}{{ if .Values.grafana.sidecar.dashboards.enableNewTablePanelSyntax }}${__data.fields.workload_type}{{ else }}$__cell_2{{ end }}{{`", + }, + { + "=$__cell", + "=`}}{{ if .Values.grafana.sidecar.dashboards.enableNewTablePanelSyntax }}${__value.text}{{ else }}$__cell{{ end }}{{`", + }, + { + `job=\"prometheus-k8s\",namespace=\"monitoring\"`, + "", + }, +} + +func DashboardsSync(tempDir string) error { + logrus.Info("Syncing grafana dashboards") + chartPath := config.GetContext().ChartRootDir + + chartsSources := constants.DashboardsSourceCharts() + for _, chart := range chartsSources { + currentState := chartState{} + switch c := chart.(type) { + case types.DashboardGitSource: + err := prepareGitDashboard(¤tState, tempDir, c, chartPath) + if err != nil { + return err + } + common.SetDefaultMaxK8s(&c) + writeErr := writeOutput(currentState, &c) + if writeErr != nil { + return writeErr + } + case types.DashboardURLSource: + err := prepareUrlDashboard(¤tState, c) + if err != nil { + return err + } + common.SetDefaultMaxK8s(&c) + writeErr := writeOutput(currentState, &c) + if writeErr != nil { + return writeErr + } + case types.DashboardFileSource: + // Needs to be essentially: https://github.com/prometheus-community/helm-charts/blob/0b60795bb66a21cd368b657f0665d67de3e49da9/charts/kube-prometheus-stack/hack/sync_grafana_dashboards.py#L320 + err := prepareFileDashboard(¤tState, c, chartPath) + if err != nil { + return err + } + common.SetDefaultMaxK8s(&c) + writeErr := 
writeOutput(currentState, &c) + if writeErr != nil { + return writeErr + } + default: + return fmt.Errorf("unknown chart type: %T", c) + } + } + + logrus.Info("Finished syncing grafana dashboards") + + return nil +} + +func PatchDashboardJson(inputContent string, key string) string { + content := strings.TrimSpace(inputContent) + + var data map[string]interface{} + err := json.Unmarshal([]byte(content), &data) + if err != nil { + return "{{`" + content + "`}}" + } + + // multicluster + templating, templatingOk := data["templating"].(map[string]interface{}) + if !templatingOk { + return "{{`" + content + "`}}" + } + list, listOk := templating["list"].([]interface{}) + if !listOk { + return "{{`" + content + "`}}" + } + + overwriteList := make([]interface{}, 0) + for _, item := range list { + if variable, ok := item.(map[string]interface{}); ok { + if name, ok := variable["name"].(string); ok && name == "cluster" { + variable["allValue"] = ".*" + variable["hide"] = ":multicluster:" + } + overwriteList = append(overwriteList, variable) + } else { + return "{{`" + content + "`}}" + } + } + templating["list"] = overwriteList + data["templating"] = templating + + updated := replaceNestedKey(data, "decimals", -1, nil) + + var b bytes.Buffer + encErr := customJsonEncoder(&b).Encode(updated) + if encErr != nil { + return "{{`" + content + "`}}" + } + content = b.String() + replacementString := fmt.Sprintf("`}}{{ if %s }}0{{ else }}2{{ end }}{{`", key) + content = strings.Replace(content, `":multicluster:"`, replacementString, -1) // this changes things to escaped utf8 + + for _, rule := range ReplacementMap { + content = strings.Replace(content, rule.Match, rule.Replacement, -1) + } + + content = strings.TrimSpace(content) + + return "{{`" + content + "`}}" +} + +func customJsonEncoder(b *bytes.Buffer) *json.Encoder { + // Use a custom Encoder to disable HTML escaping + enc := json.NewEncoder(b) + enc.SetEscapeHTML(false) // Disable HTML escaping + return enc +} + +func 
replaceNestedKey(data interface{}, key string, value interface{}, replace interface{}) interface{} { + switch v := data.(type) { + case map[string]interface{}: + newMap := make(map[string]interface{}) + for k, val := range v { + if k == key && reflect.DeepEqual(val, value) { + newMap[k] = replace + } else { + newMap[k] = replaceNestedKey(val, key, value, replace) + } + } + return newMap + case []interface{}: + newList := make([]interface{}, len(v)) + for i, item := range v { + newList[i] = replaceNestedKey(item, key, value, replace) + } + return newList + default: + return data + } +} + +const ( + timezoneReplacement = `"timezone": "` + "`" + `}}{{ .Values.grafana.defaultDashboardsTimezone }}{{` + "`" + `"` + editableReplacement = `"editable":` + "`" + `}}{{ .Values.grafana.defaultDashboardsEditable }}{{` + "`" + intervalReplacement = `"interval":"` + "`" + `}}{{ .Values.grafana.defaultDashboardsInterval }}{{` + "`" + `"` +) + +func PatchDashboardJsonSetTimezoneAsVariable(content string) string { + timezoneRegexp := regexp.MustCompile(`"timezone"\s*:\s*"(?:\\.|[^\"])*"`) + content = timezoneRegexp.ReplaceAllString(content, timezoneReplacement) + return content +} + +func PatchDashboardJsonSetEditableAsVariable(content string) string { + editableRegexp := regexp.MustCompile(`"editable"\s*:\s*(?:true|false)`) + content = editableRegexp.ReplaceAllString(content, editableReplacement) + return content +} + +func PatchDashboardJsonSetIntervalAsVariable(content string) string { + intervalRegexp := regexp.MustCompile(`"interval"\s*:\s*"(?:\\.|[^\"])*"`) + content = intervalRegexp.ReplaceAllString(content, intervalReplacement) + return content +} diff --git a/internal/cmd/updatemonitoringmixins/syncgrafana/output.go b/internal/cmd/updatemonitoringmixins/syncgrafana/output.go new file mode 100644 index 0000000..281508b --- /dev/null +++ b/internal/cmd/updatemonitoringmixins/syncgrafana/output.go @@ -0,0 +1,222 @@ +package syncgrafana + +import ( + "encoding/json" + "fmt" + 
"os" + "path/filepath" + "slices" + "strings" + + "github.com/rancher/ob-charts-tool/internal/cmd/updatemonitoringmixins/common" + "github.com/rancher/ob-charts-tool/internal/cmd/updatemonitoringmixins/constants" + "github.com/rancher/ob-charts-tool/internal/cmd/updatemonitoringmixins/jsonnet" + "github.com/rancher/ob-charts-tool/internal/cmd/updatemonitoringmixins/k8s" + "github.com/rancher/ob-charts-tool/internal/cmd/updatemonitoringmixins/pythonish/textwrap" + "github.com/rancher/ob-charts-tool/internal/cmd/updatemonitoringmixins/types" + + "github.com/sirupsen/logrus" + "gopkg.in/yaml.v3" +) + +// writeOutput dispatches on the chart's dashboard type and writes each contained dashboard group to a file via writeGroupToFile. +func writeOutput[T types.DashboardSource](currentState chartState, chart T) error { + dashboardType := chart.GetType() + if dashboardType == types.DashboardKatesYaml { + var yamlData map[string]interface{} + err := yaml.Unmarshal([]byte(currentState.rawText), &yamlData) + if err != nil { + return err + } + + kind, hasKind := yamlData["kind"] + if !hasKind { + logrus.Warn("kind not found in yaml") + } + k8sKeys := []string{ + "apiVersion", + "kind", + "metadata", + "items", + } + keyCount := 0 + for key := range yamlData { + if slices.Contains(k8sKeys, key) { + keyCount++ + } + } + if keyCount < len(k8sKeys)-1 && !hasKind { + return fmt.Errorf("no kind found in yaml and not enough expected keys: %v", k8sKeys) + } + + // Now use `kind` var to somehow reparse the data into that specific kind + switch kind { + case "ConfigMapList": + configMapList := k8s.ParseConfigMapList(currentState.rawText) + for _, group := range configMapList.Items { + for resource, content := range group.Data { + resourceName := strings.TrimSuffix(resource, filepath.Ext(resource)) + + err := writeGroupToFile( + resourceName, + content, + currentState.url, + chart.GetDestination(), + chart.GetMinKubernetes(), + chart.GetMaxKubernetes(), + chart.GetMulticlusterKey(), + ) + if err != nil { + return err + } + } + } + } + + } else if dashboardType == types.DashboardYaml { + logrus.Warn("dashboard type 
for `yaml` not implemented") + } else if dashboardType == types.DashboardJson { + resourceName := filepath.Base(currentState.source) + resourceName = strings.TrimSuffix(resourceName, filepath.Ext(resourceName)) + + err := writeGroupToFile( + resourceName, + currentState.rawText, + currentState.url, + chart.GetDestination(), + chart.GetMinKubernetes(), + chart.GetMaxKubernetes(), + chart.GetMulticlusterKey(), + ) + if err != nil { + return err + } + } else if dashboardType == types.DashboardJsonnetMixin { + // In the python script, the CWD would be mixinDir; with prev CWD saved to `cwd` var + vm := jsonnet.NewVm(currentState.mixinDir) + renderedJson, err := vm.EvaluateAnonymousSnippet( + currentState.source, + currentState.rawText+".grafanaDashboards", + ) + if err != nil { + return err + } + var jsonDataMap map[string]map[string]interface{} + jsonErr := json.Unmarshal([]byte(renderedJson), &jsonDataMap) + if jsonErr != nil { + return jsonErr + } + + // After jsonnet is run we can go back to prev CWD context if + _, ok := any(chart).(types.DashboardGitSource) + if ok { + // change dir maybe? 
+ } + + _, useFlatStructure := jsonDataMap["annotations"] + if useFlatStructure { + resourceName := filepath.Base(currentState.source) + resourceName = strings.TrimSuffix(resourceName, filepath.Ext(resourceName)) + + content, jsonErr := json.Marshal(jsonDataMap) + if jsonErr != nil { + return jsonErr + } + + err := writeGroupToFile( + resourceName, + string(content), + currentState.url, + chart.GetDestination(), + chart.GetMinKubernetes(), + chart.GetMaxKubernetes(), + chart.GetMulticlusterKey(), + ) + if err != nil { + return err + } + } else { + for resource, content := range jsonDataMap { + resourceName := strings.TrimSuffix(resource, filepath.Ext(resource)) + + jsonData, jsonErr := json.Marshal(content) + if jsonErr != nil { + return jsonErr + } + + err := writeGroupToFile( + resourceName, + string(jsonData), + currentState.url, + chart.GetDestination(), + chart.GetMinKubernetes(), + chart.GetMaxKubernetes(), + chart.GetMulticlusterKey(), + ) + if err != nil { + return err + } + } + } + } + + return nil +} + +func writeGroupToFile( + resourceName string, + content string, + url string, + destination string, + minKubernetesVersion string, + maxKubernetesVersion string, + multiclusterKey string, +) error { + condition, _ := constants.DashboardsConditionMap[resourceName] + + headerData := types.HeaderData{ + Name: resourceName, + URL: url, + Condition: condition, + MinKubeVersion: minKubernetesVersion, + MaxKubeVersion: maxKubernetesVersion, + } + preparedContent, headerErr := constants.NewDashboardHeader(headerData) + if headerErr != nil { + panic(headerErr) + } + + content = PatchDashboardJson(content, multiclusterKey) + content = PatchDashboardJsonSetTimezoneAsVariable(content) + content = PatchDashboardJsonSetEditableAsVariable(content) + content = PatchDashboardJsonSetIntervalAsVariable(content) + + fileStruct := map[string]interface{}{ + resourceName + ".json": common.LiteralStr(content), + } + yamlString, yamlStrErr := common.YamlStrRepr(fileStruct, 2, 
false) + if yamlStrErr != nil { + return yamlStrErr + } + yamlString = textwrap.Indent(yamlString, " ") + preparedContent += yamlString + preparedContent += "{{- end }}" + + filename := resourceName + ".yaml" + newFilename := fmt.Sprintf("%s/%s", destination, filename) + + // make sure directories to store the file exist + dirErr := os.MkdirAll(destination, os.ModePerm) + if dirErr != nil { + return dirErr + } + + // Recreate the file + writeErr := os.WriteFile(newFilename, []byte(preparedContent), 0644) + if writeErr != nil { + return writeErr + } + + logrus.Infof("Generated %s", newFilename) + + return nil +} diff --git a/internal/cmd/updatemonitoringmixins/syncgrafana/prepare.go b/internal/cmd/updatemonitoringmixins/syncgrafana/prepare.go new file mode 100644 index 0000000..907b29e --- /dev/null +++ b/internal/cmd/updatemonitoringmixins/syncgrafana/prepare.go @@ -0,0 +1,134 @@ +package syncgrafana + +import ( + "encoding/json" + "fmt" + "io" + "net/http" + "os" + "path/filepath" + + "github.com/go-git/go-git/v5/plumbing" + "github.com/sirupsen/logrus" + + "github.com/rancher/ob-charts-tool/internal/cmd/updatemonitoringmixins/jsonnet" + "github.com/rancher/ob-charts-tool/internal/cmd/updatemonitoringmixins/types" + mainGit "github.com/rancher/ob-charts-tool/internal/git" + "github.com/rancher/ob-charts-tool/internal/util" +) + +func prepareGitDashboard(currentState *chartState, tempDir string, chart types.DashboardGitSource, chartPath string) error { + if chart.Source == "" { + chart.Source = "_mixin.jsonnet" + } + + url := chart.Repository.RepoURL + baseName := filepath.Base(url) + clonePath := filepath.Join(tempDir, baseName) + + // Remove the clonePath if it exists from previous runs... 
+ _ = os.RemoveAll(clonePath) + + branch := "main" + if chart.Repository.Branch != "" { + branch = chart.Repository.Branch + } + branchHead := chart.Repository.HeadSha + if branchHead == "" { + var headErr error + branchHead, headErr = mainGit.FindBranchHeadSha(chart.Repository.RepoURL, branch) + if headErr != nil { + return headErr + } + } + + configParams := mainGit.RepoConfigParams{ + Name: chart.Repository.Name, + URL: chart.Repository.RepoURL, + Branch: branch, + Head: plumbing.NewHash(branchHead), + } + logrus.Infof("Cloning %s to %s", chart.Repository.RepoURL, clonePath) + _, cloneErr := mainGit.CachedShallowCloneRepository(configParams, clonePath) + if cloneErr != nil { + return cloneErr + } + + mixinFile := chart.Source + mixinDir := fmt.Sprintf("%s/%s/", clonePath, chart.Cwd) + currentState.mixinDir = mixinDir + jbErr := jsonnet.InitJsonnetBuilder(mixinDir) + if jbErr != nil { + return jbErr + } + + filePath := filepath.Join(mixinDir, mixinFile) + if chart.Content != "" { + file, err := os.Create(filePath) + if err != nil { + return err + } + defer file.Close() // Ensure the file is closed when the function exits + _, err = file.WriteString(chart.Content) + if err != nil { + logrus.Errorf("Error writing to file %s: %v\n", filePath, err) + return err + } + } + + mixinVarsJSON, err := json.Marshal(chart.MixinVars) + if err != nil { + logrus.Errorf("Error encoding mixin_vars to JSON: %v\n", err) + return err + } + + currentState.url = url + currentState.cwd = tempDir + currentState.rawText = fmt.Sprintf("((import \"%s\") + %s)", mixinFile, mixinVarsJSON) + currentState.source = filepath.Base(mixinFile) + return nil +} + +func prepareUrlDashboard(currentState *chartState, chart types.DashboardURLSource) error { + logrus.Infof("Generating dashboard from %s", chart.Source) + + resp, err := http.Get(chart.Source) + if err != nil { + return err + } + defer resp.Body.Close() // Ensure the connection is closed + + // Read the response body + body, err := 
io.ReadAll(resp.Body) + if err != nil { + return err + } + + currentState.rawText = string(body) + currentState.source = chart.Source + currentState.url = chart.Source + + return nil +} + +// prepareFileDashboard loads a dashboard shipped inside the chart tree and records its contents and source on currentState. +func prepareFileDashboard(currentState *chartState, chart types.DashboardFileSource, chartPath string) error { + // filepath.Join supplies the path separator; plain concatenation dropped it when chartPath lacked a trailing slash. + fileSourcePath, err := filepath.Abs(filepath.Join(chartPath, chart.Source)) + if err != nil { + return err + } + file, err := os.ReadFile(fileSourcePath) + if err != nil { + return err + } + logrus.Infof("Generating dashboards from %s", fileSourcePath) + + currentState.rawText = string(file) + currentState.source = chart.Source + // TODO update to relative path + currentState.url = chart.Source + relPath, err := util.GetRelativePath( + chart.GetDestination(), + fileSourcePath, + ) + if err == nil { + currentState.url = relPath + } + return nil +} diff --git a/internal/cmd/updatemonitoringmixins/syncgrafana/types.go b/internal/cmd/updatemonitoringmixins/syncgrafana/types.go new file mode 100644 index 0000000..8b334bb --- /dev/null +++ b/internal/cmd/updatemonitoringmixins/syncgrafana/types.go @@ -0,0 +1,9 @@ +package syncgrafana + +type chartState struct { + cwd string + mixinDir string + rawText string + source string + url string +} diff --git a/internal/cmd/updatemonitoringmixins/syncgrafana/util.go b/internal/cmd/updatemonitoringmixins/syncgrafana/util.go new file mode 100644 index 0000000..a0c9867 --- /dev/null +++ b/internal/cmd/updatemonitoringmixins/syncgrafana/util.go @@ -0,0 +1,12 @@ +package syncgrafana + +import "github.com/rancher/ob-charts-tool/internal/cmd/updatemonitoringmixins/types" + +func setDefaultMaxK8s(ds types.DashboardSource) types.DashboardSource { + if ds.GetMaxKubernetes() == "" { + // Equal to: https://github.com/prometheus-community/helm-charts/blob/0b60795bb66a21cd368b657f0665d67de3e49da9/charts/kube-prometheus-stack/hack/sync_grafana_dashboards.py#L326 + ds.SetMaxKubernetes("9.9.9-9") + } + + return ds +} diff --git a/internal/cmd/updatemonitoringmixins/syncprom/main.go 
b/internal/cmd/updatemonitoringmixins/syncprom/main.go new file mode 100644 index 0000000..209f8a4 --- /dev/null +++ b/internal/cmd/updatemonitoringmixins/syncprom/main.go @@ -0,0 +1,428 @@ +package syncprom + +import ( + "fmt" + "github.com/rancher/ob-charts-tool/internal/cmd/updatemonitoringmixins/config" + "github.com/rancher/ob-charts-tool/internal/cmd/updatemonitoringmixins/pythonish/re" + "github.com/rancher/ob-charts-tool/internal/cmd/updatemonitoringmixins/pythonish/textwrap" + "regexp" + "strings" + "unicode" + + "github.com/rancher/ob-charts-tool/internal/cmd/updatemonitoringmixins/common" + "github.com/rancher/ob-charts-tool/internal/cmd/updatemonitoringmixins/constants" + "github.com/rancher/ob-charts-tool/internal/cmd/updatemonitoringmixins/types" + "github.com/sirupsen/logrus" +) + +var ReplacementMap = []types.RuleReplacementRule{ + { + Match: `job="prometheus-operator"`, + Replacement: `job="{{ $operatorJob }}"`, + Init: `{{- $operatorJob := printf "%s-%s" (include "kube-prometheus-stack.fullname" .) "operator" }}`, + }, + { + Match: `job="prometheus-k8s"`, + Replacement: `job="{{ $prometheusJob }}"`, + Init: `{{- $prometheusJob := printf "%s-%s" (include "kube-prometheus-stack.fullname" .) "prometheus" }}`, + }, + { + Match: `job="alertmanager-main"`, + Replacement: `job="{{ $alertmanagerJob }}"`, + Init: `{{- $alertmanagerJob := printf "%s-%s" (include "kube-prometheus-stack.fullname" .) "alertmanager" }}`, + }, + { + Match: `namespace="monitoring"`, + Replacement: `namespace="{{ $namespace }}"`, + Init: `{{- $namespace := printf "%s" (include "kube-prometheus-stack.namespace" .) }}`, + }, + { + Match: `alertmanager-$1`, + Replacement: `$1`, + Init: ``, + }, + { + Match: `job="kube-state-metrics"`, + Replacement: `job="{{ $kubeStateMetricsJob }}"`, + Init: `{{- $kubeStateMetricsJob := include "kube-prometheus-stack-kube-state-metrics.name" . 
}}`, + }, + { + Match: `job="{{ $kubeStateMetricsJob }}"`, + Replacement: `job="{{ $kubeStateMetricsJob }}", namespace=~"{{ $targetNamespace }}"`, + LimitGroup: []string{"kubernetes-apps"}, + Init: `{{- $targetNamespace := .Values.defaultRules.appNamespacesTarget }}`, + }, + { + Match: `job="kubelet"`, + Replacement: `job="kubelet", namespace=~"{{ $targetNamespace }}"`, + LimitGroup: []string{"kubernetes-storage"}, + Init: `{{- $targetNamespace := .Values.defaultRules.appNamespacesTarget }}`, + }, + { + Match: `runbook_url: https://runbooks.prometheus-operator.dev/runbooks/`, + Replacement: `runbook_url: {{ .Values.defaultRules.runbookUrl }}/`, + Init: ``, + }, + { + Match: `(namespace,service)`, + Replacement: `(namespace,service,cluster)`, + Init: ``, + }, + { + Match: `(namespace, job, handler`, + Replacement: `(cluster, namespace, job, handler`, + Init: ``, + }, + { + Match: `$.Values.defaultRules.node.fsSelector`, + Replacement: `{{ $.Values.defaultRules.node.fsSelector }}`, + Init: ``, + }, +} + +func PrometheusRulesSync(tempDir string) error { + logrus.Info("Syncing prometheus rules") + chartPath := config.GetContext().ChartRootDir + chartsSources := constants.RulesSourceCharts() + for _, chart := range chartsSources { + currentState := chartState{} + err := prepareGitRules(¤tState, tempDir, chart, chartPath) + if err != nil { + return err + } + common.SetDefaultMaxK8s(&chart) + writeErr := writeOutput(currentState, chart) + if writeErr != nil { + return writeErr + } + } + + logrus.Info("Finished syncing prometheus rules & alerts") + + return nil +} + +func FixExpr(group *AlertGroup) { + // TODO: something to fix "\n" in groupRule.Expr + for key, groupRule := range group.Rules { + groupRule.Expr = strings.TrimRight(groupRule.Expr, " \n\r\t") + group.Rules[key] = groupRule + } +} + +const ( + indent = 4 + labelIndent = 2 +) + +func AddCustomLabels(rules string, group AlertGroup) string { + condition := constants.RulesConditionMap[group.Name] + ruleGroupLabels 
:= getRuleGroupCondition(condition, "additionalRuleGroupLabels") + + baseLabelIndent := strings.Repeat(" ", indent+labelIndent) + + additionalRuleLabels := prepareAdditonalRuleLabels(ruleGroupLabels) + additonalRuleLabelsConditionStart := "\n" + baseLabelIndent + fmt.Sprintf("{{- if or .Values.defaultRules.additionalRuleLabels %s }}", ruleGroupLabels) + additonalRuleLabelsConditionEnd := "\n" + baseLabelIndent + "{{- end }}" + + // labels: cannot be null, if a rule does not have any labels by default, the labels block + // should only be added if there are .Values defaultRules.additionalRuleLabels defined + ruleSeperator := "\n" + strings.Repeat(" ", indent) + "-.*" + labelSeperator := "\n" + strings.Repeat(" ", indent) + " labels:" + sectionSeperator := "\n" + strings.Repeat(" ", indent) + " \\S" + sectionSeperatorLen := len(sectionSeperator) - 1 + rulesPositions := re.FindIter(ruleSeperator, rules) + + // fetch breakpoint between each set of rules + var ruleStartingLine [][]int + for _, pos := range rulesPositions { + ruleStartingLine = append(ruleStartingLine, pos) + } + var head string + if len(ruleStartingLine) > 0 { + head = rules[:ruleStartingLine[0][0]] + } else { + head = rules // If no rule separator is found + } + + // construct array of rules so they can be handled individually + updatedRules := make([]string, 0) + var prevRule []int + for _, r := range ruleStartingLine { + if prevRule != nil { + updatedRules = append(updatedRules, rules[prevRule[0]:r[0]]) + } + prevRule = r + } + updatedRules = append(updatedRules, rules[prevRule[0]:len(rules)-1]) + + for i, rule := range updatedRules { + labelRegex := regexp.MustCompile(labelSeperator) + currentLabel := labelRegex.FindStringIndex(rule) + if len(currentLabel) > 0 { + sectionRegex := regexp.MustCompile(sectionSeperator) + ruleSearch := rule[currentLabel[1]:] + entries := sectionRegex.FindStringIndex(ruleSearch) + if len(entries) > 0 { + entriesStart := currentLabel[1] + entriesEnd := entries[1] + 
// getRuleGroupCondition rewrites a ".Values.defaultRules.rules.*" condition
// so it points at the per-group value named by valueKey (for example
// "additionalRuleGroupLabels"). An empty condition yields an empty string.
func getRuleGroupCondition(groupName string, valueKey string) string {
	if groupName == "" {
		return ""
	}

	// Compound conditions mention ".Values" more than once; keep only the
	// final space-separated term, which carries the rules path.
	if strings.Count(groupName, ".Values") > 1 {
		fields := strings.Split(groupName, " ")
		groupName = fields[len(fields)-1]
	}

	replaced := strings.ReplaceAll(
		groupName,
		"Values.defaultRules.rules",
		fmt.Sprintf("Values.defaultRules.%s", valueKey),
	)
	return strings.TrimSpace(replaced)
}
// AddCustomAnnotations injects the Helm template blocks that append
// .Values.defaultRules.additionalRuleAnnotations (and the per-group
// additionalRuleGroupAnnotations) after each alert's annotations key.
//
// The insertion works on the raw rendered text: for every "- alert:" match it
// computes a byte offset, compensating for the text already inserted on
// behalf of earlier alerts.
//
// NOTE(review): this assumes the annotations key immediately follows each
// "- alert:" line in the rendered YAML — confirm against common.YamlStrRepr.
func AddCustomAnnotations(rules string, group AlertGroup) string {
	ruleCondition := "{{- if .Values.defaultRules.additionalRuleAnnotations }}\n{{ toYaml .Values.defaultRules.additionalRuleAnnotations | indent 8 }}\n{{- end }}"
	groupCondition := constants.RulesConditionMap[group.Name]
	ruleGroupAnnotations := getRuleGroupCondition(groupCondition, "additionalRuleGroupAnnotations")
	ruleGroupCondition := fmt.Sprintf(
		"\n{{- if %s }}\n{{ toYaml %s | indent 8 }}\n{{- end }}",
		ruleGroupAnnotations,
		ruleGroupAnnotations,
	)
	annotations := " annotations:"
	// The +1 terms presumably account for the newline following each
	// inserted chunk — TODO confirm against the upstream sync script.
	annotationsLen := len(annotations) + 1
	ruleConditionLen := len(ruleCondition) + 1
	ruleGroupConditionLen := len(ruleGroupCondition)

	// Every "- alert:" line marks one insertion point.
	separator := strings.Repeat(" ", indent) + "- alert:.*"
	alertsPositions := re.FindIter(separator, rules)
	alert := 0

	for _, alertPosition := range alertsPositions {
		// Shift the match offset by the text inserted for previous alerts.
		index := alertPosition[1] + annotationsLen + (ruleConditionLen+ruleGroupConditionLen)*alert
		rules = rules[:index] + "\n" + ruleCondition + ruleGroupCondition + rules[index:]
		alert += 1
	}

	return rules
}
}}\"\n" + + indentSpaces + "{{- end }}" + keepFiringForLen := len(keepFiringFor) + 1 + + separator := strings.Repeat(" ", indent) + " for:.*" + alertsPositions := re.FindIter(separator, rules) + alert := 0 + + for _, alertPosition := range alertsPositions { + index := alertPosition[1] + keepFiringForLen*alert + rules = rules[:index] + "\n" + keepFiringFor + rules[index:] + alert += 1 + } + + return rules +} + +func AddCustomFor(rules string) string { + replaceField := "for:" + return addCustomAlertRules(rules, replaceField) +} + +func AddCustomSeverity(rules string) string { + replaceField := "severity:" + return addCustomAlertRules(rules, replaceField) +} + +func addCustomAlertRules(rules, keyToReplace string) string { + indentedKey := strings.Repeat(" ", indent) + keyToReplace + alertPrefix := "- alert:" + var ( + builder strings.Builder + alertName string + inAlertBlock bool + ) + + for i := 0; i < len(rules); { + minPrefixLength := i + len(alertPrefix) + if len(rules) >= minPrefixLength && rules[i:minPrefixLength] == alertPrefix { + inAlertBlock = true + start := i + len(alertPrefix) + 1 + end := start + for end < len(rules) && isAlnum(rules[end]) { + end++ + } + + alertName = rules[start:end] + } + + if inAlertBlock { + minKeyLength := i + len(indentedKey) + if len(rules) >= minKeyLength && rules[i:minKeyLength] == indentedKey { + inAlertBlock = false + + start := i + len(indentedKey) + 1 + end := start + for end < len(rules) && isAlnum(rules[end]) { + end++ + } + + wordAfterReplace := rules[start:end] + newKey := indentedKey + " {{ dig \"" + alertName + + `" "` + keyToReplace[:len(keyToReplace)-1] + `" "` + + wordAfterReplace + `" .Values.customRules }}` + builder.WriteString(newKey) + i = end + } + } + + builder.WriteByte(rules[i]) + i++ + } + + return builder.String() +} + +func isAlnum(b byte) bool { + r := rune(b) + return unicode.IsLetter(r) || unicode.IsDigit(r) +} + +func AddRulesConditionsFromConditionMap(rules string) string { + return 
addRulesConditions(rules, constants.AlertConditionMap) +} + +func addRulesConditions(rules string, conditionMap map[string]string) string { + ruleCondition := "{{- if %s }}\n" + lineStart := strings.Repeat(" ", indent) + "- alert: " + + for alertName, condition := range conditionMap { + fullLine := lineStart + alertName + if !strings.Contains(rules, fullLine) { + continue + } + + ruleText := fmt.Sprintf(ruleCondition, condition) + start := 0 + + for { + index := strings.Index(rules[start:], fullLine) + if index == -1 { + break + } + // add if condition + index += start + start = index + len(ruleText) + 1 + rules = rules[:index] + ruleText + rules[index:] + // add end of if + + nextIndex := strings.Index(rules[start:], lineStart) + if nextIndex == -1 { + // we found the last alert in file if there are no alerts after it + nextIndex = len(rules) + } else { + nextIndex += start + } + + foundBlockEnd := false + lastLineIndex := nextIndex + + for !foundBlockEnd { + lastLineIndex = strings.LastIndex(rules[index:lastLineIndex-1], "\n") + if lastLineIndex == -1 { + break + } + lastLineIndex += index + + lastLine := rules[lastLineIndex+1 : nextIndex] + if strings.HasPrefix(lastLine, "{{- if") { + nextIndex = lastLineIndex + 1 + continue + } + foundBlockEnd = true + } + + rules = rules[:nextIndex] + "{{- end }}\n" + rules[nextIndex:] + } + } + + return rules +} + +func AddRulesPerRuleConditions(rules string, group AlertGroup) string { + rulesConditionMap := map[string]string{} + for _, rule := range group.Rules { + if rule.Alert != "" { + rulesConditionMap[rule.Alert] = fmt.Sprintf("not (.Values.defaultRules.disabled.%s | default false)", rule.Alert) + } + } + + rules = addRulesConditions(rules, rulesConditionMap) + + return rules +} + +func FixGroupsIndent(content string) string { + lines := strings.Split(content, "\n") + if len(lines) == 0 { + return "" + } + + prefixCountReg := regexp.MustCompile(`(\s*\- )\w+:`) + prefixLen := 
len(prefixCountReg.FindStringSubmatch(content)[1]) + prefixSpaces := strings.Repeat(" ", prefixLen) + lines[0] = " " + lines[0] + regex := "^" + prefixSpaces + `\w+` + levelMatchReg := regexp.MustCompile(regex) + for i, line := range lines { + if i == 0 { + continue + } + if levelMatchReg.MatchString(line) { + lines[i] = prefixSpaces + line + } + } + + return strings.Join(lines, "\n") +} diff --git a/internal/cmd/updatemonitoringmixins/syncprom/output.go b/internal/cmd/updatemonitoringmixins/syncprom/output.go new file mode 100644 index 0000000..9033c15 --- /dev/null +++ b/internal/cmd/updatemonitoringmixins/syncprom/output.go @@ -0,0 +1,114 @@ +package syncprom + +import ( + "fmt" + "github.com/rancher/ob-charts-tool/internal/cmd/updatemonitoringmixins/common" + "github.com/rancher/ob-charts-tool/internal/cmd/updatemonitoringmixins/constants" + "github.com/rancher/ob-charts-tool/internal/cmd/updatemonitoringmixins/types" + "github.com/sirupsen/logrus" + "os" + "regexp" + "slices" + "strings" +) + +func writeOutput(currentState chartState, chart types.RulesGitSource) error { + + // TODO if alerts has spec get spec.groups, else .groups + groups := currentState.alerts.Groups + //fmt.Println(groups) + for _, group := range groups { + FixExpr(&group) + groupName := group.Name + + rulesGroups := []AlertGroup{group} + rules, yamlErr := common.YamlStrRepr(rulesGroups, 4, true) + if yamlErr != nil { + return yamlErr + } + + initLine := "" + for _, replaceRule := range ReplacementMap { + limitGroup := replaceRule.LimitGroup + if limitGroup == nil || len(limitGroup) == 0 { + limitGroup = []string{ + groupName, + } + } + + if slices.Contains(limitGroup, groupName) && strings.Contains(rules, replaceRule.Match) { + rules = strings.ReplaceAll(rules, replaceRule.Match, replaceRule.Replacement) + if replaceRule.Init != "" { + initLine += "\n" + replaceRule.Init + } + } + } + // Now append per-alert rules + rules = AddCustomLabels(rules, group) // rules = add_custom_labels(rules, 
group) + rules = AddCustomAnnotations(rules, group) // rules = add_custom_annotations(rules, group) + rules = AddCustomKeepFiringFor(rules) // rules = add_custom_keep_firing_for(rules) + rules = AddCustomFor(rules) // rules = add_custom_for(rules) + rules = AddCustomSeverity(rules) // rules = add_custom_severity(rules) + rules = AddRulesConditionsFromConditionMap(rules) // rules = add_rules_conditions_from_condition_map(rules) + rules = AddRulesPerRuleConditions(rules, group) // rules = add_rules_per_rule_conditions(rules, group) + writeErr := writeGroupToFile(groupName, rules, currentState.url, chart.GetDestination(), initLine, chart.GetMinKubernetes(), chart.GetMaxKubernetes()) + if writeErr != nil { + return writeErr + } + } + + return nil +} + +func writeGroupToFile( + resourceName string, + content string, + url string, + destination string, + initLine string, + minKubernetesVersion string, + maxKubernetesVersion string, +) error { + condition, _ := constants.RulesConditionMap[resourceName] + headerData := types.HeaderData{ + Name: strings.ToLower(strings.ReplaceAll(resourceName, "_", "-")), + URL: url, + Condition: condition, + InitLine: initLine, + MinKubeVersion: minKubernetesVersion, + MaxKubeVersion: maxKubernetesVersion, + } + + preparedContent, headerErr := constants.NewRuleHeader(headerData) + if headerErr != nil { + panic(headerErr) + } + + content = FixGroupsIndent(content) + + // Adjust rules + re := regexp.MustCompile(`\s(?i)(by|on) ?\(`) + replacement := ` ${1} ({{ range $.Values.defaultRules.additionalAggregationLabels }}{{ . 
}},{{ end }}` + preparedContent += re.ReplaceAllString(content, replacement) + + preparedContent += "{{- end }}" + + filename := resourceName + ".yaml" + newFilename := fmt.Sprintf("%s/%s", destination, filename) + + // make sure directories to store the file exist + dirErr := os.MkdirAll(destination, os.ModePerm) + if dirErr != nil { + return dirErr + } + + // Recreate the file + writeErr := os.WriteFile(newFilename, []byte(preparedContent), 0644) + if writeErr != nil { + return writeErr + } + + logrus.Infof("Generated %s", newFilename) + + return nil +} diff --git a/internal/cmd/updatemonitoringmixins/syncprom/prepare.go b/internal/cmd/updatemonitoringmixins/syncprom/prepare.go new file mode 100644 index 0000000..ea076ad --- /dev/null +++ b/internal/cmd/updatemonitoringmixins/syncprom/prepare.go @@ -0,0 +1,98 @@ +package syncprom + +import ( + "encoding/json" + "fmt" + "github.com/go-git/go-git/v5/plumbing" + "github.com/rancher/ob-charts-tool/internal/cmd/updatemonitoringmixins/jsonnet" + "github.com/rancher/ob-charts-tool/internal/cmd/updatemonitoringmixins/types" + mainGit "github.com/rancher/ob-charts-tool/internal/git" + "github.com/sirupsen/logrus" + "os" + "path/filepath" +) + +func prepareGitRules(currentState *chartState, tempDir string, chart types.RulesGitSource, chartPath string) error { + if chart.Source == "" { + chart.Source = "_mixin.jsonnet" + } + + url := chart.Repository.RepoURL + baseName := filepath.Base(url) + clonePath := filepath.Join(tempDir, baseName) + + // Remove the clonePath if it exists from previous runs... 
+ _ = os.RemoveAll(clonePath) + + branch := "main" + if chart.Repository.Branch != "" { + branch = chart.Repository.Branch + } + branchHead := chart.Repository.HeadSha + if branchHead == "" { + var headErr error + branchHead, headErr = mainGit.FindBranchHeadSha(chart.Repository.RepoURL, branch) + if headErr != nil { + return headErr + } + } + + configParams := mainGit.RepoConfigParams{ + Name: chart.Repository.Name, + URL: chart.Repository.RepoURL, + Branch: branch, + Head: plumbing.NewHash(branchHead), + } + + logrus.Infof("Cloning %s to %s", chart.Repository.RepoURL, clonePath) + _, cloneErr := mainGit.CachedShallowCloneRepository(configParams, clonePath) + if cloneErr != nil { + return cloneErr + } + + if chart.Mixin != "" { + currentState.cwd = tempDir + + sourceCwd := chart.Cwd + mixinFile := chart.Source + + mixinDir := filepath.Join(clonePath, sourceCwd) + currentState.mixinDir = mixinDir + jbErr := jsonnet.InitJsonnetBuilder(mixinDir) + if jbErr != nil { + return jbErr + } + + // TODO this is where python checks for content field in charts + + logrus.Infof("Generatring rules from %s", mixinFile) + vm := jsonnet.NewVm(currentState.mixinDir) + renderedJson, err := vm.EvaluateAnonymousSnippet( + mixinFile, + chart.Mixin, + ) + if err != nil { + return err + } + + var alerts Alerts + jsonErr := json.Unmarshal([]byte(renderedJson), &alerts) + if jsonErr != nil { + return jsonErr + } + currentState.alerts = alerts + currentState.url = url + } else { + sourcePath := filepath.Join(tempDir, chart.Source) + sourceContent, err := os.ReadFile(sourcePath) + if err != nil { + return err + } + currentState.rawText = string(sourceContent) + fmt.Println(currentState.rawText) + // TODO parse to alerts + currentState.alerts = Alerts{} + } + + return nil +} diff --git a/internal/cmd/updatemonitoringmixins/syncprom/types.go b/internal/cmd/updatemonitoringmixins/syncprom/types.go new file mode 100644 index 0000000..1d9e7a2 --- /dev/null +++ 
// chartState accumulates the per-chart data gathered while syncing rules:
// where the repo was checked out, the raw/parsed rule content, and the
// source metadata recorded in generated file headers.
type chartState struct {
	cwd      string // scratch directory the repo was cloned under
	mixinDir string // directory containing the jsonnet mixin
	rawText  string // raw rule text when no mixin rendering was done
	alerts   Alerts // parsed alert/rule groups
	source   string
	url      string
}

// Alerts mirrors the top-level object produced by rendering a Prometheus
// mixin: a list of alerting/recording rule groups.
type Alerts struct {
	Groups []AlertGroup `json:"groups"`
}

// AlertGroup is a named group of Prometheus rules with an optional
// evaluation interval.
type AlertGroup struct {
	Interval string    `json:"interval,omitempty" yaml:"interval,omitempty"`
	Name     string    `json:"name" yaml:"name"`
	Rules    PromRules `json:"rules" yaml:"rules"`
}

// PromRules is a list of alerting or recording rules.
type PromRules []PromRule

// PromRule is a single Prometheus alerting or recording rule; normally
// exactly one of Alert or Record is set.
type PromRule struct {
	Alert       string            `json:"alert,omitempty" yaml:"alert,omitempty"`
	Annotations map[string]string `json:"annotations,omitempty" yaml:"annotations,omitempty"`
	// Explicit yaml tag added for consistency with the sibling fields;
	// yaml's default lowercasing already produced "expr", so behavior is
	// unchanged.
	Expr   string            `json:"expr" yaml:"expr"`
	For    string            `json:"for,omitempty" yaml:"for,omitempty"`
	Labels map[string]string `json:"labels,omitempty" yaml:"labels,omitempty"`
	Record string            `json:"record,omitempty" yaml:"record,omitempty"`
}
"yaml", + DashboardYaml: "yaml", + DashboardJsonnetMixin: "jsonnet_mixin", +} + +func MaybeDashboardType(maybeDashboard string) (*DashboardType, error) { + for typeName, typeString := range dashboardTypeName { + if maybeDashboard == typeString { + return &typeName, nil + } + } + return nil, errors.New("unknown dashboard type") +} + +func (dt *DashboardType) String() string { + return dashboardTypeName[*dt] +} + +func (dt *DashboardType) UnmarshalJSON(b []byte) error { + var s string + if err := json.Unmarshal(b, &s); err != nil { + return err + } + maybeDash, err := MaybeDashboardType(s) + if err != nil { + return err + } + *dt = *maybeDash + + return nil +} + +func (dt *DashboardType) MarshalJSON() ([]byte, error) { + s := dt.String() + return json.Marshal(s) +} + +type KatesVersions interface { + GetMinKubernetes() string + GetMaxKubernetes() string + SetMaxKubernetes(string) +} + +type DashboardSource interface { + GetDestination() string + GetMinKubernetes() string + GetMaxKubernetes() string + SetMaxKubernetes(string) + GetMulticlusterKey() string + GetType() DashboardType +} + +type DashboardSourceBase struct { + Destination string `json:"destination"` + Type DashboardType `json:"type"` + MinKubernetes string `json:"min_kubernetes"` + MaxKubernetes string `json:"max_kubernetes,omitempty"` + MulticlusterKey string `json:"multicluster_key"` +} + +func (ds *DashboardSourceBase) GetDestination() string { + chartBaseDir := config.GetContext().ChartRootDir + return filepath.Join(chartBaseDir, ds.Destination) +} + +// DashboardFileSource represents a dashboard sourced from a local file. 
// DashboardFileSource represents a dashboard sourced from a local file.
type DashboardFileSource struct {
	Source string `json:"source"`
	DashboardSourceBase
}

// GetDestination resolves the destination against the configured chart root.
func (dfs *DashboardFileSource) GetDestination() string {
	return dfs.DashboardSourceBase.GetDestination()
}

// GetMinKubernetes returns the minimum supported kubernetes version.
func (dfs *DashboardFileSource) GetMinKubernetes() string {
	return dfs.DashboardSourceBase.MinKubernetes
}

// GetMaxKubernetes returns the maximum supported kubernetes version.
func (dfs *DashboardFileSource) GetMaxKubernetes() string {
	return dfs.DashboardSourceBase.MaxKubernetes
}

// SetMaxKubernetes sets the maximum supported kubernetes version.
func (dfs *DashboardFileSource) SetMaxKubernetes(s string) {
	dfs.DashboardSourceBase.MaxKubernetes = s
}

// GetMulticlusterKey returns the multicluster values key for this source.
func (dfs *DashboardFileSource) GetMulticlusterKey() string {
	return dfs.DashboardSourceBase.MulticlusterKey
}

// GetType returns the dashboard format of this source.
func (dfs *DashboardFileSource) GetType() DashboardType {
	return dfs.DashboardSourceBase.Type
}

// DashboardURLSource represents a dashboard sourced from a remote URL.
type DashboardURLSource struct {
	Source string `json:"source"`
	DashboardSourceBase
}

// GetDestination resolves the destination against the configured chart root.
func (dus *DashboardURLSource) GetDestination() string {
	return dus.DashboardSourceBase.GetDestination()
}

// GetMinKubernetes returns the minimum supported kubernetes version.
func (dus *DashboardURLSource) GetMinKubernetes() string {
	return dus.DashboardSourceBase.MinKubernetes
}

// GetMaxKubernetes returns the maximum supported kubernetes version.
func (dus *DashboardURLSource) GetMaxKubernetes() string {
	return dus.DashboardSourceBase.MaxKubernetes
}

// SetMaxKubernetes sets the maximum supported kubernetes version.
func (dus *DashboardURLSource) SetMaxKubernetes(s string) {
	dus.DashboardSourceBase.MaxKubernetes = s
}

// GetMulticlusterKey returns the multicluster values key for this source.
func (dus *DashboardURLSource) GetMulticlusterKey() string {
	return dus.DashboardSourceBase.MulticlusterKey
}

// GetType returns the dashboard format of this source.
func (dus *DashboardURLSource) GetType() DashboardType {
	return dus.DashboardSourceBase.Type
}
// GetDestination resolves the destination against the configured chart root.
func (dgs *DashboardGitSource) GetDestination() string {
	return dgs.DashboardSourceBase.GetDestination()
}

// GetMinKubernetes returns the minimum supported kubernetes version.
func (dgs *DashboardGitSource) GetMinKubernetes() string {
	return dgs.DashboardSourceBase.MinKubernetes
}

// GetMaxKubernetes returns the maximum supported kubernetes version.
func (dgs *DashboardGitSource) GetMaxKubernetes() string {
	return dgs.DashboardSourceBase.MaxKubernetes
}

// SetMaxKubernetes sets the maximum supported kubernetes version.
func (dgs *DashboardGitSource) SetMaxKubernetes(s string) {
	dgs.DashboardSourceBase.MaxKubernetes = s
}

// GetMulticlusterKey returns the multicluster values key for this source.
func (dgs *DashboardGitSource) GetMulticlusterKey() string {
	return dgs.DashboardSourceBase.MulticlusterKey
}

// GetType returns the dashboard format of this source.
func (dgs *DashboardGitSource) GetType() DashboardType {
	return dgs.DashboardSourceBase.Type
}

// DashboardsConfig is a slice that can hold any of the specific dashboard source types.
type DashboardsConfig []interface{}

// RulesGitSource describes a git repository containing Prometheus rule
// sources (optionally a jsonnet mixin) and where the rendered rules land.
type RulesGitSource struct {
	Repository    git.RepoConfigStatus `json:"repository"`
	Source        string               `json:"source,omitempty"`
	Cwd           string               `json:"cwd,omitempty"`
	Destination   string               `json:"destination,omitempty"`
	MinKubernetes string               `json:"min_kubernetes,omitempty"`
	MaxKubernetes string               `json:"max_kubernetes,omitempty"`
	Mixin         string               `json:"mixin,omitempty"`
}

// GetMinKubernetes returns the minimum supported kubernetes version.
func (r *RulesGitSource) GetMinKubernetes() string {
	return r.MinKubernetes
}

// GetMaxKubernetes returns the maximum supported kubernetes version.
func (r *RulesGitSource) GetMaxKubernetes() string {
	return r.MaxKubernetes
}

// SetMaxKubernetes sets the maximum supported kubernetes version.
func (r *RulesGitSource) SetMaxKubernetes(s string) {
	r.MaxKubernetes = s
}

// GetDestination resolves the destination against the configured chart root.
func (r *RulesGitSource) GetDestination() string {
	chartBaseDir := config.GetContext().ChartRootDir
	return filepath.Join(chartBaseDir, r.Destination)
}

// RulesConfig is the list of configured rule sources for a chart.
type RulesConfig []RulesGitSource
root: ", err)) + } + + cacheInstance = &GitCacheManager{ + rootDir: cacheRoot, + } + }) +} + +func GetGitCacheManager() (*GitCacheManager, error) { + if cacheInstance == nil { + return nil, errors.New("git cache manager not initialized") + } + + return cacheInstance, nil +} + +// checkDestDirState will error if the destDir is anything but: a) non-existent, or b) empty +func checkDestDirState(destDir string) error { + info, err := os.Stat(destDir) + if err != nil { + // When the dir doesn't exist we can continue + if os.IsNotExist(err) { + // Directory doesn't exist, create it + if err := os.MkdirAll(destDir, 0755); err != nil { + return fmt.Errorf("failed to create destination directory: %w", err) + } + + return nil + } + + return err + } + + if !info.IsDir() { + return fmt.Errorf("destination exists but is not a directory: %s", destDir) + } + entries, err := os.ReadDir(destDir) + if err != nil { + return fmt.Errorf("error reading destination directory: %w", err) + } + if len(entries) > 0 { + return fmt.Errorf("destination directory is not empty: %s", destDir) + } + return nil +} diff --git a/internal/git/cache/types.go b/internal/git/cache/types.go new file mode 100644 index 0000000..c34caa6 --- /dev/null +++ b/internal/git/cache/types.go @@ -0,0 +1,67 @@ +package cache + +import ( + "errors" + "github.com/go-git/go-git/v5/plumbing" + cp "github.com/otiai10/copy" + "github.com/rancher/ob-charts-tool/internal/util" + "os" + "path/filepath" + "sync" +) + +var ( + once sync.Once + cacheInstance *GitCacheManager +) + +type GitCacheManager struct { + rootDir string +} + +func (gitCache *GitCacheManager) GetCacheDir() string { + return gitCache.rootDir +} + +func (gitCache *GitCacheManager) GetRepoDir(repoName string, sha string, useShallow *bool) string { + rootDir := util.GetRepoCloneDir(gitCache.rootDir, repoName, sha) + + if useShallow != nil && *useShallow { + return filepath.Join(rootDir, "shallow") + } + return filepath.Join(rootDir, repoName) +} + +func 
// HasRepoCache reports whether a cached checkout exists for repoName at sha
// (in its shallow or full variant). Only a missing path maps to false; any
// other stat failure is returned as an error.
func (gitCache *GitCacheManager) HasRepoCache(repoName string, sha string, useShallow *bool) (bool, error) {
	repoPath := gitCache.GetRepoDir(repoName, sha, useShallow)
	if _, err := os.Stat(repoPath); err != nil {
		if os.IsNotExist(err) {
			return false, nil
		}
		return false, err
	}

	return true, nil
}

// CopyCacheTo copies a cached checkout of repoName@sha into destinationDir,
// which must be absent or an empty directory. It returns the sha on success.
func (gitCache *GitCacheManager) CopyCacheTo(repoName string, sha plumbing.Hash, destinationDir string, useShallow *bool) (plumbing.Hash, error) {
	if has, _ := gitCache.HasRepoCache(repoName, sha.String(), useShallow); !has {
		return plumbing.ZeroHash, errors.New("repo not initialized")
	}

	repoCacheDir := gitCache.GetRepoDir(repoName, sha.String(), useShallow)
	// Verify destinationDir is empty or doesn't exist yet
	destErr := checkDestDirState(destinationDir)
	if destErr != nil {
		return plumbing.ZeroHash, destErr
	}

	// If we know the dest is in OK shape, then copy source from cache...
	copyErr := cp.Copy(repoCacheDir, destinationDir)
	if copyErr != nil {
		return plumbing.ZeroHash, copyErr
	}

	// On success, report the hash that was copied.
	return sha, nil
}

// RepoConfigParams identifies a repository to clone: its name, remote URL,
// branch, and (optionally) a specific commit to check out.
type RepoConfigParams struct {
	Name   string
	URL    string
	Branch string
	Head   plumbing.Hash
}

// ShallowCloneRepository performs a depth-1 clone of repoConfig.URL into
// destinationDir and returns the hash of the checked-out HEAD.
//
// When Branch is set, only that branch is cloned. When Head is set, that
// commit is checked out; if the shallow clone did not bring it in, the commit
// is fetched explicitly (depth 1) before the checkout.
func ShallowCloneRepository(repoConfig RepoConfigParams, destinationDir string) (plumbing.Hash, error) {
	cloneOptions := git.CloneOptions{
		URL:               repoConfig.URL,
		Depth:             1,
		RecurseSubmodules: git.NoRecurseSubmodules,
		Progress:          nil,
	}

	// If a branch is provided, set it
	if repoConfig.Branch != "" {
		cloneOptions.ReferenceName = plumbing.NewBranchReferenceName(repoConfig.Branch)
		cloneOptions.SingleBranch = true
	}

	repoClone, err := git.PlainClone(destinationDir, false, &cloneOptions)
	if err != nil {
		return plumbing.ZeroHash, err
	}

	// If a specific commit (Head) is provided, check it out
	if !repoConfig.Head.IsZero() {
		worktree, err := repoClone.Worktree()
		if err != nil {
			return plumbing.ZeroHash, err
		}

		// Attempt to resolve the hash (it may not be available yet)
		_, err = repoClone.CommitObject(repoConfig.Head)
		if err != nil {
			// Commit not present: fetch the specific commit explicitly
			err = repoClone.Fetch(&git.FetchOptions{
				Depth:    1,
				Progress: nil,
				RefSpecs: []config.RefSpec{
					config.RefSpec(fmt.Sprintf("+%s:refs/temp/%s", repoConfig.Head.String(), repoConfig.Head.String())),
				},
			})
			if err != nil && !errors.Is(err, git.NoErrAlreadyUpToDate) {
				return plumbing.ZeroHash, fmt.Errorf("failed to fetch specific commit: %w", err)
			}
		}

		err = worktree.Checkout(&git.CheckoutOptions{
			Hash: repoConfig.Head,
		})
		if err != nil {
			return plumbing.ZeroHash, err
		}
	}

	head, err := repoClone.Head()
	if err != nil {
		return plumbing.ZeroHash, err
	}
	return head.Hash(), nil
}

// CachedShallowCloneRepository is ShallowCloneRepository backed by the git
// cache: on a cache miss the shallow clone is created in the cache first,
// then the cached checkout is copied into destinationDir.
func CachedShallowCloneRepository(repoConfig RepoConfigParams, destinationDir string) (plumbing.Hash, error) {
	cacheManager, err := cache.GetGitCacheManager()
	if err != nil {
		return plumbing.ZeroHash, err
	}
	// First find and check cache path,
	// If not exist, create in cache path first.
	useShallow := true
	hasCache, err := cacheManager.HasRepoCache(repoConfig.Name, repoConfig.Head.String(), &useShallow)
	if !hasCache && err == nil {
		_, cloneErr := ShallowCloneRepository(repoConfig, cacheManager.GetRepoDir(repoConfig.Name, repoConfig.Head.String(), &useShallow))
		if cloneErr != nil {
			return plumbing.ZeroHash, cloneErr
		}
	} else if err != nil {
		return plumbing.ZeroHash, err
	}
	// Then we will copy from cache to dest.
	return cacheManager.CopyCacheTo(repoConfig.Name, repoConfig.Head, destinationDir, &useShallow)
}
// CloneRepository performs a full (non-shallow) clone of repoConfig.URL into
// destinationDir and returns the hash of the checked-out HEAD.
//
// Unlike ShallowCloneRepository it ignores repoConfig.Branch and Head and
// clones the remote's default branch in full.
func CloneRepository(repoConfig RepoConfigParams, destinationDir string) (plumbing.Hash, error) {
	repoClone, err := git.PlainClone(destinationDir, false, &git.CloneOptions{
		URL:               repoConfig.URL,
		RecurseSubmodules: git.NoRecurseSubmodules,
		Progress:          nil,
	})
	if err != nil {
		return plumbing.ZeroHash, err
	}
	head, err := repoClone.Head()
	if err != nil {
		return plumbing.ZeroHash, err
	}
	return head.Hash(), nil
}

// CachedCloneRepository is CloneRepository backed by the git cache: on a
// cache miss the full clone is created in the cache first, then the cached
// checkout is copied into destinationDir.
func CachedCloneRepository(repoConfig RepoConfigParams, destinationDir string) (plumbing.Hash, error) {
	cacheManager, err := cache.GetGitCacheManager()
	if err != nil {
		return plumbing.ZeroHash, err
	}
	// First find and check cache path,
	// If not exist, create in cache path first.
	useShallow := false
	hasCache, err := cacheManager.HasRepoCache(repoConfig.Name, repoConfig.Head.String(), &useShallow)
	if !hasCache && err == nil {
		_, cloneErr := CloneRepository(repoConfig, cacheManager.GetRepoDir(repoConfig.Name, repoConfig.Head.String(), &useShallow))
		if cloneErr != nil {
			return plumbing.ZeroHash, cloneErr
		}
	} else if err != nil {
		return plumbing.ZeroHash, err
	}
	// Then we will copy from cache to dest.
	return cacheManager.CopyCacheTo(repoConfig.Name, repoConfig.Head, destinationDir, &useShallow)
}
+	return cacheManager.CopyCacheTo(repoConfig.Name, repoConfig.Head, destinationDir, &useShallow)
+}
diff --git a/internal/git/main.go b/internal/git/main.go
index ac1c323..09b8707 100644
--- a/internal/git/main.go
+++ b/internal/git/main.go
@@ -2,13 +2,12 @@ package git
 
 import (
 	"fmt"
-	"strings"
-
 	"github.com/go-git/go-git/v5"
 	"github.com/go-git/go-git/v5/config"
 	"github.com/go-git/go-git/v5/plumbing"
 	"github.com/rancher/ob-charts-tool/internal/util"
 	log "github.com/sirupsen/logrus"
+	"strings"
 )
 
 func VerifyTagExists(repo string, tag string) (bool, string, string) {
@@ -36,9 +35,9 @@ func VerifyTagExists(repo string, tag string) (bool, string, string) {
 	return found, expectedTagRef, hash
 }
 
-func FindTagsMatching(repo string, tagPartial string) (bool, []*plumbing.Reference) {
+func FindTagsMatching(repoUrl string, tagPartial string) (bool, []*plumbing.Reference) {
 	remote := git.NewRemote(nil, &config.RemoteConfig{URLs: []string{
-		repo,
+		repoUrl,
 	}})
 	refs, err := remote.List(&git.ListOptions{})
 	if err != nil {
@@ -55,3 +54,67 @@ func FindTagsMatching(repo string, tagPartial string) (bool, []*plumbing.Referen
 	return false, refs
 }
+
+// FindRepoHeadSha returns the commit SHA that the remote's HEAD (its default branch) points to.
+func FindRepoHeadSha(repoUrl string) (string, error) {
+	remote := git.NewRemote(nil, &config.RemoteConfig{
+		Name: "origin",
+		URLs: []string{repoUrl},
+	})
+	refs, err := remote.List(&git.ListOptions{})
+	if err != nil {
+		return "", fmt.Errorf("failed to list remote refs: %w", err)
+	}
+
+	var headTarget *plumbing.ReferenceName
+	for _, ref := range refs {
+		if ref.Name() == plumbing.HEAD {
+			target := ref.Target()
+			headTarget = &target
+			break
+		}
+	}
+
+	if headTarget == nil {
+		return "", fmt.Errorf("HEAD reference not found")
+	}
+
+	// Now find the actual reference it points to (the default branch's tip)
+	for _, ref := range refs {
+		if ref.Name() == *headTarget {
+			return ref.Hash().String(), nil
+		}
+	}
+
+	return "", fmt.Errorf("could not resolve HEAD target %s", *headTarget)
+}
+
+// FindBranchHeadSha returns the tip SHA of the named branch on the remote; a value that is already a full SHA is returned as-is.
+func FindBranchHeadSha(repoUrl, branch string) (string, error) {
+	// Check if the branch is already a valid SHA
+	if plumbing.IsHash(branch) {
+		return branch, nil
+	}
+
+	remote := git.NewRemote(nil, &config.RemoteConfig{
+		Name: "origin",
+		URLs: []string{repoUrl},
+	})
+	refs, err := remote.List(&git.ListOptions{
+		Timeout: 30,
+	})
+	if err != nil {
+		return "", fmt.Errorf("failed to list remote refs: %w", err)
+	}
+
+	branchRef := plumbing.NewBranchReferenceName(branch)
+
+	for _, ref := range refs {
+		refName := ref.Name()
+		if refName == branchRef {
+			return ref.Hash().String(), nil
+		}
+	}
+
+	return "", fmt.Errorf("branch %s not found in remote", branch)
+}
diff --git a/internal/rebase/chart.go b/internal/rebase/chart.go
index 37a0787..db4bb49 100644
--- a/internal/rebase/chart.go
+++ b/internal/rebase/chart.go
@@ -40,10 +40,10 @@ func findNewestReleaseTag(chartDep ChartDep) (bool, *plumbing.Reference) {
 		version = strings.ReplaceAll(version, ".*", "")
 	}
 
-	repo := upstream.IdentifyChartUpstream(chartDep.Name)
+	repoUrl := upstream.IdentifyChartUpstream(chartDep.Name)
 	tag := fmt.Sprintf("%s-%s", chartDep.Name, version)
 
-	found, tags := git.FindTagsMatching(repo, tag)
+	found, tags := git.FindTagsMatching(repoUrl, tag)
 	if !found {
 		panic("Could not find any tags for this chart")
 	}
diff --git a/internal/util/git.go b/internal/util/git.go
new file mode 100644
index 0000000..572d6d3
--- /dev/null
+++ b/internal/util/git.go
@@ -0,0 +1,10 @@
+package util
+
+import (
+	"fmt"
+	"path/filepath"
+)
+
+func GetRepoCloneDir(baseDir string, repoName string, sha string) string {
+	return filepath.Join(baseDir, fmt.Sprintf("%s@%s", repoName, sha))
+}
diff --git a/internal/util/paths.go b/internal/util/paths.go
new file mode 100644
index 0000000..8cda76b
--- /dev/null
+++ b/internal/util/paths.go
@@ -0,0 +1,36 @@
+package util
+
+import (
+	"fmt"
+	"os"
+	"path/filepath"
+)
+
+func GetCacheDir(appName string) (string, error) {
+	cacheDir, err := os.UserCacheDir()
+	if err != nil {
+		return "", err
+	}
+
+	return filepath.Join(cacheDir, appName), nil
+}
+
+func IsFile(path string) bool {
+	info, err := os.Stat(path)
+	if err != nil {
+		return false // e.g., file doesn't exist
+	}
+	return info.Mode().IsRegular()
+}
+
+func GetRelativePath(cwd, filePath string) (string, error) {
+	cleanCwd := filepath.Clean(cwd)
+	cleanFilePath := filepath.Clean(filePath)
+
+	relPath, err := filepath.Rel(cleanCwd, cleanFilePath)
+	if err != nil {
+		return "", fmt.Errorf("could not determine relative path for %s: %w", filePath, err)
+	}
+
+	return relPath, nil
+}