156 changes: 156 additions & 0 deletions providers/openstack/rke2/README.md
# Cluster Stacks

## Getting started

To begin, ensure you have a functional Rancher Management Dashboard. Note that there must be no existing downstream clusters installed via the "custom-cluster" method, as the Cluster Stacks Operator (CSO) will conflict with the cluster management.

For Rancher versions prior to 2.13, Rancher Turtles must be installed manually to enable Cluster API (CAPI) support. Refer to the Rancher CAPI Overview for details: <https://ranchermanager.docs.rancher.com/integrations-in-rancher/cluster-api/overview>
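
For older Rancher versions, Turtles is installed as a Helm chart. A minimal sketch, assuming the chart repository, release name, and namespace documented in the Rancher Turtles quickstart (verify them against the current docs):

```sh
# Install Rancher Turtles on Rancher < 2.13 (repo URL, chart name, and flags as documented upstream; verify before use)
helm repo add turtles https://rancher.github.io/turtles
helm repo update
helm install rancher-turtles turtles/rancher-turtles \
  -n rancher-turtles-system \
  --create-namespace \
  --dependency-update
```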

For Rancher versions 2.13 and later, Rancher Turtles is preinstalled and enabled by default. However, the Rancher Turtles UI extension must be installed separately. Installation instructions can be found in the Turtles Quickstart Guide. <https://turtles.docs.rancher.com/turtles/v0.24/en/tutorials/quickstart.html#_capi_ui_extension_installation>

Once the environment is prepared, install the required providers via the GUI by navigating to Cluster Management > CAPI > Provider > Create and entering the values from the table below; a declarative alternative is sketched after the table.

|Key|Value bootstrap|Value controlplane|Value infrastructure|
|---|---|---|---|
|Namespace|rke2-bootstrap|rke2-controlplane|capo-system|
|Name|rke2-bootstrap|rke2-controlplane|infrastructure-openstack|
|Provider|rke2|rke2|openstack|
|Provider type|bootstrap|controlPlane|infrastructure|
|Features: Enable cluster resource set|yes|yes|yes|
|Features: Enable cluster topology|yes|yes|yes|
|Features: Enable machine pool|yes|yes|yes|
|Variables|EXP_RUNTIME_SDK=true|EXP_RUNTIME_SDK=true|EXP_RUNTIME_SDK=true|
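
If you prefer a declarative setup over the GUI, Rancher Turtles manages the same configuration through `CAPIProvider` resources. The sketch below shows the bootstrap provider only; the controlPlane and infrastructure providers follow the same pattern with the values from the table. The `CAPIProvider` field names (`features`, `variables`) are assumptions based on the Turtles API and should be verified against its reference documentation.

```sh
# Hypothetical declarative equivalent of the GUI "Create provider" step (verify field names against the Turtles CAPIProvider reference)
kubectl create namespace rke2-bootstrap --dry-run=client -o yaml | kubectl apply -f -
cat <<EOF | kubectl apply -f -
apiVersion: turtles-capi.cattle.io/v1alpha1
kind: CAPIProvider
metadata:
  name: rke2-bootstrap
  namespace: rke2-bootstrap
spec:
  name: rke2
  type: bootstrap
  features:
    clusterResourceSet: true
    clusterTopology: true
    machinePool: true
  variables:
    EXP_RUNTIME_SDK: "true"
EOF
```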



```sh
# Install the OpenStack Resource Controller (ORC), required by CAPO
kubectl apply -f https://github.com/k-orc/openstack-resource-controller/releases/latest/download/install.yaml
```

```sh
# Install CSO and CSPO
helm upgrade -i cso \
  -n cso-system \
  --create-namespace \
  oci://registry.scs.community/cluster-stacks/cso
```
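
Before continuing, it can help to confirm that the operator pods are running and that the ClusterStack CRDs were registered. A quick sanity check (namespace matches the command above):

```sh
# Sanity check: CSO pods up and ClusterStack CRDs present
kubectl -n cso-system get pods
kubectl get crds | grep clusterstack
```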

```sh
export CLUSTER_NAMESPACE=cluster
export CLUSTER_NAME=my-cluster
export CLUSTERSTACK_NAMESPACE=cluster
export CLUSTERSTACK_VERSION=v6
export OS_CLIENT_CONFIG_FILE=${PWD}/clouds.yaml
kubectl create namespace $CLUSTER_NAMESPACE --dry-run=client -o yaml | kubectl apply -f -
kubectl label namespace $CLUSTER_NAMESPACE cluster-api.cattle.io/rancher-auto-import=true
```

```sh
# Create secret for CAPO
kubectl create secret -n $CLUSTER_NAMESPACE generic openstack --from-file=clouds.yaml=$OS_CLIENT_CONFIG_FILE --dry-run=client -oyaml | kubectl apply -f -

# Prepare the Secret as it will be deployed in the Workload Cluster
kubectl create secret -n kube-system generic clouds-yaml --from-file=clouds.yaml=$OS_CLIENT_CONFIG_FILE --dry-run=client -oyaml > clouds-yaml-secret

# Add the Secret to the ClusterResourceSet Secret in the Management Cluster
kubectl create -n $CLUSTER_NAMESPACE secret generic clouds-yaml --from-file=clouds-yaml-secret --type=addons.cluster.x-k8s.io/resource-set --dry-run=client -oyaml | kubectl apply -f -
```

```sh
# Create a ClusterResourceSet that deploys the wrapped clouds-yaml Secret into matching workload clusters
cat <<EOF | kubectl apply -f -
apiVersion: addons.cluster.x-k8s.io/v1beta1
kind: ClusterResourceSet
metadata:
  name: clouds-yaml
  namespace: $CLUSTER_NAMESPACE
spec:
  strategy: "Reconcile"
  clusterSelector:
    matchLabels:
      managed-secret: clouds-yaml
  resources:
    - name: clouds-yaml
      kind: Secret
EOF
```
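
Every Cluster in this namespace that carries the label `managed-secret: clouds-yaml` will get the wrapped Secret applied by the ClusterResourceSet controller. Once the Cluster below exists, this can be checked from the management cluster:

```sh
# A ClusterResourceSetBinding records which resources were applied to which cluster
kubectl -n $CLUSTER_NAMESPACE get clusterresourceset clouds-yaml
kubectl -n $CLUSTER_NAMESPACE get clusterresourcesetbindings
```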

```sh
# Apply ClusterStack resource
cat <<EOF | kubectl apply -f -
apiVersion: clusterstack.x-k8s.io/v1alpha1
kind: ClusterStack
metadata:
  name: openstack
  namespace: $CLUSTERSTACK_NAMESPACE
spec:
  provider: openstack
  name: rke2
  kubernetesVersion: "1.33"
  channel: stable
  autoSubscribe: false
  noProvider: true
  versions:
    - $CLUSTERSTACK_VERSION
EOF
```
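
CSO renders a ClusterStackRelease for each subscribed version, which in turn provides the ClusterClass referenced by the Cluster below. Wait until these report ready before applying the Cluster, for example:

```sh
# Wait for the ClusterStack release and the resulting ClusterClass
kubectl -n $CLUSTERSTACK_NAMESPACE get clusterstack openstack
kubectl -n $CLUSTERSTACK_NAMESPACE get clusterstackreleases
kubectl -n $CLUSTERSTACK_NAMESPACE get clusterclasses
```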

```sh
# Apply Cluster resource
cat <<EOF | kubectl apply -f -
apiVersion: cluster.x-k8s.io/v1beta1
kind: Cluster
metadata:
  name: $CLUSTER_NAME
  namespace: $CLUSTER_NAMESPACE
  labels:
    managed-secret: clouds-yaml
spec:
  clusterNetwork:
    pods:
      cidrBlocks:
        - "172.16.0.0/16"
    serviceDomain: cluster.local
    services:
      cidrBlocks:
        - "10.96.0.0/12"
  topology:
    variables:
      - name: clusterCNI
        value: "cilium" # Calico is also possible, but must be patched after installation: kubectl patch ippools.crd.projectcalico.org default-ipv4-ippool --type='json' -p '[{"op": "replace", "path": "/spec/ipipMode", "value":"CrossSubnet"}]'
      - name: apiServerLoadBalancer
        value: "octavia-ovn"
      - name: imageAddVersion
        value: false
      - name: imageName
        value: "Ubuntu 24.04"
      - name: workerFlavor
        value: "SCS-4V-8"
      - name: controlPlaneFlavor
        value: "SCS-4V-8"
      - name: bastionFlavor
        value: "SCS-2V-4"
      - name: bastionEnabled
        value: true
    class: openstack-rke2-1-33-$CLUSTERSTACK_VERSION
    classNamespace: $CLUSTERSTACK_NAMESPACE
    controlPlane:
      replicas: 1
    version: v1.33.6+rke2r1
    workers:
      machineDeployments:
        - class: default-worker
          name: md-0
          replicas: 1
EOF
```
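
Provisioning takes a few minutes. Progress can be followed from the management cluster:

```sh
# Follow provisioning from the management cluster
kubectl -n $CLUSTER_NAMESPACE get cluster,machines
clusterctl describe cluster -n $CLUSTER_NAMESPACE $CLUSTER_NAME
```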

```sh
# Fetch the workload cluster kubeconfig and check that the nodes are up
clusterctl get kubeconfig -n $CLUSTER_NAMESPACE $CLUSTER_NAME > /tmp/kubeconfig
kubectl get nodes --kubeconfig /tmp/kubeconfig

# Expose rke2-ingress-nginx via a LoadBalancer service and set its loadBalancerIP
# (replace xxx.xxx.xxx.xxx with the address the ingress load balancer should use)
kubectl --kubeconfig /tmp/kubeconfig -n kube-system patch HelmChart.helm.cattle.io rke2-ingress-nginx --type=json \
  -p '[{"op": "add", "path": "/spec/set/controller.service.enabled", "value": "true"}, {"op": "add", "path": "/spec/set/controller.service.loadBalancerIP", "value": "xxx.xxx.xxx.xxx"}]'
```
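
After the patch, RKE2 redeploys the ingress controller with a LoadBalancer service. The assigned address can be checked in the workload cluster; the service name below is the RKE2 default and may differ in your setup:

```sh
# Check that the ingress LoadBalancer received the expected IP
kubectl --kubeconfig /tmp/kubeconfig -n kube-system get svc rke2-ingress-nginx-controller
```
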
10 changes: 10 additions & 0 deletions providers/openstack/rke2/cluster-addon/ccm/Chart.yaml
apiVersion: v2
type: application
description: OpenStack Cloud Controller Manager (CCM) cluster addon
name: CCM
version: v1
dependencies:
  - alias: openstack-cloud-controller-manager
    name: openstack-cloud-controller-manager
    repository: https://kubernetes.github.io/cloud-provider-openstack
    version: 2.33.1
4 changes: 4 additions & 0 deletions providers/openstack/rke2/cluster-addon/ccm/overwrite.yaml
values: |
  openstack-cloud-controller-manager:
    cluster:
      name: {{ .Cluster.metadata.name }}
21 changes: 21 additions & 0 deletions providers/openstack/rke2/cluster-addon/ccm/values.yaml
openstack-cloud-controller-manager:
  secret:
    enabled: true
    name: ccm-cloud-config
    create: true
  nodeSelector:
  tolerations:
    - key: node.cloudprovider.kubernetes.io/uninitialized
      value: "true"
      effect: NoSchedule
  extraVolumes:
    - name: clouds-yaml
      secret:
        secretName: clouds-yaml
  extraVolumeMounts:
    - name: clouds-yaml
      readOnly: true
      mountPath: /etc/openstack
  cloudConfig:
    global:
      use-clouds: true
10 changes: 10 additions & 0 deletions providers/openstack/rke2/cluster-addon/csi/Chart.yaml
apiVersion: v2
type: application
description: OpenStack Cinder CSI cluster addon
name: CSI
version: v1
dependencies:
  - alias: openstack-cinder-csi
    name: openstack-cinder-csi
    repository: https://kubernetes.github.io/cloud-provider-openstack
    version: 2.33.1
3 changes: 3 additions & 0 deletions providers/openstack/rke2/cluster-addon/csi/overwrite.yaml
values: |
  openstack-cinder-csi:
    clusterID: "{{ .Cluster.metadata.name }}"
41 changes: 41 additions & 0 deletions providers/openstack/rke2/cluster-addon/csi/values.yaml
openstack-cinder-csi:
  secret:
    enabled: true
    name: csi-cloud-config
    create: true
    filename: cloud.conf
    data:
      cloud.conf: |-
        [Global]
        use-clouds = "true"
        clouds-file = /etc/openstack/clouds.yaml
  storageClass:
    delete:
      isDefault: true
  csi:
    plugin:
      volumes:
        - name: clouds-yaml
          secret:
            secretName: clouds-yaml
        - name: cloud-conf
          secret:
            secretName: csi-cloud-config
      volumeMounts:
        - name: clouds-yaml
          readOnly: true
          mountPath: /etc/openstack
        - name: cloud-conf
          readOnly: true
          mountPath: /etc/kubernetes
        - name: cloud-conf
          readOnly: true
          mountPath: /etc/config
  nodeSelector:
    node-role.kubernetes.io/control-plane: ""
  tolerations:
    - key: node.cloudprovider.kubernetes.io/uninitialized
      value: "true"
      effect: NoSchedule
    - key: node-role.kubernetes.io/control-plane
      effect: NoSchedule
23 changes: 23 additions & 0 deletions providers/openstack/rke2/cluster-class/.helmignore
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/
9 changes: 9 additions & 0 deletions providers/openstack/rke2/cluster-class/Chart.yaml
apiVersion: v2
description: "This chart installs and configures:

* Openstack rke2 Cluster Class

"
name: openstack-rke2-1-33-cluster-class
type: application
version: v1
62 changes: 62 additions & 0 deletions providers/openstack/rke2/cluster-class/templates/_helpers.tpl
{{/*
Expand the name of the chart.
*/}}
{{- define "cluster-class.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}

{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "cluster-class.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}

{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "cluster-class.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}

{{/*
Common labels
*/}}
{{- define "cluster-class.labels" -}}
helm.sh/chart: {{ include "cluster-class.chart" . }}
{{ include "cluster-class.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}

{{/*
Selector labels
*/}}
{{- define "cluster-class.selectorLabels" -}}
app.kubernetes.io/name: {{ include "cluster-class.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}

{{/*
Create the name of the service account to use
*/}}
{{- define "cluster-class.serviceAccountName" -}}
{{- if .Values.serviceAccount.create }}
{{- default (include "cluster-class.fullname" .) .Values.serviceAccount.name }}
{{- else }}
{{- default "default" .Values.serviceAccount.name }}
{{- end }}
{{- end }}