diff --git a/.gitignore b/.gitignore
index 13ffb04..f0743a6 100644
--- a/.gitignore
+++ b/.gitignore
@@ -14,7 +14,7 @@ crash.log
# control as they are data points which are potentially sensitive and subject
# to change depending on the environment.
#
-*.tfvars
+**/*.tfvars
# Ignore files for local testing
test.tf
@@ -72,3 +72,8 @@ terraform.rc
# Go workspace file
go.work
+
+**/ge_topology
+
+# TODO: temporary ignore — remove once this module is deleted
+modules/sm-istio-ingress-sdnlb
diff --git a/.secrets.baseline b/.secrets.baseline
index 0a4efa4..40af044 100644
--- a/.secrets.baseline
+++ b/.secrets.baseline
@@ -3,7 +3,7 @@
"files": "go.sum|^.secrets.baseline$",
"lines": null
},
- "generated_at": "2024-11-22T17:36:38Z",
+ "generated_at": "2025-11-14T11:16:57Z",
"plugins_used": [
{
"name": "AWSKeyDetector"
@@ -82,13 +82,13 @@
"hashed_secret": "ff9ee043d85595eb255c05dfe32ece02a53efbb2",
"is_secret": false,
"is_verified": false,
- "line_number": 74,
+ "line_number": 73,
"type": "Secret Keyword",
"verified_result": null
}
]
},
- "version": "0.13.1+ibm.62.dss",
+ "version": "0.13.1+ibm.64.dss",
"word_list": {
"file": null,
"hash": null
diff --git a/README.md b/README.md
index 2c46c84..ed7517a 100644
--- a/README.md
+++ b/README.md
@@ -27,9 +27,8 @@ TODO: Replace this with a description of the modules in this repo.
## Overview
* [terraform-ibm-ocp-service-mesh](#terraform-ibm-ocp-service-mesh)
+* [Submodules](./modules)
* [Examples](./examples)
- * [Advanced example](./examples/advanced)
- * [Basic example](./examples/basic)
* [Contributing](#contributing)
@@ -123,7 +122,10 @@ statement instead the previous block.
| Name | Version |
|------|---------|
| [terraform](#requirement\_terraform) | >= 1.9.0 |
-| [ibm](#requirement\_ibm) | >= 1.71.2, < 2.0.0 |
+| [helm](#requirement\_helm) | >= 3.0.0 |
+| [ibm](#requirement\_ibm) | >= 1.59.0, < 2.0.0 |
+| [null](#requirement\_null) | >= 3.2.1, < 4.0.0 |
+| [time](#requirement\_time) | >= 0.9.1, < 1.0.0 |
### Modules
@@ -133,25 +135,23 @@ No modules.
| Name | Type |
|------|------|
-| [ibm_resource_instance.cos_instance](https://registry.terraform.io/providers/IBM-Cloud/ibm/latest/docs/resources/resource_instance) | resource |
+| [helm_release.service_mesh_operator](https://registry.terraform.io/providers/hashicorp/helm/latest/docs/resources/release) | resource |
+| [null_resource.undeploy_servicemesh](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource |
+| [time_sleep.wait_operators](https://registry.terraform.io/providers/hashicorp/time/latest/docs/resources/sleep) | resource |
+| [ibm_container_cluster_config.cluster_config](https://registry.terraform.io/providers/ibm-cloud/ibm/latest/docs/data-sources/container_cluster_config) | data source |
### Inputs
| Name | Description | Type | Default | Required |
|------|-------------|------|---------|:--------:|
-| [name](#input\_name) | A descriptive name used to identify the resource instance. | `string` | n/a | yes |
-| [plan](#input\_plan) | The name of the plan type supported by service. | `string` | `"standard"` | no |
-| [resource\_group\_id](#input\_resource\_group\_id) | The ID of the resource group where you want to create the service. | `string` | n/a | yes |
-| [resource\_tags](#input\_resource\_tags) | List of resource tag to associate with the instance. | `list(string)` | `[]` | no |
+| [cluster\_config\_endpoint\_type](#input\_cluster\_config\_endpoint\_type) | Specify which type of endpoint to use for cluster config access: 'default', 'private', 'vpe', 'link'. 'default' value will use the default endpoint of the cluster. | `string` | `"default"` | no |
+| [cluster\_id](#input\_cluster\_id) | Id of the target IBM Cloud OpenShift Cluster | `string` | n/a | yes |
+| [deploy\_operator](#input\_deploy\_operator) | Enable installing RedHat Service Mesh Operator | `bool` | `true` | no |
+| [develop\_mode](#input\_develop\_mode) | If true, output more logs, and reduce some wait periods | `bool` | `false` | no |
### Outputs
-| Name | Description |
-|------|-------------|
-| [account\_id](#output\_account\_id) | An alpha-numeric value identifying the account ID. |
-| [crn](#output\_crn) | The CRN of the resource instance. |
-| [guid](#output\_guid) | The GUID of the resource instance. |
-| [id](#output\_id) | The unique identifier of the resource instance. |
+No outputs.
diff --git a/chart/servicemeshoperator/.helmignore b/chart/servicemeshoperator/.helmignore
new file mode 100644
index 0000000..0e8a0eb
--- /dev/null
+++ b/chart/servicemeshoperator/.helmignore
@@ -0,0 +1,23 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*.orig
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/chart/servicemeshoperator/Chart.yaml b/chart/servicemeshoperator/Chart.yaml
new file mode 100644
index 0000000..47c15c4
--- /dev/null
+++ b/chart/servicemeshoperator/Chart.yaml
@@ -0,0 +1,24 @@
+apiVersion: v2
+name: service-mesh-operator
+description: A Helm chart for Kubernetes
+
+# A chart can be either an 'application' or a 'library' chart.
+#
+# Application charts are a collection of templates that can be packaged into versioned archives
+# to be deployed.
+#
+# Library charts provide useful utilities or functions for the chart developer. They're included as
+# a dependency of application charts to inject those utilities and functions into the rendering
+# pipeline. Library charts do not define any templates and therefore cannot be deployed.
+type: application
+
+# This is the chart version. This version number should be incremented each time you make changes
+# to the chart and its templates, including the app version.
+# Versions are expected to follow Semantic Versioning (https://semver.org/)
+version: 0.0.1
+
+# This is the version number of the application being deployed. This version number should be
+# incremented each time you make changes to the application. Versions are not expected to
+# follow Semantic Versioning. They should reflect the version the application is using.
+# It is recommended to use it with quotes.
+appVersion: "0.0.1"
diff --git a/chart/servicemeshoperator/templates/service-mesh-operator-subscription.yaml b/chart/servicemeshoperator/templates/service-mesh-operator-subscription.yaml
new file mode 100644
index 0000000..5071ddc
--- /dev/null
+++ b/chart/servicemeshoperator/templates/service-mesh-operator-subscription.yaml
@@ -0,0 +1,15 @@
+apiVersion: operators.coreos.com/v1alpha1
+kind: Subscription
+metadata:
+ labels:
+ operators.coreos.com/servicemeshoperator3.openshift-operators: ""
+ name: {{ .Values.operators.name }}
+ namespace: {{ .Values.operators.namespace }}
+spec:
+ channel: stable
+ installPlanApproval: {{ .Values.operators.installplanapproval }}
+ name: {{ .Values.operators.name }}
+ source: {{ .Values.operators.source }}
+ sourceNamespace: {{ .Values.operators.sourcenamespace }}
+ # startingCSV: servicemeshoperator3.v3.0.1
+ # startingCSV: servicemeshoperator3.v3.0.3
diff --git a/chart/servicemeshoperator/values.yaml b/chart/servicemeshoperator/values.yaml
new file mode 100755
index 0000000..fd40fca
--- /dev/null
+++ b/chart/servicemeshoperator/values.yaml
@@ -0,0 +1,11 @@
+# Default values for service-mesh.
+# This is a YAML-formatted file.
+# Declare variables to be passed into your templates.
+
+operators:
+ namespace: openshift-operators
+ name: servicemeshoperator3
+ version: v3.0.2
+ source: redhat-operators
+ sourcenamespace: openshift-marketplace
+ installplanapproval: Manual
diff --git a/common-dev-assets b/common-dev-assets
index 2ba5cc2..c432877 160000
--- a/common-dev-assets
+++ b/common-dev-assets
@@ -1 +1 @@
-Subproject commit 2ba5cc2c867361e8bcf34bd95f7359cc03d82b25
+Subproject commit c4328778ce1a62bc85f641d9249adaac0493cfc9
diff --git a/examples/advanced/README.md b/examples/advanced/README.md
deleted file mode 100644
index d52511a..0000000
--- a/examples/advanced/README.md
+++ /dev/null
@@ -1,4 +0,0 @@
-# Advanced example
-
-
-
diff --git a/examples/advanced/main.tf b/examples/advanced/main.tf
deleted file mode 100644
index 88360af..0000000
--- a/examples/advanced/main.tf
+++ /dev/null
@@ -1,32 +0,0 @@
-########################################################################################################################
-# Resource group
-########################################################################################################################
-
-module "resource_group" {
- source = "terraform-ibm-modules/resource-group/ibm"
- version = "1.2.0"
- # if an existing resource group is not set (null) create a new one using prefix
- resource_group_name = var.resource_group == null ? "${var.prefix}-resource-group" : null
- existing_resource_group_name = var.resource_group
-}
-
-########################################################################################################################
-# COS
-########################################################################################################################
-
-#
-# Developer tips:
-# - Call the local module / modules in the example to show how they can be consumed
-# - Include the actual module source as a code comment like below so consumers know how to consume from correct location
-#
-
-module "cos" {
- source = "../.."
- # remove the above line and uncomment the below 2 lines to consume the module from the registry
- # source = "terraform-ibm-modules//ibm"
- # version = "X.Y.Z" # Replace "X.Y.Z" with a release version to lock into a specific release
- name = "${var.prefix}-cos"
- resource_group_id = module.resource_group.resource_group_id
- resource_tags = var.resource_tags
- plan = "cos-one-rate-plan"
-}
diff --git a/examples/advanced/outputs.tf b/examples/advanced/outputs.tf
deleted file mode 100644
index 316751f..0000000
--- a/examples/advanced/outputs.tf
+++ /dev/null
@@ -1,38 +0,0 @@
-##############################################################################
-# Outputs
-##############################################################################
-
-#
-# Developer tips:
-# - Include all relevant outputs from the modules being called in the example
-#
-
-output "account_id" {
- description = "An alpha-numeric value identifying the account ID."
- value = module.cos.account_id
-}
-
-output "guid" {
- description = "The GUID of the resource instance."
- value = module.cos.account_id
-}
-
-output "id" {
- description = "The unique identifier of the resource instance."
- value = module.cos.id
-}
-
-output "crn" {
- description = "The CRN of the resource instance."
- value = module.cos.crn
-}
-
-output "resource_group_name" {
- description = "Resource group name."
- value = module.resource_group.resource_group_name
-}
-
-output "resource_group_id" {
- description = "Resource group ID."
- value = module.resource_group.resource_group_id
-}
diff --git a/examples/advanced/provider.tf b/examples/advanced/provider.tf
deleted file mode 100644
index 2080946..0000000
--- a/examples/advanced/provider.tf
+++ /dev/null
@@ -1,8 +0,0 @@
-##############################################################################
-# Provider config
-##############################################################################
-
-provider "ibm" {
- ibmcloud_api_key = var.ibmcloud_api_key
- region = var.region
-}
diff --git a/examples/advanced/variables.tf b/examples/advanced/variables.tf
deleted file mode 100644
index d460364..0000000
--- a/examples/advanced/variables.tf
+++ /dev/null
@@ -1,39 +0,0 @@
-########################################################################################################################
-# Input variables
-########################################################################################################################
-
-#
-# Module developer tips:
-# - Examples are references that consumers can use to see how the module can be consumed. They are not designed to be
-# flexible re-usable solutions for general consumption, so do not expose any more variables here and instead hard
-# code things in the example main.tf with code comments explaining the different configurations.
-# - For the same reason as above, do not add default values to the example inputs.
-#
-
-variable "ibmcloud_api_key" {
- type = string
- description = "The IBM Cloud API Key."
- sensitive = true
-}
-
-variable "region" {
- type = string
- description = "Region to provision all resources created by this example."
-}
-
-variable "prefix" {
- type = string
- description = "A string value to prefix to all resources created by this example."
-}
-
-variable "resource_group" {
- type = string
- description = "The name of an existing resource group to provision resources in to. If not set a new resource group will be created using the prefix variable."
- default = null
-}
-
-variable "resource_tags" {
- type = list(string)
- description = "List of resource tag to associate with all resource instances created by this example."
- default = []
-}
diff --git a/examples/advanced/version.tf b/examples/advanced/version.tf
deleted file mode 100644
index ecfa978..0000000
--- a/examples/advanced/version.tf
+++ /dev/null
@@ -1,16 +0,0 @@
-terraform {
- required_version = ">= 1.9.0"
-
- #
- # Developer tips:
- # - Ensure that there is always 1 example locked into the lowest provider version of the range defined in the main
- # module's version.tf (usually a basic example), and 1 example that will always use the latest provider version.
- #
-
- required_providers {
- ibm = {
- source = "IBM-Cloud/ibm"
- version = ">= 1.71.2, < 2.0.0"
- }
- }
-}
diff --git a/examples/basic/README.md b/examples/basic/README.md
deleted file mode 100644
index e5977ae..0000000
--- a/examples/basic/README.md
+++ /dev/null
@@ -1,11 +0,0 @@
-# Basic example
-
-
-
-An end-to-end basic example that will provision the following:
-- A new resource group if one is not passed in.
-- A new standard plan Cloud Object Storage instance using the root level module.
diff --git a/examples/basic/main.tf b/examples/basic/main.tf
index cf665db..caaed7f 100644
--- a/examples/basic/main.tf
+++ b/examples/basic/main.tf
@@ -1,31 +1,104 @@
-########################################################################################################################
-# Resource group
-########################################################################################################################
+##############################################################################
+# Resource Group
+##############################################################################
module "resource_group" {
- source = "terraform-ibm-modules/resource-group/ibm"
- version = "1.2.0"
+ source = "git::https://github.com/terraform-ibm-modules/terraform-ibm-resource-group.git?ref=v1.3.0"
# if an existing resource group is not set (null) create a new one using prefix
resource_group_name = var.resource_group == null ? "${var.prefix}-resource-group" : null
existing_resource_group_name = var.resource_group
}
########################################################################################################################
-# COS
+# VPC + Subnet + Public Gateway
+#
+# NOTE: This is a very simple VPC with single subnet in a single zone with a public gateway enabled, that will allow
+# all traffic ingress/egress by default.
+# For production use cases this would need to be enhanced by adding more subnets and zones for resiliency, and
+# ACLs/Security Groups for network security.
########################################################################################################################
-#
-# Developer tips:
-# - Call the local module / modules in the example to show how they can be consumed
-# - include the actual module source as a code comment like below so consumers know how to consume from correct location
-#
+resource "ibm_is_vpc" "vpc" {
+ name = "${var.prefix}-vpc"
+ resource_group = module.resource_group.resource_group_id
+ address_prefix_management = "auto"
+ tags = var.resource_tags
+}
+
+resource "ibm_is_public_gateway" "gateway" {
+ name = "${var.prefix}-gateway-1"
+ vpc = ibm_is_vpc.vpc.id
+ resource_group = module.resource_group.resource_group_id
+ zone = "${var.region}-1"
+}
+
+resource "ibm_is_subnet" "subnet_zone_1" {
+ name = "${var.prefix}-subnet-1"
+ vpc = ibm_is_vpc.vpc.id
+ resource_group = module.resource_group.resource_group_id
+ zone = "${var.region}-1"
+ total_ipv4_address_count = 256
+ public_gateway = ibm_is_public_gateway.gateway.id
+}
+
+########################################################################################################################
+# OCP VPC cluster (single zone)
+########################################################################################################################
+
+locals {
+ cluster_vpc_subnets = {
+ default = [
+ {
+ id = ibm_is_subnet.subnet_zone_1.id
+ cidr_block = ibm_is_subnet.subnet_zone_1.ipv4_cidr_block
+ zone = ibm_is_subnet.subnet_zone_1.zone
+ }
+ ]
+ }
+
+ worker_pools = [
+ {
+ subnet_prefix = "default"
+ pool_name = "default" # ibm_container_vpc_cluster automatically names default pool "default" (See https://github.com/IBM-Cloud/terraform-provider-ibm/issues/2849)
+ machine_type = "bx2.4x16"
+ workers_per_zone = 2 # minimum of 2 is allowed when using single zone
+ operating_system = "RHEL_9_64"
+ }
+ ]
+}
+
+##############################################################################
+# OCP CLUSTER
+##############################################################################
-module "cos" {
- source = "../.."
- # remove the above line and uncomment the below 2 lines to consume the module from the registry
- # source = "terraform-ibm-modules//ibm"
- # version = "X.Y.Z" # Replace "X.Y.Z" with a release version to lock into a specific release
- name = "${var.prefix}-cos"
+module "ocp_base" {
+ source = "terraform-ibm-modules/base-ocp-vpc/ibm"
+ version = "3.71.4"
+ resource_group_id = module.resource_group.resource_group_id
+ region = var.region
+ tags = var.resource_tags
+ cluster_name = "${var.prefix}-cluster"
+ force_delete_storage = true
+ vpc_id = ibm_is_vpc.vpc.id
+ vpc_subnets = local.cluster_vpc_subnets
+ worker_pools = local.worker_pools
+ disable_outbound_traffic_protection = true # set as True to enable outbound traffic; required for accessing Operator Hub in the OpenShift console.
+}
+
+##############################################################################
+# Init cluster config for helm and kubernetes providers
+##############################################################################
+
+data "ibm_container_cluster_config" "cluster_config" {
+ cluster_name_id = module.ocp_base.cluster_id
resource_group_id = module.resource_group.resource_group_id
- resource_tags = var.resource_tags
+ endpoint_type = "default"
+}
+
+module "service_mesh_operator" {
+ source = "../.."
+ cluster_id = module.ocp_base.cluster_id
+ deploy_operator = var.deploy_operator
+ develop_mode = var.develop_mode
+ cluster_config_endpoint_type = var.cluster_config_endpoint_type
}
diff --git a/examples/basic/outputs.tf b/examples/basic/outputs.tf
index 552db48..21858a4 100644
--- a/examples/basic/outputs.tf
+++ b/examples/basic/outputs.tf
@@ -1,38 +1,4 @@
-########################################################################################################################
-# Outputs
-########################################################################################################################
-
-#
-# Developer tips:
-# - Include all relevant outputs from the modules being called in the example
-#
-
-output "account_id" {
- description = "An alpha-numeric value identifying the account ID."
- value = module.cos.account_id
-}
-
-output "guid" {
- description = "The GUID of the resource instance."
- value = module.cos.account_id
-}
-
-output "id" {
- description = "The unique identifier of the resource instance."
- value = module.cos.id
-}
-
-output "crn" {
- description = "The CRN of the resource instance."
- value = module.cos.crn
-}
-
-output "resource_group_name" {
- description = "Resource group name."
- value = module.resource_group.resource_group_name
-}
-
-output "resource_group_id" {
- description = "Resource group ID."
- value = module.resource_group.resource_group_id
+output "cluster_id" {
+ value = module.ocp_base.cluster_id
+ description = "The id of the cluster"
}
diff --git a/examples/basic/provider.tf b/examples/basic/provider.tf
index 84b6985..34116e0 100644
--- a/examples/basic/provider.tf
+++ b/examples/basic/provider.tf
@@ -1,8 +1,22 @@
-########################################################################################################################
-# Provider config
-########################################################################################################################
+##############################################################################
+# Config providers
+##############################################################################
provider "ibm" {
ibmcloud_api_key = var.ibmcloud_api_key
region = var.region
}
+
+provider "helm" {
+ kubernetes = {
+ host = data.ibm_container_cluster_config.cluster_config.host
+ token = data.ibm_container_cluster_config.cluster_config.token
+ cluster_ca_certificate = data.ibm_container_cluster_config.cluster_config.ca_certificate
+ }
+}
+
+provider "kubernetes" {
+ host = data.ibm_container_cluster_config.cluster_config.host
+ token = data.ibm_container_cluster_config.cluster_config.token
+ cluster_ca_certificate = data.ibm_container_cluster_config.cluster_config.ca_certificate
+}
diff --git a/examples/basic/variables.tf b/examples/basic/variables.tf
index d460364..ba39764 100644
--- a/examples/basic/variables.tf
+++ b/examples/basic/variables.tf
@@ -1,39 +1,51 @@
-########################################################################################################################
-# Input variables
-########################################################################################################################
-
-#
-# Module developer tips:
-# - Examples are references that consumers can use to see how the module can be consumed. They are not designed to be
-# flexible re-usable solutions for general consumption, so do not expose any more variables here and instead hard
-# code things in the example main.tf with code comments explaining the different configurations.
-# - For the same reason as above, do not add default values to the example inputs.
-#
-
variable "ibmcloud_api_key" {
type = string
- description = "The IBM Cloud API Key."
+ description = "IBM Cloud API Key for a user / serviceId with write access to the corresponding namespace in the OCP cluster"
sensitive = true
}
-variable "region" {
+variable "prefix" {
type = string
- description = "Region to provision all resources created by this example."
+  description = "Prefix for the names of all resources created by this example"
+ default = "ocpsm-basic"
}
-variable "prefix" {
+variable "region" {
type = string
- description = "A string value to prefix to all resources created by this example."
+ description = "Region where resources are created"
}
variable "resource_group" {
type = string
- description = "The name of an existing resource group to provision resources in to. If not set a new resource group will be created using the prefix variable."
+ description = "Optionally pass an existing resource group name to be used. If not passed a new one will be created"
default = null
}
variable "resource_tags" {
type = list(string)
- description = "List of resource tag to associate with all resource instances created by this example."
+ description = "Optional list of tags to be added to created resources"
default = []
}
+
+variable "deploy_operator" {
+ type = bool
+ description = "Enable installing RedHat Service Mesh Operator"
+ default = true
+}
+
+variable "develop_mode" {
+ type = bool
+ description = "If true, output more logs, and reduce some wait periods"
+ default = false
+}
+
+variable "cluster_config_endpoint_type" {
+  description = "Specify which type of endpoint to use for cluster config access: 'default', 'private', 'vpe', 'link'. 'default' value will use the default endpoint of the cluster."
+ type = string
+ default = "default"
+ nullable = false
+ validation {
+ error_message = "Invalid Endpoint Type! Valid values are 'default', 'private', 'vpe', or 'link'"
+ condition = contains(["default", "private", "vpe", "link"], var.cluster_config_endpoint_type)
+ }
+}
diff --git a/examples/basic/version.tf b/examples/basic/version.tf
index 401504c..719c164 100644
--- a/examples/basic/version.tf
+++ b/examples/basic/version.tf
@@ -1,16 +1,18 @@
terraform {
required_version = ">= 1.9.0"
-
- #
- # Developer tips:
- # - Ensure that there is always 1 example locked into the lowest provider version of the range defined in the main
- # module's version.tf (usually a basic example), and 1 example that will always use the latest provider version.
- #
-
required_providers {
+ # Pin to the lowest provider version of the range defined in the main module to ensure lowest version still works
ibm = {
source = "IBM-Cloud/ibm"
- version = "1.71.2"
+ version = ">= 1.59.0"
+ }
+ helm = {
+ source = "hashicorp/helm"
+ version = ">= 3.0.0, <4.0.0"
+ }
+ kubernetes = {
+ source = "hashicorp/kubernetes"
+ version = ">= 2.16.1, < 3.0.0"
}
}
}
diff --git a/examples/exiting_cluster/main.tf b/examples/exiting_cluster/main.tf
new file mode 100644
index 0000000..cae552d
--- /dev/null
+++ b/examples/exiting_cluster/main.tf
@@ -0,0 +1,36 @@
+##############################################################################
+# Resource Group
+##############################################################################
+
+module "resource_group" {
+ source = "git::https://github.com/terraform-ibm-modules/terraform-ibm-resource-group.git?ref=v1.3.0"
+ # if an existing resource group is not set (null) create a new one using prefix
+ resource_group_name = var.resource_group == null ? "${var.prefix}-resource-group" : null
+ existing_resource_group_name = var.resource_group
+}
+
+############################################################################
+# CLUSTER PROXY
+############################################################################
+
+module "cluster_proxy" {
+ source = "git::https://github.ibm.com/GoldenEye/cluster-proxy-module.git?ref=4.2.4"
+ cluster_id = var.existing_cluster_id
+}
+
+##############################################################################
+# Init cluster config for helm and kubernetes providers for existing cluster
+##############################################################################
+
+data "ibm_container_cluster_config" "cluster_config" {
+ cluster_name_id = var.existing_cluster_id
+ resource_group_id = module.resource_group.resource_group_id
+}
+
+module "service_mesh_operator" {
+ source = "../.."
+ cluster_id = var.existing_cluster_id
+ deploy_operator = var.deploy_operator
+ develop_mode = var.develop_mode
+ cluster_config_endpoint_type = var.cluster_config_endpoint_type
+}
diff --git a/examples/exiting_cluster/outputs.tf b/examples/exiting_cluster/outputs.tf
new file mode 100644
index 0000000..e69de29
diff --git a/examples/exiting_cluster/provider.tf b/examples/exiting_cluster/provider.tf
new file mode 100644
index 0000000..2d42889
--- /dev/null
+++ b/examples/exiting_cluster/provider.tf
@@ -0,0 +1,23 @@
+##############################################################################
+# Config providers
+##############################################################################
+
+provider "ibm" {
+ ibmcloud_api_key = var.ibmcloud_api_key
+ region = var.region
+}
+
+provider "helm" {
+ kubernetes = {
+ host = data.ibm_container_cluster_config.cluster_config.host
+ token = data.ibm_container_cluster_config.cluster_config.token
+ }
+ # experiments = {
+ # manifest = true
+ # }
+}
+
+provider "kubernetes" {
+ host = data.ibm_container_cluster_config.cluster_config.host
+ token = data.ibm_container_cluster_config.cluster_config.token
+}
diff --git a/examples/exiting_cluster/variables.tf b/examples/exiting_cluster/variables.tf
new file mode 100644
index 0000000..fa80484
--- /dev/null
+++ b/examples/exiting_cluster/variables.tf
@@ -0,0 +1,51 @@
+variable "ibmcloud_api_key" {
+ type = string
+ description = "IBM Cloud API Key for a user / serviceId with write access to the corresponding namespace in the OCP cluster"
+ sensitive = true
+}
+
+variable "prefix" {
+ type = string
+  description = "Prefix for the names of all resources created by this example"
+ default = "ocpsm-excluster"
+}
+
+variable "region" {
+ type = string
+ description = "Region where resources are created"
+}
+
+variable "existing_cluster_id" {
+ type = string
+ description = "Existing cluster ID to deploy the ServiceMesh"
+ nullable = false
+}
+
+variable "resource_group" {
+ type = string
+ description = "Optionally pass an existing resource group name to be used. If not passed a new one will be created"
+ default = null
+}
+
+variable "deploy_operator" {
+ type = bool
+ description = "Enable installing RedHat Service Mesh Operator"
+ default = true
+}
+
+variable "develop_mode" {
+ type = bool
+ description = "If true, output more logs, and reduce some wait periods"
+ default = false
+}
+
+variable "cluster_config_endpoint_type" {
+  description = "Specify which type of endpoint to use for cluster config access: 'default', 'private', 'vpe', 'link'. 'default' value will use the default endpoint of the cluster."
+ type = string
+ default = "default"
+ nullable = false
+ validation {
+ error_message = "Invalid Endpoint Type! Valid values are 'default', 'private', 'vpe', or 'link'"
+ condition = contains(["default", "private", "vpe", "link"], var.cluster_config_endpoint_type)
+ }
+}
diff --git a/examples/exiting_cluster/version.tf b/examples/exiting_cluster/version.tf
new file mode 100644
index 0000000..551561b
--- /dev/null
+++ b/examples/exiting_cluster/version.tf
@@ -0,0 +1,18 @@
+terraform {
+ required_version = ">= 1.9.0"
+ required_providers {
+ # Pin to the lowest provider version of the range defined in the main module to ensure lowest version still works
+ ibm = {
+ source = "IBM-Cloud/ibm"
+ version = ">= 1.59.0"
+ }
+ helm = {
+ source = "hashicorp/helm"
+ version = ">= 3.0.0"
+ }
+ kubernetes = {
+ source = "hashicorp/kubernetes"
+ version = ">= 2.16.1, < 3.0.0"
+ }
+ }
+}
diff --git a/examples/securetopology/main.tf b/examples/securetopology/main.tf
new file mode 100644
index 0000000..9a26388
--- /dev/null
+++ b/examples/securetopology/main.tf
@@ -0,0 +1,189 @@
+########################################################################################################################
+# VPC + Subnet + Public Gateway
+#
+# NOTE: This example deploys a secure VPC spanning 3 zones, with a subnet in each zone for each tier and a public gateway enabled, which allows
+# all ingress/egress traffic by default.
+# The three subnets allow to isolate the cluster nodes for their purpose: edge for public access enabled workers, default for workload deployment, transit for internal traffic
+# For production use cases this would need to be enhanced by adding ACLs/Security Groups for network security.
+########################################################################################################################
+
+##############################################################################
+# Locals
+##############################################################################
+
+locals {
+
+ # VPC Configuration
+ acl_rules_map = {
+ private = concat(
+ module.acl_profile.base_acl,
+ module.acl_profile.https_acl,
+ [
+ {
+ name = "allow-workload-http-inbound"
+ source = "0.0.0.0/0"
+ action = "allow"
+ destination = "0.0.0.0/0"
+ direction = "inbound"
+ tcp = {
+ source_port_min = 1
+ source_port_max = 65535
+ port_min = 80
+ port_max = 80
+ }
+ },
+ {
+ name = "allow-workload-http-outbound"
+ source = "0.0.0.0/0"
+ action = "allow"
+ destination = "0.0.0.0/0"
+ direction = "outbound"
+ tcp = {
+ source_port_min = 80
+ source_port_max = 80
+ port_min = 1
+ port_max = 65535
+ }
+ }
+ ],
+ module.acl_profile.deny_all_acl
+ )
+ }
+ vpc_cidr_bases = {
+ private = "192.168.0.0/20",
+ transit = "192.168.16.0/20",
+ edge = "192.168.32.0/20"
+ }
+
+ # OCP Configuration
+ ocp_worker_pools = [
+ {
+ subnet_prefix = "private"
+ pool_name = "default"
+ machine_type = "bx2.4x16"
+ workers_per_zone = 1
+ operating_system = "RHEL_9_64"
+ },
+ {
+ subnet_prefix = "edge"
+ pool_name = "edge"
+ machine_type = "bx2.4x16"
+ workers_per_zone = 1
+ operating_system = "RHEL_9_64"
+ }
+ ,
+ {
+ subnet_prefix = "transit"
+ pool_name = "transit"
+ machine_type = "bx2.4x16"
+ workers_per_zone = 1
+ operating_system = "RHEL_9_64"
+ }
+ ]
+
+ worker_pools_taints = {
+ all = []
+ transit = [
+ {
+ key = "dedicated"
+ value = "transit"
+ # Pod is evicted from the node if it is already running on the node,
+ # and is not scheduled onto the node if it is not yet running on the node.
+ effect = "NoExecute"
+ }
+ ]
+ edge = [
+ {
+ key = "dedicated"
+ value = "edge"
+ # Pod is evicted from the node if it is already running on the node,
+ # and is not scheduled onto the node if it is not yet running on the node.
+ effect = "NoExecute"
+ }
+ ]
+ default = []
+ }
+
+}
+
+##############################################################################
+# Resource Group
+##############################################################################
+
+module "resource_group" {
+ source = "git::https://github.com/terraform-ibm-modules/terraform-ibm-resource-group.git?ref=v1.3.0"
+ # if an existing resource group is not set (null) create a new one using prefix
+ resource_group_name = var.resource_group == null ? "${var.prefix}-resource-group" : null
+ existing_resource_group_name = var.resource_group
+}
+
+##############################################################################
+# VPC ACLs
+##############################################################################
+
+module "acl_profile" {
+ source = "git::https://github.ibm.com/GoldenEye/acl-profile-ocp.git?ref=1.3.5"
+}
+
+##############################################################################
+# VPC
+##############################################################################
+
+module "vpc" {
+ source = "git::https://github.ibm.com/GoldenEye/vpc-module.git?ref=6.7.3"
+ unique_name = var.prefix
+ ibm_region = var.region
+ resource_group_id = module.resource_group.resource_group_id
+ cidr_bases = local.vpc_cidr_bases
+ acl_rules_map = local.acl_rules_map
+ virtual_private_endpoints = {}
+ vpc_tags = var.resource_tags
+}
+
+##############################################################################
+# OCP CLUSTER
+##############################################################################
+
+module "ocp_base" {
+ source = "terraform-ibm-modules/base-ocp-vpc/ibm"
+ version = "3.71.4"
+ cluster_name = "${var.prefix}-cluster"
+ resource_group_id = module.resource_group.resource_group_id
+ region = var.region
+ force_delete_storage = true
+ vpc_id = module.vpc.vpc_id
+ vpc_subnets = module.vpc.subnets
+ worker_pools = local.ocp_worker_pools
+ worker_pools_taints = local.worker_pools_taints
+ tags = var.resource_tags
+ # outbound required by cluster proxy
+ disable_outbound_traffic_protection = true
+}
+
+############################################################################
+# CLUSTER PROXY
+############################################################################
+
+module "cluster_proxy" {
+ source = "git::https://github.ibm.com/GoldenEye/cluster-proxy-module.git?ref=4.2.4"
+ cluster_id = module.ocp_base.cluster_id
+}
+
+##############################################################################
+# Init cluster config for helm and kubernetes providers
+##############################################################################
+
+data "ibm_container_cluster_config" "cluster_config" {
+ cluster_name_id = module.ocp_base.cluster_id
+ resource_group_id = module.resource_group.resource_group_id
+}
+
+# deploying servicemesh operator
+
+module "service_mesh_operator" {
+ source = "../.."
+ cluster_id = module.ocp_base.cluster_id
+ deploy_operator = var.deploy_operator
+ develop_mode = var.develop_mode
+ cluster_config_endpoint_type = var.cluster_config_endpoint_type
+}
diff --git a/examples/securetopology/outputs.tf b/examples/securetopology/outputs.tf
new file mode 100644
index 0000000..a38cc7c
--- /dev/null
+++ b/examples/securetopology/outputs.tf
@@ -0,0 +1,19 @@
+output "cluster_id" {
+ description = "ID of the deployed cluster"
+ value = module.ocp_base.cluster_id
+}
+
+output "vpc_id" {
+ description = "ID of the deployed VPC"
+ value = module.ocp_base.vpc_id
+}
+
+output "subnets" {
+ description = "Details of the subnets deployed in the VPC and attached to the cluster"
+ value = module.vpc.subnets
+}
+
+output "ingress_alb_subnets" {
+ description = "Details of the subnets deployed in the VPC and attached to the cluster to be attached to the ALB loadbalancer"
+ value = [for subnet in module.vpc.subnets["edge"] : subnet["id"]]
+}
diff --git a/examples/securetopology/provider.tf b/examples/securetopology/provider.tf
new file mode 100644
index 0000000..2d42889
--- /dev/null
+++ b/examples/securetopology/provider.tf
@@ -0,0 +1,23 @@
+##############################################################################
+# Config providers
+##############################################################################
+
+provider "ibm" {
+ ibmcloud_api_key = var.ibmcloud_api_key
+ region = var.region
+}
+
+provider "helm" {
+ kubernetes = {
+ host = data.ibm_container_cluster_config.cluster_config.host
+ token = data.ibm_container_cluster_config.cluster_config.token
+ }
+ # experiments = {
+ # manifest = true
+ # }
+}
+
+provider "kubernetes" {
+ host = data.ibm_container_cluster_config.cluster_config.host
+ token = data.ibm_container_cluster_config.cluster_config.token
+}
diff --git a/examples/securetopology/variables.tf b/examples/securetopology/variables.tf
new file mode 100644
index 0000000..cff9e9f
--- /dev/null
+++ b/examples/securetopology/variables.tf
@@ -0,0 +1,51 @@
+variable "ibmcloud_api_key" {
+ type = string
+ description = "IBM Cloud API Key for a user / serviceId with write access to the corresponding namespace in the OCP cluster"
+ sensitive = true
+}
+
+variable "prefix" {
+ type = string
+ description = "Prefix for name of all resource created by this example"
+ default = "ocp-smv3"
+}
+
+variable "region" {
+ type = string
+ description = "Region where resources are created"
+}
+
+variable "resource_group" {
+ type = string
+ description = "Optionally pass an existing resource group name to be used. If not passed a new one will be created"
+ default = null
+}
+
+variable "resource_tags" {
+ type = list(string)
+ description = "Optional list of tags to be added to created resources"
+ default = []
+}
+
+variable "deploy_operator" {
+ type = bool
+ description = "Enable installing RedHat Service Mesh Operator"
+ default = true
+}
+
+variable "develop_mode" {
+ type = bool
+ description = "If true, output more logs, and reduce some wait periods"
+ default = false
+}
+
+variable "cluster_config_endpoint_type" {
+  description = "Specify which type of endpoint to use for cluster config access: 'default', 'private', 'vpe', 'link'. 'default' value will use the default endpoint of the cluster."
+  type        = string
+  default     = "default"
+  nullable    = false
+  validation {
+    error_message = "Invalid Endpoint Type! Valid values are 'default', 'private', 'vpe', or 'link'"
+    condition     = contains(["default", "private", "vpe", "link"], var.cluster_config_endpoint_type)
+  }
+}
diff --git a/examples/securetopology/version.tf b/examples/securetopology/version.tf
new file mode 100644
index 0000000..551561b
--- /dev/null
+++ b/examples/securetopology/version.tf
@@ -0,0 +1,18 @@
+terraform {
+ required_version = ">= 1.9.0"
+ required_providers {
+ # Pin to the lowest provider version of the range defined in the main module to ensure lowest version still works
+ ibm = {
+ source = "IBM-Cloud/ibm"
+ version = ">= 1.59.0"
+ }
+ helm = {
+ source = "hashicorp/helm"
+ version = ">= 3.0.0"
+ }
+ kubernetes = {
+ source = "hashicorp/kubernetes"
+ version = ">= 2.16.1, < 3.0.0"
+ }
+ }
+}
diff --git a/kubeconfig/.gitignore b/kubeconfig/.gitignore
new file mode 100644
index 0000000..632a28f
--- /dev/null
+++ b/kubeconfig/.gitignore
@@ -0,0 +1,6 @@
+# Ignore everything
+*
+
+# But not these files...
+!.gitignore
+!README.md
diff --git a/kubeconfig/README.md b/kubeconfig/README.md
new file mode 100644
index 0000000..dff9dd4
--- /dev/null
+++ b/kubeconfig/README.md
@@ -0,0 +1,2 @@
+This directory must exist in source control so the `ibm_container_cluster_config` data lookup can use it to place the
+config.yml used to connect to a kubernetes cluster (See https://github.ibm.com/GoldenEye/issues/issues/552).
diff --git a/main.tf b/main.tf
index b6b879e..f289f20 100644
--- a/main.tf
+++ b/main.tf
@@ -1,13 +1,112 @@
-#
-# Developer tips:
-# - Below code should be replaced with the code for the root level module
-#
-
-resource "ibm_resource_instance" "cos_instance" {
- name = var.name
- resource_group_id = var.resource_group_id
- service = "cloud-object-storage"
- plan = var.plan
- location = "global"
- tags = var.resource_tags
+##############################################################################
+# RedHat OpenShift Service Mesh 3
+# Deploy the Service Mesh operator on an OCP cluster and sets up
+# one or several service mesh control plane(s)
+##############################################################################
+
+##############################################################################
+# Locals
+##############################################################################
+
+locals {
+ operators_namespace = "openshift-operators"
+ sm_operator_release_name = "helm-release-smv3-subscription"
+ sm_operator_chart_path = "servicemeshoperator"
+ sm_operator_version = "v3.0.3"
+ sm_operator_name = "servicemeshoperator3"
+
+ # timeout in seconds for operators helm releases to be ready
+ operators_timeout = 600
+
+ # timeout in seconds for the operators to be ready with their installPlan to approve
+ operator_installplan_timeout = 1200
+
+  # Wait periods are deliberately conservative to cover the majority of cases. Divide them by 10 during development.
+  # The wait period is calculated according to the number of operator addons to deploy (base is 60s for the service mesh operator only)
+ sleep_create = var.develop_mode ? 600 : 60
+ sleep_destroy = var.develop_mode ? 360 : 36
+}
+
+##############################################################################
+# Retrieve information about all the Cluster configuration files and
+# certificates to access the cluster through the kubernetes provider
+##############################################################################
+
+data "ibm_container_cluster_config" "cluster_config" {
+ cluster_name_id = var.cluster_id
+ config_dir = "${path.module}/kubeconfig"
+ endpoint_type = var.cluster_config_endpoint_type != "default" ? var.cluster_config_endpoint_type : null # null represents default
+}
+
+##############################################################################
+# RedHat Service Mesh Operator, and its dependencies
+##############################################################################
+
+# installing helm chart to enable subscriptions for openshift servicemesh v3 operator
+resource "helm_release" "service_mesh_operator" {
+ depends_on = [data.ibm_container_cluster_config.cluster_config, null_resource.undeploy_servicemesh]
+ count = var.deploy_operator == true ? 1 : 0
+
+ name = local.sm_operator_release_name
+ chart = "${path.module}/chart/${local.sm_operator_chart_path}"
+ namespace = local.operators_namespace
+ create_namespace = false
+ timeout = local.operators_timeout
+ dependency_update = true
+ force_update = false
+ cleanup_on_fail = false
+ wait = true
+
+ disable_openapi_validation = false
+
+ set = [
+ {
+ name = "smoperator.namespace"
+ type = "string"
+ value = local.operators_namespace
+ }, {
+ name = "smoperator.version"
+ type = "string"
+ value = local.sm_operator_version
+ }, {
+ name = "smoperator.name"
+ type = "string"
+ value = local.sm_operator_name
+ }
+ ]
+
+ provisioner "local-exec" {
+ command = "${path.module}/scripts/approve-install-plan.sh \"${local.operators_namespace}\" ${local.operator_installplan_timeout}"
+ interpreter = ["/bin/bash", "-c"]
+ environment = {
+ KUBECONFIG = data.ibm_container_cluster_config.cluster_config.config_file_path
+ }
+ }
+
+}
+
+# trigger on destroy the removal of operator custom resources
+resource "null_resource" "undeploy_servicemesh" {
+ triggers = {
+ kubeconfig = data.ibm_container_cluster_config.cluster_config.config_file_path
+ namespace = local.operators_namespace
+ operatorname = local.sm_operator_name
+ }
+
+ provisioner "local-exec" {
+ when = destroy
+ command = "${path.module}/scripts/deprovision-sm-operator.sh \"${self.triggers.kubeconfig}\" \"${self.triggers.namespace}\" ${self.triggers.operatorname}"
+ on_failure = continue
+ }
+}
+
+# On create: give time for the istio operator pod to warm up
+# On delete: give time for the crd sm instance to be removed (which depends on running finalizer)
+# Cheap for now - replace with polling of specific resources (depends_on must reference the whole resource, not an instance index)
+resource "time_sleep" "wait_operators" {
+  depends_on = [helm_release.service_mesh_operator]
+  count      = var.deploy_operator == true ? 1 : 0
+
+  create_duration  = "${local.sleep_create}s"
+  destroy_duration = "${local.sleep_destroy}s"
 }
diff --git a/outputs.tf b/outputs.tf
index 0286200..b050c45 100644
--- a/outputs.tf
+++ b/outputs.tf
@@ -1,30 +1,3 @@
-########################################################################################################################
-# Outputs
-########################################################################################################################
-
-#
-# Developer tips:
-# - Below are some good practise sample outputs
-# - They should be updated for outputs applicable to the module being added
-# - Use variable validation when possible
-#
-
-output "account_id" {
- description = "An alpha-numeric value identifying the account ID."
- value = ibm_resource_instance.cos_instance.account_id
-}
-
-output "guid" {
- description = "The GUID of the resource instance."
- value = ibm_resource_instance.cos_instance.guid
-}
-
-output "id" {
- description = "The unique identifier of the resource instance."
- value = ibm_resource_instance.cos_instance.id
-}
-
-output "crn" {
- description = "The CRN of the resource instance."
- value = ibm_resource_instance.cos_instance.crn
-}
+# ##############################################################################
+# # Outputs
+# ##############################################################################
diff --git a/scripts/approve-install-plan-functions.sh b/scripts/approve-install-plan-functions.sh
new file mode 100644
index 0000000..1d5ab26
--- /dev/null
+++ b/scripts/approve-install-plan-functions.sh
@@ -0,0 +1,82 @@
+#!/usr/bin/env bash
+
+approve_install_plan(){
+  local subscription_name=$1
+  local namespace=$2
+  local timeout_secs=$3
+
+  echo "Waiting for installplan $subscription_name in namespace ${namespace} (${timeout_secs}s timeout)..."
+
+  local install_plan
+
+  # if running on macOS use "gtimeout", otherwise use the default "timeout"
+  local timeout_cmd=timeout
+  if [[ $OSTYPE == 'darwin'* ]]; then
+    # If gtimeout not detected on mac, install coreutils
+    if ! gtimeout --help &> /dev/null; then
+      brew install coreutils
+    fi
+    timeout_cmd=gtimeout
+  fi
+  # shellcheck disable=SC2016,SC2086
+  install_plan="$($timeout_cmd $timeout_secs bash -c 'while [[ "$(kubectl get subscription "'"$subscription_name"'" -n "'"$namespace"'" -o jsonpath="{$.status.installplan.name}")" == "" ]]; do sleep 2; done; echo $(kubectl get subscription "'"$subscription_name"'" -n "'"$namespace"'" -o jsonpath="{$.status.installplan.name}")')"
+
+  if [[ $install_plan != "" ]]
+  then
+    echo "Install plan $install_plan found in namespace ${namespace}"
+    echo "Approving install plan $install_plan in namespace ${namespace}"
+    kubectl patch installplan "$install_plan" --type merge --patch "{\"spec\":{\"approved\":true}}" -n "$namespace"
+  else
+    echo "Error: Install plan for $subscription_name was not found (namespace: $namespace)"
+    echo "Grabbing some debug info..."
+    echo
+    echo "kubectl get pods -n openshift-marketplace -o wide"
+    kubectl get pods -n openshift-marketplace -o wide
+    echo
+    echo "kubectl get installplan -n $namespace"
+    kubectl get installplan -n "$namespace"
+    echo
+    echo "kubectl get subscription $subscription_name -n $namespace"
+    kubectl get subscription "$subscription_name" -n "$namespace"
+    echo
+    echo "kubectl describe subscription $subscription_name -n $namespace"
+    kubectl describe subscription "$subscription_name" -n "$namespace"
+    exit 1
+  fi
+
+  echo "Waiting for installplan $install_plan for subscription $subscription_name to be installed (${timeout_secs}s timeout)..."
+  # shellcheck disable=SC2086
+  kubectl wait --for=condition=Installed --timeout ${timeout_secs}s installplan/"$install_plan" -n "$namespace"
+}
+
+wait_for_operator(){
+ local subscription_name=$1
+ local namespace=$2
+
+ echo "Waiting for $subscription_name operator to be running in $namespace (360s timeout)..."
+ local sm_csv=""
+
+ until [[ $sm_csv != "" ]]
+ do
+ echo "Waiting for csv to be created for subscription $subscription_name in $namespace"
+ sm_csv=$(kubectl get subscription "$subscription_name" -o jsonpath="{$.status.installedCSV}" -n "$namespace")
+ sleep 5
+ done
+
+ local loop_count=0
+ echo "CSV found. Waiting for $subscription_name operator to be running in $namespace"
+ until [[ $(kubectl get csv "$sm_csv" -o jsonpath="{$.status.phase}" -n "$namespace") == "Succeeded" || $loop_count -gt 72 ]]
+ do
+ echo "Still waiting for $subscription_name operator to be running"
+ sleep 5
+ loop_count=$((loop_count+1))
+ done
+
+ if [[ $loop_count -gt 72 ]]
+ then
+ echo "Giving up - $subscription_name operator is not running. Check the status of the operator in the $namespace namespace."
+ exit 1
+ fi
+
+ echo "Complete: $subscription_name operator is running in namespace ${namespace}"
+}
diff --git a/scripts/approve-install-plan.sh b/scripts/approve-install-plan.sh
new file mode 100755
index 0000000..e8d8f5a
--- /dev/null
+++ b/scripts/approve-install-plan.sh
@@ -0,0 +1,35 @@
+#!/usr/bin/env bash
+
+## Subscriptions are set for manual approval. This script approves the first installplan for the initial install
+
+SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
+source "${SCRIPT_DIR}/approve-install-plan-functions.sh"
+
+namespace="$1"
+
+if [[ -z $2 ]]; then
+ operator_installplan_timeout=1200
+else
+ operator_installplan_timeout=$2
+fi
+
+### getting list of the operators installed
+echo "Retrieving subscriptions from namespace ${namespace}"
+OPERATORS=$(oc get subscriptions -n "${namespace}" -o jsonpath="{$.items[*].metadata.name}")
+## Wait for, and approve install plan for each operator installed
+# shellcheck disable=SC2068
+for operator in ${OPERATORS[@]}
+do
+ echo "Approving plan for operator ${operator} in namespace ${namespace} with timeout ${operator_installplan_timeout}"
+ approve_install_plan "${operator}" "${namespace}" "${operator_installplan_timeout}"
+done
+
+## Post install waits for each operator installed
+# shellcheck disable=SC2068
+for operator in ${OPERATORS[@]}
+do
+ echo "Waiting for operator ${operator} in namespace ${namespace} to be ready"
+ wait_for_operator "${operator}" "$namespace"
+done
+
+echo "Operators installation complete in namespace ${namespace}"
diff --git a/scripts/confirm-istio-operational.sh b/scripts/confirm-istio-operational.sh
new file mode 100755
index 0000000..d1d34c6
--- /dev/null
+++ b/scripts/confirm-istio-operational.sh
@@ -0,0 +1,83 @@
+#!/usr/bin/env bash
+# This script is designed to verify that all key components of Istio control plane are up and running.
+# This is needed before apps with sidecar injection enabled are deployed
+
+set -e
+
+namespace="${1}"
+service="${2}"
+fail=false
+
+# if service is not set using default value
+if [[ -z "${service}" ]]; then
+ service="istio-ingressgateway"
+fi
+
+echo "Checking istio ingressgateway with name ${service} is fully deployed in namespace ${namespace}"
+
+# sleep 60 seconds initially to provide time for each deployment to get created
+sleep 60
+
+# Get list of deployments in control plane namespace
+DEPLOYMENTS=()
+while IFS='' read -r line; do DEPLOYMENTS+=("$line"); done < <(kubectl get deployments -n "${namespace}" --no-headers | cut -f1 -d ' ')
+
+# Wait for all deployments to come up - timeout after 5 mins
+# shellcheck disable=SC2068
+for dep in ${DEPLOYMENTS[@]}; do
+ if ! kubectl rollout status deployment "$dep" -n "${namespace}" --timeout 5m; then
+ fail=true
+ fi
+done
+
+# Ensure the load balancer hostname is set
+counter=0
+wait=30
+retries=60 # 60 x 30 = 1800 secs = 30 mins
+ext_hostname=""
+while [ -z "${ext_hostname}" ]; do
+
+ # Get the hostname from the kube service (retry needed on service lookup, as sometimes service may not exist yet)
+ attempts=20 # 20 x 30 = 600 secs = 10 mins
+ n=0
+ until [ "$n" -ge $attempts ]; do
+ ext_hostname=$(kubectl get svc "${service}" -n "${namespace}" --template="{{range .status.loadBalancer.ingress}}{{.hostname}}{{end}}") && break
+ n=$((n+1))
+ if [ "$n" = $attempts ]; then
+ echo "Maximum attempts reached for gateway ${service} in namespace ${namespace}. Giving up!"
+ exit 1
+ else
+ echo "Retrying in ${wait} secs .."
+ sleep ${wait}
+ fi
+ done
+
+ # If not set yet, retry
+ if [ -z "${ext_hostname}" ]; then
+ # Give up when number of retries are reached
+ if [ ${counter} == ${retries} ]; then
+ echo "ERROR: Unable to detect external hostname for ${service} in namespace ${namespace}"
+ fail=true
+ break
+ fi
+ counter=$((counter+1))
+ sleep ${wait}
+ else
+ # break the loop if hostname value detected
+ echo "istio gateway ${service} in namespace ${namespace} assigned with hostname: ${ext_hostname}"
+ # TODO: Add some health checks against the LB
+ break
+ fi
+done
+
+# Fail with some debug prints if issues detected
+if [ ${fail} == true ]; then
+  echo "Problem detected with istio gateway ${service} in namespace ${namespace}. Printing some debug info.."
+  set +e
+  kubectl get svc -n "${namespace}" -o wide
+  kubectl get deployments -n "${namespace}" -o wide
+  kubectl get pods -n "${namespace}" -o wide
+  kubectl describe svc "${service}" -n "${namespace}"
+  kubectl describe deployments -n "${namespace}"
+  # TODO(review): consider `exit 1` here so failures propagate to the caller — confirm soft-fail is intentional
+fi
diff --git a/scripts/deprovision-sm-operator.sh b/scripts/deprovision-sm-operator.sh
new file mode 100755
index 0000000..9ba6d9c
--- /dev/null
+++ b/scripts/deprovision-sm-operator.sh
@@ -0,0 +1,43 @@
+#!/usr/bin/env bash
+
+# this script cleans up the operator according to the input parameters for
+# 1 kubeconfig to login on the cluster
+# 2 operator namespace
+# 3 operator name
+# it cleans up
+# 1. the cluster service version (CSV) resources
+# 2. the custom resource definitions
+# 3. the operator itself
+
+# enabling exit on errors
+set -e
+
+echo "Start deprovision-sm-operator.sh with ${1} ${2} ${3}"
+kubeconfig="${1}"
+export KUBECONFIG="${kubeconfig}"
+operator_namespace="${2:-openshift-operators}"
+operator_name="${3:-servicemeshoperator3}"
+
+echo "Fetching and deleting CSVs for ${operator_name} operator subscription in namespace ${operator_namespace}"
+
+CSV="$(kubectl get clusterserviceversion -n "${operator_namespace}" | grep "${operator_name}" | awk '{print $1}')"
+
+if [ -n "$CSV" ]
+then
+  echo "Deleting CSV ${CSV} in namespace ${operator_namespace}"
+  kubectl delete csv "$CSV" -n "${operator_namespace}"
+fi
+
+echo "Deleting all CRDs from istio operator"
+
+kubectl get crds -oname | grep -e istio.io -e sailoperator.io | xargs kubectl delete
+
+echo "Deleting operator ${operator_name} in namespace ${operator_namespace}"
+
+kubectl delete operator "${operator_name}"."${operator_namespace}"
+
+echo "Deprovisioning of ${operator_name} from namespace ${operator_namespace} completed"
+
+echo "Exit deprovision-sm-operator.sh"
+
+set +e
diff --git a/variables.tf b/variables.tf
index a9d9899..dd1cbbf 100644
--- a/variables.tf
+++ b/variables.tf
@@ -1,36 +1,31 @@
-########################################################################################################################
+##############################################################################
# Input Variables
-########################################################################################################################
+##############################################################################
-#
-# Developer tips:
-# - Below are some common module input variables
-# - They should be updated for input variables applicable to the module being added
-# - Use variable validation when possible
-#
-
-variable "name" {
+variable "cluster_id" {
type = string
- description = "A descriptive name used to identify the resource instance."
+ description = "Id of the target IBM Cloud OpenShift Cluster"
}
-variable "plan" {
- type = string
- description = "The name of the plan type supported by service."
- default = "standard"
- validation {
- condition = contains(["standard", "cos-one-rate-plan"], var.plan)
- error_message = "The specified pricing plan is not available. The following plans are supported: 'standard', 'cos-one-rate-plan'"
- }
+variable "deploy_operator" {
+ type = bool
+ description = "Enable installing RedHat Service Mesh Operator"
+ default = true
}
-variable "resource_group_id" {
- type = string
- description = "The ID of the resource group where you want to create the service."
+variable "develop_mode" {
+ type = bool
+ description = "If true, output more logs, and reduce some wait periods"
+ default = false
}
-variable "resource_tags" {
- type = list(string)
- description = "List of resource tag to associate with the instance."
- default = []
+variable "cluster_config_endpoint_type" {
+  description = "Specify which type of endpoint to use for cluster config access: 'default', 'private', 'vpe', 'link'. 'default' value will use the default endpoint of the cluster."
+  type        = string
+  default     = "default"
+  nullable    = false
+  validation {
+    error_message = "Invalid Endpoint Type! Valid values are 'default', 'private', 'vpe', or 'link'"
+    condition     = contains(["default", "private", "vpe", "link"], var.cluster_config_endpoint_type)
+  }
 }
diff --git a/version.tf b/version.tf
index e51de7f..1239f0a 100644
--- a/version.tf
+++ b/version.tf
@@ -1,18 +1,26 @@
terraform {
- # require 1.9 or later to make use of cross-object referencing for input variable validations
- # more info: https://www.hashicorp.com/blog/terraform-1-9-enhances-input-variable-validations
required_version = ">= 1.9.0"
-
- #
- # Developer tips:
- # - If your module requires any terraform providers, add them the "required_providers" section below.
- # - Each required provider's version should be a flexible range to future proof the module's usage with upcoming minor and patch versions.
- #
-
required_providers {
+ # Use a range in modules
ibm = {
- source = "IBM-Cloud/ibm"
- version = ">= 1.71.2, < 2.0.0"
+ source = "ibm-cloud/ibm"
+ version = ">= 1.59.0, < 2.0.0"
+ }
+ # kubernetes = {
+ # source = "hashicorp/kubernetes"
+ # version = ">= 2.16.1, < 3.0.0"
+ # }
+ helm = {
+ source = "hashicorp/helm"
+ version = ">= 3.0.0"
+ }
+ time = {
+ source = "hashicorp/time"
+ version = ">= 0.9.1, < 1.0.0"
+ }
+ null = {
+ source = "hashicorp/null"
+ version = ">= 3.2.1, < 4.0.0"
}
}
}