-
Notifications
You must be signed in to change notification settings - Fork 15
AKS Improvements #36
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account-related emails.
Already on GitHub? Sign in to your account
base: main
Are you sure you want to change the base?
AKS Improvements #36
Changes from all commits
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -20,12 +20,12 @@ resource "azurerm_kubernetes_cluster" "aks" { | |
| #checkov:skip=CKV_AZURE_4: "Ensure AKS logging to Azure Monitoring is Configured" | ||
| #checkov:skip=CKV_AZURE_227: "Ensure that the AKS cluster encrypt temp disks, caches, and data flows between Compute and Storage resources" | ||
|
|
||
| name = var.aks_cluster_name | ||
| name = local.cluster_name | ||
| location = azurerm_resource_group.rg.location | ||
| resource_group_name = azurerm_resource_group.rg.name | ||
|
|
||
| # lets kubectl talk to the API over the public FQDN | ||
| dns_prefix = "${var.aks_cluster_name}-dns" | ||
| dns_prefix = "${local.cluster_name}-dns" | ||
|
|
||
| # workload identity federation | ||
| oidc_issuer_enabled = true # publishes an OIDC issuer URL | ||
|
|
@@ -36,16 +36,19 @@ resource "azurerm_kubernetes_cluster" "aks" { | |
| ######################################################################### | ||
| default_node_pool { | ||
| name = "sys" | ||
| vm_size = "Standard_D2s_v5" | ||
| vm_size = "Standard_D4s_v5" | ||
| vnet_subnet_id = azurerm_subnet.nodes.id | ||
| os_disk_size_gb = 64 | ||
| type = "VirtualMachineScaleSets" | ||
|
|
||
| # autoscaler | ||
| # autoscaler tuned for resilient system services | ||
| auto_scaling_enabled = true | ||
| min_count = 1 | ||
| max_count = 3 | ||
| min_count = 3 | ||
|
Member
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Why change the min_count to 3? We should keep it as minimal as possible. |
||
| max_count = 5 | ||
|
Member
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Why change it to 5? |
||
|
|
||
| upgrade_settings { | ||
|
Member
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Can you remove this
Author
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. With this one, if we don't provide it, I think AKS rejects the configuration — that is, it has to have `upgrade_settings`. |
||
| max_surge = "33%" | ||
| } | ||
| } | ||
|
|
||
| ######################################################################### | ||
|
|
@@ -63,107 +66,45 @@ resource "azurerm_kubernetes_cluster" "aks" { | |
| tags = var.tags | ||
| } | ||
|
|
||
| ############################################################################### | ||
| # CPU NODE POOL (Standard_D16s_v5) OnDemand | ||
| ############################################################################### | ||
| resource "azurerm_kubernetes_cluster_node_pool" "ondemand_cpu" { | ||
|
|
||
| #checkov:skip=CKV_AZURE_168: "Ensure Azure Kubernetes Cluster (AKS) nodes should use a minimum number of 50 pods" | ||
| #checkov:skip=CKV_AZURE_227: "Ensure that the AKS cluster encrypt temp disks, caches, and data flows between Compute and Storage resources" | ||
|
|
||
| name = "cpu16" | ||
| kubernetes_cluster_id = azurerm_kubernetes_cluster.aks.id | ||
|
|
||
| vm_size = "Standard_D16s_v5" | ||
| mode = "User" | ||
| vnet_subnet_id = azurerm_subnet.nodes.id | ||
|
|
||
| auto_scaling_enabled = true | ||
| min_count = 0 | ||
| max_count = 10 | ||
|
|
||
| node_taints = [ | ||
| "node.anyscale.com/capacity-type=ON_DEMAND:NoSchedule" | ||
| ] | ||
|
|
||
| tags = var.tags | ||
| } | ||
|
|
||
| ############################################################################### | ||
| # CPU NODE POOL (Standard_D16s_v5) Spot | ||
| ############################################################################### | ||
| resource "azurerm_kubernetes_cluster_node_pool" "spot_cpu" { | ||
|
|
||
| #checkov:skip=CKV_AZURE_168: "Ensure Azure Kubernetes Cluster (AKS) nodes should use a minimum number of 50 pods" | ||
| #checkov:skip=CKV_AZURE_227: "Ensure that the AKS cluster encrypt temp disks, caches, and data flows between Compute and Storage resources" | ||
|
|
||
| name = "cpu16spot" | ||
| kubernetes_cluster_id = azurerm_kubernetes_cluster.aks.id | ||
|
|
||
| vm_size = "Standard_D16s_v5" | ||
| mode = "User" | ||
| vnet_subnet_id = azurerm_subnet.nodes.id | ||
|
|
||
| auto_scaling_enabled = true | ||
| min_count = 0 | ||
| max_count = 10 | ||
|
|
||
| node_taints = [ | ||
| "node.anyscale.com/capacity-type=SPOT:NoSchedule" | ||
| ] | ||
|
|
||
| priority = "Spot" | ||
| eviction_policy = "Delete" | ||
|
|
||
| tags = var.tags | ||
| } | ||
|
|
||
| # USER NODE POOLS (CPU) | ||
| # Opinionated CPU node pools exposed to Anyscale users | ||
| locals { | ||
| gpu_pool_configs = { | ||
| T4 = { | ||
| name = "gput4" | ||
| vm_size = "Standard_NC16as_T4_v3" | ||
| product_name = "NVIDIA-T4" | ||
| gpu_count = "1" | ||
| user_node_pools = { | ||
|
Member
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Where are those GPU node pools? They are necessary.
Author
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. I will add them back on once I can test them, currently don't have GPU quota so couldn't test (WIP) |
||
| cpu8 = { | ||
| name = "cpu8" | ||
| vm_size = "Standard_D8s_v5" | ||
| min_count = 0 | ||
| max_count = 10 | ||
| node_labels = { | ||
| "node.anyscale.com/capacity-type" = "ON_DEMAND" | ||
| "nodepool.anyscale.com/name" = "cpu8" | ||
| } | ||
| node_taints = [ | ||
| "node.anyscale.com/capacity-type=ON_DEMAND:NoSchedule" | ||
| ] | ||
| } | ||
| A10 = { | ||
| name = "gpua10" | ||
| vm_size = "Standard_NV36ads_A10_v5" | ||
| product_name = "NVIDIA-A10" | ||
| gpu_count = "1" | ||
| cpu16 = { | ||
| name = "cpu16" | ||
| vm_size = "Standard_D16s_v5" | ||
| min_count = 0 | ||
| max_count = 10 | ||
| node_labels = { | ||
| "node.anyscale.com/capacity-type" = "ON_DEMAND" | ||
| "nodepool.anyscale.com/name" = "cpu16" | ||
| } | ||
| node_taints = [ | ||
| "node.anyscale.com/capacity-type=ON_DEMAND:NoSchedule" | ||
| ] | ||
| } | ||
| A100 = { | ||
| name = "gpua100" | ||
| vm_size = "Standard_NC24ads_A100_v4" | ||
| product_name = "NVIDIA-A100" | ||
| gpu_count = "1" | ||
| } | ||
| H100 = { | ||
| name = "gpuh100x8" | ||
| vm_size = "Standard_ND96isr_H100_v5" | ||
| product_name = "NVIDIA-H100" | ||
| gpu_count = "8" | ||
| } | ||
| } | ||
|
|
||
| # keep only the types the caller asked for | ||
| selected_gpu_pools = { | ||
| for k, v in local.gpu_pool_configs : | ||
| k => v if contains(var.node_group_gpu_types, k) | ||
| } | ||
| } | ||
|
|
||
| ############################################################################### | ||
| # GPU Node POOL (Standard_NC16as_T4_v3) OnDemand | ||
| ############################################################################### | ||
| resource "azurerm_kubernetes_cluster_node_pool" "user" { | ||
|
|
||
| #trivy:ignore:avd-azu-0168 | ||
| #trivy:ignore:avd-azu-0227 | ||
| resource "azurerm_kubernetes_cluster_node_pool" "gpu_ondemand" { | ||
| #checkov:skip=CKV_AZURE_168 | ||
| #checkov:skip=CKV_AZURE_227 | ||
| #checkov:skip=CKV_AZURE_168: "Ensure Azure Kubernetes Cluster (AKS) nodes should use a minimum number of 50 pods" | ||
| #checkov:skip=CKV_AZURE_227: "Ensure that the AKS cluster encrypt temp disks, caches, and data flows between Compute and Storage resources" | ||
|
|
||
| for_each = local.selected_gpu_pools | ||
| for_each = local.user_node_pools | ||
|
|
||
| name = each.value.name | ||
| kubernetes_cluster_id = azurerm_kubernetes_cluster.aks.id | ||
|
|
@@ -172,74 +113,27 @@ resource "azurerm_kubernetes_cluster_node_pool" "gpu_ondemand" { | |
| mode = "User" | ||
| vnet_subnet_id = azurerm_subnet.nodes.id | ||
|
|
||
| # ── autoscaling (shared across all pools) ─────────────────────────────────── | ||
| auto_scaling_enabled = true | ||
| min_count = 0 | ||
| max_count = 10 | ||
|
|
||
| upgrade_settings { max_surge = "1" } | ||
|
|
||
| # ── labels & taints ──────────────────────────────────────────────────────── | ||
| node_labels = { | ||
| "nvidia.com/gpu.product" = each.value.product_name | ||
| "nvidia.com/gpu.count" = each.value.gpu_count | ||
| } | ||
|
|
||
| node_taints = [ | ||
| "node.anyscale.com/capacity-type=ON_DEMAND:NoSchedule", | ||
| "nvidia.com/gpu=present:NoSchedule", | ||
| "node.anyscale.com/accelerator-type=GPU:NoSchedule", | ||
| ] | ||
| min_count = each.value.min_count | ||
| max_count = each.value.max_count | ||
|
|
||
| tags = var.tags | ||
| } | ||
|
|
||
| ############################################################################### | ||
| # GPU Node POOL (Standard_NC16as_T4_v3) Spot | ||
| ############################################################################### | ||
| #trivy:ignore:avd-azu-0168 | ||
| #trivy:ignore:avd-azu-0227 | ||
| resource "azurerm_kubernetes_cluster_node_pool" "gpu_spot" { | ||
| #checkov:skip=CKV_AZURE_168 | ||
| #checkov:skip=CKV_AZURE_227 | ||
|
|
||
| for_each = local.selected_gpu_pools | ||
|
|
||
| name = "${each.value.name}spot" | ||
| kubernetes_cluster_id = azurerm_kubernetes_cluster.aks.id | ||
| node_taints = each.value.node_taints | ||
| node_labels = merge(each.value.node_labels, { | ||
| "nodepool.anyscale.com/type" = "cpu" | ||
| }) | ||
|
|
||
| vm_size = each.value.vm_size | ||
| mode = "User" | ||
| vnet_subnet_id = azurerm_subnet.nodes.id | ||
|
|
||
| # ── autoscaling (shared across all pools) ─────────────────────────────────── | ||
| auto_scaling_enabled = true | ||
| min_count = 0 | ||
| max_count = 10 | ||
|
|
||
| # ── labels & taints ──────────────────────────────────────────────────────── | ||
| node_labels = { | ||
| "nvidia.com/gpu.product" = each.value.product_name | ||
| "nvidia.com/gpu.count" = each.value.gpu_count | ||
| upgrade_settings { | ||
| max_surge = "1" | ||
| } | ||
|
|
||
| node_taints = [ | ||
| "node.anyscale.com/capacity-type=ON_DEMAND:NoSchedule", | ||
| "nvidia.com/gpu=present:NoSchedule", | ||
| "node.anyscale.com/accelerator-type=GPU:NoSchedule", | ||
| ] | ||
|
|
||
| priority = "Spot" | ||
| eviction_policy = "Delete" | ||
|
|
||
| tags = var.tags | ||
| } | ||
|
|
||
| ############################################################################## | ||
| # MANAGED IDENTITY FOR ANYSCALE OPERATOR | ||
| ############################################################################### | ||
| resource "azurerm_user_assigned_identity" "anyscale_operator" { | ||
| name = "${var.aks_cluster_name}-anyscale-operator-mi" | ||
| name = "${local.cluster_name}-anyscale-operator-mi" | ||
| location = azurerm_resource_group.rg.location | ||
| resource_group_name = azurerm_resource_group.rg.name | ||
| } | ||
|
|
||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -1,13 +1,25 @@ | ||
| resource "random_string" "storage_suffix" { | ||
|
Member
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Can you revert this change? We prefer not to have any randomness here. Otherwise debugging issues would be harder. |
||
| length = 6 | ||
| upper = false | ||
| lower = true | ||
| numeric = true | ||
| special = false | ||
| } | ||
|
|
||
| locals { | ||
| vnet_cidr = "10.42.0.0/16" | ||
| nodes_subnet_cidr = "10.42.1.0/24" | ||
| vnet_cidr = "10.42.0.0/16" | ||
| nodes_subnet_cidr = "10.42.1.0/24" | ||
| cluster_name = var.aks_cluster_name | ||
| cluster_name_sanitized = join("", regexall("[a-z0-9]", lower(local.cluster_name))) | ||
| storage_account_name = substr("${local.cluster_name_sanitized}${random_string.storage_suffix.result}", 0, 24) | ||
| storage_container_name = "${local.cluster_name}-blob" | ||
| } | ||
|
|
||
| ############################################ | ||
| # resource group | ||
| ############################################ | ||
| resource "azurerm_resource_group" "rg" { | ||
| name = "${var.aks_cluster_name}-rg" | ||
| name = "${local.cluster_name}-rg" | ||
| location = var.azure_location | ||
| tags = var.tags | ||
| } | ||
|
|
@@ -30,7 +42,7 @@ resource "azurerm_storage_account" "sa" { | |
| #checkov:skip=CKV2_AZURE_21: "Ensure Storage logging is enabled for Blob service for read requests" | ||
| #checkov:skip=CKV2_AZURE_31: "Ensure VNET subnet is configured with a Network Security Group (NSG)" | ||
|
|
||
| name = replace("${var.aks_cluster_name}sa", "-", "") # demo-aks --> demoakssa | ||
| name = local.storage_account_name | ||
| resource_group_name = azurerm_resource_group.rg.name | ||
| location = azurerm_resource_group.rg.location | ||
| account_tier = "Standard" | ||
|
|
@@ -46,7 +58,7 @@ resource "azurerm_storage_container" "blob" { | |
|
|
||
| #checkov:skip=CKV2_AZURE_21: "Ensure Storage logging is enabled for Blob service for read requests" | ||
|
|
||
| name = "${var.aks_cluster_name}-blob" | ||
| name = local.storage_container_name | ||
| storage_account_id = azurerm_storage_account.sa.id | ||
| container_access_type = "private" # blobs are private but reachable via the public endpoint | ||
| } | ||
|
|
@@ -55,7 +67,7 @@ resource "azurerm_storage_container" "blob" { | |
| # networking (vnet and subnet) | ||
| ############################################ | ||
| resource "azurerm_virtual_network" "vnet" { | ||
| name = "${var.aks_cluster_name}-vnet" | ||
| name = "${local.cluster_name}-vnet" | ||
| location = azurerm_resource_group.rg.location | ||
| resource_group_name = azurerm_resource_group.rg.name | ||
| address_space = [local.vnet_cidr] | ||
|
|
||
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Can you keep the original name unchanged?