Description
I get an inconsistent final plan with kubernetes_namespace_v1; when I rerun after the error, it works fine. I can't provide the debug logs since they contain secrets and IPs.
Terraform Version, Provider Version and Kubernetes Version
jens@DESKTOP-AL5L8LM:~/MyProjects/wynq-cluster/infra$ kubectl get nodes
NAME              STATUS   ROLES           AGE     VERSION
control-plane-1   Ready    control-plane   6m24s   v1.33.0
jens@DESKTOP-AL5L8LM:~/MyProjects/wynq-cluster/infra$ tofu version
OpenTofu v1.11.3
on linux_amd64
+ provider registry.opentofu.org/alekc/kubectl v2.1.3
+ provider registry.opentofu.org/carlpett/sops v1.3.0
+ provider registry.opentofu.org/hashicorp/helm v3.1.1
+ provider registry.opentofu.org/hashicorp/http v3.5.0
+ provider registry.opentofu.org/hashicorp/kubernetes v3.0.1
+ provider registry.opentofu.org/hashicorp/local v2.5.3
+ provider registry.opentofu.org/hashicorp/tls v4.1.0
+ provider registry.opentofu.org/hetznercloud/hcloud v1.59.0
+ provider registry.opentofu.org/siderolabs/talos v0.10.1
Affected Resource(s)
- kubernetes_namespace_v1
Terraform Configuration Files
# Cluster
module "talos" {
source = "../../terraform-hcloud-talos"
#version = "v2.24.0"
talos_version = "v1.12.0"
kubernetes_version = "1.33.0"
cilium_version = "1.18.5"
hcloud_token = var.hcloud_token
firewall_use_current_ip = true
cluster_name = "wynq.eu"
datacenter_name = "hel1-dc2"
control_plane_count = 1
control_plane_server_type = "cax11"
# Add worker nodes for scaling
worker_count = var.worker_count
worker_server_type = var.worker_server_type
enable_floating_ip = true
}
locals {
  decoded_kubeconfig = yamldecode(module.talos.kubeconfig)
  cluster            = local.decoded_kubeconfig["clusters"][0]["cluster"]
  user               = local.decoded_kubeconfig["users"][0]["user"]
}
data "http" "talos_health" {
count = var.control_plane_count > 0 ? 1 : 0
url = "https://${module.talos.public_ipv4_list[0]}:6443/version"
insecure = true
retry {
attempts = 60
min_delay_ms = 5000
max_delay_ms = 5000
}
depends_on = [module.talos]
}
# Argocd
resource "kubernetes_namespace_v1" "argocd" {
metadata {
name = "argocd"
}
depends_on = [data.http.talos_health]
}
resource "kubernetes_secret_v1" "sops_age_key" {
metadata {
name = "sops-age-key"
namespace = "argocd"
}
data = {
"keys.txt" = var.age_secret_key
}
depends_on = [kubernetes_namespace_v1.argocd]
}
resource "helm_release" "argocd" {
name = "argocd"
repository = "https://argoproj.github.io/argo-helm"
chart = "argo-cd"
namespace = "argocd"
create_namespace = false
version = "9.3.4"
values = [
templatefile("argocd-values.yaml", {
github_token = var.github_token
})
]
depends_on = [kubernetes_secret_v1.sops_age_key, data.http.talos_health, kubernetes_namespace_v1.argocd]
}
/*resource "kubernetes_manifest" "root_app" {
manifest = {
apiVersion = "argoproj.io/v1alpha1"
kind = "Application"
metadata = {
name = "root-app"
namespace = "argocd"
}
spec = {
project = "default"
source = {
repoURL = "https://github.com/JensvandeWiel/wynq-cluster"
path = "apps"
targetRevision = "HEAD"
}
destination = {
server = "https://kubernetes.default.svc"
namespace = "default"
}
syncPolicy = {
automated = {
prune = true
selfHeal = true
}
}
}
}
depends_on = [helm_release.argocd, kubernetes_namespace_v1.argocd]
}
*/
# Terraform
terraform {
  required_providers {
    kubectl = {
      source  = "alekc/kubectl"
      version = ">= 2.1.3"
    }
    helm = {
      source  = "hashicorp/helm"
      version = ">= 3.1.1"
    }
    sops = {
      source  = "carlpett/sops"
      version = ">= 1.3.0"
    }
    local = {
      source  = "hashicorp/local"
      version = "2.5.3"
    }
    kubernetes = {
      source  = "hashicorp/kubernetes"
      version = ">= 3.0.1"
    }
  }
}
provider "helm" {
kubernetes = {
load_config_file = false
host = local.cluster["server"]
cluster_ca_certificate = base64decode(
local.cluster["certificate-authority-data"],
)
client_certificate = base64decode(
local.user["client-certificate-data"],
)
client_key = base64decode(
local.user["client-key-data"],
)
}
}
provider "kubectl" {
host = local.cluster["server"]
cluster_ca_certificate = base64decode(
local.cluster["certificate-authority-data"],
)
client_certificate = base64decode(
local.user["client-certificate-data"],
)
client_key = base64decode(
local.user["client-key-data"],
)
load_config_file = false
}
provider "kubernetes" {
host = local.cluster["server"]
cluster_ca_certificate = base64decode(
local.cluster["certificate-authority-data"],
)
client_certificate = base64decode(
local.user["client-certificate-data"],
)
client_key = base64decode(
local.user["client-key-data"],
)
}
Debug Output
When I rerun it, it works fine, so I had to start again from a clean base to reproduce it:
https://gist.github.com/JensvandeWiel/66841718f25694ee54b05bbfc7fc5bcb
Steps to Reproduce
1. tofu/terraform apply
2. Optional: tofu/terraform apply again (to watch it complete correctly)
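A possible workaround, sketched below, is an untested assumption on my part rather than a confirmed fix: since the second apply always succeeds, a two-phase apply that creates the cluster before any Kubernetes resources might sidestep the first-run failure (module.talos is the module name from the configuration above).

# Hypothetical workaround: build the cluster first so the kubeconfig-derived
# provider credentials are known, then apply the Kubernetes resources.
tofu apply -target=module.talos
tofu apply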
Expected Behavior
The namespace is created, and the provider does not crash due to invalid IDs or runtime fields added by the cluster.
Actual Behavior
The provider crashes on fields that it should expect the cluster to populate.
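For context (based on Kubernetes defaults, not on my redacted logs): even a bare namespace comes back from the API server with fields the provider did not set, such as the kubernetes.io/metadata.name label that current Kubernetes versions add automatically, plus a uid and resourceVersion. Illustrative output, assuming the argocd namespace from the configuration above:

# Illustrative: server-populated label on a freshly created namespace
kubectl get namespace argocd --show-labels
NAME     STATUS   AGE   LABELS
argocd   Active   1m    kubernetes.io/metadata.name=argocd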
Important Factoids
I'm using a fork of the terraform-hcloud-talos module, which can be found at https://github.com/JensvandeWiel/terraform-hcloud-talos/tree/fix/helm_template-invalid-object. I use this fork because of hcloud-talos/terraform-hcloud-talos#383.
Community Note
- Please vote on this issue by adding a 👍 reaction to the original issue to help the community and maintainers prioritize this request
- If you are interested in working on this issue or have submitted a pull request, please leave a comment