Add half-done self-managed version

This commit is contained in:
2024-03-03 12:39:01 +01:00
parent 42fc446cc7
commit 49b182878b
22 changed files with 1951 additions and 0 deletions

40
.github/workflows/ci.yml vendored Normal file
View File

@@ -0,0 +1,40 @@
# CI for the Terraform example configuration: init, validate, and format-check.
# NOTE: indentation restored -- the committed copy must be valid nested YAML.
name: Terraform CI
on:
  push:
    branches:
      - main
    tags:
      - v*
  pull_request:
    branches:
      - main
jobs:
  validate:
    name: Validate
    runs-on: ubuntu-latest
    defaults:
      run:
        # All terraform commands run against the example configuration.
        working-directory: ./example
    steps:
      - name: Check out code
        # actions/checkout@v1 is long deprecated; v4 is a drop-in replacement.
        uses: actions/checkout@v4
      - name: Run a Terraform init
        uses: docker://hashicorp/terraform:light
        with:
          entrypoint: terraform
          args: init
      - name: Run a Terraform validate
        uses: docker://hashicorp/terraform:light
        with:
          entrypoint: terraform
          args: validate
      - name: Run a Terraform fmt
        uses: docker://hashicorp/terraform:light
        with:
          entrypoint: terraform
          # -check exits non-zero (failing the job) when files need reformatting;
          # -diff prints the required changes. Flags normalized to single-dash form.
          args: fmt -recursive -check=true -diff ../

2
.gitignore vendored
View File

@@ -33,3 +33,5 @@ override.tf.json
# Ignore CLI configuration files
.terraformrc
terraform.rc
*tf.plan
*.terraform.lock.hcl

0
self-managed/README.md Normal file
View File

85
self-managed/data.tf Normal file
View File

@@ -0,0 +1,85 @@
# Cluster join token shared by all k3s nodes (servers and workers).
resource "random_password" "k3s_token" {
length = 55
special = false
}
# Cloud-init user data for k3s *server* nodes: renders
# files/k3s-install-server.sh with the cluster-wide settings below.
data "cloudinit_config" "k3s_server_tpl" {
gzip = true
base64_encode = true
part {
content_type = "text/x-shellscript"
content = templatefile("${path.module}/files/k3s-install-server.sh", {
k3s_version = var.k3s_version,
k3s_subnet = var.k3s_subnet,
k3s_token = random_password.k3s_token.result,
is_k3s_server = true,
disable_ingress = var.disable_ingress,
ingress_controller = var.ingress_controller,
nginx_ingress_release = var.nginx_ingress_release,
istio_release = var.istio_release,
install_certmanager = var.install_certmanager,
certmanager_release = var.certmanager_release,
certmanager_email_address = var.certmanager_email_address,
compartment_ocid = var.compartment_ocid,
availability_domain = var.availability_domain,
# Servers reach the API through the load balancer's first IP, which is
# also added to the API server's TLS SANs.
k3s_url = oci_load_balancer_load_balancer.k3s_load_balancer.ip_address_details[0].ip_address,
k3s_tls_san = oci_load_balancer_load_balancer.k3s_load_balancer.ip_address_details[0].ip_address,
expose_kubeapi = var.expose_kubeapi,
# Public LB IP added as an extra SAN when the kube API is exposed.
k3s_tls_san_public = local.public_lb_ip[0],
argocd_image_updater_release = var.argocd_image_updater_release,
install_argocd_image_updater = var.install_argocd_image_updater,
install_argocd = var.install_argocd,
argocd_release = var.argocd_release,
install_longhorn = var.install_longhorn,
longhorn_release = var.longhorn_release,
ingress_controller_http_nodeport = var.ingress_controller_http_nodeport,
ingress_controller_https_nodeport = var.ingress_controller_https_nodeport,
})
}
}
# Cloud-init user data for k3s *worker* (agent) nodes: renders
# files/k3s-install-agent.sh; agents only need join/LB settings.
data "cloudinit_config" "k3s_worker_tpl" {
gzip = true
base64_encode = true
part {
content_type = "text/x-shellscript"
content = templatefile("${path.module}/files/k3s-install-agent.sh", {
k3s_version = var.k3s_version,
k3s_subnet = var.k3s_subnet,
k3s_token = random_password.k3s_token.result,
is_k3s_server = false,
disable_ingress = var.disable_ingress,
# Agents join through the load balancer's first IP.
k3s_url = oci_load_balancer_load_balancer.k3s_load_balancer.ip_address_details[0].ip_address,
http_lb_port = var.http_lb_port,
install_longhorn = var.install_longhorn,
https_lb_port = var.https_lb_port,
ingress_controller_http_nodeport = var.ingress_controller_http_nodeport,
ingress_controller_https_nodeport = var.ingress_controller_https_nodeport,
})
}
}
# Running members of the worker instance pool.
data "oci_core_instance_pool_instances" "k3s_workers_instances" {
# Consistency fix: the servers variant below waits for its pool via
# depends_on; do the same here so the query never races pool creation.
depends_on = [
oci_core_instance_pool.k3s_workers,
]
compartment_id = var.compartment_ocid
instance_pool_id = oci_core_instance_pool.k3s_workers.id
}
# Per-worker instance details (one lookup per expected pool member).
data "oci_core_instance" "k3s_workers_instances_ips" {
count = var.k3s_worker_pool_size
instance_id = data.oci_core_instance_pool_instances.k3s_workers_instances.instances[count.index].id
}
# Running members of the server instance pool.
data "oci_core_instance_pool_instances" "k3s_servers_instances" {
depends_on = [
oci_core_instance_pool.k3s_servers,
]
compartment_id = var.compartment_ocid
instance_pool_id = oci_core_instance_pool.k3s_servers.id
}
# Per-server instance details.
data "oci_core_instance" "k3s_servers_instances_ips" {
count = var.k3s_server_pool_size
instance_id = data.oci_core_instance_pool_instances.k3s_servers_instances.instances[count.index].id
}

View File

@@ -0,0 +1,66 @@
# --- Required inputs (no defaults; supplied via terraform.tfvars) ---
variable "compartment_ocid" {}
variable "tenancy_ocid" {}
variable "user_ocid" {}
variable "fingerprint" {}
variable "private_key_path" {}
variable "availability_domain" {}
variable "my_public_ip_cidr" {}
variable "cluster_name" {}
variable "agent_os_image_id" {}
variable "server_os_image_id" {}
variable "certmanager_email_address" {}
variable "region" {}
variable "public_key_path" {}
# Number of k3s server (control-plane) nodes.
variable "k3s_server_pool_size" {
default = 2
}
# Number of k3s worker nodes in the instance pool.
variable "k3s_worker_pool_size" {
default = 2
}
# When true, provisions one standalone worker outside the instance pool.
variable "k3s_extra_worker_node" {
default = false
}
# When true, the kube API is reachable through the public LB IP.
variable "expose_kubeapi" {
default = false
}
variable "environment" {
default = "prod"
}
# Example invocation of the root module (source = "../").
# k3s_version is left at the module default; older pins kept for reference.
module "k3s_cluster" {
# k3s_version = "v1.23.8+k3s2" # Fix kubectl exec failure
# k3s_version = "v1.24.4+k3s1" # Kubernetes version compatible with longhorn
region = var.region
availability_domain = var.availability_domain
tenancy_ocid = var.tenancy_ocid
compartment_ocid = var.compartment_ocid
my_public_ip_cidr = var.my_public_ip_cidr
cluster_name = var.cluster_name
environment = var.environment
agent_os_image_id = var.agent_os_image_id
server_os_image_id = var.server_os_image_id
certmanager_email_address = var.certmanager_email_address
certmanager_release = "v1.13.3"
k3s_server_pool_size = var.k3s_server_pool_size
k3s_worker_pool_size = var.k3s_worker_pool_size
k3s_extra_worker_node = var.k3s_extra_worker_node
expose_kubeapi = var.expose_kubeapi
public_key_path = var.public_key_path
install_longhorn = false
# fault_domains = [ "FAULT-DOMAIN-3" ]
ingress_controller = "traefik2"
source = "../"
}
output "k3s_servers_ips" {
value = module.k3s_cluster.k3s_servers_ips
}
output "k3s_workers_ips" {
value = module.k3s_cluster.k3s_workers_ips
}
output "public_lb_ip" {
value = module.k3s_cluster.public_lb_ip
}

View File

@@ -0,0 +1,17 @@
terraform {
required_providers {
oci = {
source = "oracle/oci"
version = ">= 4.64.0"
}
}
}
# OCI provider authenticated with API-key auth (user + fingerprint + key file).
provider "oci" {
tenancy_ocid = var.tenancy_ocid
user_ocid = var.user_ocid
# pathexpand() resolves a leading ~ in the configured key path.
private_key_path = pathexpand(var.private_key_path)
fingerprint = var.fingerprint
region = var.region
# Retry transient API errors for up to 2 minutes.
retry_duration_seconds = 120
}

View File

@@ -0,0 +1,267 @@
#!/bin/bash
check_os() {
# Detect the host distribution from /etc/os-release and set the globals
# operating_system ("ubuntu" | "oraclelinux" | "undef"), major and minor.
# ($$ is templatefile() escaping and renders to a single $ in the final script.)
name=$(grep ^NAME= /etc/os-release | sed 's/"//g')
clean_name=$${name#*=}
version=$(grep ^VERSION_ID= /etc/os-release | sed 's/"//g')
clean_version=$${version#*=}
major=$${clean_version%.*}
minor=$${clean_version#*.}
case "$clean_name" in
"Ubuntu")
operating_system="ubuntu"
;;
"Oracle Linux Server")
operating_system="oraclelinux"
;;
*)
operating_system="undef"
;;
esac
echo "K3S install process running on: "
echo "OS: $operating_system"
echo "OS Major Release: $major"
echo "OS Minor Release: $minor"
}
install_oci_cli_ubuntu(){
# Ubuntu: install the OCI CLI (via pip) plus nginx, used later by
# proxy_protocol_stuff to proxy LB traffic to the ingress NodePorts.
DEBIAN_FRONTEND=noninteractive apt-get install --no-install-recommends -y python3 python3-pip nginx
systemctl enable nginx
pip install oci-cli
}
install_oci_cli_oracle(){
# Oracle Linux: install the packaged OCI CLI, jinja2 (nginx config
# rendering) and nginx; package names differ between EL8 and EL9.
# Relies on $major set by check_os.
if [[ $major -eq 9 ]]; then
dnf -y install oraclelinux-developer-release-el9
dnf -y install python39-oci-cli python3-jinja2 nginx-all-modules
else
dnf -y install oraclelinux-developer-release-el8
dnf -y module enable nginx:1.20 python36:3.6
dnf -y install python36-oci-cli python3-jinja2 nginx-all-modules
fi
}
wait_lb() {
# Block until the k3s API endpoint behind the load balancer accepts
# (possibly self-signed) TLS connections; retry every 5 seconds.
until curl --output /dev/null --silent -k https://${k3s_url}:6443; do
sleep 5
echo "wait for LB"
done
}
# --- Agent node bootstrap: OS prep, then join the cluster via the LB. ---
check_os
if [[ "$operating_system" == "ubuntu" ]]; then
# Disable firewall
/usr/sbin/netfilter-persistent stop
/usr/sbin/netfilter-persistent flush
systemctl stop netfilter-persistent.service
systemctl disable netfilter-persistent.service
# END Disable firewall
apt-get update
apt-get install -y software-properties-common jq
DEBIAN_FRONTEND=noninteractive apt-get upgrade -y
%{ if ! disable_ingress }
install_oci_cli_ubuntu
%{ endif }
# Fix /var/log/journal dir size
echo "SystemMaxUse=100M" >> /etc/systemd/journald.conf
echo "SystemMaxFileSize=100M" >> /etc/systemd/journald.conf
systemctl restart systemd-journald
fi
if [[ "$operating_system" == "oraclelinux" ]]; then
# Disable firewall
systemctl disable --now firewalld
# END Disable firewall
# Fix iptables/SELinux bug
echo '(allow iptables_t cgroup_t (dir (ioctl)))' > /root/local_iptables.cil
semodule -i /root/local_iptables.cil
dnf -y update
dnf -y install jq curl
%{ if ! disable_ingress }
install_oci_cli_oracle
%{ endif }
# Nginx Selinux Fix
setsebool httpd_can_network_connect on -P
fi
# Build the k3s agent install flags.
k3s_install_params=()
%{ if k3s_subnet != "default_route_table" }
# Pin node IP and flannel interface to the route matching the k3s subnet.
local_ip=$(ip -4 route ls ${k3s_subnet} | grep -Po '(?<=src )(\S+)')
flannel_iface=$(ip -4 route ls ${k3s_subnet} | grep -Po '(?<=dev )(\S+)')
k3s_install_params+=("--node-ip $local_ip")
k3s_install_params+=("--flannel-iface $flannel_iface")
%{ endif }
if [[ "$operating_system" == "oraclelinux" ]]; then
k3s_install_params+=("--selinux")
fi
INSTALL_PARAMS="$${k3s_install_params[*]}"
%{ if k3s_version == "latest" }
# Resolve "latest" to the newest GitHub release tag at provision time.
K3S_VERSION=$(curl --silent https://api.github.com/repos/k3s-io/k3s/releases/latest | jq -r '.name')
%{ else }
K3S_VERSION="${k3s_version}"
%{ endif }
# Wait for the control plane behind the LB, then join; retry until success.
wait_lb
until (curl -sfL https://get.k3s.io | INSTALL_K3S_VERSION=$K3S_VERSION K3S_TOKEN=${k3s_token} K3S_URL=https://${k3s_url}:6443 sh -s - $INSTALL_PARAMS); do
echo 'k3s did not install correctly'
sleep 2
done
proxy_protocol_stuff(){
# Configure a local nginx stream proxy in front of the ingress NodePorts:
# discovers all running instances' private IPs via the OCI CLI, renders
# /etc/nginx/nginx.conf from a jinja2 template, and enables PROXY protocol
# towards the ingress controller.
# Script that collects private IPs of all RUNNING instances into /tmp/private_ips.
cat << 'EOF' > /root/find_ips.sh
export OCI_CLI_AUTH=instance_principal
private_ips=()
# Fetch the OCID of all the running instances in OCI and store to an array
instance_ocids=$(oci search resource structured-search --query-text "QUERY instance resources where lifeCycleState='RUNNING'" --query 'data.items[*].identifier' --raw-output | jq -r '.[]' )
# Iterate through the array to fetch details of each instance one by one
for val in $${instance_ocids[@]} ; do
echo $val
# Get name of the instance
instance_name=$(oci compute instance get --instance-id $val --raw-output --query 'data."display-name"')
echo $instance_name
# Get Public Ip of the instance
public_ip=$(oci compute instance list-vnics --instance-id $val --raw-output --query 'data[0]."public-ip"')
echo $public_ip
private_ip=$(oci compute instance list-vnics --instance-id $val --raw-output --query 'data[0]."private-ip"')
echo $private_ip
private_ips+=($private_ip)
done
for i in "$${private_ips[@]}"
do
echo "$i" >> /tmp/private_ips
done
EOF
# nginx module path and worker user differ per distribution.
if [[ "$operating_system" == "ubuntu" ]]; then
NGINX_MODULE=/usr/lib/nginx/modules/ngx_stream_module.so
NGINX_USER=www-data
fi
if [[ "$operating_system" == "oraclelinux" ]]; then
NGINX_MODULE=/usr/lib64/nginx/modules/ngx_stream_module.so
NGINX_USER=nginx
fi
# Unquoted heredoc: $NGINX_MODULE / $NGINX_USER expand now.
cat << EOD > /root/nginx-header.tpl
load_module $NGINX_MODULE;
user $NGINX_USER;
worker_processes auto;
pid /run/nginx.pid;
EOD
# Quoted heredoc: jinja2 placeholders survive; ${...} ports are filled in
# by templatefile() when this script is rendered.
cat << 'EOF' > /root/nginx-footer.tpl
events {
worker_connections 768;
# multi_accept on;
}
stream {
upstream k3s-http {
{% for private_ip in private_ips -%}
server {{ private_ip }}:${ingress_controller_http_nodeport} max_fails=3 fail_timeout=10s;
{% endfor -%}
}
upstream k3s-https {
{% for private_ip in private_ips -%}
server {{ private_ip }}:${ingress_controller_https_nodeport} max_fails=3 fail_timeout=10s;
{% endfor -%}
}
log_format basic '$remote_addr [$time_local] '
'$protocol $status $bytes_sent $bytes_received '
'$session_time "$upstream_addr" '
'"$upstream_bytes_sent" "$upstream_bytes_received" "$upstream_connect_time"';
access_log /var/log/nginx/k3s_access.log basic;
error_log /var/log/nginx/k3s_error.log;
proxy_protocol on;
server {
listen ${https_lb_port};
proxy_pass k3s-https;
proxy_next_upstream on;
}
server {
listen ${http_lb_port};
proxy_pass k3s-http;
proxy_next_upstream on;
}
}
EOF
cat /root/nginx-header.tpl /root/nginx-footer.tpl > /root/nginx.tpl
# Renders /root/nginx.tpl with the discovered private IPs.
cat << 'EOF' > /root/render_nginx_config.py
from jinja2 import Template
import os
RAW_IP = open('/tmp/private_ips', 'r').readlines()
IPS = [i.replace('\n','') for i in RAW_IP]
nginx_config_template_path = '/root/nginx.tpl'
nginx_config_path = '/etc/nginx/nginx.conf'
with open(nginx_config_template_path, 'r') as handle:
nginx_config_template = handle.read()
new_nginx_config = Template(nginx_config_template).render(
private_ips = IPS
)
with open(nginx_config_path, 'w') as handle:
handle.write(new_nginx_config)
EOF
chmod +x /root/find_ips.sh
# Fix: the original invoked './root/find_ips.sh', a relative path that only
# resolves when the CWD is '/'. Use the absolute path the file was written to.
/root/find_ips.sh
python3 /root/render_nginx_config.py
nginx -t
systemctl restart nginx
}
%{ if ! disable_ingress }
# Set up the local nginx PROXY-protocol passthrough (defined above).
proxy_protocol_stuff
%{ endif }
%{ if install_longhorn }
# Longhorn requires iscsid running on every node.
if [[ "$operating_system" == "ubuntu" ]]; then
DEBIAN_FRONTEND=noninteractive apt-get install --no-install-recommends -y open-iscsi curl util-linux
fi
systemctl enable --now iscsid.service
%{ endif }

View File

@@ -0,0 +1,430 @@
#!/bin/bash
check_os() {
# Detect the host distribution from /etc/os-release and set the globals
# operating_system ("ubuntu" | "oraclelinux" | "undef"), major and minor.
# ($$ is templatefile() escaping and renders to a single $ in the script.)
name=$(cat /etc/os-release | grep ^NAME= | sed 's/"//g')
clean_name=$${name#*=}
version=$(cat /etc/os-release | grep ^VERSION_ID= | sed 's/"//g')
clean_version=$${version#*=}
major=$${clean_version%.*}
minor=$${clean_version#*.}
if [[ "$clean_name" == "Ubuntu" ]]; then
operating_system="ubuntu"
elif [[ "$clean_name" == "Oracle Linux Server" ]]; then
operating_system="oraclelinux"
else
operating_system="undef"
fi
echo "K3S install process running on: "
echo "OS: $operating_system"
echo "OS Major Release: $major"
echo "OS Minor Release: $minor"
}
wait_lb() {
# Block until the k3s API endpoint behind the load balancer accepts
# (possibly self-signed) TLS connections; retry every 5 seconds.
while [ true ]
do
curl --output /dev/null --silent -k https://${k3s_url}:6443
if [[ "$?" -eq 0 ]]; then
break
fi
sleep 5
echo "wait for LB"
done
}
install_helm() {
# Install Helm 3 via the official installer script.
curl -fsSL -o /root/get_helm.sh https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3
chmod 700 /root/get_helm.sh
/root/get_helm.sh
}
install_and_configure_traefik2() {
# Install Traefik v2 via Helm into a dedicated "traefik" namespace, using
# the values file written by render_traefik2_config.
export KUBECONFIG=/etc/rancher/k3s/k3s.yaml
# Install Helm
install_helm
# Add traefik helm repo
kubectl create ns traefik
helm repo add traefik https://helm.traefik.io/traefik
helm repo update
TRAEFIK_VALUES_FILE=/root/traefik2_values.yaml
render_traefik2_config
helm install --namespace=traefik -f $TRAEFIK_VALUES_FILE traefik traefik/traefik --kubeconfig /etc/rancher/k3s/k3s.yaml
# NOTE(review): the kured (reboot daemon) install below is unrelated to
# Traefik and is repeated later in the script -- consider consolidating.
helm repo add kubereboot https://kubereboot.github.io/charts
helm install kubereboot/kured --generate-name --kubeconfig /etc/rancher/k3s/k3s.yaml
}
render_traefik2_config() {
# Write the Traefik v2 Helm values file used by install_and_configure_traefik2:
# NodePort service on the configured HTTP/HTTPS nodeports, with PROXY-protocol
# parsing enabled on web/websecure so the upstream LB can pass client IPs.
# NOTE(review): the heredoc below is YAML whose leading indentation appears to
# have been stripped in this copy -- it must be nesting-indented to be valid;
# verify against the original file before shipping.
cat << 'EOF' > "$TRAEFIK_VALUES_FILE"
service:
enabled: true
type: NodePort
# Configure ports
ports:
# The name of this one can't be changed as it is used for the readiness and
# liveness probes, but you can adjust its config to your liking
traefik:
port: 9000
# Use hostPort if set.
# hostPort: 9000
#
# Use hostIP if set. If not set, Kubernetes will default to 0.0.0.0, which
# means it's listening on all your interfaces and all your IPs. You may want
# to set this value if you need traefik to listen on specific interface
# only.
# hostIP: 192.168.100.10
# Override the liveness/readiness port. This is useful to integrate traefik
# with an external Load Balancer that performs healthchecks.
# Default: ports.traefik.port
# healthchecksPort: 9000
# Override the liveness/readiness scheme. Useful for getting ping to
# respond on websecure entryPoint.
# healthchecksScheme: HTTPS
# Defines whether the port is exposed if service.type is LoadBalancer or
# NodePort.
#
# You SHOULD NOT expose the traefik port on production deployments.
# If you want to access it from outside of your cluster,
# use `kubectl port-forward` or create a secure ingress
expose: false
# The exposed port for this service
exposedPort: 9000
# The port protocol (TCP/UDP)
protocol: TCP
web:
port: 8000
# hostPort: 8000
expose: true
exposedPort: 80
# The port protocol (TCP/UDP)
protocol: TCP
# Use nodeport if set. This is useful if you have configured Traefik in a
# LoadBalancer
nodePort: ${ingress_controller_http_nodeport}
# Port Redirections
# Added in 2.2, you can make permanent redirects via entrypoints.
# https://docs.traefik.io/routing/entrypoints/#redirection
# redirectTo: websecure
#
# Trust forwarded headers information (X-Forwarded-*).
# forwardedHeaders:
# trustedIPs: []
# insecure: false
#
# Enable the Proxy Protocol header parsing for the entry point
proxyProtocol:
trustedIPs:
- 0.0.0.0/0
- 127.0.0.1/32
insecure: false
websecure:
port: 8443
# hostPort: 8443
expose: true
exposedPort: 443
# The port protocol (TCP/UDP)
protocol: TCP
nodePort: ${ingress_controller_https_nodeport}
# Enable HTTP/3.
# Requires enabling experimental http3 feature and tls.
# Note that you cannot have a UDP entrypoint with the same port.
# http3: true
# Set TLS at the entrypoint
# https://doc.traefik.io/traefik/routing/entrypoints/#tls
tls:
enabled: true
# this is the name of a TLSOption definition
options: ""
certResolver: ""
domains: []
# - main: example.com
# sans:
# - foo.example.com
# - bar.example.com
#
# Trust forwarded headers information (X-Forwarded-*).
# forwardedHeaders:
# trustedIPs: []
# insecure: false
#
# Enable the Proxy Protocol header parsing for the entry point
proxyProtocol:
trustedIPs:
- 0.0.0.0/0
- 127.0.0.1/32
insecure: false
#
# One can apply Middlewares on an entrypoint
# https://doc.traefik.io/traefik/middlewares/overview/
# https://doc.traefik.io/traefik/routing/entrypoints/#middlewares
# /!\ It introduces here a link between your static configuration and your dynamic configuration /!\
# It follows the provider naming convention: https://doc.traefik.io/traefik/providers/overview/#provider-namespace
# middlewares:
# - namespace-name1@kubernetescrd
# - namespace-name2@kubernetescrd
middlewares: []
metrics:
# When using hostNetwork, use another port to avoid conflict with node exporter:
# https://github.com/prometheus/prometheus/wiki/Default-port-allocations
port: 9100
# hostPort: 9100
# Defines whether the port is exposed if service.type is LoadBalancer or
# NodePort.
#
# You may not want to expose the metrics port on production deployments.
# If you want to access it from outside of your cluster,
# use `kubectl port-forward` or create a secure ingress
expose: false
# The exposed port for this service
exposedPort: 9100
# The port protocol (TCP/UDP)
protocol: TCP
EOF
}
install_ingress(){
# Install the ingress controller named by $1.
# Fix: the original body began with 'elif' and no opening 'if' -- a bash
# syntax error, presumably left over after another controller's branch was
# removed. Restored a valid 'if'.
INGRESS_CONTROLLER=$1
if [[ "$INGRESS_CONTROLLER" == "traefik2" ]]; then
install_and_configure_traefik2
else
echo "Ingress controller not supported"
fi
}
render_staging_issuer(){
# Write a cert-manager ClusterIssuer for Let's Encrypt *staging* to the
# path given as $1. ${certmanager_email_address} is filled by templatefile().
STAGING_ISSUER_RESOURCE=$1
cat << 'EOF' > "$STAGING_ISSUER_RESOURCE"
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
name: letsencrypt-staging
namespace: cert-manager
spec:
acme:
# The ACME server URL
server: https://acme-staging-v02.api.letsencrypt.org/directory
# Email address used for ACME registration
email: ${certmanager_email_address}
# Name of a secret used to store the ACME account private key
privateKeySecretRef:
name: letsencrypt-staging
# Enable the HTTP-01 challenge provider
solvers:
- http01:
ingress:
class: nginx
EOF
}
render_prod_issuer(){
# Write a cert-manager ClusterIssuer for Let's Encrypt *production* to the
# path given as $1. ${certmanager_email_address} is filled by templatefile().
PROD_ISSUER_RESOURCE=$1
cat << 'EOF' > "$PROD_ISSUER_RESOURCE"
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
name: letsencrypt-prod
namespace: cert-manager
spec:
acme:
# The ACME server URL
server: https://acme-v02.api.letsencrypt.org/directory
# Email address used for ACME registration
email: ${certmanager_email_address}
# Name of a secret used to store the ACME account private key
privateKeySecretRef:
name: letsencrypt-prod
# Enable the HTTP-01 challenge provider
solvers:
- http01:
ingress:
class: nginx
EOF
}
# --- Server node bootstrap: OS prep per distribution. ---
check_os
if [[ "$operating_system" == "ubuntu" ]]; then
echo "Canonical Ubuntu"
# Disable firewall
/usr/sbin/netfilter-persistent stop
/usr/sbin/netfilter-persistent flush
systemctl stop netfilter-persistent.service
systemctl disable netfilter-persistent.service
# END Disable firewall
apt-get update
apt-get install -y software-properties-common jq git
DEBIAN_FRONTEND=noninteractive apt-get upgrade -y
DEBIAN_FRONTEND=noninteractive apt-get install --no-install-recommends -y python3 python3-pip
pip install oci-cli
# Fix /var/log/journal dir size
echo "SystemMaxUse=100M" >> /etc/systemd/journald.conf
echo "SystemMaxFileSize=100M" >> /etc/systemd/journald.conf
systemctl restart systemd-journald
fi
if [[ "$operating_system" == "oraclelinux" ]]; then
echo "Oracle Linux"
# Disable firewall
systemctl disable --now firewalld
# END Disable firewall
# Fix iptables/SELinux bug
echo '(allow iptables_t cgroup_t (dir (ioctl)))' > /root/local_iptables.cil
semodule -i /root/local_iptables.cil
dnf -y update
# OCI CLI package names differ between EL8 and EL9 ($major from check_os).
if [[ $major -eq 9 ]]; then
dnf -y install oraclelinux-developer-release-el9
dnf -y install jq python39-oci-cli curl git
else
dnf -y install oraclelinux-developer-release-el8
dnf -y module enable python36:3.6
dnf -y install jq python36-oci-cli curl git
fi
fi
# Fix: the original ran 'apt install vim -y' unconditionally, which fails on
# Oracle Linux (no apt). Install with the distribution's package manager.
if [[ "$operating_system" == "ubuntu" ]]; then
apt install vim -y
elif [[ "$operating_system" == "oraclelinux" ]]; then
dnf -y install vim
fi
cd ~
# Create and enable a ~4 GB swap file, persisted via /etc/fstab.
dd if=/dev/zero of=/swapfile bs=1024 count=4000000
chmod 0600 /swapfile
mkswap /swapfile
echo '/swapfile swap swap defaults 0 0' >> /etc/fstab
systemctl daemon-reload
swapon /swapfile
# Use instance-principal auth: no API key needed on the node (see iam.tf policy).
export OCI_CLI_AUTH=instance_principal
# Oldest running server instance acts as the cluster-init leader.
first_instance=$(oci compute instance list --compartment-id ${compartment_ocid} --availability-domain ${availability_domain} --lifecycle-state RUNNING --sort-by TIMECREATED | jq -r '.data[]|select(."display-name" | endswith("k3s-servers")) | .["display-name"]' | tail -n 1)
# This node's display name, from the instance metadata service (IMDSv2).
instance_id=$(curl -s -H "Authorization: Bearer Oracle" -L http://169.254.169.254/opc/v2/instance | jq -r '.displayName')
# Build the k3s server install flags; the LB IP is always a TLS SAN.
k3s_install_params=("--tls-san ${k3s_tls_san}")
%{ if k3s_subnet != "default_route_table" }
# Pin node/advertise IP and flannel interface to the route matching the subnet.
local_ip=$(ip -4 route ls ${k3s_subnet} | grep -Po '(?<=src )(\S+)')
flannel_iface=$(ip -4 route ls ${k3s_subnet} | grep -Po '(?<=dev )(\S+)')
k3s_install_params+=("--node-ip $local_ip")
k3s_install_params+=("--advertise-address $local_ip")
k3s_install_params+=("--flannel-iface $flannel_iface")
%{ endif }
%{ if disable_ingress }
k3s_install_params+=("--disable traefik")
%{ endif }
%{ if ! disable_ingress }
# A non-default ingress controller replaces the bundled traefik.
%{ if ingress_controller != "default" }
k3s_install_params+=("--disable traefik")
%{ endif }
%{ endif }
%{ if expose_kubeapi }
k3s_install_params+=("--tls-san ${k3s_tls_san_public}")
%{ endif }
if [[ "$operating_system" == "oraclelinux" ]]; then
k3s_install_params+=("--selinux")
fi
INSTALL_PARAMS="$${k3s_install_params[*]}"
%{ if k3s_version == "latest" }
# Resolve "latest" to the newest GitHub release tag at provision time.
K3S_VERSION=$(curl --silent https://api.github.com/repos/k3s-io/k3s/releases/latest | jq -r '.name')
%{ else }
K3S_VERSION="${k3s_version}"
%{ endif }
# The oldest server initializes the cluster; every other server joins it
# through the LB once the API answers. Both paths retry until success.
if [[ "$first_instance" == "$instance_id" ]]; then
echo "I'm the first yeeee: Cluster init!"
until (curl -sfL https://get.k3s.io | INSTALL_K3S_VERSION=$K3S_VERSION K3S_TOKEN=${k3s_token} sh -s - --cluster-init $INSTALL_PARAMS); do
echo 'k3s did not install correctly'
sleep 2
done
else
echo ":( Cluster join"
wait_lb
until (curl -sfL https://get.k3s.io | INSTALL_K3S_VERSION=$K3S_VERSION K3S_TOKEN=${k3s_token} sh -s - --server https://${k3s_url}:6443 $INSTALL_PARAMS); do
echo 'k3s did not install correctly'
sleep 2
done
fi
%{ if is_k3s_server }
# Post-install add-ons, run only once (on the cluster-init leader).
until kubectl get pods -A | grep 'Running'; do
echo 'Waiting for k3s startup'
sleep 5
done
%{ if install_longhorn }
if [[ "$first_instance" == "$instance_id" ]]; then
if [[ "$operating_system" == "ubuntu" ]]; then
DEBIAN_FRONTEND=noninteractive apt-get install --no-install-recommends -y open-iscsi curl util-linux
fi
systemctl enable --now iscsid.service
kubectl apply -f https://raw.githubusercontent.com/longhorn/longhorn/${longhorn_release}/deploy/longhorn.yaml --kubeconfig /etc/rancher/k3s/k3s.yaml
fi
%{ endif }
%{ if ! disable_ingress }
%{ if ingress_controller != "default" }
if [[ "$first_instance" == "$instance_id" ]]; then
install_ingress ${ingress_controller}
fi
%{ endif }
%{ endif }
%{ if install_certmanager }
if [[ "$first_instance" == "$instance_id" ]]; then
kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/${certmanager_release}/cert-manager.yaml --kubeconfig /etc/rancher/k3s/k3s.yaml
render_staging_issuer /root/staging_issuer.yaml
render_prod_issuer /root/prod_issuer.yaml
# Wait cert-manager to be ready
until kubectl get pods -n cert-manager | grep 'Running'; do
echo 'Waiting for cert-manager to be ready'
sleep 15
done
kubectl create -f /root/prod_issuer.yaml
kubectl create -f /root/staging_issuer.yaml
fi
%{ endif }
%{ if install_argocd }
if [[ "$first_instance" == "$instance_id" ]]; then
kubectl create namespace argocd
kubectl apply -n argocd -f https://raw.githubusercontent.com/argoproj/argo-cd/${argocd_release}/manifests/install.yaml --kubeconfig /etc/rancher/k3s/k3s.yaml
%{ if install_argocd_image_updater }
kubectl apply -n argocd -f https://raw.githubusercontent.com/argoproj-labs/argocd-image-updater/${argocd_image_updater_release}/manifests/install.yaml --kubeconfig /etc/rancher/k3s/k3s.yaml
%{ endif }
fi
%{ endif }
# NOTE(review): the block below unconditionally reinstalls kured, traefik and
# ArgoCD (with a hard-pinned v2.10.0-rc1), duplicating the guarded installs
# above -- it looks like leftover work-in-progress ("half done" commit); verify
# before keeping. $TRAEFIK_VALUES_FILE is also referenced without calling
# render_traefik2_config first.
if [[ "$first_instance" == "$instance_id" ]]; then
helm repo add kubereboot https://kubereboot.github.io/charts
helm repo add traefik https://helm.traefik.io/traefik
helm repo update
helm install kubereboot/kured --generate-name --kubeconfig /etc/rancher/k3s/k3s.yaml
TRAEFIK_VALUES_FILE=/root/traefik2_values.yaml
helm install --namespace=traefik -f $TRAEFIK_VALUES_FILE traefik traefik/traefik --kubeconfig /etc/rancher/k3s/k3s.yaml
kubectl create namespace argocd
kubectl apply -n argocd -f https://raw.githubusercontent.com/argoproj/argo-cd/v2.10.0-rc1/manifests/install.yaml
kubectl apply -n argocd -f https://raw.githubusercontent.com/argoproj-labs/argocd-image-updater/${argocd_image_updater_release}/manifests/install.yaml --kubeconfig /etc/rancher/k3s/k3s.yaml
fi
%{ endif }
# NOTE(review): this 'fi' has no matching 'if' among the lines visible here
# (the diff view shows 380 of 430 lines) -- verify the rendered script parses.
fi

28
self-managed/iam.tf Normal file
View File

@@ -0,0 +1,28 @@
# Dynamic group matching every instance in the compartment, so nodes can use
# instance-principal auth (OCI_CLI_AUTH=instance_principal in the scripts).
resource "oci_identity_dynamic_group" "compute_dynamic_group" {
compartment_id = var.tenancy_ocid
description = "Dynamic group which contains all instance in this compartment"
matching_rule = "All {instance.compartment.id = '${var.compartment_ocid}'}"
name = var.oci_identity_dynamic_group_name
freeform_tags = {
"provisioner" = "terraform"
"environment" = "${var.environment}"
"${var.unique_tag_key}" = "${var.unique_tag_value}"
}
}
# Read-only policy for the dynamic group: instances may list/describe compute
# resources (used by the install scripts to discover peer IPs and the leader).
resource "oci_identity_policy" "compute_dynamic_group_policy" {
compartment_id = var.compartment_ocid
description = "Policy to allow dynamic group ${oci_identity_dynamic_group.compute_dynamic_group.name} to read instance-family and compute-management-family in the compartment"
name = var.oci_identity_policy_name
statements = [
"allow dynamic-group ${oci_identity_dynamic_group.compute_dynamic_group.name} to read instance-family in compartment id ${var.compartment_ocid}",
"allow dynamic-group ${oci_identity_dynamic_group.compute_dynamic_group.name} to read compute-management-family in compartment id ${var.compartment_ocid}"
]
freeform_tags = {
"provisioner" = "terraform"
"environment" = "${var.environment}"
"${var.unique_tag_key}" = "${var.unique_tag_value}"
}
}

View File

@@ -0,0 +1,37 @@
# Instance pool for the k3s control-plane nodes. Requires the IAM dynamic
# group/policy to exist so the nodes' instance-principal calls succeed.
resource "oci_core_instance_pool" "k3s_servers" {
depends_on = [
oci_identity_dynamic_group.compute_dynamic_group,
oci_identity_policy.compute_dynamic_group_policy
]
timeouts {
create = "11m"
update = "11m"
delete = "10m"
}
lifecycle {
create_before_destroy = true
# LB attachments and the instance configuration are managed out-of-band.
ignore_changes = [load_balancers, freeform_tags, instance_configuration_id]
}
display_name = "k3s-servers"
compartment_id = var.compartment_ocid
instance_configuration_id = oci_core_instance_configuration.k3s_server_template.id
placement_configurations {
availability_domain = var.availability_domain
primary_subnet_id = oci_core_subnet.default_oci_core_subnet10.id
fault_domains = var.fault_domains
}
size = var.k3s_server_pool_size
freeform_tags = {
"provisioner" = "terraform"
"environment" = "${var.environment}"
"${var.unique_tag_key}" = "${var.unique_tag_value}"
"k3s-cluster-name" = "${var.cluster_name}"
"k3s-instance-type" = "k3s-server"
}
}

107
self-managed/k3s-workers.tf Normal file
View File

@@ -0,0 +1,107 @@
# Instance pool for the k3s worker (agent) nodes; created after the LB so the
# agents can reach the API endpoint at boot.
resource "oci_core_instance_pool" "k3s_workers" {
depends_on = [
oci_load_balancer_load_balancer.k3s_load_balancer,
]
lifecycle {
create_before_destroy = true
# LB attachments and the instance configuration are managed out-of-band.
ignore_changes = [load_balancers, freeform_tags, instance_configuration_id]
}
timeouts {
create = "11m"
update = "11m"
delete = "10m"
}
display_name = "k3s-workers"
compartment_id = var.compartment_ocid
instance_configuration_id = oci_core_instance_configuration.k3s_worker_template.id
placement_configurations {
availability_domain = var.availability_domain
primary_subnet_id = oci_core_subnet.default_oci_core_subnet10.id
fault_domains = var.fault_domains
}
size = var.k3s_worker_pool_size
freeform_tags = {
"provisioner" = "terraform"
"environment" = "${var.environment}"
"${var.unique_tag_key}" = "${var.unique_tag_value}"
"k3s-cluster-name" = "${var.cluster_name}"
"k3s-instance-type" = "k3s-worker"
}
}
# Optional standalone worker outside the instance pool (var.k3s_extra_worker_node).
resource "oci_core_instance" "k3s_extra_worker_node" {
count = var.k3s_extra_worker_node ? 1 : 0
depends_on = [
oci_load_balancer_load_balancer.k3s_load_balancer,
oci_core_instance_pool.k3s_workers
]
timeouts {
create = "11m"
update = "11m"
delete = "10m"
}
compartment_id = var.compartment_ocid
availability_domain = var.availability_domain
display_name = "K3s extra worker node"
agent_config {
is_management_disabled = "false"
is_monitoring_disabled = "false"
plugins_config {
desired_state = "DISABLED"
name = "Vulnerability Scanning"
}
plugins_config {
desired_state = "ENABLED"
name = "Compute Instance Monitoring"
}
plugins_config {
desired_state = "DISABLED"
name = "Bastion"
}
}
shape = var.agent_compute_shape
shape_config {
memory_in_gbs = "6"
ocpus = "1"
}
source_details {
source_id = var.agent_os_image_id
source_type = "image"
}
create_vnic_details {
assign_private_dns_record = true
assign_public_ip = true
subnet_id = oci_core_subnet.default_oci_core_subnet10.id
nsg_ids = [oci_core_network_security_group.lb_to_instances_http.id]
hostname_label = "k3s-extra-worker-node"
}
# Boots with the same cloud-init payload as pool workers.
metadata = {
"ssh_authorized_keys" = file(var.public_key_path)
"user_data" = data.cloudinit_config.k3s_worker_tpl.rendered
}
freeform_tags = {
"provisioner" = "terraform"
"environment" = "${var.environment}"
"${var.unique_tag_key}" = "${var.unique_tag_value}"
"k3s-cluster-name" = "${var.cluster_name}"
"k3s-instance-type" = "k3s-worker"
}
}

141
self-managed/k3slb.tf Normal file
View File

@@ -0,0 +1,141 @@
# Public network load balancer fronting HTTP/HTTPS traffic (and optionally
# the kube-api) for the cluster nodes.
resource "oci_network_load_balancer_network_load_balancer" "k3s_public_lb" {
  compartment_id                 = var.compartment_ocid
  display_name                   = var.public_load_balancer_name
  subnet_id                      = oci_core_subnet.oci_core_subnet11.id
  network_security_group_ids     = [oci_core_network_security_group.public_lb_nsg.id]
  is_private                     = false
  is_preserve_source_destination = false

  # Interpolation-only expressions ("${var.x}") are deprecated since 0.12.
  freeform_tags = {
    "provisioner"        = "terraform"
    "environment"        = var.environment
    (var.unique_tag_key) = var.unique_tag_value
  }
}
# ---------------------------------------------------------------------------
# Public NLB plumbing: HTTP, HTTPS and (optional) kube-api listeners with
# their backend sets and per-instance backends.
# ---------------------------------------------------------------------------

# --- HTTP ---

resource "oci_network_load_balancer_listener" "k3s_http_listener" {
  name                     = "k3s_http_listener"
  network_load_balancer_id = oci_network_load_balancer_network_load_balancer.k3s_public_lb.id
  default_backend_set_name = oci_network_load_balancer_backend_set.k3s_http_backend_set.name
  port                     = var.http_lb_port
  protocol                 = "TCP"
}

# HTTP backend set; health is probed on the same TCP port.
resource "oci_network_load_balancer_backend_set" "k3s_http_backend_set" {
  name                     = "k3s_http_backend"
  network_load_balancer_id = oci_network_load_balancer_network_load_balancer.k3s_public_lb.id
  policy                   = "FIVE_TUPLE"
  is_preserve_source       = true

  health_checker {
    protocol = "TCP"
    port     = var.http_lb_port
  }
}

# One HTTP backend per worker-pool instance.
resource "oci_network_load_balancer_backend" "k3s_http_backend" {
  depends_on = [
    oci_core_instance_pool.k3s_workers,
  ]
  count                    = var.k3s_worker_pool_size
  backend_set_name         = oci_network_load_balancer_backend_set.k3s_http_backend_set.name
  network_load_balancer_id = oci_network_load_balancer_network_load_balancer.k3s_public_lb.id
  name                     = format("%s:%s", data.oci_core_instance_pool_instances.k3s_workers_instances.instances[count.index].id, var.http_lb_port)
  port                     = var.http_lb_port
  target_id                = data.oci_core_instance_pool_instances.k3s_workers_instances.instances[count.index].id
}

# HTTP backend for the optional standalone extra worker node.
resource "oci_network_load_balancer_backend" "k3s_http_backend_extra_node" {
  count                    = var.k3s_extra_worker_node ? 1 : 0
  backend_set_name         = oci_network_load_balancer_backend_set.k3s_http_backend_set.name
  network_load_balancer_id = oci_network_load_balancer_network_load_balancer.k3s_public_lb.id
  name                     = format("%s:%s", oci_core_instance.k3s_extra_worker_node[count.index].id, var.http_lb_port)
  port                     = var.http_lb_port
  target_id                = oci_core_instance.k3s_extra_worker_node[count.index].id
}

# --- HTTPS ---

resource "oci_network_load_balancer_listener" "k3s_https_listener" {
  name                     = "k3s_https_listener"
  network_load_balancer_id = oci_network_load_balancer_network_load_balancer.k3s_public_lb.id
  default_backend_set_name = oci_network_load_balancer_backend_set.k3s_https_backend_set.name
  port                     = var.https_lb_port
  protocol                 = "TCP"
}

# HTTPS backend set; health is probed on the same TCP port.
resource "oci_network_load_balancer_backend_set" "k3s_https_backend_set" {
  name                     = "k3s_https_backend"
  network_load_balancer_id = oci_network_load_balancer_network_load_balancer.k3s_public_lb.id
  policy                   = "FIVE_TUPLE"
  is_preserve_source       = true

  health_checker {
    protocol = "TCP"
    port     = var.https_lb_port
  }
}

# One HTTPS backend per worker-pool instance.
resource "oci_network_load_balancer_backend" "k3s_https_backend" {
  depends_on = [
    oci_core_instance_pool.k3s_workers,
  ]
  count                    = var.k3s_worker_pool_size
  backend_set_name         = oci_network_load_balancer_backend_set.k3s_https_backend_set.name
  network_load_balancer_id = oci_network_load_balancer_network_load_balancer.k3s_public_lb.id
  name                     = format("%s:%s", data.oci_core_instance_pool_instances.k3s_workers_instances.instances[count.index].id, var.https_lb_port)
  port                     = var.https_lb_port
  target_id                = data.oci_core_instance_pool_instances.k3s_workers_instances.instances[count.index].id
}

# HTTPS backend for the optional standalone extra worker node.
resource "oci_network_load_balancer_backend" "k3s_https_backend_extra_node" {
  count                    = var.k3s_extra_worker_node ? 1 : 0
  backend_set_name         = oci_network_load_balancer_backend_set.k3s_https_backend_set.name
  network_load_balancer_id = oci_network_load_balancer_network_load_balancer.k3s_public_lb.id
  name                     = format("%s:%s", oci_core_instance.k3s_extra_worker_node[count.index].id, var.https_lb_port)
  port                     = var.https_lb_port
  target_id                = oci_core_instance.k3s_extra_worker_node[count.index].id
}

# --- kube-api (only when var.expose_kubeapi is true) ---

resource "oci_network_load_balancer_listener" "k3s_kubeapi_listener" {
  count                    = var.expose_kubeapi ? 1 : 0
  name                     = "k3s_kubeapi_listener"
  network_load_balancer_id = oci_network_load_balancer_network_load_balancer.k3s_public_lb.id
  default_backend_set_name = oci_network_load_balancer_backend_set.k3s_kubeapi_backend_set[count.index].name
  port                     = var.kube_api_port
  protocol                 = "TCP"
}

resource "oci_network_load_balancer_backend_set" "k3s_kubeapi_backend_set" {
  count                    = var.expose_kubeapi ? 1 : 0
  name                     = "k3s_kubeapi_backend"
  network_load_balancer_id = oci_network_load_balancer_network_load_balancer.k3s_public_lb.id
  policy                   = "FIVE_TUPLE"
  is_preserve_source       = true

  health_checker {
    protocol = "TCP"
    port     = var.kube_api_port
  }
}

# One kube-api backend per k3s server (control-plane) instance.
resource "oci_network_load_balancer_backend" "k3s_kubeapi_backend" {
  depends_on = [
    oci_core_instance_pool.k3s_servers,
  ]
  count                    = var.expose_kubeapi ? var.k3s_server_pool_size : 0
  backend_set_name         = oci_network_load_balancer_backend_set.k3s_kubeapi_backend_set[0].name
  network_load_balancer_id = oci_network_load_balancer_network_load_balancer.k3s_public_lb.id
  name                     = format("%s:%s", data.oci_core_instance_pool_instances.k3s_servers_instances.instances[count.index].id, var.kube_api_port)
  port                     = var.kube_api_port
  target_id                = data.oci_core_instance_pool_instances.k3s_servers_instances.instances[count.index].id
}

54
self-managed/lb.tf Normal file
View File

@@ -0,0 +1,54 @@
# Internal (private) classic load balancer used by cluster nodes to reach
# the kube-api; NSG changes are managed out-of-band, hence ignore_changes.
resource "oci_load_balancer_load_balancer" "k3s_load_balancer" {
  lifecycle {
    ignore_changes = [network_security_group_ids]
  }

  compartment_id = var.compartment_ocid
  display_name   = var.k3s_load_balancer_name
  shape          = var.public_lb_shape
  subnet_ids     = [oci_core_subnet.oci_core_subnet11.id]

  # Interpolation-only expressions ("${var.x}") are deprecated since 0.12.
  freeform_tags = {
    "provisioner"        = "terraform"
    "environment"        = var.environment
    (var.unique_tag_key) = var.unique_tag_value
  }

  ip_mode    = "IPV4"
  is_private = true

  shape_details {
    maximum_bandwidth_in_mbps = 10
    minimum_bandwidth_in_mbps = 10
  }
}
# Listener for the kube-api port on the internal LB.
resource "oci_load_balancer_listener" "k3s_kube_api_listener" {
  name                     = "K3s__kube_api_listener"
  load_balancer_id         = oci_load_balancer_load_balancer.k3s_load_balancer.id
  default_backend_set_name = oci_load_balancer_backend_set.k3s_kube_api_backend_set.name
  port                     = var.kube_api_port
  protocol                 = "TCP"
}

# Round-robin backend set for the kube-api, TCP health-checked.
resource "oci_load_balancer_backend_set" "k3s_kube_api_backend_set" {
  name             = "K3s__kube_api_backend_set"
  load_balancer_id = oci_load_balancer_load_balancer.k3s_load_balancer.id
  policy           = "ROUND_ROBIN"

  health_checker {
    protocol = "TCP"
    port     = var.kube_api_port
  }
}

# One backend per k3s server instance, addressed by private IP.
resource "oci_load_balancer_backend" "k3s_kube_api_backend" {
  depends_on = [
    oci_core_instance_pool.k3s_servers,
  ]
  count            = var.k3s_server_pool_size
  backendset_name  = oci_load_balancer_backend_set.k3s_kube_api_backend_set.name
  load_balancer_id = oci_load_balancer_load_balancer.k3s_load_balancer.id
  ip_address       = data.oci_core_instance.k3s_servers_instances_ips[count.index].private_ip
  port             = var.kube_api_port
}

3
self-managed/locals.tf Normal file
View File

@@ -0,0 +1,3 @@
# Public IPv4 address(es) assigned to the k3s public network load balancer.
locals {
  public_lb_ip = [
    for addr in oci_network_load_balancer_network_load_balancer.k3s_public_lb.ip_addresses :
    addr.ip_address
    if addr.is_public == true
  ]
}

63
self-managed/network.tf Normal file
View File

@@ -0,0 +1,63 @@
# Default VCN hosting both cluster subnets.
# Note: interpolation-only expressions ("${var.x}") are deprecated since
# Terraform 0.12 and are replaced with bare references throughout.
resource "oci_core_vcn" "default_oci_core_vcn" {
  cidr_block     = var.oci_core_vcn_cidr
  compartment_id = var.compartment_ocid
  display_name   = "Default OCI core vcn"
  dns_label      = var.oci_core_vcn_dns_label

  freeform_tags = {
    "provisioner"        = "terraform"
    "environment"        = var.environment
    (var.unique_tag_key) = var.unique_tag_value
  }
}

# Subnet for the compute instances (servers + workers).
resource "oci_core_subnet" "default_oci_core_subnet10" {
  cidr_block        = var.oci_core_subnet_cidr10
  compartment_id    = var.compartment_ocid
  display_name      = "${var.oci_core_subnet_cidr10} (default) OCI core subnet"
  dns_label         = var.oci_core_subnet_dns_label10
  route_table_id    = oci_core_vcn.default_oci_core_vcn.default_route_table_id
  vcn_id            = oci_core_vcn.default_oci_core_vcn.id
  security_list_ids = [oci_core_default_security_list.default_security_list.id]

  freeform_tags = {
    "provisioner"        = "terraform"
    "environment"        = var.environment
    (var.unique_tag_key) = var.unique_tag_value
  }
}

# Subnet for the load balancers.
resource "oci_core_subnet" "oci_core_subnet11" {
  cidr_block        = var.oci_core_subnet_cidr11
  compartment_id    = var.compartment_ocid
  display_name      = "${var.oci_core_subnet_cidr11} OCI core subnet"
  dns_label         = var.oci_core_subnet_dns_label11
  route_table_id    = oci_core_vcn.default_oci_core_vcn.default_route_table_id
  vcn_id            = oci_core_vcn.default_oci_core_vcn.id
  security_list_ids = [oci_core_default_security_list.default_security_list.id]

  freeform_tags = {
    "provisioner"        = "terraform"
    "environment"        = var.environment
    (var.unique_tag_key) = var.unique_tag_value
  }
}

# Internet gateway providing outbound/inbound internet access for the VCN.
resource "oci_core_internet_gateway" "default_oci_core_internet_gateway" {
  compartment_id = var.compartment_ocid
  display_name   = "Internet Gateway Default OCI core vcn"
  enabled        = true
  vcn_id         = oci_core_vcn.default_oci_core_vcn.id

  freeform_tags = {
    "provisioner"        = "terraform"
    "environment"        = var.environment
    (var.unique_tag_key) = var.unique_tag_value
  }
}

# Default route table: send all traffic through the internet gateway.
resource "oci_core_default_route_table" "default_oci_core_default_route_table" {
  route_rules {
    destination       = "0.0.0.0/0"
    destination_type  = "CIDR_BLOCK"
    network_entity_id = oci_core_internet_gateway.default_oci_core_internet_gateway.id
  }
  manage_default_resource_id = oci_core_vcn.default_oci_core_vcn.default_route_table_id
}

151
self-managed/nsg.tf Normal file
View File

@@ -0,0 +1,151 @@
# NSG attached to the public network load balancer.
resource "oci_core_network_security_group" "public_lb_nsg" {
  compartment_id = var.compartment_ocid
  vcn_id         = oci_core_vcn.default_oci_core_vcn.id
  display_name   = "K3s public LB nsg"

  # Interpolation-only expressions ("${var.x}") are deprecated since 0.12.
  freeform_tags = {
    "provisioner"        = "terraform"
    "environment"        = var.environment
    (var.unique_tag_key) = var.unique_tag_value
  }
}
# Allow inbound HTTP (var.http_lb_port) to the public LB from anywhere.
resource "oci_core_network_security_group_security_rule" "allow_http_from_all" {
  network_security_group_id = oci_core_network_security_group.public_lb_nsg.id
  direction                 = "INGRESS"
  protocol                  = 6 # tcp
  description               = "Allow HTTP from all"
  source                    = "0.0.0.0/0"
  source_type               = "CIDR_BLOCK"
  stateless                 = false

  tcp_options {
    destination_port_range {
      min = var.http_lb_port
      max = var.http_lb_port
    }
  }
}

# Allow inbound HTTPS (var.https_lb_port) to the public LB from anywhere.
resource "oci_core_network_security_group_security_rule" "allow_https_from_all" {
  network_security_group_id = oci_core_network_security_group.public_lb_nsg.id
  direction                 = "INGRESS"
  protocol                  = 6 # tcp
  description               = "Allow HTTPS from all"
  source                    = "0.0.0.0/0"
  source_type               = "CIDR_BLOCK"
  stateless                 = false

  tcp_options {
    destination_port_range {
      min = var.https_lb_port
      max = var.https_lb_port
    }
  }
}
# Optionally allow inbound kube-api traffic to the public LB, restricted to
# the operator's address range (var.my_public_ip_cidr).
resource "oci_core_network_security_group_security_rule" "allow_kubeapi_from_all" {
  count                     = var.expose_kubeapi ? 1 : 0
  network_security_group_id = oci_core_network_security_group.public_lb_nsg.id
  direction                 = "INGRESS"
  protocol                  = 6 # tcp
  # Fixed description: this rule permits the kube-api port from
  # var.my_public_ip_cidr — the previous "Allow HTTPS from all" was a
  # copy-paste leftover.
  description = "Allow kube-api access from my_public_ip_cidr"
  source      = var.my_public_ip_cidr
  source_type = "CIDR_BLOCK"
  stateless   = false

  tcp_options {
    destination_port_range {
      min = var.kube_api_port
      max = var.kube_api_port
    }
  }
}
# NSG attached to worker instances, admitting HTTP/HTTPS from the public LB.
resource "oci_core_network_security_group" "lb_to_instances_http" {
  compartment_id = var.compartment_ocid
  vcn_id         = oci_core_vcn.default_oci_core_vcn.id
  display_name   = "Public LB to K3s workers Compute Instances NSG"

  # Interpolation-only expressions ("${var.x}") are deprecated since 0.12.
  freeform_tags = {
    "provisioner"        = "terraform"
    "environment"        = var.environment
    (var.unique_tag_key) = var.unique_tag_value
  }
}
# Allow HTTP to worker instances, but only from the public LB's NSG —
# description corrected: the source is the LB NSG, not "all".
resource "oci_core_network_security_group_security_rule" "nsg_to_instances_http" {
  network_security_group_id = oci_core_network_security_group.lb_to_instances_http.id
  direction                 = "INGRESS"
  protocol                  = 6 # tcp
  description               = "Allow HTTP from the public LB NSG"
  source                    = oci_core_network_security_group.public_lb_nsg.id
  source_type               = "NETWORK_SECURITY_GROUP"
  stateless                 = false

  tcp_options {
    destination_port_range {
      min = var.http_lb_port
      max = var.http_lb_port
    }
  }
}

# Allow HTTPS to worker instances, but only from the public LB's NSG —
# description corrected likewise.
resource "oci_core_network_security_group_security_rule" "nsg_to_instances_https" {
  network_security_group_id = oci_core_network_security_group.lb_to_instances_http.id
  direction                 = "INGRESS"
  protocol                  = 6 # tcp
  description               = "Allow HTTPS from the public LB NSG"
  source                    = oci_core_network_security_group.public_lb_nsg.id
  source_type               = "NETWORK_SECURITY_GROUP"
  stateless                 = false

  tcp_options {
    destination_port_range {
      min = var.https_lb_port
      max = var.https_lb_port
    }
  }
}
# NSG attached to k3s server instances, admitting kube-api from the public LB.
resource "oci_core_network_security_group" "lb_to_instances_kubeapi" {
  compartment_id = var.compartment_ocid
  vcn_id         = oci_core_vcn.default_oci_core_vcn.id
  display_name   = "Public LB to K3s master Compute Instances NSG (kubeapi)"

  # Interpolation-only expressions ("${var.x}") are deprecated since 0.12.
  freeform_tags = {
    "provisioner"        = "terraform"
    "environment"        = var.environment
    (var.unique_tag_key) = var.unique_tag_value
  }
}
# Allow kube-api to server instances, only from the public LB's NSG —
# description corrected: the source here is the LB NSG, not
# var.my_public_ip_cidr (that restriction is applied on the LB NSG itself).
resource "oci_core_network_security_group_security_rule" "nsg_to_instances_kubeapi" {
  count                     = var.expose_kubeapi ? 1 : 0
  network_security_group_id = oci_core_network_security_group.lb_to_instances_kubeapi.id
  direction                 = "INGRESS"
  protocol                  = 6 # tcp
  description               = "Allow kube-api access from the public LB NSG"
  source                    = oci_core_network_security_group.public_lb_nsg.id
  source_type               = "NETWORK_SECURITY_GROUP"
  stateless                 = false

  tcp_options {
    destination_port_range {
      min = var.kube_api_port
      max = var.kube_api_port
    }
  }
}

View File

@@ -0,0 +1,10 @@
terraform {
  required_providers {
    oci = {
      source  = "oracle/oci"
      version = ">= 5.30.0"
    }
  }
}

# Empty provider block: no region or credentials are configured here.
# NOTE(review): presumably provider configuration comes from the environment
# (OCI_* variables / ~/.oci/config) — confirm this is intended rather than
# wiring var.region in explicitly.
provider "oci" {}

17
self-managed/output.tf Normal file
View File

@@ -0,0 +1,17 @@
# Public IPs of the k3s server (control-plane) instances.
# The legacy ".*." attribute splat is deprecated; use the [*] splat operator.
output "k3s_servers_ips" {
  depends_on = [
    data.oci_core_instance_pool_instances.k3s_servers_instances,
  ]
  value = data.oci_core_instance.k3s_servers_instances_ips[*].public_ip
}

# Public IPs of the k3s worker instances.
output "k3s_workers_ips" {
  depends_on = [
    data.oci_core_instance_pool_instances.k3s_workers_instances,
  ]
  value = data.oci_core_instance.k3s_workers_instances_ips[*].public_ip
}

# All IP address objects of the public NLB (public and private).
output "public_lb_ip" {
  value = oci_network_load_balancer_network_load_balancer.k3s_public_lb.ip_addresses
}

43
self-managed/security.tf Normal file
View File

@@ -0,0 +1,43 @@
# Default security list for the VCN: open egress, ICMP + SSH only from the
# operator's address range, and unrestricted traffic inside the VCN.
resource "oci_core_default_security_list" "default_security_list" {
  compartment_id             = var.compartment_ocid
  manage_default_resource_id = oci_core_vcn.default_oci_core_vcn.default_security_list_id
  display_name               = "Default security list"

  egress_security_rules {
    destination = "0.0.0.0/0"
    protocol    = "all"
  }

  ingress_security_rules {
    protocol    = 1 # icmp
    source      = var.my_public_ip_cidr
    description = "Allow icmp from ${var.my_public_ip_cidr}"
  }

  ingress_security_rules {
    protocol    = 6 # tcp
    source      = var.my_public_ip_cidr
    description = "Allow SSH from ${var.my_public_ip_cidr}"
    tcp_options {
      min = 22
      max = 22
    }
  }

  # Node-to-node traffic (kubelet, flannel, etc.) stays open inside the VCN.
  ingress_security_rules {
    protocol    = "all"
    source      = var.oci_core_vcn_cidr
    description = "Allow all from vcn subnet"
  }

  # Interpolation-only expressions ("${var.x}") are deprecated since 0.12.
  freeform_tags = {
    "provisioner"        = "terraform"
    "environment"        = var.environment
    (var.unique_tag_key) = var.unique_tag_value
  }
}

144
self-managed/template.tf Normal file
View File

@@ -0,0 +1,144 @@
# Instance configuration (template) used by the k3s server instance pool.
resource "oci_core_instance_configuration" "k3s_server_template" {
  compartment_id = var.compartment_ocid
  display_name   = "k3s server configuration"

  timeouts {
    create = "11m"
    update = "11m"
    delete = "10m"
  }

  # Interpolation-only expressions ("${var.x}") are deprecated since 0.12.
  freeform_tags = {
    "provisioner"        = "terraform"
    "environment"        = var.environment
    (var.unique_tag_key) = var.unique_tag_value
    "k3s-template-type"  = "k3s-server"
  }

  instance_details {
    instance_type = "compute"

    launch_details {
      agent_config {
        # Booleans instead of quoted strings (the provider coerced "false").
        is_management_disabled = false
        is_monitoring_disabled = false
        plugins_config {
          desired_state = "DISABLED"
          name          = "Vulnerability Scanning"
        }
        plugins_config {
          desired_state = "ENABLED"
          name          = "Compute Instance Monitoring"
        }
        plugins_config {
          desired_state = "DISABLED"
          name          = "Bastion"
        }
      }

      availability_domain = var.availability_domain
      compartment_id      = var.compartment_ocid

      create_vnic_details {
        assign_public_ip = true
        subnet_id        = oci_core_subnet.default_oci_core_subnet10.id
        nsg_ids          = [oci_core_network_security_group.lb_to_instances_kubeapi.id]
      }

      display_name = "k3s server template"

      metadata = {
        "ssh_authorized_keys" = file(var.public_key_path)
        "user_data"           = data.cloudinit_config.k3s_server_tpl.rendered
      }

      shape = var.server_compute_shape
      shape_config {
        memory_in_gbs = 1
        ocpus         = 1
      }

      source_details {
        image_id    = var.server_os_image_id
        source_type = "image"
      }
    }
  }
}
# Instance configuration (template) used by the k3s worker instance pool.
resource "oci_core_instance_configuration" "k3s_worker_template" {
  compartment_id = var.compartment_ocid
  display_name   = "k3s worker configuration"

  # Interpolation-only expressions ("${var.x}") are deprecated since 0.12.
  freeform_tags = {
    "provisioner"        = "terraform"
    "environment"        = var.environment
    (var.unique_tag_key) = var.unique_tag_value
    "k3s-template-type"  = "k3s-worker"
  }

  timeouts {
    create = "11m"
    update = "11m"
    delete = "10m"
  }

  instance_details {
    instance_type = "compute"

    launch_details {
      agent_config {
        # Booleans instead of quoted strings (the provider coerced "false").
        is_management_disabled = false
        is_monitoring_disabled = false
        plugins_config {
          desired_state = "DISABLED"
          name          = "Vulnerability Scanning"
        }
        plugins_config {
          desired_state = "ENABLED"
          name          = "Compute Instance Monitoring"
        }
        plugins_config {
          desired_state = "DISABLED"
          name          = "Bastion"
        }
      }

      availability_domain = var.availability_domain
      # fault_domain = "FAULT-DOMAIN-3"
      compartment_id = var.compartment_ocid

      create_vnic_details {
        assign_public_ip = true
        subnet_id        = oci_core_subnet.default_oci_core_subnet10.id
        nsg_ids          = [oci_core_network_security_group.lb_to_instances_http.id]
      }

      display_name = "k3s worker template"

      metadata = {
        "ssh_authorized_keys" = file(var.public_key_path)
        "user_data"           = data.cloudinit_config.k3s_worker_tpl.rendered
      }

      shape = var.agent_compute_shape
      shape_config {
        memory_in_gbs = 12
        ocpus         = 2
      }

      source_details {
        image_id    = var.agent_os_image_id
        source_type = "image"
      }
    }
  }
}

View File

@@ -0,0 +1,8 @@
# Provider version requirements.
# NOTE(review): another terraform block in this module pins the oci provider
# to >= 5.30.0 — constraints combine, so >= 5.30.0 wins; confirm which single
# constraint is intended and keep only one required_providers entry.
terraform {
  required_providers {
    oci = {
      source  = "oracle/oci"
      version = ">= 4.64.0"
    }
  }
}

238
self-managed/vars.tf Normal file
View File

@@ -0,0 +1,238 @@
# ---------------------------------------------------------------------------
# Input variables for the self-managed k3s OCI cluster.
# ---------------------------------------------------------------------------

variable "region" {
  type        = string
  description = "OCI region to deploy into"
}

variable "availability_domain" {
  type        = string
  description = "Availability domain used for all compute resources"
}

variable "tenancy_ocid" {
  # Type constraint added for consistency with the sibling OCID variables.
  type        = string
  description = "OCID of the tenancy"
}

variable "compartment_ocid" {
  type        = string
  description = "OCID of the compartment where all resources are created"
}

variable "environment" {
  type        = string
  description = "Environment name used in freeform tags (e.g. staging, production)"
}

variable "cluster_name" {
  type        = string
  description = "Name of the k3s cluster, used in freeform tags"
}

variable "server_os_image_id" {
  type        = string
  description = "Image OCID for k3s server (control-plane) nodes"
}

variable "agent_os_image_id" {
  type        = string
  description = "Image OCID for k3s worker (agent) nodes"
}

variable "k3s_version" {
  type    = string
  default = "latest"
}

variable "k3s_subnet" {
  type    = string
  default = "default_route_table"
}

variable "fault_domains" {
  type    = list(any)
  default = ["FAULT-DOMAIN-1", "FAULT-DOMAIN-2", "FAULT-DOMAIN-3"]
}

variable "public_key_path" {
  type        = string
  default     = "~/.ssh/id_rsa.pub"
  description = "Path to your public workstation SSH key"
}

variable "agent_compute_shape" {
  type    = string
  default = "VM.Standard.A1.Flex"
}

variable "server_compute_shape" {
  type    = string
  default = "VM.Standard.E2.1.Micro"
}

variable "public_lb_shape" {
  type    = string
  default = "flexible"
}

variable "oci_identity_dynamic_group_name" {
  type        = string
  default     = "Compute_Dynamic_Group"
  description = "Dynamic group which contains all instance in this compartment"
}

variable "oci_identity_policy_name" {
  type        = string
  default     = "Compute_To_Oci_Api_Policy"
  description = "Policy to allow dynamic group, to read OCI api without auth"
}

variable "oci_core_vcn_dns_label" {
  type    = string
  default = "defaultvcn"
}

variable "oci_core_subnet_dns_label10" {
  type    = string
  default = "defaultsubnet10"
}

variable "oci_core_subnet_dns_label11" {
  type    = string
  default = "defaultsubnet11"
}

variable "oci_core_vcn_cidr" {
  type    = string
  default = "10.0.0.0/16"
}

variable "oci_core_subnet_cidr10" {
  type    = string
  default = "10.0.0.0/24"
}

variable "oci_core_subnet_cidr11" {
  type    = string
  default = "10.0.1.0/24"
}

variable "kube_api_port" {
  type    = number
  default = 6443
}

variable "k3s_load_balancer_name" {
  type    = string
  default = "k3s internal load balancer"
}

variable "public_load_balancer_name" {
  type    = string
  default = "K3s public LB"
}

variable "http_lb_port" {
  type    = number
  default = 80
}

variable "https_lb_port" {
  type    = number
  default = 443
}

variable "ingress_controller_http_nodeport" {
  type    = number
  default = 30080
}

variable "ingress_controller_https_nodeport" {
  type    = number
  default = 30443
}

variable "k3s_server_pool_size" {
  type    = number
  default = 1
}

variable "k3s_worker_pool_size" {
  type    = number
  default = 2
}

variable "k3s_extra_worker_node" {
  type    = bool
  default = true
}

variable "unique_tag_key" {
  type    = string
  default = "k3s-provisioner"
}

variable "unique_tag_value" {
  type    = string
  default = "https://github.com/garutilorenzo/k3s-oci-cluster"
}

variable "my_public_ip_cidr" {
  type        = string
  description = "My public ip CIDR"
}

variable "disable_ingress" {
  type    = bool
  default = false
}

variable "ingress_controller" {
  type    = string
  default = "default"
  validation {
    condition     = contains(["default", "traefik2"], var.ingress_controller)
    error_message = "Supported ingress controllers are: default, traefik2"
  }
}

variable "install_certmanager" {
  type    = bool
  default = true
}

variable "certmanager_release" {
  type    = string
  default = "v1.11.0"
}

variable "certmanager_email_address" {
  type    = string
  default = "changeme@example.com"
}

variable "install_longhorn" {
  type    = bool
  default = true
}

variable "longhorn_release" {
  type    = string
  default = "v1.4.0"
}

variable "install_argocd" {
  type    = bool
  default = true
}

variable "argocd_release" {
  type    = string
  default = "v2.4.11"
}

variable "install_argocd_image_updater" {
  type    = bool
  default = true
}

variable "argocd_image_updater_release" {
  type    = string
  default = "v0.12.0"
}

variable "expose_kubeapi" {
  type    = bool
  default = false
}