Kubernetes Cluster API — Provision workload clusters on AWS

Cluster API lets a management cluster create and manage workload clusters declaratively. Start by downloading the clusterctl and clusterawsadm binaries and moving them onto your PATH:

$ curl -L https://github.com/kubernetes-sigs/cluster-api/releases/download/v0.3.14/clusterctl-linux-amd64 -o clusterctl
$ chmod +x ./clusterctl
$ sudo mv ./clusterctl /usr/local/bin/clusterctl
$ curl -L https://github.com/kubernetes-sigs/cluster-api-provider-aws/releases/download/v0.6.4/clusterawsadm-linux-amd64 -o clusterawsadm
$ chmod +x ./clusterawsadm
$ sudo mv ./clusterawsadm /usr/local/bin/clusterawsadm
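Both tools report their version, which is a quick way to confirm the binaries are installed correctly:

$ clusterctl version
$ clusterawsadm version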
Export your AWS credentials and enable the two experimental feature gates used later in this walkthrough (MachinePools and ClusterResourceSets):

$ export AWS_ACCESS_KEY_ID='<-YOUR-ACCESS-KEY->'
$ export AWS_SECRET_ACCESS_KEY='<-YOUR-SECRET-ACCESS-KEY->'
$ export EXP_MACHINE_POOL=true
$ export EXP_CLUSTER_RESOURCE_SET=true
With the credentials in place, clusterawsadm can create the IAM roles, policies and instance profiles that the AWS provider needs, packaged as a CloudFormation stack:

$ clusterawsadm bootstrap iam create-cloudformation-stack
Attempting to create AWS CloudFormation stack cluster-api-provider-aws-sigs-k8s-io
I1206 22:23:19.620891 357601 service.go:59] AWS Cloudformation stack "cluster-api-provider-aws-sigs-k8s-io" already exists, updating

Following resources are in the stack:

Resource                  |Type                                                                                |Status
AWS::IAM::InstanceProfile |control-plane.cluster-api-provider-aws.sigs.k8s.io                                 |CREATE_COMPLETE
AWS::IAM::InstanceProfile |controllers.cluster-api-provider-aws.sigs.k8s.io                                   |CREATE_COMPLETE
AWS::IAM::InstanceProfile |nodes.cluster-api-provider-aws.sigs.k8s.io                                         |CREATE_COMPLETE
AWS::IAM::ManagedPolicy   |arn:aws:iam::552276840222:policy/control-plane.cluster-api-provider-aws.sigs.k8s.io |CREATE_COMPLETE
AWS::IAM::ManagedPolicy   |arn:aws:iam::552276840222:policy/nodes.cluster-api-provider-aws.sigs.k8s.io        |CREATE_COMPLETE
AWS::IAM::ManagedPolicy   |arn:aws:iam::552276840222:policy/controllers.cluster-api-provider-aws.sigs.k8s.io  |CREATE_COMPLETE
AWS::IAM::Role            |control-plane.cluster-api-provider-aws.sigs.k8s.io                                 |CREATE_COMPLETE
AWS::IAM::Role            |controllers.cluster-api-provider-aws.sigs.k8s.io                                   |CREATE_COMPLETE
AWS::IAM::Role            |nodes.cluster-api-provider-aws.sigs.k8s.io                                         |CREATE_COMPLETE
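If you want to inspect the stack independently of clusterawsadm, the plain AWS CLI can describe it (assuming the aws CLI is configured for the same account):

$ aws cloudformation describe-stacks --stack-name cluster-api-provider-aws-sigs-k8s-io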
Next, encode the credentials so clusterctl can hand them to the provider. Note the warning the command prints:

$ export AWS_B64ENCODED_CREDENTIALS=$(clusterawsadm bootstrap credentials encode-as-profile)

WARNING: `encode-as-profile` should only be used for bootstrapping.

Finally, initialize the management cluster with the AWS infrastructure provider:

$ clusterctl init --infrastructure aws --watching-namespace k8s
Fetching providers
Installing cert-manager Version="v0.16.1"
Waiting for cert-manager to be available...
Installing Provider="cluster-api" Version="v0.3.14" TargetNamespace="capi-system"
Installing Provider="bootstrap-kubeadm" Version="v0.3.14" TargetNamespace="capi-kubeadm-bootstrap-system"
Installing Provider="control-plane-kubeadm" Version="v0.3.14" TargetNamespace="capi-kubeadm-control-plane-system"
Installing Provider="infrastructure-aws" Version="v0.6.3" TargetNamespace="capa-system"

Your management cluster has been initialized successfully!

You can now create your first workload cluster by running the following:

clusterctl config cluster [name] --kubernetes-version [version] | kubectl apply -f -
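For instance, the following is one way to generate the manifest for the cluster built in this article, assuming the template's required environment variables (such as AWS_REGION and AWS_SSH_KEY_NAME) are exported; the flag values are just this walkthrough's choices:

$ clusterctl config cluster cluster-1 \
    --kubernetes-version v1.20.4 \
    --target-namespace k8s \
    --control-plane-machine-count=1 \
    --worker-machine-count=1 > cluster-1.yaml

The command only renders YAML; nothing is created until you kubectl apply the result.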
The generated manifest is a good starting point, but you will usually customise it. The AWSCluster object carries the AWS-specific settings: region, SSH key and network layout. Here a /23 VPC is carved into six /27 subnets, three public and three private, spread across three availability zones:

---
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3
kind: AWSCluster
metadata:
  name: cluster-1
  namespace: k8s
spec:
  region: eu-west-1
  sshKeyName: default
  networkSpec:
    vpc:
      cidrBlock: "10.0.0.0/23"
    subnets:
    - availabilityZone: eu-west-1a
      cidrBlock: "10.0.0.0/27"
      isPublic: true
    - availabilityZone: eu-west-1b
      cidrBlock: "10.0.0.32/27"
      isPublic: true
    - availabilityZone: eu-west-1c
      cidrBlock: "10.0.0.64/27"
      isPublic: true
    - availabilityZone: eu-west-1a
      cidrBlock: "10.0.1.0/27"
    - availabilityZone: eu-west-1b
      cidrBlock: "10.0.1.32/27"
    - availabilityZone: eu-west-1c
      cidrBlock: "10.0.1.64/27"
Alternatively, you can reuse existing infrastructure by referencing the VPC and subnets by ID instead of letting the provider create them:

---
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3
kind: AWSCluster
metadata:
  name: cluster-1
  namespace: k8s
spec:
  region: eu-west-1
  sshKeyName: default
  networkSpec:
    vpc:
      id: vpc-0425c335226437144
    subnets:
    - id: subnet-0261219d564bb0dc5
    - id: subnet-0fdcccba78668e013
    ...
Machines are stamped out from templates. The control-plane nodes use an AWSMachineTemplate whose name must match the infrastructureTemplate reference in the KubeadmControlPlane that follows:

---
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3
kind: AWSMachineTemplate
metadata:
  name: cluster-1-control-plane
  namespace: k8s
spec:
  template:
    spec:
      iamInstanceProfile: control-plane.cluster-api-provider-aws.sigs.k8s.io
      instanceType: t3.small
      sshKeyName: default
---
apiVersion: controlplane.cluster.x-k8s.io/v1alpha3
kind: KubeadmControlPlane
metadata:
  name: cluster-1-control-plane
  namespace: k8s
spec:
  infrastructureTemplate:
    apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3
    kind: AWSMachineTemplate
    name: cluster-1-control-plane
  kubeadmConfigSpec:
    clusterConfiguration:
      apiServer:
        extraArgs:
          cloud-provider: aws
      controllerManager:
        extraArgs:
          cloud-provider: aws
    initConfiguration:
      nodeRegistration:
        kubeletExtraArgs:
          cloud-provider: aws
        name: '{{ ds.meta_data.local_hostname }}'
    joinConfiguration:
      nodeRegistration:
        kubeletExtraArgs:
          cloud-provider: aws
        name: '{{ ds.meta_data.local_hostname }}'
  replicas: 1
  version: v1.20.4
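A nice property of this declarative model is that a Kubernetes upgrade is just a field change: bumping spec.version triggers a rolling replacement of the control-plane machines. A minimal sketch, assuming AMIs for the hypothetical target version v1.20.5 are published in your region:

$ kubectl --namespace=k8s patch kubeadmcontrolplane cluster-1-control-plane \
    --type=merge --patch '{"spec":{"version":"v1.20.5"}}'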
The data-plane (worker) nodes follow the same pattern: an AWSMachineTemplate for the EC2 instances, a KubeadmConfigTemplate for how they join the cluster, and a MachineDeployment that manages the replicas:

---
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3
kind: AWSMachineTemplate
metadata:
  name: cluster-1-data-plane-0
  namespace: k8s
spec:
  template:
    spec:
      iamInstanceProfile: nodes.cluster-api-provider-aws.sigs.k8s.io
      instanceType: t3.small
      sshKeyName: default
---
apiVersion: bootstrap.cluster.x-k8s.io/v1alpha3
kind: KubeadmConfigTemplate
metadata:
  name: cluster-1-data-plane-0
  namespace: k8s
spec:
  template:
    spec:
      joinConfiguration:
        nodeRegistration:
          kubeletExtraArgs:
            cloud-provider: aws
          name: '{{ ds.meta_data.local_hostname }}'
---
apiVersion: cluster.x-k8s.io/v1alpha3
kind: MachineDeployment
metadata:
  name: cluster-1-data-plane-0
  namespace: k8s
spec:
  clusterName: cluster-1
  replicas: 1
  selector:
    matchLabels: null
  template:
    metadata:
      labels:
        "nodepool": "nodepool-0"
    spec:
      bootstrap:
        configRef:
          apiVersion: bootstrap.cluster.x-k8s.io/v1alpha3
          kind: KubeadmConfigTemplate
          name: cluster-1-data-plane-0
      clusterName: cluster-1
      infrastructureRef:
        apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3
        kind: AWSMachineTemplate
        name: cluster-1-data-plane-0
      version: v1.20.4
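Nothing limits you to a single pool. A second MachineDeployment with its own nodepool label gives you an independently scalable set of workers. As a minimal sketch, the hypothetical pool below reuses the templates already defined above; in practice you would usually give it its own templates (for example with a different instance type):

---
apiVersion: cluster.x-k8s.io/v1alpha3
kind: MachineDeployment
metadata:
  name: cluster-1-data-plane-1   # hypothetical second pool
  namespace: k8s
spec:
  clusterName: cluster-1
  replicas: 2
  selector:
    matchLabels: null
  template:
    metadata:
      labels:
        "nodepool": "nodepool-1"
    spec:
      bootstrap:
        configRef:
          apiVersion: bootstrap.cluster.x-k8s.io/v1alpha3
          kind: KubeadmConfigTemplate
          name: cluster-1-data-plane-0
      clusterName: cluster-1
      infrastructureRef:
        apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3
        kind: AWSMachineTemplate
        name: cluster-1-data-plane-0
      version: v1.20.4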
This is where the EXP_CLUSTER_RESOURCE_SET feature gate from earlier pays off: a ClusterResourceSet applies add-on manifests (here the Calico CNI and the NGINX ingress controller) to every cluster matched by its selector. The resources it references are ConfigMaps holding the raw YAML:

---
apiVersion: addons.cluster.x-k8s.io/v1alpha3
kind: ClusterResourceSet
metadata:
  name: cluster-1-crs-0
  namespace: k8s
spec:
  clusterSelector:
    matchLabels:
      cluster.x-k8s.io/cluster-name: cluster-1
  resources:
  - kind: ConfigMap
    name: calico-cni
  - kind: ConfigMap
    name: nginx-ingress
---
apiVersion: v1
kind: ConfigMap
metadata:
  creationTimestamp: null
  name: calico-cni
  namespace: k8s
data:
  calico.yaml: |+
    ---
    # Source: calico/templates/calico-config.yaml
    # This ConfigMap is used to configure a self-hosted Calico installation.
    kind: ConfigMap
    apiVersion: v1
    metadata:
      name: calico-config
      namespace: kube-system
    ...
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: nginx-ingress
  namespace: k8s
data:
  deploy.yaml: |+
    ---
    apiVersion: v1
    kind: Namespace
    metadata:
      name: ingress-nginx
      labels:
        app.kubernetes.io/name: ingress-nginx
        app.kubernetes.io/instance: ingress-nginx
    ...
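Rather than hand-writing those (large) ConfigMaps, one way to produce them is to download the upstream manifests and wrap them with kubectl create configmap; the key defaults to the file name, which is what the ClusterResourceSet expects:

$ curl -L https://docs.projectcalico.org/v3.15/manifests/calico.yaml -o calico.yaml
$ kubectl --namespace=k8s create configmap calico-cni --from-file=calico.yaml
$ curl -L https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v0.41.2/deploy/static/provider/aws/deploy.yaml -o deploy.yaml
$ kubectl --namespace=k8s create configmap nginx-ingress --from-file=deploy.yaml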
If you prefer not to use ClusterResourceSets, you can apply the same add-ons by hand once you have the workload cluster's kubeconfig (retrieved below):

$ kubectl --kubeconfig=./cluster-1.kubeconfig \
    apply -f https://docs.projectcalico.org/v3.15/manifests/calico.yaml
$ kubectl --kubeconfig=./cluster-1.kubeconfig \
    apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v0.41.2/deploy/static/provider/aws/deploy.yaml
The Cluster object ties everything together: it defines the pod and service CIDRs and references the KubeadmControlPlane and AWSCluster. The cluster.x-k8s.io/cluster-name label is what the ClusterResourceSet's selector matches on:

---
apiVersion: cluster.x-k8s.io/v1alpha3
kind: Cluster
metadata:
  name: cluster-1
  namespace: k8s
  labels:
    cluster.x-k8s.io/cluster-name: cluster-1
spec:
  clusterNetwork:
    services:
      cidrBlocks:
      - 172.30.0.0/16
    pods:
      cidrBlocks:
      - 10.128.0.0/14
  controlPlaneRef:
    apiVersion: controlplane.cluster.x-k8s.io/v1alpha3
    kind: KubeadmControlPlane
    name: cluster-1-control-plane
  infrastructureRef:
    apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3
    kind: AWSCluster
    name: cluster-1
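After applying the manifests, you can watch the provisioning from the management cluster:

$ kubectl --namespace=k8s get cluster cluster-1
$ kubectl --namespace=k8s get machinedeployments,machines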
Scaling the workload cluster up or down is a matter of scaling the Cluster API objects on the management cluster:

$ kubectl --namespace=k8s scale kubeadmcontrolplane cluster-1-control-plane --replicas=1
$ kubectl --namespace=k8s scale machinedeployment cluster-1-data-plane-0 --replicas=0
The workload cluster's kubeconfig is stored as a secret on the management cluster; extract it and check the nodes:

$ kubectl --namespace=k8s get secret cluster-1-kubeconfig \
    -o jsonpath={.data.value} | base64 --decode > cluster-1.kubeconfig
$ kubectl --kubeconfig=./cluster-1.kubeconfig get nodes
Finally, a MachineHealthCheck remediates broken workers automatically: any machine in nodepool-0 whose Node stays NotReady or Unknown for five minutes is replaced:

---
apiVersion: cluster.x-k8s.io/v1alpha3
kind: MachineHealthCheck
metadata:
  name: cluster-1-node-unhealthy-5m
  namespace: k8s
spec:
  # clusterName is required to associate this MachineHealthCheck with a particular cluster
  clusterName: cluster-1
  # (Optional) maxUnhealthy prevents further remediation if the cluster is already partially unhealthy
  maxUnhealthy: 40%
  # (Optional) nodeStartupTimeout determines how long a MachineHealthCheck should wait for
  # a Node to join the cluster, before considering a Machine unhealthy
  nodeStartupTimeout: 10m
  # selector is used to determine which Machines should be health checked
  selector:
    matchLabels:
      nodepool: nodepool-0
  # Conditions to check on Nodes for matched Machines; if any condition is matched
  # for the duration of its timeout, the Machine is considered unhealthy
  unhealthyConditions:
  - type: Ready
    status: Unknown
    timeout: 300s
  - type: Ready
    status: "False"
    timeout: 300s
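Once applied, the MachineHealthCheck reports how many of the matched machines are currently healthy:

$ kubectl --namespace=k8s get machinehealthchecks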
