Maksym Sadovnychyy 2023-06-11 18:49:38 +02:00
commit f0c713c0e0
9 changed files with 513 additions and 0 deletions

.gitignore vendored Normal file

@@ -0,0 +1,3 @@
.ssh/*
.tmp/*
.vagrant/*

Deploy-K8s.ps1 Normal file

@@ -0,0 +1,48 @@
if ($null -eq (Get-VMSwitch -SwitchName "k8s-Switch" -ErrorAction Ignore)) {
New-VMSwitch -SwitchName k8s-Switch -SwitchType Internal
New-NetIPAddress -IPAddress 192.168.99.1 -PrefixLength 24 -InterfaceIndex (Get-NetAdapter | Where-Object {$_.Name -match "k8s-Switch"}).ifIndex -AddressFamily IPv4
}
if($null -eq (Get-NetNat | Where-Object {$_.Name -match "K8s-NATNetwork"})) {
New-NetNAT -Name K8s-NATNetwork -InternalIPInterfaceAddressPrefix 192.168.99.0/24
}
# cmd /c ssh-keygen -t ed25519 -f '$PSScriptRoot\.ssh\id_ed25519' -P ''
# TODO: tighten permissions on the generated key; OpenSSH rejects private keys that are too open.
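# A commented sketch of that fix using icacls (built into Windows): strip inherited ACEs,
# then grant the current user read access only. Left commented, like the keygen step above.
# icacls "$PSScriptRoot\.ssh\id_ed25519" /inheritance:r
# icacls "$PSScriptRoot\.ssh\id_ed25519" /grant:r "$($env:USERNAME):(R)"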
Set-Location $PSScriptRoot
cmd /c "vagrant plugin install --plugin-clean-sources --plugin-source https://rubygems.org vagrant-reload"
cmd /c "vagrant plugin install --plugin-clean-sources --plugin-source https://rubygems.org vagrant-scp"
cmd /c "vagrant up"
# replace with custom ssh private_key file
Get-ChildItem -Path ".\.vagrant\machines" | Foreach-Object {
Rename-Item -Path "$($_.FullName)\hyperv\private_key" -NewName "private_key.bak"
Copy-Item ".\.ssh\id_ed25519" -Destination "$($_.FullName)\hyperv\private_key"
# Tighten permissions on the copied key; OpenSSH rejects private keys that are too open.
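# A minimal sketch of the fix, assuming icacls semantics: remove inherited ACEs, then grant the current user read-only.
icacls "$($_.FullName)\hyperv\private_key" /inheritance:r | Out-Null
icacls "$($_.FullName)\hyperv\private_key" /grant:r "$($env:USERNAME):(R)" | Out-Null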
}
# import configuration in local kubectl
Copy-Item ".\.tmp\config" -Destination "$HOME\.kube\config" -Force
# install argo cd
cmd /c "kubectl create namespace argocd"
cmd /c "kubectl apply -n argocd -f https://raw.githubusercontent.com/argoproj/argo-cd/stable/manifests/install.yaml"
$secret = .\kubectl.exe -n argocd get secret argocd-initial-admin-secret -o jsonpath="{.data.password}"
[Text.Encoding]::Utf8.GetString([Convert]::FromBase64String($secret)) > .\.tmp\argocd-secret.txt
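# The initial ArgoCD login is user "admin", with the decoded password written to .\.tmp\argocd-secret.txt above.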
# install dapr
# Add the official Dapr Helm chart.
cmd /c "helm repo add dapr https://dapr.github.io/helm-charts/"
cmd /c "helm repo update"
# See which chart versions are available
# cmd /c "helm search repo dapr --devel --versions"
# Install in developer mode
cmd /c "helm upgrade --install dapr dapr/dapr --version=1.11 --namespace dapr-system --create-namespace --wait"
cmd /c "helm upgrade --install dapr-dashboard dapr/dapr-dashboard --namespace dapr-dashboard --create-namespace --wait"

README.md Normal file

@@ -0,0 +1,41 @@
# Hyper-V k8s Dev cluster
Installs a Kubernetes development cluster on Hyper-V using Vagrant.
## System requirements
* CPU:
* RAM:
## Default nodes
The following nodes will be installed and configured:
* Master nodes:
* master-1
* Worker nodes:
* worker-1
* worker-2
* worker-3
## Default software provided in this package
* ArgoCD
* DAPR
* kubectl-v1.27.2
* helm-v3.12.0
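## Deploy cluster
Run the deployment script from an elevated PowerShell session (a sketch of the intended entry point; creating the Hyper-V switch and NAT network requires administrator rights):
```powershell
.\Deploy-K8s.ps1
```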
## Test cluster
```powershell
kubectl get nodes
```
## ArgoCD UI
```powershell
kubectl port-forward svc/argocd-server -n argocd 8080:443
```
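Then open https://localhost:8080 and sign in as `admin` with the password that `Deploy-K8s.ps1` saved to `.tmp\argocd-secret.txt`.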
## Dapr UI
```powershell
kubectl port-forward svc/dapr-dashboard -n dapr-dashboard 8081:8080
```
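The dashboard is then available at http://localhost:8081.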

Vagrantfile vendored Normal file

@@ -0,0 +1,127 @@
Vagrant.configure("2") do |config|
config.vm.box = "hashicorp/bionic64"
config.vm.box_check_update = false
NUM_MASTER_NODE = 1
NUM_WORKER_NODE = 3
# Provision core-linux Master Nodes
(1..NUM_MASTER_NODE).each do |i|
config.vm.define "master-#{i}" do |node|
node.vm.provider "hyperv" do |h|
h.memory = 2048
h.cpus = 2
h.vm_integration_services = {
guest_service_interface: true
}
h.vmname = "master-#{i}"
end
node.vm.synced_folder ".", "/vagrant", disabled: true
node.vm.network "public_network", ip: "192.168.99.9#{i}", bridge: "k8s-Switch"
node.vm.hostname = "master-#{i}"
node.vm.network "forwarded_port", guest: 22, host: "273#{i}"
# netplan
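# (01-netcfg.yaml ships with the placeholder address 192.168.99.99; sed below swaps in this node's static IP)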
node.vm.provision "file", source: ".\\kubeadm\\01-netcfg.yaml", destination: "/home/vagrant/01-netcfg.yaml"
node.vm.provision "shell", inline: "sed 's/192.168.99.99/192.168.99.9#{i}/' /home/vagrant/01-netcfg.yaml > /tmp/01-netcfg.yaml"
node.vm.provision "shell", inline: "sudo mv -f /tmp/01-netcfg.yaml /etc/netplan/ -v"
node.vm.provision "shell", inline: "sudo netplan apply"
# add windows 10 friendly ssh pub key
node.vm.provision "file", source: ".\\.ssh\\id_ed25519.pub", destination: "/home/vagrant/id_ed25519.pub"
node.vm.provision "shell", inline: <<-SHELL
cat /home/vagrant/id_ed25519.pub >> /home/vagrant/.ssh/authorized_keys
SHELL
# install applications
node.vm.provision "Running-Kubeadm", type: "shell", :path => "kubeadm/install.sh"
# restart vm
node.vm.provision :reload
# disable swap
node.vm.provision :shell, :inline => "sudo swapoff -a", run: "always"
# kubeadm init
node.vm.provision "shell", inline: "sudo kubeadm init --apiserver-advertise-address 192.168.99.9#{i} --pod-network-cidr=10.244.0.0/16 --ignore-preflight-errors=all >> /home/vagrant/kubeinit.log 2>&1"
node.vm.provision "shell", inline: "sleep 60"
# create .kube/config file
node.vm.provision "shell", inline: "sudo mkdir -p /home/vagrant/.kube"
node.vm.provision "shell", inline: "sudo cp -i -f /etc/kubernetes/admin.conf /home/vagrant/.kube/config"
node.vm.provision "shell", inline: "sudo chown $(id vagrant -u):$(id vagrant -g) /home/vagrant/.kube/config"
node.vm.provision "shell", inline: "sleep 30"
# create join command
node.vm.provision "shell", inline: <<-SHELL
joinCommand=$(kubeadm token create --print-join-command 2>/dev/null)
echo "$joinCommand --ignore-preflight-errors=all" > /home/vagrant/joincluster.sh
SHELL
# kubernetes networking
node.vm.provision "file", source: ".\\kubeadm\\net.yaml", destination: "/home/vagrant/net.yaml"
node.vm.provision "shell", inline: "echo 'sudo kubectl apply -n kube-system -f /home/vagrant/net.yaml' | at now", privileged: false
# copy joincluster.sh and .kube/config locally
$script = <<-SCRIPT
scp -i .ssh\\id_ed25519 -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no vagrant@192.168.99.9#{i}:/home/vagrant/joincluster.sh ./.tmp/
scp -i .ssh\\id_ed25519 -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no vagrant@192.168.99.9#{i}:/home/vagrant/.kube/config ./.tmp/
SCRIPT
node.trigger.after :up do |trigger|
trigger.info = "Copy joincluster.sh and .kube/config locally"
trigger.run = {inline: $script}
end
end
end
# Provision core-linux Worker Nodes
(1..NUM_WORKER_NODE).each do |i|
config.vm.define "worker-#{i}" do |node|
node.vm.provider "hyperv" do |h|
h.memory = 2048
h.cpus = 2
h.vm_integration_services = {
guest_service_interface: true
}
h.vmname = "worker-#{i}"
end
node.vm.synced_folder ".", "/vagrant", disabled: true
node.vm.network "public_network", ip: "192.168.99.8#{i}", bridge: "k8s-Switch"
node.vm.hostname = "worker-#{i}"
node.vm.network "forwarded_port", guest: 22, host: "272#{i}"
# netplan
node.vm.provision "file", source: ".\\kubeadm\\01-netcfg.yaml", destination: "/home/vagrant/"
node.vm.provision "shell", inline: "sed 's/192.168.99.99/192.168.99.8#{i}/' /home/vagrant/01-netcfg.yaml > /tmp/01-netcfg.yaml"
node.vm.provision "shell", inline: "sudo mv -f /tmp/01-netcfg.yaml /etc/netplan/ -v"
node.vm.provision "shell", inline: "sudo netplan apply"
# add windows 10 friendly ssh pub key
node.vm.provision "file", source: ".\\.ssh\\id_ed25519.pub", destination: "/home/vagrant/id_ed25519.pub"
node.vm.provision "shell", inline: <<-SHELL
cat /home/vagrant/id_ed25519.pub >> /home/vagrant/.ssh/authorized_keys
SHELL
# install applications
node.vm.provision "Running Worker#{i}", type: "shell", :path => "kubeadm/install.sh"
# restart vm
node.vm.provision :reload
# disable swap
node.vm.provision "shell", inline: "sudo swapoff -a", run: "always"
# Join worker node
node.vm.provision "Join worker node ", type: "shell", :path => ".tmp/joincluster.sh"
end
end
end

helm.exe Normal file

Binary file not shown.

kubeadm/01-netcfg.yaml Normal file

@@ -0,0 +1,10 @@
network:
version: 2
ethernets:
eth0:
dhcp4: false
addresses:
- 192.168.99.99/24
gateway4: 192.168.99.1
nameservers:
addresses: [1.1.1.1, 8.8.8.8]

kubeadm/install.sh Normal file

@@ -0,0 +1,33 @@
#!/bin/bash
echo "[TASK 1] Installing Docker on Master Node"
sudo apt-get update > /dev/null 2>&1
sudo apt-get install -y docker.io curl apt-transport-https > /dev/null 2>&1
echo "[TASK 2] Update /etc/hosts file"
cat >> /etc/hosts << EOF
192.168.99.91 localhost
192.168.99.91 master-1
192.168.99.81 worker-1
192.168.99.82 worker-2
192.168.99.83 worker-3
EOF
gpasswd -a vagrant root
sudo swapoff -a
sudo systemctl enable docker > /dev/null 2>&1
sudo systemctl start docker > /dev/null 2>&1
echo "[TASK 3] Installing Kubernetes on Master Node"
curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add > /dev/null 2>&1
sudo apt-add-repository "deb http://apt.kubernetes.io/ kubernetes-xenial main" > /dev/null 2>&1
sudo apt update > /dev/null 2>&1
sudo apt-get install -y kubeadm kubelet kubectl --allow-change-held-packages > /dev/null 2>&1
sudo apt-mark hold kubeadm kubelet kubectl > /dev/null 2>&1
sleep 5
sudo systemctl enable kubelet >/dev/null 2>&1
# the kubelet systemd unit on Ubuntu sources KUBELET_EXTRA_ARGS from /etc/default/kubelet
echo 'KUBELET_EXTRA_ARGS="--fail-swap-on=false"' | sudo tee /etc/default/kubelet > /dev/null
sudo systemctl start kubelet >/dev/null 2>&1
kubectl version --output=yaml
echo "Waiting for 10 seconds"
sleep 10

kubeadm/net.yaml Normal file

@@ -0,0 +1,251 @@
apiVersion: v1
kind: List
items:
- apiVersion: v1
kind: ServiceAccount
metadata:
name: weave-net
annotations:
cloud.weave.works/launcher-info: |-
{
"original-request": {
"url": "/k8s/v1.10/net.yaml?k8s-version=Q2xpZW50IFZlcnNpb246IHZlcnNpb24uSW5mb3tNYWpvcjoiMSIsIE1pbm9yOiIxNyIsIEdpdFZlcnNpb246InYxLjE3LjAiLCBHaXRDb21taXQ6IjcwMTMyYjBmMTMwYWNjMGJlZDE5M2Q5YmE1OWRkMTg2ZjBlNjM0Y2YiLCBHaXRUcmVlU3RhdGU6ImNsZWFuIiwgQnVpbGREYXRlOiIyMDE5LTEyLTA3VDIxOjIwOjEwWiIsIEdvVmVyc2lvbjoiZ28xLjEzLjQiLCBDb21waWxlcjoiZ2MiLCBQbGF0Zm9ybToibGludXgvYW1kNjQifQpTZXJ2ZXIgVmVyc2lvbjogdmVyc2lvbi5JbmZve01ham9yOiIxIiwgTWlub3I6IjE3IiwgR2l0VmVyc2lvbjoidjEuMTcuMCIsIEdpdENvbW1pdDoiNzAxMzJiMGYxMzBhY2MwYmVkMTkzZDliYTU5ZGQxODZmMGU2MzRjZiIsIEdpdFRyZWVTdGF0ZToiY2xlYW4iLCBCdWlsZERhdGU6IjIwMTktMTItMDdUMjE6MTI6MTdaIiwgR29WZXJzaW9uOiJnbzEuMTMuNCIsIENvbXBpbGVyOiJnYyIsIFBsYXRmb3JtOiJsaW51eC9hbWQ2NCJ9Cg==",
"date": "Sat Dec 28 2019 08:49:37 GMT+0000 (UTC)"
},
"email-address": "support@weave.works"
}
labels:
name: weave-net
namespace: kube-system
- apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: weave-net
annotations:
cloud.weave.works/launcher-info: |-
{
"original-request": {
"url": "/k8s/v1.10/net.yaml?k8s-version=Q2xpZW50IFZlcnNpb246IHZlcnNpb24uSW5mb3tNYWpvcjoiMSIsIE1pbm9yOiIxNyIsIEdpdFZlcnNpb246InYxLjE3LjAiLCBHaXRDb21taXQ6IjcwMTMyYjBmMTMwYWNjMGJlZDE5M2Q5YmE1OWRkMTg2ZjBlNjM0Y2YiLCBHaXRUcmVlU3RhdGU6ImNsZWFuIiwgQnVpbGREYXRlOiIyMDE5LTEyLTA3VDIxOjIwOjEwWiIsIEdvVmVyc2lvbjoiZ28xLjEzLjQiLCBDb21waWxlcjoiZ2MiLCBQbGF0Zm9ybToibGludXgvYW1kNjQifQpTZXJ2ZXIgVmVyc2lvbjogdmVyc2lvbi5JbmZve01ham9yOiIxIiwgTWlub3I6IjE3IiwgR2l0VmVyc2lvbjoidjEuMTcuMCIsIEdpdENvbW1pdDoiNzAxMzJiMGYxMzBhY2MwYmVkMTkzZDliYTU5ZGQxODZmMGU2MzRjZiIsIEdpdFRyZWVTdGF0ZToiY2xlYW4iLCBCdWlsZERhdGU6IjIwMTktMTItMDdUMjE6MTI6MTdaIiwgR29WZXJzaW9uOiJnbzEuMTMuNCIsIENvbXBpbGVyOiJnYyIsIFBsYXRmb3JtOiJsaW51eC9hbWQ2NCJ9Cg==",
"date": "Sat Dec 28 2019 08:49:37 GMT+0000 (UTC)"
},
"email-address": "support@weave.works"
}
labels:
name: weave-net
rules:
- apiGroups:
- ''
resources:
- pods
- namespaces
- nodes
verbs:
- get
- list
- watch
- apiGroups:
- networking.k8s.io
resources:
- networkpolicies
verbs:
- get
- list
- watch
- apiGroups:
- ''
resources:
- nodes/status
verbs:
- patch
- update
- apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: weave-net
annotations:
cloud.weave.works/launcher-info: |-
{
"original-request": {
"url": "/k8s/v1.10/net.yaml?k8s-version=Q2xpZW50IFZlcnNpb246IHZlcnNpb24uSW5mb3tNYWpvcjoiMSIsIE1pbm9yOiIxNyIsIEdpdFZlcnNpb246InYxLjE3LjAiLCBHaXRDb21taXQ6IjcwMTMyYjBmMTMwYWNjMGJlZDE5M2Q5YmE1OWRkMTg2ZjBlNjM0Y2YiLCBHaXRUcmVlU3RhdGU6ImNsZWFuIiwgQnVpbGREYXRlOiIyMDE5LTEyLTA3VDIxOjIwOjEwWiIsIEdvVmVyc2lvbjoiZ28xLjEzLjQiLCBDb21waWxlcjoiZ2MiLCBQbGF0Zm9ybToibGludXgvYW1kNjQifQpTZXJ2ZXIgVmVyc2lvbjogdmVyc2lvbi5JbmZve01ham9yOiIxIiwgTWlub3I6IjE3IiwgR2l0VmVyc2lvbjoidjEuMTcuMCIsIEdpdENvbW1pdDoiNzAxMzJiMGYxMzBhY2MwYmVkMTkzZDliYTU5ZGQxODZmMGU2MzRjZiIsIEdpdFRyZWVTdGF0ZToiY2xlYW4iLCBCdWlsZERhdGU6IjIwMTktMTItMDdUMjE6MTI6MTdaIiwgR29WZXJzaW9uOiJnbzEuMTMuNCIsIENvbXBpbGVyOiJnYyIsIFBsYXRmb3JtOiJsaW51eC9hbWQ2NCJ9Cg==",
"date": "Sat Dec 28 2019 08:49:37 GMT+0000 (UTC)"
},
"email-address": "support@weave.works"
}
labels:
name: weave-net
roleRef:
kind: ClusterRole
name: weave-net
apiGroup: rbac.authorization.k8s.io
subjects:
- kind: ServiceAccount
name: weave-net
namespace: kube-system
- apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: weave-net
annotations:
cloud.weave.works/launcher-info: |-
{
"original-request": {
"url": "/k8s/v1.10/net.yaml?k8s-version=Q2xpZW50IFZlcnNpb246IHZlcnNpb24uSW5mb3tNYWpvcjoiMSIsIE1pbm9yOiIxNyIsIEdpdFZlcnNpb246InYxLjE3LjAiLCBHaXRDb21taXQ6IjcwMTMyYjBmMTMwYWNjMGJlZDE5M2Q5YmE1OWRkMTg2ZjBlNjM0Y2YiLCBHaXRUcmVlU3RhdGU6ImNsZWFuIiwgQnVpbGREYXRlOiIyMDE5LTEyLTA3VDIxOjIwOjEwWiIsIEdvVmVyc2lvbjoiZ28xLjEzLjQiLCBDb21waWxlcjoiZ2MiLCBQbGF0Zm9ybToibGludXgvYW1kNjQifQpTZXJ2ZXIgVmVyc2lvbjogdmVyc2lvbi5JbmZve01ham9yOiIxIiwgTWlub3I6IjE3IiwgR2l0VmVyc2lvbjoidjEuMTcuMCIsIEdpdENvbW1pdDoiNzAxMzJiMGYxMzBhY2MwYmVkMTkzZDliYTU5ZGQxODZmMGU2MzRjZiIsIEdpdFRyZWVTdGF0ZToiY2xlYW4iLCBCdWlsZERhdGU6IjIwMTktMTItMDdUMjE6MTI6MTdaIiwgR29WZXJzaW9uOiJnbzEuMTMuNCIsIENvbXBpbGVyOiJnYyIsIFBsYXRmb3JtOiJsaW51eC9hbWQ2NCJ9Cg==",
"date": "Sat Dec 28 2019 08:49:37 GMT+0000 (UTC)"
},
"email-address": "support@weave.works"
}
labels:
name: weave-net
namespace: kube-system
rules:
- apiGroups:
- ''
resourceNames:
- weave-net
resources:
- configmaps
verbs:
- get
- update
- apiGroups:
- ''
resources:
- configmaps
verbs:
- create
- apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: weave-net
annotations:
cloud.weave.works/launcher-info: |-
{
"original-request": {
"url": "/k8s/v1.10/net.yaml?k8s-version=Q2xpZW50IFZlcnNpb246IHZlcnNpb24uSW5mb3tNYWpvcjoiMSIsIE1pbm9yOiIxNyIsIEdpdFZlcnNpb246InYxLjE3LjAiLCBHaXRDb21taXQ6IjcwMTMyYjBmMTMwYWNjMGJlZDE5M2Q5YmE1OWRkMTg2ZjBlNjM0Y2YiLCBHaXRUcmVlU3RhdGU6ImNsZWFuIiwgQnVpbGREYXRlOiIyMDE5LTEyLTA3VDIxOjIwOjEwWiIsIEdvVmVyc2lvbjoiZ28xLjEzLjQiLCBDb21waWxlcjoiZ2MiLCBQbGF0Zm9ybToibGludXgvYW1kNjQifQpTZXJ2ZXIgVmVyc2lvbjogdmVyc2lvbi5JbmZve01ham9yOiIxIiwgTWlub3I6IjE3IiwgR2l0VmVyc2lvbjoidjEuMTcuMCIsIEdpdENvbW1pdDoiNzAxMzJiMGYxMzBhY2MwYmVkMTkzZDliYTU5ZGQxODZmMGU2MzRjZiIsIEdpdFRyZWVTdGF0ZToiY2xlYW4iLCBCdWlsZERhdGU6IjIwMTktMTItMDdUMjE6MTI6MTdaIiwgR29WZXJzaW9uOiJnbzEuMTMuNCIsIENvbXBpbGVyOiJnYyIsIFBsYXRmb3JtOiJsaW51eC9hbWQ2NCJ9Cg==",
"date": "Sat Dec 28 2019 08:49:37 GMT+0000 (UTC)"
},
"email-address": "support@weave.works"
}
labels:
name: weave-net
namespace: kube-system
roleRef:
kind: Role
name: weave-net
apiGroup: rbac.authorization.k8s.io
subjects:
- kind: ServiceAccount
name: weave-net
namespace: kube-system
- apiVersion: apps/v1
kind: DaemonSet
metadata:
name: weave-net
annotations:
cloud.weave.works/launcher-info: |-
{
"original-request": {
"url": "/k8s/v1.10/net.yaml?k8s-version=Q2xpZW50IFZlcnNpb246IHZlcnNpb24uSW5mb3tNYWpvcjoiMSIsIE1pbm9yOiIxNyIsIEdpdFZlcnNpb246InYxLjE3LjAiLCBHaXRDb21taXQ6IjcwMTMyYjBmMTMwYWNjMGJlZDE5M2Q5YmE1OWRkMTg2ZjBlNjM0Y2YiLCBHaXRUcmVlU3RhdGU6ImNsZWFuIiwgQnVpbGREYXRlOiIyMDE5LTEyLTA3VDIxOjIwOjEwWiIsIEdvVmVyc2lvbjoiZ28xLjEzLjQiLCBDb21waWxlcjoiZ2MiLCBQbGF0Zm9ybToibGludXgvYW1kNjQifQpTZXJ2ZXIgVmVyc2lvbjogdmVyc2lvbi5JbmZve01ham9yOiIxIiwgTWlub3I6IjE3IiwgR2l0VmVyc2lvbjoidjEuMTcuMCIsIEdpdENvbW1pdDoiNzAxMzJiMGYxMzBhY2MwYmVkMTkzZDliYTU5ZGQxODZmMGU2MzRjZiIsIEdpdFRyZWVTdGF0ZToiY2xlYW4iLCBCdWlsZERhdGU6IjIwMTktMTItMDdUMjE6MTI6MTdaIiwgR29WZXJzaW9uOiJnbzEuMTMuNCIsIENvbXBpbGVyOiJnYyIsIFBsYXRmb3JtOiJsaW51eC9hbWQ2NCJ9Cg==",
"date": "Sat Dec 28 2019 08:49:37 GMT+0000 (UTC)"
},
"email-address": "support@weave.works"
}
labels:
name: weave-net
namespace: kube-system
spec:
minReadySeconds: 5
selector:
matchLabels:
name: weave-net
template:
metadata:
labels:
name: weave-net
spec:
containers:
- name: weave
command:
- /home/weave/launch.sh
env:
- name: HOSTNAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
image: 'docker.io/weaveworks/weave-kube:2.6.0'
readinessProbe:
httpGet:
host: 127.0.0.1
path: /status
port: 6784
resources:
requests:
cpu: 10m
securityContext:
privileged: true
volumeMounts:
- name: weavedb
mountPath: /weavedb
- name: cni-bin
mountPath: /host/opt
- name: cni-bin2
mountPath: /host/home
- name: cni-conf
mountPath: /host/etc
- name: dbus
mountPath: /host/var/lib/dbus
- name: lib-modules
mountPath: /lib/modules
- name: xtables-lock
mountPath: /run/xtables.lock
- name: weave-npc
env:
- name: HOSTNAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
image: 'docker.io/weaveworks/weave-npc:2.6.0'
resources:
requests:
cpu: 10m
securityContext:
privileged: true
volumeMounts:
- name: xtables-lock
mountPath: /run/xtables.lock
hostNetwork: true
hostPID: true
restartPolicy: Always
securityContext:
seLinuxOptions: {}
serviceAccountName: weave-net
tolerations:
- effect: NoSchedule
operator: Exists
volumes:
- name: weavedb
hostPath:
path: /var/lib/weave
- name: cni-bin
hostPath:
path: /opt
- name: cni-bin2
hostPath:
path: /home
- name: cni-conf
hostPath:
path: /etc
- name: dbus
hostPath:
path: /var/lib/dbus
- name: lib-modules
hostPath:
path: /lib/modules
- name: xtables-lock
hostPath:
path: /run/xtables.lock
type: FileOrCreate
updateStrategy:
type: RollingUpdate

kubectl.exe Normal file

Binary file not shown.