summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--recipes-containers/k3s/README.md208
1 files changed, 208 insertions, 0 deletions
diff --git a/recipes-containers/k3s/README.md b/recipes-containers/k3s/README.md
index 3fe5ccd1..e4cb3e33 100644
--- a/recipes-containers/k3s/README.md
+++ b/recipes-containers/k3s/README.md
@@ -28,3 +28,211 @@ k3s-agent -t <token> -s https://<master>:6443
28 28
29(Here `<token>` is found in `/var/lib/rancher/k3s/server/node-token` at the
30k3s master.)
31
32Example:
33```shell
34k3s-agent -t /var/lib/rancher/k3s/server/node-token -s https://localhost:6443
35```
36
37## Notes:
38
39If running under qemu, the default of 256M of memory is not enough; k3s will
40OOM and exit.
41
42Boot with qemuparams="-m 2048" to boot with 2G of memory (or choose the
43appropriate amount for your configuration)
44
45Disk: if using qemu and a core-image* image, you'll need to add extra space to your
46disk to ensure containers can start. Adding the following to your image recipe or
47local.conf will add 2G of extra space to the rootfs:
48
49```shell
50IMAGE_ROOTFS_EXTRA_SPACE = "2097152"
51```
52
53## Example output from qemux86-64 running k3s server:
54
55```shell
56root@qemux86-64:~# kubectl get nodes
57NAME STATUS ROLES AGE VERSION
58qemux86-64 Ready master 46s v1.18.9-k3s1
59
60
61root@qemux86-64:~# kubectl get pods -n kube-system
62NAME READY STATUS RESTARTS AGE
63local-path-provisioner-6d59f47c7-h7lxk 1/1 Running 0 2m32s
64metrics-server-7566d596c8-mwntr 1/1 Running 0 2m32s
65helm-install-traefik-229v7 0/1 Completed 0 2m32s
66coredns-7944c66d8d-9rfj7 1/1 Running 0 2m32s
67svclb-traefik-pb5j4 2/2 Running 0 89s
68traefik-758cd5fc85-lxpr8 1/1 Running 0 89s
69
70root@qemux86-64:~# kubectl describe pods -n kube-system
71
72root@qemux86-64:~# ip a s
731: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
74 link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
75 inet 127.0.0.1/8 scope host lo
76 valid_lft forever preferred_lft forever
77 inet6 ::1/128 scope host
78 valid_lft forever preferred_lft forever
792: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
80 link/ether 52:54:00:12:35:02 brd ff:ff:ff:ff:ff:ff
81 inet 10.0.2.15/24 brd 10.0.2.255 scope global eth0
82 valid_lft forever preferred_lft forever
83 inet6 fec0::5054:ff:fe12:3502/64 scope site dynamic mngtmpaddr
84 valid_lft 86239sec preferred_lft 14239sec
85 inet6 fe80::5054:ff:fe12:3502/64 scope link
86 valid_lft forever preferred_lft forever
873: sit0@NONE: <NOARP> mtu 1480 qdisc noop state DOWN group default qlen 1000
88 link/sit 0.0.0.0 brd 0.0.0.0
894: flannel.1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 qdisc noqueue state UNKNOWN group default
90 link/ether e2:aa:04:89:e6:0a brd ff:ff:ff:ff:ff:ff
91 inet 10.42.0.0/32 brd 10.42.0.0 scope global flannel.1
92 valid_lft forever preferred_lft forever
93 inet6 fe80::e0aa:4ff:fe89:e60a/64 scope link
94 valid_lft forever preferred_lft forever
955: docker0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue state DOWN group default
96 link/ether 02:42:be:3e:25:e7 brd ff:ff:ff:ff:ff:ff
97 inet 172.17.0.1/16 brd 172.17.255.255 scope global docker0
98 valid_lft forever preferred_lft forever
996: cni0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 qdisc noqueue state UP group default qlen 1000
100 link/ether 82:8e:b4:f8:06:e7 brd ff:ff:ff:ff:ff:ff
101 inet 10.42.0.1/24 brd 10.42.0.255 scope global cni0
102 valid_lft forever preferred_lft forever
103 inet6 fe80::808e:b4ff:fef8:6e7/64 scope link
104 valid_lft forever preferred_lft forever
1057: veth82ac482e@if4: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 qdisc noqueue master cni0 state UP group default
106 link/ether ea:9d:14:c1:00:70 brd ff:ff:ff:ff:ff:ff link-netns cni-c52e6e09-f6e0-a47b-aea3-d6c47d3e2d01
107 inet6 fe80::e89d:14ff:fec1:70/64 scope link
108 valid_lft forever preferred_lft forever
1098: vethb94745ed@if4: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 qdisc noqueue master cni0 state UP group default
110 link/ether 1e:7f:7e:d3:ca:e8 brd ff:ff:ff:ff:ff:ff link-netns cni-86958efe-2462-016f-292d-81dbccc16a83
111 inet6 fe80::8046:3cff:fe23:ced1/64 scope link
112 valid_lft forever preferred_lft forever
1139: veth81ffb276@if4: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 qdisc noqueue master cni0 state UP group default
114 link/ether 2a:1d:48:54:76:50 brd ff:ff:ff:ff:ff:ff link-netns cni-5d77238e-6452-4fa3-40d2-91d48386080b
115 inet6 fe80::acf4:7fff:fe11:b6f2/64 scope link
116 valid_lft forever preferred_lft forever
11710: vethce261f6a@if4: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 qdisc noqueue master cni0 state UP group default
118 link/ether 72:a3:90:4a:c5:12 brd ff:ff:ff:ff:ff:ff link-netns cni-55675948-77f2-a952-31ce-615f2bdb0093
119 inet6 fe80::4d5:1bff:fe5d:db3a/64 scope link
120 valid_lft forever preferred_lft forever
12111: vethee199cf4@if4: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 qdisc noqueue master cni0 state UP group default
122 link/ether e6:90:a4:a3:bc:a1 brd ff:ff:ff:ff:ff:ff link-netns cni-4aeccd16-2976-8a78-b2c4-e028da3bb1ea
123 inet6 fe80::c85a:8bff:fe0b:aea0/64 scope link
124 valid_lft forever preferred_lft forever
125
126
127root@qemux86-64:~# kubectl describe nodes
128
129Name: qemux86-64
130Roles: master
131Labels: beta.kubernetes.io/arch=amd64
132 beta.kubernetes.io/instance-type=k3s
133 beta.kubernetes.io/os=linux
134 k3s.io/hostname=qemux86-64
135 k3s.io/internal-ip=10.0.2.15
136 kubernetes.io/arch=amd64
137 kubernetes.io/hostname=qemux86-64
138 kubernetes.io/os=linux
139 node-role.kubernetes.io/master=true
140 node.kubernetes.io/instance-type=k3s
141Annotations: flannel.alpha.coreos.com/backend-data: {"VtepMAC":"2e:52:6a:1b:76:d4"}
142 flannel.alpha.coreos.com/backend-type: vxlan
143 flannel.alpha.coreos.com/kube-subnet-manager: true
144 flannel.alpha.coreos.com/public-ip: 10.0.2.15
145 k3s.io/node-args: ["server"]
146 k3s.io/node-config-hash: MLFMUCBMRVINLJJKSG32TOUFWB4CN55GMSNY25AZPESQXZCYRN2A====
147 k3s.io/node-env: {}
148 node.alpha.kubernetes.io/ttl: 0
149 volumes.kubernetes.io/controller-managed-attach-detach: true
150CreationTimestamp: Tue, 10 Nov 2020 14:01:28 +0000
151Taints: <none>
152Unschedulable: false
153Lease:
154 HolderIdentity: qemux86-64
155 AcquireTime: <unset>
156 RenewTime: Tue, 10 Nov 2020 14:56:27 +0000
157Conditions:
158 Type Status LastHeartbeatTime LastTransitionTime Reason Message
159 ---- ------ ----------------- ------------------ ------ -------
160 NetworkUnavailable False Tue, 10 Nov 2020 14:43:46 +0000 Tue, 10 Nov 2020 14:43:46 +0000 FlannelIsUp Flannel is running on this node
161 MemoryPressure False Tue, 10 Nov 2020 14:51:48 +0000 Tue, 10 Nov 2020 14:45:46 +0000 KubeletHasSufficientMemory kubelet has sufficient memory available
162 DiskPressure False Tue, 10 Nov 2020 14:51:48 +0000 Tue, 10 Nov 2020 14:45:46 +0000 KubeletHasNoDiskPressure kubelet has no disk pressure
163 PIDPressure False Tue, 10 Nov 2020 14:51:48 +0000 Tue, 10 Nov 2020 14:45:46 +0000 KubeletHasSufficientPID kubelet has sufficient PID available
164 Ready True Tue, 10 Nov 2020 14:51:48 +0000 Tue, 10 Nov 2020 14:45:46 +0000 KubeletReady kubelet is posting ready status
165Addresses:
166 InternalIP: 10.0.2.15
167 Hostname: qemux86-64
168Capacity:
169 cpu: 1
170 ephemeral-storage: 39748144Ki
171 memory: 2040164Ki
172 pods: 110
173Allocatable:
174 cpu: 1
175 ephemeral-storage: 38666994453
176 memory: 2040164Ki
177 pods: 110
178System Info:
179 Machine ID: 6a4abfacbf83457e9a0cbb5777457c5d
180 System UUID: 6a4abfacbf83457e9a0cbb5777457c5d
181 Boot ID: f5ddf6c8-1abf-4aef-9e29-106488e3c337
182 Kernel Version: 5.8.13-yocto-standard
183 OS Image: Poky (Yocto Project Reference Distro) 3.2+snapshot-20201105 (master)
184 Operating System: linux
185 Architecture: amd64
186 Container Runtime Version: containerd://1.4.1-4-ge44e8ebea.m
187 Kubelet Version: v1.18.9-k3s1
188 Kube-Proxy Version: v1.18.9-k3s1
189PodCIDR: 10.42.0.0/24
190PodCIDRs: 10.42.0.0/24
191ProviderID: k3s://qemux86-64
192Non-terminated Pods: (5 in total)
193 Namespace Name CPU Requests CPU Limits Memory Requests Memory Limits AGE
194 --------- ---- ------------ ---------- --------------- ------------- ---
195 kube-system svclb-traefik-jpmnd 0 (0%) 0 (0%) 0 (0%) 0 (0%) 54m
196 kube-system metrics-server-7566d596c8-wh29d 0 (0%) 0 (0%) 0 (0%) 0 (0%) 56m
197 kube-system local-path-provisioner-6d59f47c7-npn4d 0 (0%) 0 (0%) 0 (0%) 0 (0%) 56m
198 kube-system coredns-7944c66d8d-md8hr 100m (10%) 0 (0%) 70Mi (3%) 170Mi (8%) 56m
199 kube-system traefik-758cd5fc85-phjr2 0 (0%) 0 (0%) 0 (0%) 0 (0%) 54m
200Allocated resources:
201 (Total limits may be over 100 percent, i.e., overcommitted.)
202 Resource Requests Limits
203 -------- -------- ------
204 cpu 100m (10%) 0 (0%)
205 memory 70Mi (3%) 170Mi (8%)
206 ephemeral-storage 0 (0%) 0 (0%)
207Events:
208 Type Reason Age From Message
209 ---- ------ ---- ---- -------
210 Normal Starting 56m kube-proxy Starting kube-proxy.
211 Normal Starting 55m kubelet Starting kubelet.
212 Warning InvalidDiskCapacity 55m kubelet invalid capacity 0 on image filesystem
213 Normal NodeHasSufficientPID 55m (x2 over 55m) kubelet Node qemux86-64 status is now: NodeHasSufficientPID
214 Normal NodeHasSufficientMemory 55m (x2 over 55m) kubelet Node qemux86-64 status is now: NodeHasSufficientMemory
215 Normal NodeHasNoDiskPressure 55m (x2 over 55m) kubelet Node qemux86-64 status is now: NodeHasNoDiskPressure
216 Normal NodeAllocatableEnforced 55m kubelet Updated Node Allocatable limit across pods
217 Normal NodeReady 54m kubelet Node qemux86-64 status is now: NodeReady
218 Normal Starting 52m kube-proxy Starting kube-proxy.
219 Normal NodeReady 50m kubelet Node qemux86-64 status is now: NodeReady
220 Normal NodeAllocatableEnforced 50m kubelet Updated Node Allocatable limit across pods
221 Warning Rebooted 50m kubelet Node qemux86-64 has been rebooted, boot id: a4e4d2d8-ddb4-49b8-b0a9-e81d12707113
222 Normal NodeHasSufficientMemory 50m (x2 over 50m) kubelet Node qemux86-64 status is now: NodeHasSufficientMemory
223 Normal Starting 50m kubelet Starting kubelet.
224 Normal NodeHasSufficientPID 50m (x2 over 50m) kubelet Node qemux86-64 status is now: NodeHasSufficientPID
225 Normal NodeHasNoDiskPressure 50m (x2 over 50m) kubelet Node qemux86-64 status is now: NodeHasNoDiskPressure
226 Normal NodeNotReady 17m kubelet Node qemux86-64 status is now: NodeNotReady
227 Warning InvalidDiskCapacity 15m (x2 over 50m) kubelet invalid capacity 0 on image filesystem
228 Normal Starting 12m kube-proxy Starting kube-proxy.
229 Normal Starting 10m kubelet Starting kubelet.
230 Warning InvalidDiskCapacity 10m kubelet invalid capacity 0 on image filesystem
231 Normal NodeAllocatableEnforced 10m kubelet Updated Node Allocatable limit across pods
232 Warning Rebooted 10m kubelet Node qemux86-64 has been rebooted, boot id: f5ddf6c8-1abf-4aef-9e29-106488e3c337
233 Normal NodeHasSufficientMemory 10m (x2 over 10m) kubelet Node qemux86-64 status is now: NodeHasSufficientMemory
234 Normal NodeHasNoDiskPressure 10m (x2 over 10m) kubelet Node qemux86-64 status is now: NodeHasNoDiskPressure
235 Normal NodeHasSufficientPID 10m (x2 over 10m) kubelet Node qemux86-64 status is now: NodeHasSufficientPID
236 Normal NodeReady 10m kubelet Node qemux86-64 status is now: NodeReady
237
238```