From eeb28fcfbb693e00df2f5d1fd100dcbb548179fc Mon Sep 17 00:00:00 2001 From: Miruna Paun Date: Fri, 30 Jun 2017 17:35:58 +0200 Subject: Updated remaing content save for benchmarks LXCR-7844 Signed-off-by: Miruna Paun --- .../doc/benchmarks.xml | 2 +- .../doc/container_virtualization.xml | 136 ++++++- .../doc/getting_started.xml | 214 ++++++++++- .../doc/hypervisor_virtualization.xml | 413 +++++++++++++++++++++ 4 files changed, 759 insertions(+), 6 deletions(-) diff --git a/doc/book-enea-nfv-access-platform-guide/doc/benchmarks.xml b/doc/book-enea-nfv-access-platform-guide/doc/benchmarks.xml index 0db4fa4..5d6e268 100644 --- a/doc/book-enea-nfv-access-platform-guide/doc/benchmarks.xml +++ b/doc/book-enea-nfv-access-platform-guide/doc/benchmarks.xml @@ -1,7 +1,7 @@ - + Benchmarks diff --git a/doc/book-enea-nfv-access-platform-guide/doc/container_virtualization.xml b/doc/book-enea-nfv-access-platform-guide/doc/container_virtualization.xml index 6f74061..c6ce223 100644 --- a/doc/book-enea-nfv-access-platform-guide/doc/container_virtualization.xml +++ b/doc/book-enea-nfv-access-platform-guide/doc/container_virtualization.xml @@ -1,8 +1,138 @@ - + - + Container Virtualization - +
+ Docker + + Docker is an open-source project that automates the deployment of + applications inside software containers, by providing an additional layer + of abstraction and automation of operating-system-level virtualization on + Linux. + + The software container mechanism uses resource isolation features + inside the Linux kernel, such as cgroups and kernel namespaces to allow + multiple containers to run within a single Linux instance, avoiding the + overhead of starting and maintaining virtual machines. + + Containers are lightweight and include everything needed to run + themselves: code, runtime, system tools, system libraries and settings. + The main advantage provided by containers is that the encapsulated + software is isolated from its surroundings. For example, differences + between development and staging environments can be kept separate in order + to reduce conflicts between teams running different software on the same + infrastructure. + + For a better understanding of what Docker is and how it works, the + official documentation provided on the Docker website should be consulted: + https://docs.docker.com/. + +
+ Launching a Docker container + + Docker provides a hello-world container which checks whether your + system is running the daemon correctly. This container can be launched + by simply running: + + >docker run hello-world + +Hello from Docker! + + This message shows that your installation appears to be working + correctly. +
+ +
+ Run an Enea NFV Access Platform guest image + + Enea NFV Access Platform guest images can run inside Docker as any + other container can. Before starting an Enea NFV Access Platform guest + image, a root filesystem has to be imported in Docker: + + >docker import enea-linux-virtualization-guest-x86-64.tar.gz el7guest + + To check that the Docker image has been imported successfully, + run: + + >docker images + + Finally, start an Enea NFV Access Platform container with + bash running as the shell, by running: + + >docker run -it el7guest /bin/bash +
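+      When you are done, standard Docker housekeeping applies. For
+      example, to list containers (including stopped ones) and remove one
+      that is no longer needed:
+
+>docker ps -a
+>docker rm <container_id>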
+ +
+      Attach external resources to Docker containers
+
+      Any system resource present on the host machine can be attached to,
+      or accessed by, a Docker container.
+
+      Typically, if a file or folder on the host machine needs to be
+      attached to a container, that container should be launched with the
+      -v parameter. For example, to attach the home folder of
+      root to a container, the Docker command line
+      should have the following format:
+
+>docker run -it -v /home/root:/home/host_root/ el7guest /bin/bash
+
+      To check that the folder has been properly passed from the host to
+      the container, create a file in the source folder on the host root
+      filesystem and check for its existence inside the container's
+      destination location.
+
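+      A minimal way to perform this check, using an illustrative file
+      name (testfile), is to create the file on the host
+      and then list the mapped folder from inside the running
+      container:
+
+>touch /home/root/testfile          (on the host)
+>ls /home/host_root/                (inside the container)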
+      Attach vhost file descriptors
+
+      If OVS is running on the host and vhost file descriptors need to
+      be passed to the container, this can be done by either mapping the
+      folder where all the file descriptors are located, or by mapping the
+      file descriptor itself:
+
+        Mapping the folder can be done as exemplified above:
+
+>docker run -it --rm -v /var/run/openvswitch/:/var/run/openvswitch/ el7guest /bin/bash
+
+        Mapping a file descriptor is done in a similar way, but the
+        -v flag needs to point directly to it, using the same
+        host and container path:
+
+>docker run -it --rm -v /var/run/openvswitch/vhost-user1:/var/run/openvswitch/vhost-user1 el7guest /bin/bash
+
+
+ +
+      Attach hugepages mount folders
+
+      Hugepages mount folders can also be accessed by a container,
+      similarly to how a plain folder is mapped as shown above.
+
+      For example, if the host system has hugepages mounted in the
+      /mnt/huge location, a container can also access
+      hugepages by being launched with:
+
+>docker run -it -v /mnt/huge:/mnt/huge el7guest /bin/bash
+
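+      From inside the container, the mapping can be verified by checking
+      that the hugetlbfs mount and the hugepage counters are
+      visible:
+
+>mount | grep huge
+>grep -i huge /proc/meminfo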
+ +
+      Access the PCI bus
+
+      If the host machine has multiple SR-IOV instances (virtual
+      functions) created, a container can access them by being given
+      privileged access to the host system. Unlike folders, PCI devices do
+      not have to be mounted explicitly in order to be accessed; they are
+      available to the container if the --privileged flag
+      is passed on the command line:
+
+>docker run --privileged -it el7guest /bin/bash
+
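+      To confirm that the PCI devices are indeed visible from inside the
+      privileged container, inspect sysfs or, assuming the
+      pciutils package is present in the guest image, use
+      lspci:
+
+>ls /sys/bus/pci/devices/
+>lspci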
+
+
\ No newline at end of file diff --git a/doc/book-enea-nfv-access-platform-guide/doc/getting_started.xml b/doc/book-enea-nfv-access-platform-guide/doc/getting_started.xml index b534e20..aa0dcb5 100644 --- a/doc/book-enea-nfv-access-platform-guide/doc/getting_started.xml +++ b/doc/book-enea-nfv-access-platform-guide/doc/getting_started.xml @@ -120,8 +120,218 @@
- How to use Prebuilt Artifacts + How to use the Prebuilt Artifacts - +
+      Booting Enea NFV Access Platform using RAMDISK
+
+      There may be use cases, especially at first target ramp-up, where
+      the HDD/SSD has no partitions and you need to prepare the disks for
+      boot. Booting from ramdisk can help with this task.
+
+      The prerequisites needed to proceed:
+
+        Enea Linux ext4 rootfs image -
+        enea-image-virtualization-host-inteld1521.ext4
+
+        Enea Linux kernel image - bzImage
+
+        BIOS has PXE boot enabled
+
+        PXE/TFTP server configured and connected (Ethernet) to the
+        target.
+
+      Copy the bzImage and enea-image-virtualization-host-inteld1521.ext4.gz
+      images to the TFTP server configured for PXE boot.
+
+      Use the following as an example for the PXE configuration
+      file:
+
+default vesamenu.c32
+prompt 1
+timeout 0
+
+label el_ramfs
+  menu label ^EneaLinux_RAMfs
+  kernel bzImage
+  append root=/dev/ram0 initrd=enea-image-virtualization-host-inteld1521.ext4 \
+  ramdisk_size=1200000 console=ttyS0,115200 earlyprintk=ttyS0,115200
+
+      Restart the target, then press F11 to enter the Boot Menu and select
+      the Ethernet interface used for PXE boot. From the PXE Boot Menu select
+      EneaLinux_RAMfs. Once the Enea NFV
+      Access Platform has started, you can partition the HDD/SSD and install
+      GRUB as described in the following section.
+
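+      As a side note, if a PXE/TFTP server still needs to be set up, a
+      single dnsmasq instance can provide both DHCP and TFTP. The snippet
+      below is only a sketch: it assumes dnsmasq is installed on the boot
+      server, that the server interface is eth0 on a
+      192.168.1.0/24 network, and that pxelinux.0 together with the images
+      above are placed under /var/lib/tftpboot:
+
+# /etc/dnsmasq.conf (illustrative values)
+interface=eth0
+dhcp-range=192.168.1.100,192.168.1.200,12h
+dhcp-boot=pxelinux.0
+enable-tftp
+tftp-root=/var/lib/tftpboot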
+ +
+      Partitioning a new hard disk and installing GRUB
+
+      The prerequisites needed:
+
+        grub (grub-efi-bootx64.efi) - available as
+        a pre-built artifact under
+        inteld1521/images/enea-image-virtualization-host.
+
+        e2fsprogs-mke2fs_1.43.4-r0.0_amd64.deb,
+        dosfstools_4.1-r0.0_amd64.deb - available under
+        inteld1521/deb.
+
+      Proceed using the following steps:
+
+        Boot the target with Enea NFV Access Platform from RAMDISK
+
+        Install the prerequisite packages:
+
+> dpkg -i e2fsprogs-mke2fs_1.43.4-r0.0_amd64.deb
+> dpkg -i dosfstools_4.1-r0.0_amd64.deb
+
+        Partition the disk:
+
+> fdisk /dev/sda
+fdisk> g           {GPT partition type}
+fdisk> n
+fdisk> 1
+fdisk> {default start part}
+fdisk> +512M
+fdisk> t
+fdisk> 1           {ESP/EFI partition}
+fdisk> n
+fdisk> 2
+fdisk> {default start part}
+fdisk> +18G
+fdisk> n
+fdisk> 3
+fdisk> {default start part}
+fdisk> +20G
+...
+fdisk> n
+fdisk> 7
+fdisk> {default start part}
+fdisk> {default end part}
+
+fdisk> p           {print partition table}
+fdisk> w           {write to disk}
+fdisk> q
+
+        Format the partitions:
+
+> mkfs.fat -F32 -nEFI /dev/sda1
+> mkfs.ext4 -LROOT /dev/sda2
+> mkfs.ext4 -LROOT /dev/sda3
+> mkfs.ext4 -LROOT /dev/sda4
+> mkfs.ext4 -LROOT /dev/sda5
+> mkfs.ext4 -LROOT /dev/sda6
+> mkfs.ext4 -LROOT /dev/sda7
+
+        Set up the GRUB (EFI) boot partition:
+
+> mkdir /mnt/boot
+> mount /dev/sda1 /mnt/boot
+> mkdir -p /mnt/boot/EFI/boot
+
+> cp grub-efi-bootx64.efi /mnt/boot/EFI/boot/bootx64.efi
+> vi /mnt/boot/EFI/boot/grub.cfg
+default=1
+
+menuentry "Linux Reference Image" {
+  linux (hd0,gpt2)/boot/bzImage root=/dev/sda2 ip=dhcp
+}
+
+menuentry "Linux sda3" {
+  linux (hd0,gpt3)/boot/bzImage root=/dev/sda3 ip=dhcp
+}
+
+menuentry "Linux sda4" {
+  linux (hd0,gpt4)/boot/bzImage root=/dev/sda4 ip=dhcp
+}
+
+menuentry "Linux sda5" {
+  linux (hd0,gpt5)/boot/bzImage root=/dev/sda5 ip=dhcp
+}
+
+menuentry "Linux sda6" {
+  linux (hd0,gpt6)/boot/bzImage root=/dev/sda6 ip=dhcp
+}
+
+menuentry "Linux sda7" {
+  linux (hd0,gpt7)/boot/bzImage root=/dev/sda7 ip=dhcp
+}
+
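+      Before rebooting, the result can optionally be sanity-checked,
+      assuming the lsblk and blkid
+      tools are available in the ramdisk image:
+
+> lsblk /dev/sda
+> blkid /dev/sda1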
+ +
+      Installing and booting Enea NFV Access Platform on the
+      hard disk
+
+      After partitioning the hard disk, boot Enea NFV Access Platform
+      from RAMFS or from a reference image installed on one of the
+      partitions.
+
+      To install an Enea NFV Access Platform image on a partition, follow
+      these steps:
+
+        Copy your platform image to the target:
+
+server> scp ./enea-image-virtualization-host-inteld1521.tar.gz \
+root@<target_ip>:/home/root/
+
+        Extract the image onto the desired partition:
+
+target> mount /dev/sda3 /mnt/sda
+target> tar -pzxf /home/root/enea-image-virtualization-host-inteld1521.tar.gz \
+-C /mnt/sda
+
+        Alternatively, you can do both steps in one command from the
+        server, provided the desired partition is already mounted on the
+        target:
+
+server> cat ./enea-image-virtualization-host-inteld1521.tar.gz | \
+ssh root@<target_ip> "cd /mnt/sda; tar -zxf -"
+
+        Reboot
+
+        From the GRUB menu select your partition
+
+      In order to change kernel boot parameters you need to mount the
+      GRUB partition (i.e. /dev/sda1) and edit the
+      EFI/boot/grub.cfg file.
+
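+      For example, to add hugepage and CPU isolation parameters to the
+      sda3 entry (the values below are purely illustrative), mount the GRUB
+      partition, creating the mount point first if needed, and edit the
+      corresponding menu entry:
+
+target> mkdir -p /mnt/boot
+target> mount /dev/sda1 /mnt/boot
+target> vi /mnt/boot/EFI/boot/grub.cfg
+
+menuentry "Linux sda3" {
+  linux (hd0,gpt3)/boot/bzImage root=/dev/sda3 ip=dhcp default_hugepagesz=1G hugepagesz=1G hugepages=8 isolcpus=4-7
+}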
\ No newline at end of file diff --git a/doc/book-enea-nfv-access-platform-guide/doc/hypervisor_virtualization.xml b/doc/book-enea-nfv-access-platform-guide/doc/hypervisor_virtualization.xml index 092b52f..6242de4 100644 --- a/doc/book-enea-nfv-access-platform-guide/doc/hypervisor_virtualization.xml +++ b/doc/book-enea-nfv-access-platform-guide/doc/hypervisor_virtualization.xml @@ -324,5 +324,418 @@ $ testpmd -c 0x7 -- -i --rxq=2 --txq=2 --nb-cores=2 ... + +
+      Booting a KVM Guest
+
+      There are several ways to boot a KVM guest. Here we describe how
+      to boot using a raw image. A direct kernel boot can be performed by
+      transferring the guest kernel and the file system image to the host and
+      specifying a <kernel> element (and, if needed, an
+      <initrd> element) inside the
+      <os> element of the guest XML file, as in the
+      following example:
+
+<os>
+  <kernel>bzImage</kernel>
+</os>
+<devices>
+  <disk type='file' device='disk'>
+    <driver name='qemu' type='raw' cache='none'/>
+    <source file='enea-image-virtualization-guest-qemux86-64.ext4'/>
+    <target dev='vda' bus='virtio'/>
+  </disk>
+</devices>
+
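+      For a quick test outside of libvirt, a roughly equivalent direct
+      kernel boot can also be performed straight from the QEMU command
+      line. The memory and CPU values below are only illustrative:
+
+qemu-system-x86_64 -enable-kvm -nographic -m 2048 -smp 2 \
+  -kernel bzImage \
+  -append "root=/dev/vda console=ttyS0,115200" \
+  -drive file=enea-image-virtualization-guest-qemux86-64.ext4,if=virtio,format=raw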
+ +
+      Starting a Guest
+
+      The command virsh create starts a guest:
+
+virsh create example-guest-x86.xml
+
+      If further configuration is needed before the guest is reachable
+      through ssh, a console can be started using the command
+      virsh console. The example below shows how to start a
+      console where kvm-example-guest is the name of the guest defined in the
+      guest XML file:
+
+virsh console kvm-example-guest
+
+      This requires that the guest domain has a console configured in
+      the guest XML file:
+
+<os>
+  <cmdline>console=ttyS0,115200</cmdline>
+</os>
+<devices>
+  <console type='pty'>
+    <target type='serial' port='0'/>
+  </console>
+</devices>
+
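+      A few related virsh commands are useful at this
+      stage, for listing guests and stopping them again
+      (destroy forces an immediate stop):
+
+virsh list --all
+virsh shutdown kvm-example-guest
+virsh destroy kvm-example-guest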
+ +
+      Isolation
+
+      It may be desirable to isolate execution in a guest to a specific
+      guest core, and likewise to run the guest's virtual CPUs on specific
+      host cores.
+
+      To pin the virtual CPUs of the guest to specific host cores, configure
+      the <cputune> contents as follows:
+
+        First explicitly state on which host core each guest core
+        shall run, by mapping vcpu to
+        cpuset in the <vcpupin>
+        tag.
+
+        In the <cputune> tag it is further
+        possible to specify on which CPU the emulator shall run by adding
+        the cpuset to the <emulatorpin> tag.
+
+<vcpu placement='static'>2</vcpu>
+<cputune>
+  <vcpupin vcpu='0' cpuset='2'/>
+  <vcpupin vcpu='1' cpuset='3'/>
+  <emulatorpin cpuset='2'/>
+</cputune>
+
+      libvirt will group all threads belonging to
+      a qemu instance into cgroups that will be created for that purpose.
+      It is possible to supply a base name for those cgroups using the
+      <resource> tag:
+
+<resource>
+  <partition>/rt</partition>
+</resource>
+
+
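+      Once the guest is running, the effective pinning can be verified
+      from the host, using the guest name from the earlier example:
+
+virsh vcpuinfo kvm-example-guest
+virsh vcpupin kvm-example-guest
+virsh emulatorpin kvm-example-guest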
+ +
+      Networking using libvirt
+
+      The command virsh net-create starts a network. If
+      any networks are listed in the guest XML file, those networks must be
+      started before the guest is started. As an example, if the network is
+      defined in a file named example-net.xml, it is started as
+      follows:
+
+virsh net-create example-net.xml
+<network>
+  <name>sriov</name>
+  <forward mode='hostdev' managed='yes'>
+    <pf dev='eno3'/>
+  </forward>
+</network>
+
+      libvirt is a virtualization API that supports
+      virtual network creation. These networks can be connected to guests and
+      containers by referencing the network in the guest XML file. It is
+      possible to have a virtual network persistently defined on the host by
+      using the command virsh net-define
+      instead of the previously mentioned virsh net-create,
+      and then activating it with virsh net-start.
+
+      An example for the sample network defined in
+      meta-vt/recipes-example/virt-example/files/example-net.xml:
+
+virsh net-define example-net.xml
+
+      The command virsh net-autostart enables a
+      persistent network to start automatically when the libvirt daemon
+      starts:
+
+virsh net-autostart sriov
+
+      The guest configuration file (XML) must be updated to access the newly
+      created network, as follows:
+
+  <interface type='network'>
+    <source network='sriov'/>
+  </interface>
+
+      Presented below are a few modes of network access from a guest
+      using virsh:
+
+        vhost-user interface
+
+        See the Open vSwitch chapter on how to create a vhost-user interface
+        using Open vSwitch. Currently there is no Open vSwitch support for
+        networks that are managed by libvirt (e.g. NAT). As of now, only
+        bridged networks are supported (those where the user has to manually
+        create the bridge).
+
+    <interface type='vhostuser'>
+      <mac address='00:00:00:00:00:01'/>
+      <source type='unix' path='/var/run/openvswitch/vhost-user1' mode='client'/>
+      <model type='virtio'/>
+      <driver queues='1'>
+        <host mrg_rxbuf='off'/>
+      </driver>
+    </interface>
+
+        PCI passthrough (SR-IOV)
+
+        The KVM hypervisor supports attaching PCI devices on the host
+        system to guests. PCI passthrough allows guests to have exclusive
+        access to PCI devices for a range of tasks, and allows PCI devices
+        to appear and behave as if they were physically attached to the
+        guest operating system.
+
+        Preparing an Intel system for PCI passthrough is done as
+        follows:
+
+          Enable the Intel VT-d extensions in BIOS
+
+          Activate Intel VT-d in the kernel by using
+          intel_iommu=on as a kernel boot
+          parameter
+
+          Allow unsafe interrupts in case the system doesn't support
+          interrupt remapping. This can be done using
+          vfio_iommu_type1.allow_unsafe_interrupts=1 as
+          a kernel boot parameter.
+
+        VFs must be created on the host before starting the
+        guest:
+
+$ echo 2 > /sys/class/net/eno3/device/sriov_numvfs
+$ modprobe vfio_pci
+$ dpdk-devbind.py --bind=vfio-pci 0000:03:10.0
+
+        A VF is then assigned to the guest with a hostdev
+        interface:
+
+    <interface type='hostdev' managed='yes'>
+      <source>
+        <address type='pci' domain='0x0' bus='0x03' slot='0x10' function='0x0'/>
+      </source>
+      <mac address='52:54:00:6d:90:02'/>
+    </interface>
+
+        Bridge interface
+
+        In case an OVS bridge exists on the host, it can be used to
+        connect the guest:
+
+    <interface type='bridge'>
+      <mac address='52:54:00:71:b1:b6'/>
+      <source bridge='ovsbr0'/>
+      <virtualport type='openvswitch'/>
+      <address type='pci' domain='0x0000' bus='0x00' slot='0x03' function='0x0'/>
+    </interface>
+
+        For further details on the network XML format, see
+        http://libvirt.org/formatnetwork.html.
+
+
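+      To inspect or remove networks created this way, use the network
+      name from the <name> element
+      (sriov in the example above):
+
+virsh net-list --all
+virsh net-destroy sriov
+virsh net-undefine sriov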
+ +
+ Libvirt guest configuration examples + +
+ Guest configuration with vhost-user interface + + <domain type='kvm'> + <name>vm_vhost</name> + <uuid>4a9b3f53-fa2a-47f3-a757-dd87720d9d1d</uuid> + <memory unit='KiB'>4194304</memory> + <currentMemory unit='KiB'>4194304</currentMemory> + <memoryBacking> + <hugepages> + <page size='1' unit='G' nodeset='0'/> + </hugepages> + </memoryBacking> + <vcpu placement='static'>2</vcpu> + <cputune> + <shares>4096</shares> + <vcpupin vcpu='0' cpuset='4'/> + <vcpupin vcpu='1' cpuset='5'/> + <emulatorpin cpuset='4,5'/> + </cputune> + <os> + <type arch='x86_64' machine='pc'>hvm</type> + <kernel>/mnt/qemu/bzImage</kernel> + <cmdline>root=/dev/vda console=ttyS0,115200</cmdline> + <boot dev='hd'/> + </os> + <features> + <acpi/> + <apic/> + </features> + <cpu mode='host-model'> + <model fallback='allow'/> + <topology sockets='2' cores='1' threads='1'/> + <numa> + <cell id='0' cpus='0-1' memory='4194304' unit='KiB' memAccess='shared'/> + </numa> + </cpu> + <on_poweroff>destroy</on_poweroff> + <on_reboot>restart</on_reboot> + <on_crash>destroy</on_crash> + <devices> + <emulator>/usr/bin/qemu-system-x86_64</emulator> + <disk type='file' device='disk'> + <driver name='qemu' type='raw' cache='none'/> + <source file='/mnt/qemu/enea-image-virtualization-guest-qemux86-64.ext4'/> + <target dev='vda' bus='virtio'/> + </disk> + <interface type='vhostuser'> + <mac address='00:00:00:00:00:01'/> + <source type='unix' path='/var/run/openvswitch/vhost-user1' mode='client'/> + <model type='virtio'/> + <driver queues='1'> + <host mrg_rxbuf='off'/> + </driver> + </interface> + <serial type='pty'> + <target port='0'/> + </serial> + <console type='pty'> + <target type='serial' port='0'/> + </console> + </devices> +</domain> +
+ +
+ Guest configuration with PCI passthrough + + <domain type='kvm'> + <name>vm_sriov1</name> + <uuid>4a9b3f53-fa2a-47f3-a757-dd87720d9d1d</uuid> + <memory unit='KiB'>4194304</memory> + <currentMemory unit='KiB'>4194304</currentMemory> + <memoryBacking> + <hugepages> + <page size='1' unit='G' nodeset='0'/> + </hugepages> + </memoryBacking> + <vcpu>2</vcpu> + <os> + <type arch='x86_64' machine='q35'>hvm</type> + <kernel>/mnt/qemu/bzImage</kernel> + <cmdline>root=/dev/vda console=ttyS0,115200</cmdline> + <boot dev='hd'/> + </os> + <features> + <acpi/> + <apic/> + </features> + <cpu mode='host-model'> + <model fallback='allow'/> + <topology sockets='1' cores='2' threads='1'/> + <numa> + <cell id='0' cpus='0' memory='4194304' unit='KiB' memAccess='shared'/> + </numa> + </cpu> + <on_poweroff>destroy</on_poweroff> + <on_reboot>restart</on_reboot> + <on_crash>destroy</on_crash> + <devices> + <emulator>/usr/bin/qemu-system-x86_64</emulator> + <disk type='file' device='disk'> + <driver name='qemu' type='raw' cache='none'/> + <source file='/mnt/qemu/enea-image-virtualization-guest-qemux86-64.ext4'/> + <target dev='vda' bus='virtio'/> + </disk> + <interface type='hostdev' managed='yes'> + <source> + <address type='pci' domain='0x0' bus='0x03' slot='0x10' function='0x0'/> + </source> + <mac address='52:54:00:6d:90:02'/> + </interface> + <serial type='pty'> + <target port='0'/> + </serial> + <console type='pty'> + <target type='serial' port='0'/> + </console> + </devices> +</domain> +
+ +
+ Guest configuration with bridge interface + + <domain type='kvm'> + <name>vm_bridge</name> + <uuid>4a9b3f53-fa2a-47f3-a757-dd87720d9d1d</uuid> + <memory unit='KiB'>4194304</memory> + <currentMemory unit='KiB'>4194304</currentMemory> + <memoryBacking> + <hugepages> + <page size='1' unit='G' nodeset='0'/> + </hugepages> + </memoryBacking> + <vcpu placement='static'>2</vcpu> + <cputune> + <shares>4096</shares> + <vcpupin vcpu='0' cpuset='4'/> + <vcpupin vcpu='1' cpuset='5'/> + <emulatorpin cpuset='4,5'/> + </cputune> + <os> + <type arch='x86_64' machine='q35'>hvm</type> + <kernel>/mnt/qemu/bzImage</kernel> + <cmdline>root=/dev/vda console=ttyS0,115200</cmdline> + <boot dev='hd'/> + </os> + <features> + <acpi/> + <apic/> + </features> + <cpu mode='host-model'> + <model fallback='allow'/> + <topology sockets='2' cores='1' threads='1'/> + <numa> + <cell id='0' cpus='0-1' memory='4194304' unit='KiB' memAccess='shared'/> + </numa> + </cpu> + <on_poweroff>destroy</on_poweroff> + <on_reboot>restart</on_reboot> + <on_crash>destroy</on_crash> + <devices> + <emulator>/usr/bin/qemu-system-x86_64</emulator> + <disk type='file' device='disk'> + <driver name='qemu' type='raw' cache='none'/> + <source file='/mnt/qemu/enea-image-virtualization-guest-qemux86-64.ext4'/> + <target dev='vda' bus='virtio'/> + </disk> + <interface type='bridge'> + <mac address='52:54:00:71:b1:b6'/> + <source bridge='ovsbr0'/> + <virtualport type='openvswitch'/> + <address type='pci' domain='0x0000' bus='0x00' slot='0x03' function='0x0'/> + </interface> + <serial type='pty'> + <target port='0'/> + </serial> + <console type='pty'> + <target type='serial' port='0'/> + </console> + </devices> +</domain> +
+
\ No newline at end of file -- cgit v1.2.3-54-g00ecf