Commit ee21491a: first commit
playbooks/*
#!/bin/bash
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd )"
export ANSIBLE_ROLES_PATH="$DIR/roles"
*.retry
.vaultpassword
*.log
inventories/*.gcharbon.yml
Host 192.168.20.*
ProxyCommand ssh -W %h:%p LFR028762
User ansible_user
IdentityFile ~/.ssh/id_rsa
GSSAPIAuthentication no
Host 192.168.100.*
ProxyCommand ssh -W %h:%p LFR028762
User ansible_user
IdentityFile ~/.ssh/id_rsa
GSSAPIAuthentication no
Host 192.168.10.*
ProxyCommand ssh -W %h:%p LFR028762
User ansible_user
IdentityFile ~/.ssh/id_rsa
GSSAPIAuthentication no
Host LFR028762
Hostname 10.68.150.240
User gcharbon
ControlMaster auto
ControlPath ~/.ssh/ansible-%r@%h:%p
ControlPersist 5m
GSSAPIAuthentication no
MIT License
Copyright (c) 2018 Guillaume Charbonnier
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
.DEFAULT_GOAL := setup_demo
VERSION := 1.0
ROOT_DIR := $(shell dirname $(realpath $(lastword $(MAKEFILE_LIST))))
CUR_DATE := $(shell date +"%s")
PLAYBOOK_FOLDER := playbooks
INVENTORY_FOLDER := inventories
HYPERVISOR_INVENTORY := LFR028762.yml
GCHARBON_INVENTORY := demo.gcharbon.yml
VIRTUAL_INVENTORY := dynamic_inventories/demo_datalab.py
HYPERVISOR_INVENTORY_PATH := "$(INVENTORY_FOLDER)/$(HYPERVISOR_INVENTORY)"
VIRTUAL_INVENTORY_PATH := "$(INVENTORY_FOLDER)/$(VIRTUAL_INVENTORY)"
GCHARBON_INVENTORY_PATH := "$(INVENTORY_FOLDER)/$(GCHARBON_INVENTORY)"
VAULT_ID := --vault-id @prompt
setup_demo:
ANSIBLE_ROLES_PATH=$(ROOT_DIR)/roles ansible-playbook -i $(HYPERVISOR_INVENTORY_PATH) \
$(PLAYBOOK_FOLDER)/demo_init_virt_setup.yml
demo:
ANSIBLE_ROLES_PATH=$(ROOT_DIR)/roles ansible-playbook -i $(GCHARBON_INVENTORY_PATH) \
$(PLAYBOOK_FOLDER)/demo_init_virt_setup.yml
update_demo:
ansible-playbook -i $(HYPERVISOR_INVENTORY_PATH) \
$(PLAYBOOK_FOLDER)/demo_update_virt_setup.yml
provision_demo:
ansible-playbook -i $(VIRTUAL_INVENTORY_PATH) \
$(PLAYBOOK_FOLDER)/demo_vm_provisioning.yml
# Setup your Python environment with Ansible
Table of Contents:
* [Getting started](#getting_started)
* [Available ansible roles](#ansible_roles)
* [Available ansible playbooks](#ansible_playbooks)
* [install_pip.yml](#install_pip)
* [prepare_rh-python36.yml](#demo_nodes)
* [prepare_rh-python36_centos.yml](#centos_nodes)
* [Authentication](#authentication)
* [How to generate a new value for the `ansible_become_pass` variable](#ansible_become_pass)
* [Providing Ansible vault secrets](#vault_secrets)
## Getting started
<a name="getting_started"></a>
This project uses Ansible to configure remote or local Python environments.
You must use `ansible>=2.6`.
You can install it directly with `pip install -r requirements.txt` after cloning the repository.
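A quick way to confirm the requirement is met (a convenience snippet, not part of the repository):
```
from __future__ import print_function

from distutils.version import LooseVersion

import ansible

# ansible exposes its version as ansible.__version__
assert LooseVersion(ansible.__version__) >= LooseVersion("2.6"), ansible.__version__
print("ansible", ansible.__version__)
```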
#### Available ansible roles
<a name="ansible_roles"></a>
This repository is composed of 3 [ansible roles](https://docs.ansible.com/ansible/2.6/user_guide/playbooks_reuse_roles.html):
- **get-pip**: Can be used to install pip and virtualenv
- **pip**: Can be used to perform operations with pip
- **yum**: Can be used to perform actions with yum
#### Available ansible playbooks
<a name="ansible_playbooks"></a>
Three [major playbooks](https://docs.ansible.com/ansible/devel/user_guide/playbooks.html) are available for direct usage:
> Note: You must be inside the playbooks directory to run example commands.
- _**install_pip.yml**_:
<a name="install_pip"></a>
This playbook installs `pip` and `virtualenv` with `ansible_python_interpreter` by default. Any other interpreter can be used if the `python_interpreter` variable is defined.
###### Workflow:
1) Check if pip is installed.
If it is not installed:
1.1) Download the get-pip installer
1.2) Execute the get-pip installer
1.3) Remove the get-pip installer
2) Install virtualenv if it is not installed yet (a minimal sketch of this flow follows the list)
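For illustration, here is a minimal Python sketch of the bootstrap this workflow performs (the function name and URL are assumptions for the example, not the role's actual code):
```
import os
import subprocess
import tempfile

try:
    from urllib.request import urlretrieve  # Python 3
except ImportError:
    from urllib import urlretrieve  # Python 2

GET_PIP_URL = "https://bootstrap.pypa.io/get-pip.py"

def ensure_pip(python="/usr/bin/python"):
    # 1) Check if pip is installed for this interpreter
    if subprocess.call([python, "-m", "pip", "--version"]) != 0:
        # 1.1) Download the get-pip installer
        installer = os.path.join(tempfile.gettempdir(), "get-pip.py")
        urlretrieve(GET_PIP_URL, installer)
        # 1.2) Execute the get-pip installer
        subprocess.check_call([python, installer])
        # 1.3) Remove the get-pip installer
        os.remove(installer)
    # 2) Install virtualenv if it is not installed yet
    subprocess.check_call([python, "-m", "pip", "install", "virtualenv"])
```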
###### Example usage:
- `python_interpreter` is defined neither in the inventory nor in any other variable.
The default interpreter (`ansible_python_interpreter`) is used:
```
ansible-playbook -i inventories/test_centos.yml \
--vault-id @prompt \
install_pip.yml
```
- `python_interpreter` is set to `/opt/rh/rh-python36/root/bin/python`
```
ansible-playbook -i inventories/test_centos.yml \
--vault-id @prompt \
--extra-vars "python_interpreter=/opt/rh/rh-python36/root/bin/python"
install_pip.yml
```
- _**prepare_rh-python36.yml**_
<a name="demo_nodes"></a>
This playbook assumes that RHEL repositories have already been enabled and that the `rh-python36` package is available for download. It installs `rh-python36` and its dependencies based on a requirements file.
> Note: The playbook installs `rh-python36` because `ansible_python_interpreter` is set to `rh_python_interpreter`, and `rh_python_interpreter` is set to `rh-python36` in the `demo_nodes` group of the inventory. You can change it to any other package.
> Warning: If you want to install a package not managed by scl, set `scl_enable_python` to `false`.
###### Workflow:
1) Ensure `rh-python36` is installed
2) Optionally add a line to `.bashrc` to enable rh-python36 with scl at startup
3) Copy the Python requirements to the remote host
4) Install the Python requirements with pip from `rh-python36` (a minimal sketch of these steps follows the list)
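For illustration, a rough Python sketch of what these steps amount to (a sketch under the defaults above, not the playbook's actual code; it assumes a yum-based host and the standard SCL paths):
```
import os
import subprocess

def prepare_rh_python36(requirements="requirements.txt"):
    # 1) Ensure the rh-python36 SCL package is installed
    subprocess.check_call(["yum", "-y", "install", "rh-python36"])
    # 2) Optionally enable it for login shells
    with open(os.path.expanduser("~/.bashrc"), "a") as bashrc:
        bashrc.write("\nsource scl_source enable rh-python36\n")
    # 3) + 4) Install the requirements with the SCL interpreter's pip
    subprocess.check_call(["/opt/rh/rh-python36/root/bin/pip",
                           "install", "-r", requirements])
```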
###### Example usages:
- `rh_python_interpreter` is set to `rh-python36` by default
```
ansible-playbook -i inventories/test_centos.yml \
--vault-id @prompt \
prepare_rh-python36.yml
```
- Install another version of Python:
```
ansible-playbook -i inventories/test_centos.yml \
--ask-vault-pass \
--extra-vars "rh_python_package=rh-python35 python_interpreter=/opt/rh/rh-python35/root/bin/python" \
prepare_rh-python36.yml
```
- Install with another requirements file:
```
ansible-playbook -i inventories/test_centos.yml \
--vault-id @prompt \
--extra-vars "pip_requirements_path=~/some_folder/requirements.txt"
prepare_rh-python36.yml
```
- _**prepare_rh-python36_centos.yml**_:
<a name="centos_nodes"></a>
This playbook enables the `centos-sclo-rh-testing` repository and
downloads `rh-python36` before installing Python dependencies with this
interpreter.
###### Playbook workflow:
1) Ensure centos-release-scl is installed
2) Enable the centos-sclo-rh-testing repository
3) Install `rh-python36`
4) Add a line to `.bashrc` to enable rh-python36 by default at startup
5) Copy the Python requirements to the remote host
6) Install the Python requirements
It can be used the same way as `prepare_rh-python36`.
## Authentication
<a name="authentification"></a>
- Logging in to remote hosts relies on SSH key-based authentication.
> Use `ssh-copy-id <ansible_user@ansible_remote_host>` to ensure your own SSH key is authorized by the remote host.
- [Privilege escalation method](https://docs.ansible.com/ansible/latest/user_guide/become.html) used in playbook can be configured with `ansible_become_method` variable.
The default value is `su`. The root user's password is expected to be present as a vault-encrypted variable named `ansible_become_pass`. It can be accessed like any other vault secret once the vault password is given to playbooks.
> List of available values: https://docs.ansible.com/ansible/latest/user_guide/become.html#command-line-options
## How to generate a new value for the `ansible_become_pass` variable?
<a name="ansible_become_pass"></a>
- Run the following command
```
ansible-vault encrypt_string "PASSWORD"
```
- Before returning the encrypted string, it will prompt you for a password (you will provide this password at runtime to decrypt the secret). Store the output in a vars file.
You should get something like:
```
ansible_become_pass: !vault |
$ANSIBLE_VAULT;1.1;AES256
34306464383862303338666336306239306335393366656136313362643334383264326530333136
3831326639343639643063643664666331356236346239640a346531326465333330363761373831
61353139323635333461313732386538366361326163613865333462353161623039356433643032
3962303266363532330a616432653534333431363938386531373864616635393462356337336334
3834
```
> See [official documentation](https://docs.ansible.com/ansible/2.4/vault.html#use-encrypt-string-to-create-encrypted-variables-to-embed-in-yaml) for more information.
## Providing Ansible vault secrets
<a name="vault_secrets"></a>
You can choose among several options to [provide the vault password](https://docs.ansible.com/ansible/2.4/vault.html#providing-vault-passwords) to playbooks at runtime:
- Using `--vault-id @prompt`.
Example:
```
ansible-playbook -i inventories/test_centos.yml \
--vault-id @prompt \
prepare_rh-python36.yml
```
- Using a file or an executable:
Examples:
- Assuming `get-vault-password.py` is an existing Python script:
```
ansible-playbook -i inventories/test_centos.yml \
--vault-id get-vault-password.py \
prepare_rh-python36.yml
```
- Assuming `.vaultpassword` is an existing file:
```
ansible-playbook -i inventories/test_centos.yml \
--vault-id .vaultpassword \
prepare_rh-python36.yml
```
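For reference, a vault password executable only has to print the password on its standard output. A minimal sketch (the `VAULT_PASSWORD` variable name is an assumption for this example; the repository's own `get_vault_password` script, shown further below, prompts interactively instead):
```
#!/usr/bin/env python
# Minimal vault password client sketch: ansible-playbook runs the executable
# given to --vault-id and reads the vault password from its standard output.
from __future__ import print_function
import os

if __name__ == "__main__":
    # Illustrative only: read the password from an assumed environment variable.
    print(os.environ.get("VAULT_PASSWORD", ""))
```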
[defaults]
roles_path = $ANSIBLE_ROLES_PATH
[ssh_connection]
ssh_args = -F .ssh/ssh.cfg -o ControlMaster=auto -o ControlPersist=30m
control_path = ~/.ssh/ansible-%%r@%%h:%%p
version: '3.2'
services:
agent:
image: portainer/agent
environment:
# REQUIRED: Should be equal to the service name prefixed by "tasks." when
# deployed inside an overlay network
AGENT_CLUSTER_ADDR: tasks.agent
# AGENT_PORT: 9001
# LOG_LEVEL: debug
volumes:
- /var/run/docker.sock:/var/run/docker.sock
- /var/lib/docker/volumes:/var/lib/docker/volumes
networks:
- agent_network
deploy:
mode: global
placement:
constraints: [node.platform.os == linux]
portainer:
image: portainer/portainer
command: -H tcp://tasks.agent:9001 --tlsskipverify
ports:
- "9000:9000"
volumes:
- portainer_data:/data
networks:
- agent_network
deploy:
mode: replicated
replicas: 1
placement:
constraints: [node.role == manager]
networks:
agent_network:
driver: overlay
attachable: true
volumes:
portainer_data:
#!/usr/bin/env python
# Vault password client: prompts the user interactively and prints the
# password on stdout, where Ansible reads it when the script is passed
# to --vault-id.
from __future__ import print_function
import getpass

if __name__ == "__main__":
    pswd = getpass.getpass('Vault Password:')
    print(pswd)
---
all:
vars:
ansible_user: root
ansible_python_interpreter: /usr/bin/python
children:
centos_hosts:
hosts:
LFR028762:
ansible_host: 10.68.150.240
datalab:
hosts:
LFR028762:
hypervisors:
hosts:
LFR028762:
---
all:
vars:
ansible_user: root
children:
centos_hosts:
hosts:
demo-centos-01:
ansible_host: 159.65.207.209
local_ipv4: 10.133.55.212
demo-centos-02:
ansible_host: 188.166.122.69
local_ipv4: 10.133.49.170
demo-centos-03:
ansible_host: 174.138.11.9
local_ipv4: 10.133.53.14
debian_hosts:
hosts:
demo-ubuntu-01:
ansible_host: 159.65.196.145
local_ipv4: 10.133.67.137
demo-ubuntu-02:
ansible_host: 188.166.45.196
local_ipv4: 10.133.41.182
demo-ubuntu-03:
ansible_host: 174.138.13.10
local_ipv4: 10.133.48.63
datalab:
hosts:
demo-centos-01:
demo-centos-02:
demo-centos-03:
demo-ubuntu-01:
demo-ubuntu-02:
demo-ubuntu-03:
docker:
hosts:
demo-centos-01:
demo-centos-02:
demo-centos-03:
demo-ubuntu-01:
demo-ubuntu-02:
demo-ubuntu-03:
swarm_managers:
hosts:
demo-centos-01:
demo-ubuntu-01:
swarm_workers:
hosts:
demo-centos-02:
demo-centos-03:
demo-ubuntu-02:
        demo-ubuntu-03:
# hypervisors:
# hosts:
# # demo_node-01:
# # demo_node-02:
#!/usr/bin/env python
from __future__ import print_function
import argparse
import subprocess
import os
dir_path = os.path.dirname(os.path.realpath(__file__))
LIBVIRT_URI="qemu+tcp://root@10.68.150.240:16509/system"
VIRT_NETWORK="demo_datalab"
COMMON_GROUPS="demo,virtual_machines,centos_hosts,docker"
INVENTORY_SCRIPT_RELATIVE_PATH="scripts/libvirt_inventory.py"
cmd = "python {0} --libvirt_uri '{1}' --virt_network {2} --common_groups {3}"
inventory_script = os.path.join(dir_path,
INVENTORY_SCRIPT_RELATIVE_PATH)
cmd = cmd.format(inventory_script,
LIBVIRT_URI,
VIRT_NETWORK,
COMMON_GROUPS)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--list",
                        "-l",
                        action='store_true',
                        required=False)
    # Ansible invokes dynamic inventory scripts with --list; parse the flag
    # so unexpected arguments fail loudly instead of being silently ignored.
    parser.parse_args()
p = subprocess.Popen(cmd.split(" "), stdout=subprocess.PIPE)
output = p.stdout.read()
print(output.decode())
exit(0)
#!/usr/bin/env python
from __future__ import print_function
import argparse
import subprocess
import os
dir_path = os.path.dirname(os.path.realpath(__file__))
inventory_script = dir_path + "/scripts/libvirt_inventory.py"
cmd = 'python {0}'.format(inventory_script)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--list",
                        "-l",
                        action='store_true',
                        required=False)
    parser.parse_args()
    p = subprocess.Popen(cmd.split(" "), stdout=subprocess.PIPE)
    output = p.stdout.read()
    # Decode so the JSON prints as text under Python 3 as well.
    print(output.decode())
    exit(0)
#!/usr/bin/env python
from __future__ import print_function
import argparse
import libvirt
import json
import logging
logger = logging.getLogger(__name__)
LIBVIRT_DEFAULT_URI = 'qemu:///system'
LIBVIRT_DEFAULT_NETWORK = 'testing_datalab'
DEFAULT_SPLIT_HOSTNAME = '_'
DEFAULT_SPLIT_GROUP_POS = 1
DEFAULT_COMMON_GROUP = 'docker_hosts,centos_hosts,virtual_machines'
class LibvirtInventory(object):
def __init__(self,
target_network=LIBVIRT_DEFAULT_NETWORK,
uri=LIBVIRT_DEFAULT_URI,
group_split=DEFAULT_SPLIT_HOSTNAME,
group_pos=DEFAULT_SPLIT_GROUP_POS,
additional_group=DEFAULT_COMMON_GROUP):
self.uri = uri.replace("'","").replace('"','')
self.open_libvirt_connection(self.uri)
self.parser = {
"split": group_split,
"pos": group_pos,
}
self.common_groups = additional_group.split(",")
self.set_target_network(target_network)
self.inventory = {}
self.set_all_hosts_and_meta()
self.add_all_hosts_to_groups()
self.add_all_common_groups()
self.close_libvirt_connection()
def open_libvirt_connection(self, uri):
self.conn = libvirt.openReadOnly(uri)
        if self.conn is None:
logger.error('Failed to open connection to {0}'.format(uri))
exit(1)
else:
logger.info("Connected to libvirt on remote host: {0}".format(uri))
def close_libvirt_connection(self):
self.conn.close()
def set_target_network(self, target_network):
self._networks = {network.name(): network for network in self.conn.listAllNetworks()}
try:
self._network = self._networks[target_network]
except KeyError:
available_networks = ", ".join(self._networks.keys())
raise ValueError("Target network ({0}) does not exist. Available networks: {1}".format(target_network, available_networks))
@property
def network(self):
return self._network
def set_all_hosts_and_meta(self):
self._hosts = { lease["hostname"]: { "ansible_host": lease["ipaddr"] } for lease in self.network.DHCPLeases() }
self.add_group("all")
self.inventory["all"]["hosts"] = self.hosts = list(self._hosts.keys())
self.inventory["_meta"]= {"hostvars": self._hosts}
def add_group(self, group):
""" Add a group to inventory """
if group not in self.inventory:
self.inventory[group] = dict()
def get_group(self, group):
""" Return a group as dictionary from inventory """
return self.inventory[group]
def add_child(self, child, group):
""" Add a child group (string) to group in inventory"""
if not "children" in self.get_group(group):
self.inventory[group]["children"] = [child]
else:
self.inventory[group]["children"] = list(set([child] + self.inventory[group]["children"]))
def add_childgroup(self, child_group, parent_group="all"):
""" Add a group and mark it as child of another group in inventory """
self.add_group(child_group)
self.add_child(child_group, parent_group)
def add_children(self, children, group):
""" Add list of children to group in inventory"""
if not "children" in self.get_group(group):
self.inventory[group]["children"] = list(children)
else:
self.inventory[group]["children"] = list(set(children + self.inventory[group]["children"]))
def add_groupvars(self, vars_dict, group):
""" Takes a dictionary as argument and add the keys and values as group variables"""
if "vars" not in self.get_group(group):
self.inventory[group]["vars"] = vars_dict
else:
self.inventory[group]["vars"].update(vars_dict)
def add_host(self, hostname, group, create=True):
""" Add one host (string) to a group.
If create is True (default) then the group will be automatically created if it does not exist yet.
Else it will fail on KeyError
"""
if create:
self.add_group(group)
if "hosts" not in self.inventory[group]:
self.inventory[group]["hosts"] = [hostname]
else:
self.inventory[group]["hosts"] = list(set([hostname] + self.inventory[group]["hosts"]))
def add_hosts(self, hostnames, group, create=True):
""" Add several hosts (list) to a group.
If create is True (default) then the group will be automatically created if it does not exist yet.
Else it will fail on KeyError
"""
        if create:
            self.add_group(group)
        # Merge with any existing hosts; a freshly created group has none yet.
        self.inventory[group]["hosts"] = list(set(hostnames + self.inventory[group].get("hosts", [])))
def add_all_hosts_to_groups(self):
for hostname in self.hosts:
            # Honour the configured separator and group position
            # (e.g. 'demo-centos-01' split on '-' at pos 1 -> 'centos').
            group = hostname.split(self.parser["split"])[self.parser["pos"]]
self.add_host(hostname, group)
def add_all_common_groups(self):
for group in self.common_groups:
self.add_common_group(group)
def add_common_group(self, common_group):
self.add_childgroup(common_group)
for group in self.inventory:
if group in ["_meta", "all"] + self.common_groups:
continue
self.add_child(group, common_group)
def json_dump(self, indent=None):
return json.dumps(self.inventory, indent=indent)
def dump_inventory(libvirt_uri, virt_network, split_hostname, group_pos, common_groups):
inventory = LibvirtInventory(target_network=virt_network,
uri=libvirt_uri,
group_split=split_hostname,
group_pos=group_pos,
additional_group=common_groups)
return inventory.json_dump(2)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--libvirt_uri",
"-l",
default=LIBVIRT_DEFAULT_URI,
type=str,
required=False,
help="""URI that python should use to connect to libvirt daemon.
Default to '{0}'.
Can use several protocols:
- qemu:/// for local socket connections
- qemu+tcp:// for raw tcp connections
- qemu+tls:// for tcp connections over tls encryption
More information is available at https://libvirt.org/remote.html
""".format(LIBVIRT_DEFAULT_URI))
parser.add_argument("--virt_network",
"-n",
default=LIBVIRT_DEFAULT_NETWORK,
type=str,
required=False,
help="""Virtual network from which we want to determine running guests.
This script can only list virtual machines located in the same virtual network.
Default to '{0}'.
""".format(LIBVIRT_DEFAULT_NETWORK))
parser.add_argument("--split_hostname",
default=DEFAULT_SPLIT_HOSTNAME,
required=False,
help="""Which pattern should be used to split the found hostnames.
Default to '{0}'.
Hostnames are split in order to retrieve group they belong to.
This option must be used with `split_group_pos` option.
""".format(DEFAULT_SPLIT_HOSTNAME)
)
parser.add_argument("--split_group_pos",
default=DEFAULT_SPLIT_GROUP_POS,
required=False,
type=int,
help="""After splitting hostname, what is the index of the group name.
Default to '{0}'.
This option must be used with `split_hostname` option.
""".format(DEFAULT_SPLIT_GROUP_POS)
)
parser.add_argument("--common_groups",
"-g",
default=DEFAULT_COMMON_GROUP,
required=False,
type=str,
help="""Comma delimited list of group names to which all guests will belong.
Default to '{0}'.
This option must be used with `split_hostname` option.
""".format(DEFAULT_COMMON_GROUP)
)
args = parser.parse_args()
json_inv = dump_inventory(args.libvirt_uri,
args.virt_network,
args.split_hostname,
args.split_group_pos,
args.common_groups)
print(json_inv)
if __name__ == "__main__":
main()
#!/usr/bin/env python
from __future__ import print_function
import argparse
import subprocess
import os
dir_path = os.path.dirname(os.path.realpath(__file__))
LIBVIRT_URI="qemu+tcp://root@10.68.150.240:16509/system"
VIRT_NETWORK="testing_datalab"
COMMON_GROUPS="testing,datalab,virtual_machines,centos_hosts,docker"
INVENTORY_SCRIPT_RELATIVE_PATH="scripts/libvirt_inventory.py"
cmd = "python {0} --libvirt_uri '{1}' --virt_network {2} --common_groups {3}"
inventory_script = os.path.join(dir_path,
INVENTORY_SCRIPT_RELATIVE_PATH)
cmd = cmd.format(inventory_script,
LIBVIRT_URI,
VIRT_NETWORK,
COMMON_GROUPS)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--list",
                        "-l",
                        action='store_true',
                        required=False)
    parser.parse_args()
    p = subprocess.Popen(cmd.split(" "), stdout=subprocess.PIPE)
    output = p.stdout.read()
    # Decode so the JSON prints as text under Python 3 as well.
    print(output.decode())
    exit(0)
{
"cells": [],
"metadata": {},
"nbformat": 4,
"nbformat_minor": 2
}
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"LIBVIRT_DEFAULT_URI = 'qemu+tls://root@10.68.150.240/system'\n",
"# Default to: libvirt_uri = 'qemu:///system'\n",
"\n",
"LIBVIRT_DEFAULT_NETWORK = \"testing_datalab\"\n",
"# Default ot virt_network = \"testing_datalab\"\n",
"\n",
"DEFAULT_SPLIT_HOSTNAME = \"-\"\n",
"# Default to split_hostname = \"-\"\n",
"\n",
"DEFAULT_SPLIT_GROUP_POS = 1\n",
"# Default to group_pos = 1\n",
"\n",
"DEFAULT_COMMON_GROUP = \"docker_hosts,centos_hosts,virtual_machines\""
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"from __future__ import print_function\n",
"\n",
"import libvirt\n",
"import sys\n",
"import json \n",
"\n",
"import logging\n",
"\n",
"logger = logging.getLogger(__name__)"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [],
"source": [
"class LibvirtInventory(object):\n",
" \n",
" def __init__(self,\n",
" target_network=LIBVIRT_DEFAULT_NETWORK,\n",
" uri=LIBVIRT_DEFAULT_URI,\n",
" group_split=DEFAULT_SPLIT_HOSTNAME,\n",
" group_pos=DEFAULT_SPLIT_GROUP_POS,\n",
" additional_group=DEFAULT_COMMON_GROUP):\n",
" \n",
" self.open_libvirt_connection(uri)\n",
" self.parser = { \n",
" \"split\": group_split,\n",
" \"pos\": group_pos,\n",
" }\n",
" self.common_groups = additional_group.split(\",\")\n",
" self.set_target_network(target_network)\n",
" self.inventory = {}\n",
" self.set_all_hosts_and_meta()\n",
" self.add_all_hosts_to_groups()\n",
" self.add_all_common_groups()\n",
" self.close_libvirt_connection()\n",
"\n",
" \n",
" def open_libvirt_connection(self, uri):\n",
" self.conn = libvirt.openReadOnly(uri)\n",
"\n",
" if self.conn == None:\n",
" logger.error('Failed to open connection to {0}'.format(uri))\n",
" exit(1)\n",
" else:\n",
" logger.info(\"Connected to libvirt on remote host: {0}\".format(uri))\n",
"\n",
" def close_libvirt_connection(self):\n",
" self.conn.close() \n",
"\n",
"\n",
" def set_target_network(self, target_network):\n",
" self._networks = {network.name(): network for network in self.conn.listAllNetworks()}\n",
" try:\n",
" self._network = self._networks[target_network]\n",
" except KeyError:\n",
" available_networks = \", \".join(self._networks.keys())\n",
" raise ValueError(\"Target network ({0}) does not exist. Available networks: {1}\".format(target_network, available_networks))\n",
"\n",
" \n",
" @property\n",
" def network(self):\n",
" return self._network\n",
" \n",
" def set_all_hosts_and_meta(self):\n",
" self._hosts = { lease[\"hostname\"]: { \"ansible_host\": lease[\"ipaddr\"] } for lease in self.network.DHCPLeases() }\n",
" \n",
" self.add_group(\"all\")\n",
" \n",
" self.inventory[\"all\"][\"hosts\"] = self.hosts = list(self._hosts.keys())\n",
" \n",
" self.inventory[\"_meta\"]= {\"hostvars\": self._hosts}\n",
" \n",
" def add_group(self, group):\n",
" \"\"\" Add a group to inventory \"\"\"\n",
" if group not in self.inventory:\n",
" self.inventory[group] = dict()\n",
" \n",
" def get_group(self, group):\n",
" \"\"\" Return a group as dictionary from inventory \"\"\"\n",
" return self.inventory[group]\n",
" \n",
" \n",
" def add_child(self, child, group):\n",
" \"\"\" Add a child group (string) to group in inventory\"\"\"\n",
" if not \"children\" in self.get_group(group):\n",
" self.inventory[group][\"children\"] = [child]\n",
" else:\n",
" self.inventory[group][\"children\"] = list(set([child] + self.inventory[group][\"children\"]))\n",
" \n",
" \n",
" def add_childgroup(self, child_group, parent_group=\"all\"):\n",
" \"\"\" Add a group and mark it as child of another group in inventory \"\"\"\n",
" self.add_group(child_group)\n",
" self.add_child(child_group, parent_group)\n",
" \n",
" \n",
" def add_children(self, children, group):\n",
" \"\"\" Add list of children to group in inventory\"\"\"\n",
" if not \"children\" in self.get_group(group):\n",
" self.inventory[group][\"children\"] = list(children)\n",
" else:\n",
" self.inventory[group][\"children\"] = list(set(children + self.inventory[group][\"children\"]))\n",
" \n",
"\n",
" def add_groupvars(self, vars_dict, group):\n",
" \"\"\" Takes a dictionary as argument and add the keys and values as group variables\"\"\"\n",
" if \"vars\" not in self.get_group(group):\n",
" self.inventory[group][\"vars\"] = vars_dict\n",
" else:\n",
" self.inventory[group][\"vars\"].update(vars_dict)\n",
" \n",
" \n",
" def add_host(self, hostname, group, create=True):\n",
" \"\"\" Add one host (string) to a group. \n",
" If create is True (default) then the group will be automatically created if it does not exist yet.\n",
" Else it will fail on KeyError\n",
" \"\"\"\n",
" if create:\n",
" self.add_group(group)\n",
" \n",
" if \"hosts\" not in self.inventory[group]:\n",
" self.inventory[group][\"hosts\"] = [hostname]\n",
" else:\n",
" self.inventory[group][\"hosts\"] = list(set([hostname] + self.inventory[group][\"hosts\"]))\n",
" \n",
" def add_hosts(self, hostnames, group, create=True):\n",
" \"\"\" Add several hosts (list) to a group. \n",
" If create is True (default) then the group will be automatically created if it does not exist yet.\n",
" Else it will fail on KeyError\n",
" \"\"\"\n",
" if create:\n",
" self.add_group(group)\n",
" self.inventory[group][\"hosts\"] = list(set(hostnames + self.inventory[group][\"hosts\"]))\n",
" \n",
" def add_all_hosts_to_groups(self):\n",
" for hostname in self.hosts:\n",
" prefix, group, last_ip_part = hostname.split(self.parser[\"split\"])\n",
" self.add_host(hostname, group)\n",
" \n",
" def add_all_common_groups(self):\n",
" for group in self.common_groups:\n",
" self.add_common_group(group)\n",
"\n",
" def add_common_group(self, common_group):\n",
" self.add_childgroup(common_group)\n",
" for group in self.inventory:\n",
" if group in [\"_meta\", \"all\"] + self.common_groups:\n",
" continue\n",
" self.add_child(group, common_group) \n",
" \n",
" def json_dump(self, indent=None):\n",
" return json.dumps(self.inventory, indent=indent)\n",
" \n",
" \n",
"def dump_inventory(libvirt_uri, virt_network, split_hostname, group_pos, common_groups):\n",
" \n",
" inventory = LibvirtInventory(target_network=virt_network,\n",
" uri=libvirt_uri,\n",
" group_split=split_hostname,\n",
" group_pos=group_pos,\n",
" additional_group=common_groups)\n",
" \n",
" return inventory.json_dump(2)"
]
},
{
"cell_type": "code",
"execution_count": 27,
"metadata": {},
"outputs": [
{
"ename": "libvirtError",
"evalue": "Unable to read TLS confirmation: Input/output error",
"output_type": "error",
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mlibvirtError\u001b[0m Traceback (most recent call last)",
"\u001b[0;32m<ipython-input-27-25513edafff5>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m()\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m \u001b[0mtest\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mLibvirtInventory\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m",
"\u001b[0;32m<ipython-input-3-e9e07bdc4230>\u001b[0m in \u001b[0;36m__init__\u001b[0;34m(self, target_network, uri, group_split, group_pos, additional_group)\u001b[0m\n\u001b[1;32m 8\u001b[0m additional_group=DEFAULT_COMMON_GROUP):\n\u001b[1;32m 9\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 10\u001b[0;31m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mopen_libvirt_connection\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0muri\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 11\u001b[0m self.parser = { \n\u001b[1;32m 12\u001b[0m \u001b[0;34m\"split\"\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0mgroup_split\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m<ipython-input-3-e9e07bdc4230>\u001b[0m in \u001b[0;36mopen_libvirt_connection\u001b[0;34m(self, uri)\u001b[0m\n\u001b[1;32m 23\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 24\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mopen_libvirt_connection\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0muri\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 25\u001b[0;31m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mconn\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mlibvirt\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mopenReadOnly\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0muri\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 26\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 27\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mconn\u001b[0m \u001b[0;34m==\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m/usr/lib/python3/dist-packages/libvirt.py\u001b[0m in \u001b[0;36mopenReadOnly\u001b[0;34m(name)\u001b[0m\n\u001b[1;32m 266\u001b[0m URIs are documented at http://libvirt.org/uri.html \"\"\"\n\u001b[1;32m 267\u001b[0m \u001b[0mret\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mlibvirtmod\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mvirConnectOpenReadOnly\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mname\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 268\u001b[0;31m \u001b[0;32mif\u001b[0m \u001b[0mret\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;32mraise\u001b[0m \u001b[0mlibvirtError\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'virConnectOpenReadOnly() failed'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 269\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mvirConnect\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0m_obj\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mret\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 270\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;31mlibvirtError\u001b[0m: Unable to read TLS confirmation: Input/output error"
]
}
],
"source": [
"test = LibvirtInventory()"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.5.2"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
{
"cells": [
{
"cell_type": "markdown",
"metadata": {
"ExecuteTime": {
"end_time": "2018-10-23T08:58:06.587633Z",
"start_time": "2018-10-23T08:58:06.583670Z"
}
},
"source": [
"# Ansible: \n",
"\n",
"## Dynamic provisioning with libvirt \n",
"\n",
"Ansible allows you to execute a python script to generate inventory at runtime. \n",
"\n",
"This would be really useful when creating virtual machines prior to running playbooks. We do not always knwow the name of virtual machines, or their IP adress.\n",
"\n",
"The expected JSON output should use a similar schema:\n",
"\n",
"```json\n",
"{\n",
" \"virtual_machines\": {\n",
" \"hosts\": [\"test-manager-101\", \"test-kairos-221\"],\n",
" \"vars\": {\n",
" \"ansible_python_interpreter\": \"/usr/bin/python\",\n",
" \"ansible_user\": \"gcharbon\"\n",
" },\n",
" \"children\": [\"managers\",\"kairosdb\",\"centos-hosts\"]\n",
" },\n",
" \"centos-hosts\": {\n",
" \"hosts\": [\"test-kairos-221\"]\n",
" },\n",
" \"kairosdb\": {\n",
" \"hosts\": [\"test-kairos-221\"],\n",
" \"vars\": {\n",
" \"services\": [\"kairosdb\",\"cassandra\"],\n",
" \"data_directory\": \"/data\"\n",
" }\n",
" },\n",
" \"managers\": {\n",
" \"hosts\": [\"test-manager-101\"],\n",
" \"vars\": {\n",
" \"services\": [\"nginx-proxy\",\"docker-gen\",\"nginx-letsencrypt\", \"portainer\"],\n",
" \"domain\": \"datalab.integmonsoon.com\"\n",
" }\n",
" },\n",
" \"_meta\": {\n",
" \"hostvars\": {\n",
" \"test-manager-101\": {\n",
" \"ansible_host\": \"1192.168.100.101\"\n",
" } ,\n",
" \"test-kairos-221\": {\n",
" \"ansible_host\": \"192.168.100.201\"\n",
" }\n",
" }\n",
" }\n",
"}\n",
"```\n",
"\n",
"\n",
"Below are a list of parameters that can be given at runtime"
]
},
{
"cell_type": "code",
"execution_count": 78,
"metadata": {
"ExecuteTime": {
"end_time": "2018-10-23T09:01:00.850127Z",
"start_time": "2018-10-23T09:01:00.841123Z"
}
},
"outputs": [],
"source": [
"libvirt_uri = 'qemu+tcp://root@10.68.150.240/system'\n",
"# Default to: libvirt_uri = 'qemu:///system'\n",
"\n",
"virt_network = \"testing_datalab_network\"\n",
"# Default ot virt_network = \"testing_datalab\"\n",
"\n",
"split_hostname = \"-\"\n",
"# Default to split_hostname = \"-\"\n",
"\n",
"group_pos = 1\n",
"# Default to group_pos = 1\n",
"\n",
"\n",
"common_group = \"docker-hosts,centos-hosts,virtual_machines\"\n",
"# Default to \"docker-hosts\""
]
},
{
"cell_type": "code",
"execution_count": 79,
"metadata": {},
"outputs": [],
"source": [
"arg_values = [libvirt_uri, virt_network, split_hostname, group_pos, common_group]"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"A few imports are needed to work with `libvirt` on remote system and JSON serialization.\n",
"\n",
"We want our code to be compatible between python2 and python3. \n",
"\n",
"We also want to use a logger if it exists"
]
},
{
"cell_type": "code",
"execution_count": 80,
"metadata": {
"ExecuteTime": {
"end_time": "2018-10-23T09:02:25.207310Z",
"start_time": "2018-10-23T09:02:24.734285Z"
}
},
"outputs": [],
"source": [
"from __future__ import print_function\n",
"\n",
"import libvirt\n",
"import sys\n",
"import json \n",
"\n",
"import logging\n",
"\n",
"logger = logging.getLogger(__name__)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### We can define the `LibvirtInventory` class"
]
},
{
"cell_type": "code",
"execution_count": 84,
"metadata": {},
"outputs": [],
"source": [
"LIBVIRT_DEFAULT_URI = 'qemu:///system'\n",
"\n",
"LIBVIRT_DEFAULT_NETWORK = 'testing_datalab'\n",
"\n",
"DEFAULT_SPLIT_HOSTNAME = '-'\n",
"\n",
"DEFAULT_SPLIT_GROUP_POS = 1\n",
"\n",
"DEFAULT_COMMON_GROUP = 'docker-hosts'\n",
"\n",
"\n",
"class LibvirtInventory(object):\n",
" \n",
" def __init__(self,\n",
" target_network=LIBVIRT_DEFAULT_NETWORK,\n",
" uri=LIBVIRT_DEFAULT_URI,\n",
" group_split=DEFAULT_SPLIT_HOSTNAME,\n",
" group_pos=DEFAULT_SPLIT_GROUP_POS,\n",
" additional_group=DEFAULT_COMMON_GROUP):\n",
" \n",
" self.open_libvirt_connection(uri)\n",
" self.parser = { \n",
" \"split\": group_split,\n",
" \"pos\": group_pos,\n",
" }\n",
" self.common_groups = additional_group.split(\",\")\n",
" self.set_target_network(target_network)\n",
" self.inventory = {}\n",
" self.set_all_hosts_and_meta()\n",
" self.add_all_hosts_to_groups()\n",
" self.add_all_common_groups()\n",
" self.close_libvirt_connection()\n",
"\n",
" \n",
" def open_libvirt_connection(self, uri):\n",
" self.conn = libvirt.openReadOnly(uri)\n",
"\n",
" if self.conn == None:\n",
" logger.error('Failed to open connection to {0}'.format(uri))\n",
" exit(1)\n",
" else:\n",
" logger.info(\"Connected to libvirt on remote host: {0}\".format(uri))\n",
"\n",
" def close_libvirt_connection(self):\n",
" self.conn.close() \n",
"\n",
"\n",
" def set_target_network(self, target_network):\n",
" self._networks = {network.name(): network for network in self.conn.listAllNetworks()}\n",
" try:\n",
" self._network = self._networks[target_network]\n",
" except KeyError:\n",
" available_networks = \", \".join(self._networks.keys())\n",
" raise ValueError(\"Target network ({0}) does not exist. Available networks: {1}\".format(target_network, available_networks))\n",
"\n",
" \n",
" @property\n",
" def network(self):\n",
" return self._network\n",
" \n",
" def set_all_hosts_and_meta(self):\n",
" self._hosts = { lease[\"hostname\"]: { \"ansible_host\": lease[\"ipaddr\"] } for lease in self.network.DHCPLeases() }\n",
" \n",
" self.add_group(\"all\")\n",
" \n",
" self.inventory[\"all\"][\"hosts\"] = self.hosts = list(self._hosts.keys())\n",
" \n",
" self.inventory[\"_meta\"]= {\"hostvars\": self._hosts}\n",
" \n",
" def add_group(self, group):\n",
" \"\"\" Add a group to inventory \"\"\"\n",
" if group not in self.inventory:\n",
" self.inventory[group] = dict()\n",
" \n",
" def get_group(self, group):\n",
" \"\"\" Return a group as dictionary from inventory \"\"\"\n",
" return self.inventory[group]\n",
" \n",
" \n",
" def add_child(self, child, group):\n",
" \"\"\" Add a child group (string) to group in inventory\"\"\"\n",
" if not \"children\" in self.get_group(group):\n",
" self.inventory[group][\"children\"] = [child]\n",
" else:\n",
" self.inventory[group][\"children\"] = list(set([child] + self.inventory[group][\"children\"]))\n",
" \n",
" \n",
" def add_childgroup(self, child_group, parent_group=\"all\"):\n",
" \"\"\" Add a group and mark it as child of another group in inventory \"\"\"\n",
" self.add_group(child_group)\n",
" self.add_child(child_group, parent_group)\n",
" \n",
" \n",
" def add_children(self, children, group):\n",
" \"\"\" Add list of children to group in inventory\"\"\"\n",
" if not \"children\" in self.get_group(group):\n",
" self.inventory[group][\"children\"] = list(children)\n",
" else:\n",
" self.inventory[group][\"children\"] = list(set(children + self.inventory[group][\"children\"]))\n",
" \n",
"\n",
" def add_groupvars(self, vars_dict, group):\n",
" \"\"\" Takes a dictionary as argument and add the keys and values as group variables\"\"\"\n",
" if \"vars\" not in self.get_group(group):\n",
" self.inventory[group][\"vars\"] = vars_dict\n",
" else:\n",
" self.inventory[group][\"vars\"].update(vars_dict)\n",
" \n",
" \n",
" def add_host(self, hostname, group, create=True):\n",
" \"\"\" Add one host (string) to a group. \n",
" If create is True (default) then the group will be automatically created if it does not exist yet.\n",
" Else it will fail on KeyError\n",
" \"\"\"\n",
" if create:\n",
" self.add_group(group)\n",
" \n",
" if \"hosts\" not in self.inventory[group]:\n",
" self.inventory[group][\"hosts\"] = [hostname]\n",
" else:\n",
" self.inventory[group][\"hosts\"] = list(set([hostname] + self.inventory[group][\"hosts\"]))\n",
" \n",
" def add_hosts(self, hostnames, group, create=True):\n",
" \"\"\" Add several hosts (list) to a group. \n",
" If create is True (default) then the group will be automatically created if it does not exist yet.\n",
" Else it will fail on KeyError\n",
" \"\"\"\n",
" if create:\n",
" self.add_group(group)\n",
" self.inventory[group][\"hosts\"] = list(set(hostnames + self.inventory[group][\"hosts\"]))\n",
" \n",
" def add_all_hosts_to_groups(self):\n",
" for hostname in self.hosts:\n",
" prefix, group, last_ip_part = hostname.split(self.parser[\"split\"])\n",
" self.add_host(hostname, group)\n",
" \n",
" def add_all_common_groups(self):\n",
" for group in self.common_groups:\n",
" self.add_common_group(group)\n",
"\n",
" def add_common_group(self, common_group):\n",
" self.add_childgroup(common_group)\n",
" for group in self.inventory:\n",
" if group in [\"_meta\", \"all\"] + self.common_groups:\n",
" continue\n",
" self.add_child(group, common_group) \n",
" \n",
" def json_dump(self, indent=None):\n",
" return json.dumps(self.inventory, indent=indent)\n",
" \n",
" \n",
"def dump_inventory(libvirt_uri, virt_network, split_hostname, group_pos, common_groups):\n",
" \n",
" inventory = LibvirtInventory(target_network=virt_network,\n",
" uri=libvirt_uri,\n",
" group_split=split_hostname,\n",
" group_pos=group_pos,\n",
" additional_group=common_groups)\n",
" \n",
" return inventory.json_dump(2)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Let's test it"
]
},
{
"cell_type": "code",
"execution_count": 85,
"metadata": {},
"outputs": [],
"source": [
"inventory = dump_inventory(*arg_values)"
]
},
{
"cell_type": "code",
"execution_count": 86,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"{\n",
" \"_meta\": {\n",
" \"hostvars\": {\n",
" \"test-kairos-222\": {\n",
" \"ansible_host\": \"192.168.200.222\"\n",
" },\n",
" \"test-grafic-231\": {\n",
" \"ansible_host\": \"192.168.200.231\"\n",
" },\n",
" \"test-kairos-221\": {\n",
" \"ansible_host\": \"192.168.200.221\"\n",
" }\n",
" }\n",
" },\n",
" \"kairos\": {\n",
" \"hosts\": [\n",
" \"test-kairos-222\",\n",
" \"test-kairos-221\"\n",
" ]\n",
" },\n",
" \"centos-hosts\": {\n",
" \"children\": [\n",
" \"kairos\",\n",
" \"grafic\"\n",
" ]\n",
" },\n",
" \"grafic\": {\n",
" \"hosts\": [\n",
" \"test-grafic-231\"\n",
" ]\n",
" },\n",
" \"virtual_machines\": {\n",
" \"children\": [\n",
" \"kairos\",\n",
" \"grafic\"\n",
" ]\n",
" },\n",
" \"docker-hosts\": {\n",
" \"children\": [\n",
" \"kairos\",\n",
" \"grafic\"\n",
" ]\n",
" },\n",
" \"all\": {\n",
" \"children\": [\n",
" \"centos-hosts\",\n",
" \"docker-hosts\",\n",
" \"virtual_machines\"\n",
" ],\n",
" \"hosts\": [\n",
" \"test-kairos-222\",\n",
" \"test-grafic-231\",\n",
" \"test-kairos-221\"\n",
" ]\n",
" }\n",
"}\n"
]
}
],
"source": [
"print(inventory)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### And finally define a small script"
]
},
{
"cell_type": "code",
"execution_count": 167,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"usage: ipykernel_launcher.py [-h] [--libvirt_uri LIBVIRT_URI]\n",
" [--virt_network VIRT_NETWORK]\n",
" [--split_hostname SPLIT_HOSTNAME]\n",
" [--split_group_pos SPLIT_GROUP_POS]\n",
" [--common-groups COMMON_GROUPS]\n",
"ipykernel_launcher.py: error: unrecognized arguments: -f /home/gcharbon/.local/share/jupyter/runtime/kernel-053cf074-f981-497d-b5fc-9775b16ce427.json\n"
]
},
{
"ename": "SystemExit",
"evalue": "2",
"output_type": "error",
"traceback": [
"An exception has occurred, use %tb to see the full traceback.\n",
"\u001b[0;31mSystemExit\u001b[0m\u001b[0;31m:\u001b[0m 2\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/usr/local/lib/python3.5/dist-packages/IPython/core/interactiveshell.py:2969: UserWarning: To exit: use 'exit', 'quit', or Ctrl-D.\n",
" warn(\"To exit: use 'exit', 'quit', or Ctrl-D.\", stacklevel=1)\n"
]
}
],
"source": [
"from __future__ import print_function\n",
"\n",
"import argparse\n",
"import libvirt\n",
"import json \n",
"import logging\n",
"\n",
"logger = logging.getLogger(__name__)\n",
"\n",
"LIBVIRT_DEFAULT_URI = 'qemu:///system'\n",
"\n",
"LIBVIRT_DEFAULT_NETWORK = 'testing_datalab'\n",
"\n",
"DEFAULT_SPLIT_HOSTNAME = '-'\n",
"\n",
"DEFAULT_SPLIT_GROUP_POS = 1\n",
"\n",
"DEFAULT_COMMON_GROUP = 'docker-hosts'\n",
"\n",
"class LibvirtInventory(object):\n",
" \n",
" def __init__(self,\n",
" target_network=LIBVIRT_DEFAULT_NETWORK,\n",
" uri=LIBVIRT_DEFAULT_URI,\n",
" group_split=DEFAULT_SPLIT_HOSTNAME,\n",
" group_pos=DEFAULT_SPLIT_GROUP_POS,\n",
" additional_group=DEFAULT_COMMON_GROUP):\n",
" \n",
" self.open_libvirt_connection(uri)\n",
" self.groups_metadata = { \n",
" \"split\": group_split,\n",
" \"pos\": group_pos,\n",
" \"defaults\": additional_group.split(\",\")\n",
" }\n",
" self.set_target_network(target_network)\n",
" self.inventory = {\n",
" \"all\": \n",
" { \"hosts\":{}, \n",
" \"vars\":{}, \n",
" \"children\":{}\n",
" }\n",
" }\n",
" self._group_mapping = {\"all\": self.inventory[\"all\"]}\n",
" self.set_all_hosts()\n",
" self.add_hosts_to_groups()\n",
" self.add_common_groups()\n",
" self.close_libvirt_connection()\n",
"\n",
" \n",
" def open_libvirt_connection(self, uri):\n",
" self.conn = libvirt.openReadOnly(uri)\n",
"\n",
" if self.conn == None:\n",
" logger.error('Failed to open connection to {0}'.format(uri))\n",
" exit(1)\n",
"\n",
"\n",
" def close_libvirt_connection(self):\n",
" self.conn.close() \n",
"\n",
"\n",
" def set_target_network(self, target_network):\n",
" self._networks = {network.name(): network for network in self.conn.listAllNetworks()}\n",
" try:\n",
" self._network = self._networks[target_network]\n",
" except KeyError:\n",
" available_networks = \", \".join(self._networks.keys())\n",
" raise ValueError(\"Target network ({0}) does not exist. Available networks: {1}\".format(target_network, available_networks))\n",
"\n",
" \n",
" @property\n",
" def network(self):\n",
" return self._network\n",
" \n",
" def set_all_hosts(self):\n",
" self._hosts = { lease[\"hostname\"]: { \"ansible_host\": lease[\"ipaddr\"] } for lease in self.network.DHCPLeases() }\n",
" self.inventory[\"all\"][\"hosts\"]= self._hosts\n",
" \n",
"\n",
" def add_childgroup(self, child_group, parent_group=\"all\"):\n",
" target_group = self._group_mapping[parent_group]\n",
" if not child_group in target_group[\"children\"]: \n",
" target_group[\"children\"][child_group] = {\n",
" \"hosts\": [],\n",
" \"vars\": {},\n",
" \"children\": {}\n",
" } \n",
" self._group_mapping[child_group] = target_group[\"children\"][child_group]\n",
" \n",
" \n",
" def add_host_to_childgroup(self, hostname, group):\n",
" target_group = self._group_mapping[group] \n",
" target_group[\"hosts\"].append(hostname)\n",
" target_group[\"hosts\"] = list(set(target_group[\"hosts\"]))\n",
" \n",
" def add_hosts_to_groups(self):\n",
" for hostname in self._hosts.keys():\n",
" prefix, group, last_ip_part = hostname.split(self.parser[\"split\"])\n",
" \n",
" self.add_childgroup(group)\n",
" self.add_host_to_childgroup(hostname=hostname, group=group)\n",
" \n",
" def add_common_groups(self):\n",
" for group in self.groups_metadata[\"defaults\"]:\n",
" self.add_childgroup(group)\n",
" for hostname in self._hosts.keys():\n",
" self.add_host_to_childgroup(hostname, group) \n",
" \n",
" def json_dump(self, indent=None):\n",
" return json.dumps(self.inventory, indent=indent)\n",
" \n",
"def dump_inventory(virt_network, libvirt_uri, split_hostname, group_pos, common_groups):\n",
" \n",
" inventory = LibvirtInventory(target_network=virt_network,\n",
" uri=libvirt_uri,\n",
" group_split=split_hostname,\n",
" group_pos=group_pos,\n",
" additional_group=common_groups)\n",
" \n",
" return inventory.json_dump(2)\n",
"\n",
"\n",
"def main():\n",
" \n",
" parser = argparse.ArgumentParser()\n",
"\n",
" parser.add_argument(\"--libvirt_uri\",\n",
" \"-l\",\n",
" default=LIBVIRT_DEFAULT_URI,\n",
" type=str,\n",
" required=False,\n",
" help=\"\"\"URI that python should use to connect to libvirt daemon.\n",
" Default to '{0}'.\n",
"\n",
" Can use several protocols:\n",
" - qemu:/// for local socket connections\n",
" - qemu+tcp:// for raw tcp connections\n",
" - qemu+tls:// for tcp connections over tls encryption\n",
"\n",
" More informations available at https://libvirt.org/remote.html\n",
" \"\"\".format(LIBVIRT_DEFAULT_URI))\n",
"\n",
" parser.add_argument(\"--virt_network\",\n",
" \"-n\",\n",
" default=LIBVIRT_DEFAULT_NETWORK,\n",
" type=str,\n",
" required=False,\n",
" help=\"\"\"Virtual network from which we want to determine running guests.\n",
" This script can only list virtual machines located in same virtual network.\n",
" Default to '{0}'.\n",
" \"\"\".format(LIBVIRT_DEFAULT_NETWORK))\n",
"\n",
"\n",
" parser.add_argument(\"--split_hostname\",\n",
" default=\"-\",\n",
" required=False,\n",
" help=\"\"\"Which pattern should be used to split the found hostnames. \n",
" Default to '{0}'.\n",
" Hostnames are split in order to retrieve group they belong to. \n",
" This option must be used with `split_group_pos` option. \n",
" \"\"\".format(DEFAULT_SPLIT_HOSTNAME)\n",
" )\n",
"\n",
"\n",
" parser.add_argument(\"--split_group_pos\",\n",
" default=1,\n",
" required=False,\n",
" type=int,\n",
" help=\"\"\"After splitting hostname, what is the index of the group name. \n",
" Default to '{0}'.\n",
" This option must be used with `split_hostname` option. \n",
" \"\"\".format(DEFAULT_SPLIT_GROUP_POS)\n",
" )\n",
"\n",
"\n",
" parser.add_argument(\"--common-groups\",\n",
" \"-g\",\n",
" default=\"docker-hosts\",\n",
" required=False,\n",
" type=str,\n",
" help=\"\"\"Comma delimited list of group names to which all guests will belong. \n",
" Default to '{0}'.\n",
" This option must be used with `split_hostname` option. \n",
" \"\"\".format(DEFAULT_COMMON_GROUP)\n",
" )\n",
" \n",
" args = parser.parse_args()\n",
" \n",
" json_inv = dump_inventory(args.virt_network,\n",
" args.libvirt_uri,\n",
" args.split_hostname,\n",
" args.split_group_pos,\n",
" args.common_groups)\n",
" \n",
" print(json_inv)\n",
" \n",
"if __name__ == \"__main__\":\n",
" main()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Demo "
]
},
{
"cell_type": "code",
"execution_count": 168,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"{\r\n",
" \"all\": {\r\n",
" \"hosts\": {\r\n",
" \"test-registry-251\": {\r\n",
" \"ansible_host\": \"192.168.200.251\"\r\n",
" }, \r\n",
" \"test-manager-101\": {\r\n",
" \"ansible_host\": \"192.168.200.101\"\r\n",
" }, \r\n",
" \"test-security-241\": {\r\n",
" \"ansible_host\": \"192.168.200.241\"\r\n",
" }, \r\n",
" \"test-nifi-201\": {\r\n",
" \"ansible_host\": \"192.168.200.201\"\r\n",
" }, \r\n",
" \"test-nifi-202\": {\r\n",
" \"ansible_host\": \"192.168.200.202\"\r\n",
" }, \r\n",
" \"test-kairos-222\": {\r\n",
" \"ansible_host\": \"192.168.200.222\"\r\n",
" }, \r\n",
" \"test-kairos-221\": {\r\n",
" \"ansible_host\": \"192.168.200.221\"\r\n",
" }, \r\n",
" \"test-manager-102\": {\r\n",
" \"ansible_host\": \"192.168.200.102\"\r\n",
" }, \r\n",
" \"test-grafic-231\": {\r\n",
" \"ansible_host\": \"192.168.200.231\"\r\n",
" }, \r\n",
" \"test-mongo-211\": {\r\n",
" \"ansible_host\": \"192.168.200.211\"\r\n",
" }, \r\n",
" \"test-mongo-212\": {\r\n",
" \"ansible_host\": \"192.168.200.212\"\r\n",
" }\r\n",
" }, \r\n",
" \"children\": {\r\n",
" \"nifi\": {\r\n",
" \"hosts\": [\r\n",
" \"test-nifi-201\", \r\n",
" \"test-nifi-202\"\r\n",
" ], \r\n",
" \"children\": {}, \r\n",
" \"vars\": {}\r\n",
" }, \r\n",
" \"mongo\": {\r\n",
" \"hosts\": [\r\n",
" \"test-mongo-212\", \r\n",
" \"test-mongo-211\"\r\n",
" ], \r\n",
" \"children\": {}, \r\n",
" \"vars\": {}\r\n",
" }, \r\n",
" \"kairos\": {\r\n",
" \"hosts\": [\r\n",
" \"test-kairos-222\", \r\n",
" \"test-kairos-221\"\r\n",
" ], \r\n",
" \"children\": {}, \r\n",
" \"vars\": {}\r\n",
" }, \r\n",
" \"grafic\": {\r\n",
" \"hosts\": [\r\n",
" \"test-grafic-231\"\r\n",
" ], \r\n",
" \"children\": {}, \r\n",
" \"vars\": {}\r\n",
" }, \r\n",
" \"manager\": {\r\n",
" \"hosts\": [\r\n",
" \"test-manager-101\", \r\n",
" \"test-manager-102\"\r\n",
" ], \r\n",
" \"children\": {}, \r\n",
" \"vars\": {}\r\n",
" }, \r\n",
" \"registry\": {\r\n",
" \"hosts\": [\r\n",
" \"test-registry-251\"\r\n",
" ], \r\n",
" \"children\": {}, \r\n",
" \"vars\": {}\r\n",
" }, \r\n",
" \"security\": {\r\n",
" \"hosts\": [\r\n",
" \"test-security-241\"\r\n",
" ], \r\n",
" \"children\": {}, \r\n",
" \"vars\": {}\r\n",
" }, \r\n",
" \"docker-hosts\": {\r\n",
" \"hosts\": [\r\n",
" \"test-mongo-212\", \r\n",
" \"test-registry-251\", \r\n",
" \"test-manager-101\", \r\n",
" \"test-security-241\", \r\n",
" \"test-grafic-231\", \r\n",
" \"test-nifi-201\", \r\n",
" \"test-nifi-202\", \r\n",
" \"test-kairos-222\", \r\n",
" \"test-kairos-221\", \r\n",
" \"test-manager-102\", \r\n",
" \"test-mongo-211\"\r\n",
" ], \r\n",
" \"children\": {}, \r\n",
" \"vars\": {}\r\n",
" }\r\n",
" }, \r\n",
" \"vars\": {}\r\n",
" }\r\n",
"}\r\n"
]
}
],
"source": [
"!python libvirt_inventory.py -l \"qemu+tcp://root@10.68.150.240/system\" -n \"testing_datalab_network\" \\\n",
" --split_hostname - \\\n",
" --split_group_pos 1 \\\n",
" --common-groups docker-hosts"
]
},
{
"cell_type": "code",
"execution_count": 169,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"{\r\n",
" \"all\": {\r\n",
" \"hosts\": {\r\n",
" \"test-registry-251\": {\r\n",
" \"ansible_host\": \"192.168.200.251\"\r\n",
" }, \r\n",
" \"test-manager-101\": {\r\n",
" \"ansible_host\": \"192.168.200.101\"\r\n",
" }, \r\n",
" \"test-security-241\": {\r\n",
" \"ansible_host\": \"192.168.200.241\"\r\n",
" }, \r\n",
" \"test-nifi-201\": {\r\n",
" \"ansible_host\": \"192.168.200.201\"\r\n",
" }, \r\n",
" \"test-nifi-202\": {\r\n",
" \"ansible_host\": \"192.168.200.202\"\r\n",
" }, \r\n",
" \"test-kairos-222\": {\r\n",
" \"ansible_host\": \"192.168.200.222\"\r\n",
" }, \r\n",
" \"test-kairos-221\": {\r\n",
" \"ansible_host\": \"192.168.200.221\"\r\n",
" }, \r\n",
" \"test-manager-102\": {\r\n",
" \"ansible_host\": \"192.168.200.102\"\r\n",
" }, \r\n",
" \"test-grafic-231\": {\r\n",
" \"ansible_host\": \"192.168.200.231\"\r\n",
" }, \r\n",
" \"test-mongo-211\": {\r\n",
" \"ansible_host\": \"192.168.200.211\"\r\n",
" }, \r\n",
" \"test-mongo-212\": {\r\n",
" \"ansible_host\": \"192.168.200.212\"\r\n",
" }\r\n",
" }, \r\n",
" \"children\": {\r\n",
" \"nifi\": {\r\n",
" \"hosts\": [\r\n",
" \"test-nifi-201\", \r\n",
" \"test-nifi-202\"\r\n",
" ], \r\n",
" \"children\": {}, \r\n",
" \"vars\": {}\r\n",
" }, \r\n",
" \"mongo\": {\r\n",
" \"hosts\": [\r\n",
" \"test-mongo-212\", \r\n",
" \"test-mongo-211\"\r\n",
" ], \r\n",
" \"children\": {}, \r\n",
" \"vars\": {}\r\n",
" }, \r\n",
" \"kairos\": {\r\n",
" \"hosts\": [\r\n",
" \"test-kairos-222\", \r\n",
" \"test-kairos-221\"\r\n",
" ], \r\n",
" \"children\": {}, \r\n",
" \"vars\": {}\r\n",
" }, \r\n",
" \"grafic\": {\r\n",
" \"hosts\": [\r\n",
" \"test-grafic-231\"\r\n",
" ], \r\n",
" \"children\": {}, \r\n",
" \"vars\": {}\r\n",
" }, \r\n",
" \"manager\": {\r\n",
" \"hosts\": [\r\n",
" \"test-manager-101\", \r\n",
" \"test-manager-102\"\r\n",
" ], \r\n",
" \"children\": {}, \r\n",
" \"vars\": {}\r\n",
" }, \r\n",
" \"registry\": {\r\n",
" \"hosts\": [\r\n",
" \"test-registry-251\"\r\n",
" ], \r\n",
" \"children\": {}, \r\n",
" \"vars\": {}\r\n",
" }, \r\n",
" \"security\": {\r\n",
" \"hosts\": [\r\n",
" \"test-security-241\"\r\n",
" ], \r\n",
" \"children\": {}, \r\n",
" \"vars\": {}\r\n",
" }, \r\n",
" \"docker-hosts\": {\r\n",
" \"hosts\": [\r\n",
" \"test-mongo-212\", \r\n",
" \"test-registry-251\", \r\n",
" \"test-manager-101\", \r\n",
" \"test-security-241\", \r\n",
" \"test-grafic-231\", \r\n",
" \"test-nifi-201\", \r\n",
" \"test-nifi-202\", \r\n",
" \"test-kairos-222\", \r\n",
" \"test-kairos-221\", \r\n",
" \"test-manager-102\", \r\n",
" \"test-mongo-211\"\r\n",
" ], \r\n",
" \"children\": {}, \r\n",
" \"vars\": {}\r\n",
" }\r\n",
" }, \r\n",
" \"vars\": {}\r\n",
" }\r\n",
"}\r\n"
]
}
],
"source": [
"!python libvirt_inventory.py -l \"qemu+tcp://root@10.68.150.240/system\" -n \"testing_datalab_network\" "
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.5.2"
},
"varInspector": {
"cols": {
"lenName": 16,
"lenType": 16,
"lenVar": 40
},
"kernels_config": {
"python": {
"delete_cmd_postfix": "",
"delete_cmd_prefix": "del ",
"library": "var_list.py",
"varRefreshCmd": "print(var_dic_list())"
},
"r": {
"delete_cmd_postfix": ") ",
"delete_cmd_prefix": "rm(",
"library": "var_list.r",
"varRefreshCmd": "cat(var_dic_list()) "
}
},
"types_to_exclude": [
"module",
"function",
"builtin_function_or_method",
"instance",
"_Feature"
],
"window_display": false
}
},
"nbformat": 4,
"nbformat_minor": 2
}
{
"cells": [
{
"cell_type": "markdown",
"metadata": {
"ExecuteTime": {
"end_time": "2018-10-23T08:58:06.587633Z",
"start_time": "2018-10-23T08:58:06.583670Z"
}
},
"source": [
"# Ansible: \n",
"\n",
"## Dynamic provisioning with libvirt \n",
"\n",
"Ansible allows you to execute a python script to generate inventory at runtime. \n",
"\n",
"This would be really useful when creating virtual machines prior to running playbooks. We do not always knwow the name of virtual machines, or their IP adress.\n",
"\n",
"The expected JSON output should use a similar schema:\n",
"\n",
"```json\n",
"{\n",
" \"virtual_machines\": {\n",
" \"hosts\": [\"test-manager-101\", \"test-kairos-221\"],\n",
" \"vars\": {\n",
" \"ansible_python_interpreter\": \"/usr/bin/python\",\n",
" \"ansible_user\": \"gcharbon\"\n",
" },\n",
" \"children\": [\"managers\",\"kairosdb\",\"centos-hosts\"]\n",
" },\n",
" \"centos-hosts\": {\n",
" \"hosts\": [\"test-kairos-221\"]\n",
" },\n",
" \"kairosdb\": {\n",
" \"hosts\": [\"test-kairos-221\"],\n",
" \"vars\": {\n",
" \"services\": [\"kairosdb\",\"cassandra\"],\n",
" \"data_directory\": \"/data\"\n",
" }\n",
" },\n",
" \"managers\": {\n",
" \"hosts\": [\"test-manager-101\"],\n",
" \"vars\": {\n",
" \"services\": [\"nginx-proxy\",\"docker-gen\",\"nginx-letsencrypt\", \"portainer\"],\n",
" \"domain\": \"datalab.integmonsoon.com\"\n",
" }\n",
" },\n",
" \"_meta\": {\n",
" \"hostvars\": {\n",
" \"test-manager-101\": {\n",
" \"ansible_host\": \"1192.168.100.101\"\n",
" } ,\n",
" \"test-kairos-221\": {\n",
" \"ansible_host\": \"192.168.100.201\"\n",
" }\n",
" }\n",
" }\n",
"}\n",
"```\n",
"\n",
"\n",
"Below are a list of parameters that can be given at runtime"
]
},
{
"cell_type": "code",
"execution_count": 78,
"metadata": {
"ExecuteTime": {
"end_time": "2018-10-23T09:01:00.850127Z",
"start_time": "2018-10-23T09:01:00.841123Z"
}
},
"outputs": [],
"source": [
"libvirt_uri = 'qemu+tcp://root@10.68.150.240/system'\n",
"# Default to: libvirt_uri = 'qemu:///system'\n",
"\n",
"virt_network = \"testing_datalab_network\"\n",
"# Default ot virt_network = \"testing_datalab\"\n",
"\n",
"split_hostname = \"-\"\n",
"# Default to split_hostname = \"-\"\n",
"\n",
"group_pos = 1\n",
"# Default to group_pos = 1\n",
"\n",
"\n",
"common_group = \"docker-hosts,centos-hosts,virtual_machines\"\n",
"# Default to \"docker-hosts\""
]
},
{
"cell_type": "code",
"execution_count": 79,
"metadata": {},
"outputs": [],
"source": [
"arg_values = [libvirt_uri, virt_network, split_hostname, group_pos, common_group]"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"A few imports are needed to work with `libvirt` on remote system and JSON serialization.\n",
"\n",
"We want our code to be compatible between python2 and python3. \n",
"\n",
"We also want to use a logger if it exists"
]
},
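  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Since a dynamic inventory script must print pure JSON on stdout, any log output should go to stderr. If logging is ever needed for standalone debugging, a minimal sketch (our addition, not part of the original script) could be:\n",
    "\n",
    "```python\n",
    "import logging\n",
    "\n",
    "# logging.basicConfig attaches a handler that writes to stderr by default,\n",
    "# so enabling it does not pollute the JSON that Ansible reads from stdout.\n",
    "logging.basicConfig(level=logging.INFO)\n",
    "```"
   ]
  },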
{
"cell_type": "code",
"execution_count": 80,
"metadata": {
"ExecuteTime": {
"end_time": "2018-10-23T09:02:25.207310Z",
"start_time": "2018-10-23T09:02:24.734285Z"
}
},
"outputs": [],
"source": [
"from __future__ import print_function\n",
"\n",
"import libvirt\n",
"import sys\n",
"import json \n",
"\n",
"import logging\n",
"\n",
"logger = logging.getLogger(__name__)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### We can define the `LibvirtInventory` class"
]
},
{
"cell_type": "code",
"execution_count": 84,
"metadata": {},
"outputs": [],
"source": [
"LIBVIRT_DEFAULT_URI = 'qemu:///system'\n",
"\n",
"LIBVIRT_DEFAULT_NETWORK = 'testing_datalab'\n",
"\n",
"DEFAULT_SPLIT_HOSTNAME = '-'\n",
"\n",
"DEFAULT_SPLIT_GROUP_POS = 1\n",
"\n",
"DEFAULT_COMMON_GROUP = 'docker-hosts'\n",
"\n",
"\n",
"class LibvirtInventory(object):\n",
" \n",
" def __init__(self,\n",
" target_network=LIBVIRT_DEFAULT_NETWORK,\n",
" uri=LIBVIRT_DEFAULT_URI,\n",
" group_split=DEFAULT_SPLIT_HOSTNAME,\n",
" group_pos=DEFAULT_SPLIT_GROUP_POS,\n",
" additional_group=DEFAULT_COMMON_GROUP):\n",
" \n",
" self.open_libvirt_connection(uri)\n",
" self.parser = { \n",
" \"split\": group_split,\n",
" \"pos\": group_pos,\n",
" }\n",
" self.common_groups = additional_group.split(\",\")\n",
" self.set_target_network(target_network)\n",
" self.inventory = {}\n",
" self.set_all_hosts_and_meta()\n",
" self.add_all_hosts_to_groups()\n",
" self.add_all_common_groups()\n",
" self.close_libvirt_connection()\n",
"\n",
" \n",
" def open_libvirt_connection(self, uri):\n",
" self.conn = libvirt.openReadOnly(uri)\n",
"\n",
" if self.conn == None:\n",
" logger.error('Failed to open connection to {0}'.format(uri))\n",
" exit(1)\n",
" else:\n",
" logger.info(\"Connected to libvirt on remote host: {0}\".format(uri))\n",
"\n",
" def close_libvirt_connection(self):\n",
" self.conn.close() \n",
"\n",
"\n",
" def set_target_network(self, target_network):\n",
" self._networks = {network.name(): network for network in self.conn.listAllNetworks()}\n",
" try:\n",
" self._network = self._networks[target_network]\n",
" except KeyError:\n",
" available_networks = \", \".join(self._networks.keys())\n",
" raise ValueError(\"Target network ({0}) does not exist. Available networks: {1}\".format(target_network, available_networks))\n",
"\n",
" \n",
" @property\n",
" def network(self):\n",
" return self._network\n",
" \n",
" def set_all_hosts_and_meta(self):\n",
" self._hosts = { lease[\"hostname\"]: { \"ansible_host\": lease[\"ipaddr\"] } for lease in self.network.DHCPLeases() }\n",
" \n",
" self.add_group(\"all\")\n",
" \n",
" self.inventory[\"all\"][\"hosts\"] = self.hosts = list(self._hosts.keys())\n",
" \n",
" self.inventory[\"_meta\"]= {\"hostvars\": self._hosts}\n",
" \n",
" def add_group(self, group):\n",
" \"\"\" Add a group to inventory \"\"\"\n",
" if group not in self.inventory:\n",
" self.inventory[group] = dict()\n",
" \n",
" def get_group(self, group):\n",
" \"\"\" Return a group as dictionary from inventory \"\"\"\n",
" return self.inventory[group]\n",
" \n",
" \n",
" def add_child(self, child, group):\n",
" \"\"\" Add a child group (string) to group in inventory\"\"\"\n",
" if not \"children\" in self.get_group(group):\n",
" self.inventory[group][\"children\"] = [child]\n",
" else:\n",
" self.inventory[group][\"children\"] = list(set([child] + self.inventory[group][\"children\"]))\n",
" \n",
" \n",
" def add_childgroup(self, child_group, parent_group=\"all\"):\n",
" \"\"\" Add a group and mark it as child of another group in inventory \"\"\"\n",
" self.add_group(child_group)\n",
" self.add_child(child_group, parent_group)\n",
" \n",
" \n",
" def add_children(self, children, group):\n",
" \"\"\" Add list of children to group in inventory\"\"\"\n",
" if not \"children\" in self.get_group(group):\n",
" self.inventory[group][\"children\"] = list(children)\n",
" else:\n",
" self.inventory[group][\"children\"] = list(set(children + self.inventory[group][\"children\"]))\n",
" \n",
"\n",
" def add_groupvars(self, vars_dict, group):\n",
" \"\"\" Takes a dictionary as argument and add the keys and values as group variables\"\"\"\n",
" if \"vars\" not in self.get_group(group):\n",
" self.inventory[group][\"vars\"] = vars_dict\n",
" else:\n",
" self.inventory[group][\"vars\"].update(vars_dict)\n",
" \n",
" \n",
" def add_host(self, hostname, group, create=True):\n",
" \"\"\" Add one host (string) to a group. \n",
" If create is True (default) then the group will be automatically created if it does not exist yet.\n",
" Else it will fail on KeyError\n",
" \"\"\"\n",
" if create:\n",
" self.add_group(group)\n",
" \n",
" if \"hosts\" not in self.inventory[group]:\n",
" self.inventory[group][\"hosts\"] = [hostname]\n",
" else:\n",
" self.inventory[group][\"hosts\"] = list(set([hostname] + self.inventory[group][\"hosts\"]))\n",
" \n",
" def add_hosts(self, hostnames, group, create=True):\n",
" \"\"\" Add several hosts (list) to a group. \n",
" If create is True (default) then the group will be automatically created if it does not exist yet.\n",
" Else it will fail on KeyError\n",
" \"\"\"\n",
" if create:\n",
" self.add_group(group)\n",
" self.inventory[group][\"hosts\"] = list(set(hostnames + self.inventory[group][\"hosts\"]))\n",
" \n",
" def add_all_hosts_to_groups(self):\n",
" for hostname in self.hosts:\n",
" prefix, group, last_ip_part = hostname.split(self.parser[\"split\"])\n",
" self.add_host(hostname, group)\n",
" \n",
" def add_all_common_groups(self):\n",
" for group in self.common_groups:\n",
" self.add_common_group(group)\n",
"\n",
" def add_common_group(self, common_group):\n",
" self.add_childgroup(common_group)\n",
" for group in self.inventory:\n",
" if group in [\"_meta\", \"all\"] + self.common_groups:\n",
" continue\n",
" self.add_child(group, common_group) \n",
" \n",
" def json_dump(self, indent=None):\n",
" return json.dumps(self.inventory, indent=indent)\n",
" \n",
" \n",
"def dump_inventory(libvirt_uri, virt_network, split_hostname, group_pos, common_groups):\n",
" \n",
" inventory = LibvirtInventory(target_network=virt_network,\n",
" uri=libvirt_uri,\n",
" group_split=split_hostname,\n",
" group_pos=group_pos,\n",
" additional_group=common_groups)\n",
" \n",
" return inventory.json_dump(2)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Let's test it"
]
},
{
"cell_type": "code",
"execution_count": 85,
"metadata": {},
"outputs": [],
"source": [
"inventory = dump_inventory(*arg_values)"
]
},
{
"cell_type": "code",
"execution_count": 86,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"{\n",
" \"_meta\": {\n",
" \"hostvars\": {\n",
" \"test-kairos-222\": {\n",
" \"ansible_host\": \"192.168.200.222\"\n",
" },\n",
" \"test-grafic-231\": {\n",
" \"ansible_host\": \"192.168.200.231\"\n",
" },\n",
" \"test-kairos-221\": {\n",
" \"ansible_host\": \"192.168.200.221\"\n",
" }\n",
" }\n",
" },\n",
" \"kairos\": {\n",
" \"hosts\": [\n",
" \"test-kairos-222\",\n",
" \"test-kairos-221\"\n",
" ]\n",
" },\n",
" \"centos-hosts\": {\n",
" \"children\": [\n",
" \"kairos\",\n",
" \"grafic\"\n",
" ]\n",
" },\n",
" \"grafic\": {\n",
" \"hosts\": [\n",
" \"test-grafic-231\"\n",
" ]\n",
" },\n",
" \"virtual_machines\": {\n",
" \"children\": [\n",
" \"kairos\",\n",
" \"grafic\"\n",
" ]\n",
" },\n",
" \"docker-hosts\": {\n",
" \"children\": [\n",
" \"kairos\",\n",
" \"grafic\"\n",
" ]\n",
" },\n",
" \"all\": {\n",
" \"children\": [\n",
" \"centos-hosts\",\n",
" \"docker-hosts\",\n",
" \"virtual_machines\"\n",
" ],\n",
" \"hosts\": [\n",
" \"test-kairos-222\",\n",
" \"test-grafic-231\",\n",
" \"test-kairos-221\"\n",
" ]\n",
" }\n",
"}\n"
]
}
],
"source": [
"print(inventory)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### And finally define a small script"
]
},
{
"cell_type": "code",
"execution_count": 167,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"usage: ipykernel_launcher.py [-h] [--libvirt_uri LIBVIRT_URI]\n",
" [--virt_network VIRT_NETWORK]\n",
" [--split_hostname SPLIT_HOSTNAME]\n",
" [--split_group_pos SPLIT_GROUP_POS]\n",
" [--common-groups COMMON_GROUPS]\n",
"ipykernel_launcher.py: error: unrecognized arguments: -f /home/gcharbon/.local/share/jupyter/runtime/kernel-053cf074-f981-497d-b5fc-9775b16ce427.json\n"
]
},
{
"ename": "SystemExit",
"evalue": "2",
"output_type": "error",
"traceback": [
"An exception has occurred, use %tb to see the full traceback.\n",
"\u001b[0;31mSystemExit\u001b[0m\u001b[0;31m:\u001b[0m 2\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/usr/local/lib/python3.5/dist-packages/IPython/core/interactiveshell.py:2969: UserWarning: To exit: use 'exit', 'quit', or Ctrl-D.\n",
" warn(\"To exit: use 'exit', 'quit', or Ctrl-D.\", stacklevel=1)\n"
]
}
],
"source": [
"from __future__ import print_function\n",
"\n",
"import argparse\n",
"import libvirt\n",
"import json \n",
"import logging\n",
"\n",
"logger = logging.getLogger(__name__)\n",
"\n",
"LIBVIRT_DEFAULT_URI = 'qemu:///system'\n",
"\n",
"LIBVIRT_DEFAULT_NETWORK = 'testing_datalab'\n",
"\n",
"DEFAULT_SPLIT_HOSTNAME = '-'\n",
"\n",
"DEFAULT_SPLIT_GROUP_POS = 1\n",
"\n",
"DEFAULT_COMMON_GROUP = 'docker-hosts'\n",
"\n",
"class LibvirtInventory(object):\n",
" \n",
" def __init__(self,\n",
" target_network=LIBVIRT_DEFAULT_NETWORK,\n",
" uri=LIBVIRT_DEFAULT_URI,\n",
" group_split=DEFAULT_SPLIT_HOSTNAME,\n",
" group_pos=DEFAULT_SPLIT_GROUP_POS,\n",
" additional_group=DEFAULT_COMMON_GROUP):\n",
" \n",
" self.open_libvirt_connection(uri)\n",
" self.groups_metadata = { \n",
" \"split\": group_split,\n",
" \"pos\": group_pos,\n",
" \"defaults\": additional_group.split(\",\")\n",
" }\n",
" self.set_target_network(target_network)\n",
" self.inventory = {\n",
" \"all\": \n",
" { \"hosts\":{}, \n",
" \"vars\":{}, \n",
" \"children\":{}\n",
" }\n",
" }\n",
" self._group_mapping = {\"all\": self.inventory[\"all\"]}\n",
" self.set_all_hosts()\n",
" self.add_hosts_to_groups()\n",
" self.add_common_groups()\n",
" self.close_libvirt_connection()\n",
"\n",
" \n",
" def open_libvirt_connection(self, uri):\n",
" self.conn = libvirt.openReadOnly(uri)\n",
"\n",
" if self.conn == None:\n",
" logger.error('Failed to open connection to {0}'.format(uri))\n",
" exit(1)\n",
"\n",
"\n",
" def close_libvirt_connection(self):\n",
" self.conn.close() \n",
"\n",
"\n",
" def set_target_network(self, target_network):\n",
" self._networks = {network.name(): network for network in self.conn.listAllNetworks()}\n",
" try:\n",
" self._network = self._networks[target_network]\n",
" except KeyError:\n",
" available_networks = \", \".join(self._networks.keys())\n",
" raise ValueError(\"Target network ({0}) does not exist. Available networks: {1}\".format(target_network, available_networks))\n",
"\n",
" \n",
" @property\n",
" def network(self):\n",
" return self._network\n",
" \n",
" def set_all_hosts(self):\n",
" self._hosts = { lease[\"hostname\"]: { \"ansible_host\": lease[\"ipaddr\"] } for lease in self.network.DHCPLeases() }\n",
" self.inventory[\"all\"][\"hosts\"]= self._hosts\n",
" \n",
"\n",
" def add_childgroup(self, child_group, parent_group=\"all\"):\n",
" target_group = self._group_mapping[parent_group]\n",
" if not child_group in target_group[\"children\"]: \n",
" target_group[\"children\"][child_group] = {\n",
" \"hosts\": [],\n",
" \"vars\": {},\n",
" \"children\": {}\n",
" } \n",
" self._group_mapping[child_group] = target_group[\"children\"][child_group]\n",
" \n",
" \n",
" def add_host_to_childgroup(self, hostname, group):\n",
" target_group = self._group_mapping[group] \n",
" target_group[\"hosts\"].append(hostname)\n",
" target_group[\"hosts\"] = list(set(target_group[\"hosts\"]))\n",
" \n",
" def add_hosts_to_groups(self):\n",
" for hostname in self._hosts.keys():\n",
" prefix, group, last_ip_part = hostname.split(self.parser[\"split\"])\n",
" \n",
" self.add_childgroup(group)\n",
" self.add_host_to_childgroup(hostname=hostname, group=group)\n",
" \n",
" def add_common_groups(self):\n",
" for group in self.groups_metadata[\"defaults\"]:\n",
" self.add_childgroup(group)\n",
" for hostname in self._hosts.keys():\n",
" self.add_host_to_childgroup(hostname, group) \n",
" \n",
" def json_dump(self, indent=None):\n",
" return json.dumps(self.inventory, indent=indent)\n",
" \n",
"def dump_inventory(virt_network, libvirt_uri, split_hostname, group_pos, common_groups):\n",
" \n",
" inventory = LibvirtInventory(target_network=virt_network,\n",
" uri=libvirt_uri,\n",
" group_split=split_hostname,\n",
" group_pos=group_pos,\n",
" additional_group=common_groups)\n",
" \n",
" return inventory.json_dump(2)\n",
"\n",
"\n",
"def main():\n",
" \n",
" parser = argparse.ArgumentParser()\n",
"\n",
" parser.add_argument(\"--libvirt_uri\",\n",
" \"-l\",\n",
" default=LIBVIRT_DEFAULT_URI,\n",
" type=str,\n",
" required=False,\n",
" help=\"\"\"URI that python should use to connect to libvirt daemon.\n",
" Default to '{0}'.\n",
"\n",
" Can use several protocols:\n",
" - qemu:/// for local socket connections\n",
" - qemu+tcp:// for raw tcp connections\n",
" - qemu+tls:// for tcp connections over tls encryption\n",
"\n",
" More informations available at https://libvirt.org/remote.html\n",
" \"\"\".format(LIBVIRT_DEFAULT_URI))\n",
"\n",
" parser.add_argument(\"--virt_network\",\n",
" \"-n\",\n",
" default=LIBVIRT_DEFAULT_NETWORK,\n",
" type=str,\n",
" required=False,\n",
" help=\"\"\"Virtual network from which we want to determine running guests.\n",
" This script can only list virtual machines located in same virtual network.\n",
" Default to '{0}'.\n",
" \"\"\".format(LIBVIRT_DEFAULT_NETWORK))\n",
"\n",
"\n",
" parser.add_argument(\"--split_hostname\",\n",
" default=\"-\",\n",
" required=False,\n",
" help=\"\"\"Which pattern should be used to split the found hostnames. \n",
" Default to '{0}'.\n",
" Hostnames are split in order to retrieve group they belong to. \n",
" This option must be used with `split_group_pos` option. \n",
" \"\"\".format(DEFAULT_SPLIT_HOSTNAME)\n",
" )\n",
"\n",
"\n",
" parser.add_argument(\"--split_group_pos\",\n",
" default=1,\n",
" required=False,\n",
" type=int,\n",
" help=\"\"\"After splitting hostname, what is the index of the group name. \n",
" Default to '{0}'.\n",
" This option must be used with `split_hostname` option. \n",
" \"\"\".format(DEFAULT_SPLIT_GROUP_POS)\n",
" )\n",
"\n",
"\n",
" parser.add_argument(\"--common-groups\",\n",
" \"-g\",\n",
" default=\"docker-hosts\",\n",
" required=False,\n",
" type=str,\n",
" help=\"\"\"Comma delimited list of group names to which all guests will belong. \n",
" Default to '{0}'.\n",
" This option must be used with `split_hostname` option. \n",
" \"\"\".format(DEFAULT_COMMON_GROUP)\n",
" )\n",
" \n",
" args = parser.parse_args()\n",
" \n",
" json_inv = dump_inventory(args.virt_network,\n",
" args.libvirt_uri,\n",
" args.split_hostname,\n",
" args.split_group_pos,\n",
" args.common_groups)\n",
" \n",
" print(json_inv)\n",
" \n",
"if __name__ == \"__main__\":\n",
" main()"
]
},
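  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "One caveat (our observation, not from the original script): Ansible invokes a dynamic inventory script with `--list` (and with `--host <hostname>` when the `--list` output carries no `_meta` key). The parser above does not declare these flags, so argparse would reject them, just like it rejected Jupyter's `-f` argument in the output above. A minimal sketch of the missing declarations could be:\n",
    "\n",
    "```python\n",
    "# Hypothetical additions so that Ansible's standard invocation is accepted:\n",
    "parser.add_argument(\"--list\", action=\"store_true\",\n",
    "                    help=\"Dump the whole inventory as JSON (Ansible convention)\")\n",
    "parser.add_argument(\"--host\", default=None,\n",
    "                    help=\"Unused: host variables are already returned under _meta.hostvars\")\n",
    "```"
   ]
  },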
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Demo "
]
},
{
"cell_type": "code",
"execution_count": 168,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"{\r\n",
" \"all\": {\r\n",
" \"hosts\": {\r\n",
" \"test-registry-251\": {\r\n",
" \"ansible_host\": \"192.168.200.251\"\r\n",
" }, \r\n",
" \"test-manager-101\": {\r\n",
" \"ansible_host\": \"192.168.200.101\"\r\n",
" }, \r\n",
" \"test-security-241\": {\r\n",
" \"ansible_host\": \"192.168.200.241\"\r\n",
" }, \r\n",
" \"test-nifi-201\": {\r\n",
" \"ansible_host\": \"192.168.200.201\"\r\n",
" }, \r\n",
" \"test-nifi-202\": {\r\n",
" \"ansible_host\": \"192.168.200.202\"\r\n",
" }, \r\n",
" \"test-kairos-222\": {\r\n",
" \"ansible_host\": \"192.168.200.222\"\r\n",
" }, \r\n",
" \"test-kairos-221\": {\r\n",
" \"ansible_host\": \"192.168.200.221\"\r\n",
" }, \r\n",
" \"test-manager-102\": {\r\n",
" \"ansible_host\": \"192.168.200.102\"\r\n",
" }, \r\n",
" \"test-grafic-231\": {\r\n",
" \"ansible_host\": \"192.168.200.231\"\r\n",
" }, \r\n",
" \"test-mongo-211\": {\r\n",
" \"ansible_host\": \"192.168.200.211\"\r\n",
" }, \r\n",
" \"test-mongo-212\": {\r\n",
" \"ansible_host\": \"192.168.200.212\"\r\n",
" }\r\n",
" }, \r\n",
" \"children\": {\r\n",
" \"nifi\": {\r\n",
" \"hosts\": [\r\n",
" \"test-nifi-201\", \r\n",
" \"test-nifi-202\"\r\n",
" ], \r\n",
" \"children\": {}, \r\n",
" \"vars\": {}\r\n",
" }, \r\n",
" \"mongo\": {\r\n",
" \"hosts\": [\r\n",
" \"test-mongo-212\", \r\n",
" \"test-mongo-211\"\r\n",
" ], \r\n",
" \"children\": {}, \r\n",
" \"vars\": {}\r\n",
" }, \r\n",
" \"kairos\": {\r\n",
" \"hosts\": [\r\n",
" \"test-kairos-222\", \r\n",
" \"test-kairos-221\"\r\n",
" ], \r\n",
" \"children\": {}, \r\n",
" \"vars\": {}\r\n",
" }, \r\n",
" \"grafic\": {\r\n",
" \"hosts\": [\r\n",
" \"test-grafic-231\"\r\n",
" ], \r\n",
" \"children\": {}, \r\n",
" \"vars\": {}\r\n",
" }, \r\n",
" \"manager\": {\r\n",
" \"hosts\": [\r\n",
" \"test-manager-101\", \r\n",
" \"test-manager-102\"\r\n",
" ], \r\n",
" \"children\": {}, \r\n",
" \"vars\": {}\r\n",
" }, \r\n",
" \"registry\": {\r\n",
" \"hosts\": [\r\n",
" \"test-registry-251\"\r\n",
" ], \r\n",
" \"children\": {}, \r\n",
" \"vars\": {}\r\n",
" }, \r\n",
" \"security\": {\r\n",
" \"hosts\": [\r\n",
" \"test-security-241\"\r\n",
" ], \r\n",
" \"children\": {}, \r\n",
" \"vars\": {}\r\n",
" }, \r\n",
" \"docker-hosts\": {\r\n",
" \"hosts\": [\r\n",
" \"test-mongo-212\", \r\n",
" \"test-registry-251\", \r\n",
" \"test-manager-101\", \r\n",
" \"test-security-241\", \r\n",
" \"test-grafic-231\", \r\n",
" \"test-nifi-201\", \r\n",
" \"test-nifi-202\", \r\n",
" \"test-kairos-222\", \r\n",
" \"test-kairos-221\", \r\n",
" \"test-manager-102\", \r\n",
" \"test-mongo-211\"\r\n",
" ], \r\n",
" \"children\": {}, \r\n",
" \"vars\": {}\r\n",
" }\r\n",
" }, \r\n",
" \"vars\": {}\r\n",
" }\r\n",
"}\r\n"
]
}
],
"source": [
"!python libvirt_inventory.py -l \"qemu+tcp://root@10.68.150.240/system\" -n \"testing_datalab_network\" \\\n",
" --split_hostname - \\\n",
" --split_group_pos 1 \\\n",
" --common-groups docker-hosts"
]
},
{
"cell_type": "code",
"execution_count": 169,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"{\r\n",
" \"all\": {\r\n",
" \"hosts\": {\r\n",
" \"test-registry-251\": {\r\n",
" \"ansible_host\": \"192.168.200.251\"\r\n",
" }, \r\n",
" \"test-manager-101\": {\r\n",
" \"ansible_host\": \"192.168.200.101\"\r\n",
" }, \r\n",
" \"test-security-241\": {\r\n",
" \"ansible_host\": \"192.168.200.241\"\r\n",
" }, \r\n",
" \"test-nifi-201\": {\r\n",
" \"ansible_host\": \"192.168.200.201\"\r\n",
" }, \r\n",
" \"test-nifi-202\": {\r\n",
" \"ansible_host\": \"192.168.200.202\"\r\n",
" }, \r\n",
" \"test-kairos-222\": {\r\n",
" \"ansible_host\": \"192.168.200.222\"\r\n",
" }, \r\n",
" \"test-kairos-221\": {\r\n",
" \"ansible_host\": \"192.168.200.221\"\r\n",
" }, \r\n",
" \"test-manager-102\": {\r\n",
" \"ansible_host\": \"192.168.200.102\"\r\n",
" }, \r\n",
" \"test-grafic-231\": {\r\n",
" \"ansible_host\": \"192.168.200.231\"\r\n",
" }, \r\n",
" \"test-mongo-211\": {\r\n",
" \"ansible_host\": \"192.168.200.211\"\r\n",
" }, \r\n",
" \"test-mongo-212\": {\r\n",
" \"ansible_host\": \"192.168.200.212\"\r\n",
" }\r\n",
" }, \r\n",
" \"children\": {\r\n",
" \"nifi\": {\r\n",
" \"hosts\": [\r\n",
" \"test-nifi-201\", \r\n",
" \"test-nifi-202\"\r\n",
" ], \r\n",
" \"children\": {}, \r\n",
" \"vars\": {}\r\n",
" }, \r\n",
" \"mongo\": {\r\n",
" \"hosts\": [\r\n",
" \"test-mongo-212\", \r\n",
" \"test-mongo-211\"\r\n",
" ], \r\n",
" \"children\": {}, \r\n",
" \"vars\": {}\r\n",
" }, \r\n",
" \"kairos\": {\r\n",
" \"hosts\": [\r\n",
" \"test-kairos-222\", \r\n",
" \"test-kairos-221\"\r\n",
" ], \r\n",
" \"children\": {}, \r\n",
" \"vars\": {}\r\n",
" }, \r\n",
" \"grafic\": {\r\n",
" \"hosts\": [\r\n",
" \"test-grafic-231\"\r\n",
" ], \r\n",
" \"children\": {}, \r\n",
" \"vars\": {}\r\n",
" }, \r\n",
" \"manager\": {\r\n",
" \"hosts\": [\r\n",
" \"test-manager-101\", \r\n",
" \"test-manager-102\"\r\n",
" ], \r\n",
" \"children\": {}, \r\n",
" \"vars\": {}\r\n",
" }, \r\n",
" \"registry\": {\r\n",
" \"hosts\": [\r\n",
" \"test-registry-251\"\r\n",
" ], \r\n",
" \"children\": {}, \r\n",
" \"vars\": {}\r\n",
" }, \r\n",
" \"security\": {\r\n",
" \"hosts\": [\r\n",
" \"test-security-241\"\r\n",
" ], \r\n",
" \"children\": {}, \r\n",
" \"vars\": {}\r\n",
" }, \r\n",
" \"docker-hosts\": {\r\n",
" \"hosts\": [\r\n",
" \"test-mongo-212\", \r\n",
" \"test-registry-251\", \r\n",
" \"test-manager-101\", \r\n",
" \"test-security-241\", \r\n",
" \"test-grafic-231\", \r\n",
" \"test-nifi-201\", \r\n",
" \"test-nifi-202\", \r\n",
" \"test-kairos-222\", \r\n",
" \"test-kairos-221\", \r\n",
" \"test-manager-102\", \r\n",
" \"test-mongo-211\"\r\n",
" ], \r\n",
" \"children\": {}, \r\n",
" \"vars\": {}\r\n",
" }\r\n",
" }, \r\n",
" \"vars\": {}\r\n",
" }\r\n",
"}\r\n"
]
}
],
"source": [
"!python libvirt_inventory.py -l \"qemu+tcp://root@10.68.150.240/system\" -n \"testing_datalab_network\" "
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.5.2"
},
"varInspector": {
"cols": {
"lenName": 16,
"lenType": 16,
"lenVar": 40
},
"kernels_config": {
"python": {
"delete_cmd_postfix": "",
"delete_cmd_prefix": "del ",
"library": "var_list.py",
"varRefreshCmd": "print(var_dic_list())"
},
"r": {
"delete_cmd_postfix": ") ",
"delete_cmd_prefix": "rm(",
"library": "var_list.r",
"varRefreshCmd": "cat(var_dic_list()) "
}
},
"types_to_exclude": [
"module",
"function",
"builtin_function_or_method",
"instance",
"_Feature"
],
"window_display": false
}
},
"nbformat": 4,
"nbformat_minor": 2
}
- hosts: all
tasks:
- name: "Ensure local users exist and setup ssh configuration for each users"
import_role:
name: users
vars:
users: "{{ local_users }}"
become: true
- hosts: centos_hosts
tasks:
- name: Install common packages with yum
import_role:
name: yum
vars:
update_dist: true
upgrade_dist: true
        packages:
          - vim
          - dos2unix
          - git
          - curl
          - wget
remove_packages: []
upgrade_packages: []
GPG_keys: []
yum_repositories: []
become: true
- hosts: debian_hosts
tasks:
- name: Install common packages with apt
import_role:
name: apt
vars:
update_dist: true
upgrade_dist: true
        packages:
          - vim
          - dos2unix
          - git
          - curl
          - wget
remove_packages: []
upgrade_packages: []
        apt_keys: []
        apt_repositories: []
become: true
- hosts: centos_hosts
tasks:
- name: "Ensure firewalld is installed"
import_role:
name: yum
tasks_from: install_packages.yml
vars:
packages:
- firewalld
become: true
- name: "Ensure firewalld is started"
service:
name: firewalld
state: started
become: true
- name: "Set default firewalld policy"
import_role:
name: firewalld
become: true
- hosts: debian_hosts
tasks:
- name: "Ensure ufw is installed"
import_role:
name: apt
tasks_from: install_packages.yml
vars:
packages:
- ufw
become: true
- name: "Ensure ufw is started"
service:
name: ufw
state: started
become: true
- name: "Set default firewalld policy"
import_role:
name: ufw
become: true
- hosts: all
tasks:
- name: "Ensure pip is installed"
include_role:
name: get-pip
become: true
- name: "Ensure docker is installed"
include_role:
name: get-docker
vars:
docker_users:
- gcharbon
- jgaschler
- ansible_user
become: true
# - name: "Ensure certbot is installed"
# include_role:
# name: get-certbot
# vars:
# virtualenv: ~/certbot_env
# ansible_user: gcharbon
- name: Include virt_init_setup.yml playbook
import_playbook: virt_init_setup.yml
vars:
deploy_environment: demo
virt_group: kvm
virt_user: gcharbon
- name: Include virt_update_setup.yml playbook
import_playbook: virt_update_setup.yml
vars:
deploy_environment: demo
virt_group: kvm
virt_user: gcharbon
- name: Include virt_update_setup.yml playbook
import_playbook: virt_update_setup.yml
vars:
deploy_environment: production
virt_group: kvm
virt_user: gcharbon
- name: Include virt_init_setup.yml playbook
import_playbook: virt_init_setup.yml
vars:
deploy_environment: production
virt_group: kvm
virt_user: gcharbon
- name: Include virt_update_setup.yml playbook
import_playbook: virt_update_setup.yml
vars:
deploy_environment: testing
virt_group: kvm
virt_user: gcharbon
- name: Include virt_init_setup.yml playbook
import_playbook: virt_init_setup.yml
vars:
deploy_environment: testing
virt_group: kvm
virt_user: gcharbon
- hosts: centos_hosts
tasks:
- name: "Ensure firewalld is installed"
import_role:
name: yum
tasks_from: install_packages.yml
vars:
packages:
- firewalld
- name: "Ensure firewalld is started"
service:
name: firewalld
state: started
- name: "Set default firewalld policy"
import_role:
name: firewalld
- hosts: debian_hosts
tasks:
- name: "Ensure ufw is installed"
import_role:
name: apt
tasks_from: install_packages.yml
vars:
packages:
- ufw
- name: "Ensure ufw is started"
service:
name: ufw
state: started
- name: "Set default firewalld policy"
import_role:
name: ufw
---
- hosts: all
tasks:
- name: "Ensure certbot is installed"
import_role:
name: get-certbot
---
- hosts: all
tasks:
- name: "Ensure docker is installed"
import_role:
name: get-docker
---
- hosts: all
tasks:
- name: "Ensure global pip is installed with extra args: {{ pip_extra_args | default('') }}"
import_role:
name: get-pip
sudoers_group: wheel
emulator: /usr/libexec/qemu-kvm
virt_dependencies:
- qemu-kvm
- libvirt
- virt-install
- genisoimage
- python-lxml
- libvirt-python
- virt-manager
docker_gpg_key: https://download.docker.com/linux/centos/gpg
docker_repository:
name: docker-ce.repo
baseurl: https://download.docker.com/linux/centos/7/$basearch/stable
enabled: 1
gpgcheck: 1
remove_docker_dependencies:
- docker
- docker-client
- docker-client-latest
- docker-common
- docker-latest-logrotate
- docker-selinux
- docker-engine-selinux
- docker-engine
docker_dependencies:
- yum-utils
- device-mapper-persistent-data
- lvm2
docker_package: docker-ce
libvirtd_group: libvirt
ansible_python_interpreter: /usr/bin/python
local_users:
-
name: gcharbon
gecos: Guillaume Charbonnier
createhome: yes
homedir: "/home/gcharbon"
shell: "/bin/bash"
ssh_authorized_keys:
# gcharbon/bash
- ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDG3Ql48TplqV5FWrZvImXhd3qHW3E9Dwqr38h3oP6fF50RVOzSR2AkqlQ8F0e8cAWNh1xN17+XLDiIc7ZsfJle0fwFy3SEaNGIMg+mvfcgDEbuzFk/YecfpWP7LiUSedScvExQ9Hz9bQiCCBqRn7XCipdXIx+yQLnlrtl0mgGIGMfRgmWWZOgSFbqLJ5bQKHa3JKZJKN1RAxLceADEgXUZBS9c1X/l8LFdfYOFhyvg23qUOCVBpBHmhzxqq91M72nVIaiU73edy+qZbF2u34Zs1VO+36yrE3FL1E19B5fmGwMyDtOe6NnDAPJ//5bYaUSf35NgYB0slBEQaKdDlAwt gcharbon@LFR020106
# gcharbon/mobaxterm
- ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDDGzMtuijGAzxasmDZqqoAeoaz7e4Ys+xlW6duu2tK34Wi85uHRREkrqrL9yRdL8I9QGRuw4puwL9mI2sYOx7BpqAlhTeZx44FJaAidbb9cxYruCwz55Pvi2ebsm7LktVwCqiTckwS53yzfJZvdssYkJKCYNzL/7f1HN7xrugUqtyoM6M/KcdxYBSBDM9qQU0HbOdmS11xVrfsl8f39IjxsmKIq89sYk2ng6KF/0EsDQQzprB5D8QqVDyFcvT7fC3HXlot0nsW+4/9Q/xkkMet3kkQTXd/marwDZI1jPS6GXPQy3drGkGTI/5151/rzPwOZsOZRR+vfpvSHpiDNKvr gcharbon@LFR020106
generate_ssh_key: true
sudo_user: true
passwordless_sudo: true
password_lock: true
# Do not set password for better security. No one can log into this user without ssh keys being authorized.
-
name: jgaschler
gecos: Jean Gaschler
homedir: "/home/jgaschler"
shell: "/bin/bash"
ssh_authorized_keys:
# Git bash key (Jean)
- ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDWi5wpJjHvfbAY9ekYJ2G042JF3VTGyXoYB99Z2deEuU/hDBYK4wTocJq2bCv7cU88S+a6uVewrO37SnVo0X1Vxehf9TMsiwNVNQXa+iuIvnSSzpnFzBSO0xWyP6bd4UJJsuCviZmlNjUSEHAaEH4NxAzFa+Jfz65jDBbSycHVcANcka9tVcIVazvCVGqalUKpMfcwrSnEf2u8GpmOfb5GQJ80Ax3r83NuJVjmQLDmBx98EgbhefXgP3Xt5oUWk1IInKaiOnbT6Vuo4eYW42HovVmxRFUJYGayNS2mNXAIkOakFVMQutHAVddXECyp0xKfBxyjWE4NTWTwiNZifgVv jgaschle@LFR010770
# MobaXterm (Jean)
- ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDkRKXqZnDpW+p1jxIHUTo0ZbjoOzgVtM6lAoIC+KHN7WfF0IUIoUYi7q0ioPUBTp0RVuE1n5EhSzB6zjYo4vJHm4v1UG9/hhZUbJeG4Ob4cF281aSrvPYMMZ3DHEf2IIedtlVjoergogXdR3fVz0XmHq4CjzNHITlGBfglIz98MwK4j7pRVpAgff/C/zPILd2omJX1pJJ9vrmdHvITpfTbKWLDrB4SBL9Mh7NMZ+nnQNAuNDMUhHnUVFwS69Fl5ziBDO0Ce5dvk0ooyd6LqRomGUXy3l+F010IKSiljsESAYNXC9fMK93uuYZ3fBpNY9FDN3ZtA6bHdsR+5nvCIFFr jgaschle@LFR010770
sudo_user: true
passwordless_sudo: true
# Do not set password for better security. No one can log into this user without ssh keys being authorized.
-
name: admin
gecos: Capgemini Admin User
createhome: true
homedir: "/home/admin"
shell: "/bin/bash"
ssh_authorized_keys:
# gcharbon/bash
- ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDG3Ql48TplqV5FWrZvImXhd3qHW3E9Dwqr38h3oP6fF50RVOzSR2AkqlQ8F0e8cAWNh1xN17+XLDiIc7ZsfJle0fwFy3SEaNGIMg+mvfcgDEbuzFk/YecfpWP7LiUSedScvExQ9Hz9bQiCCBqRn7XCipdXIx+yQLnlrtl0mgGIGMfRgmWWZOgSFbqLJ5bQKHa3JKZJKN1RAxLceADEgXUZBS9c1X/l8LFdfYOFhyvg23qUOCVBpBHmhzxqq91M72nVIaiU73edy+qZbF2u34Zs1VO+36yrE3FL1E19B5fmGwMyDtOe6NnDAPJ//5bYaUSf35NgYB0slBEQaKdDlAwt gcharbon@LFR020106
# gcharbon/mobaxterm
- ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDDGzMtuijGAzxasmDZqqoAeoaz7e4Ys+xlW6duu2tK34Wi85uHRREkrqrL9yRdL8I9QGRuw4puwL9mI2sYOx7BpqAlhTeZx44FJaAidbb9cxYruCwz55Pvi2ebsm7LktVwCqiTckwS53yzfJZvdssYkJKCYNzL/7f1HN7xrugUqtyoM6M/KcdxYBSBDM9qQU0HbOdmS11xVrfsl8f39IjxsmKIq89sYk2ng6KF/0EsDQQzprB5D8QqVDyFcvT7fC3HXlot0nsW+4/9Q/xkkMet3kkQTXd/marwDZI1jPS6GXPQy3drGkGTI/5151/rzPwOZsOZRR+vfpvSHpiDNKvr gcharbon@LFR020106
# jgaschler
- ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDWi5wpJjHvfbAY9ekYJ2G042JF3VTGyXoYB99Z2deEuU/hDBYK4wTocJq2bCv7cU88S+a6uVewrO37SnVo0X1Vxehf9TMsiwNVNQXa+iuIvnSSzpnFzBSO0xWyP6bd4UJJsuCviZmlNjUSEHAaEH4NxAzFa+Jfz65jDBbSycHVcANcka9tVcIVazvCVGqalUKpMfcwrSnEf2u8GpmOfb5GQJ80Ax3r83NuJVjmQLDmBx98EgbhefXgP3Xt5oUWk1IInKaiOnbT6Vuo4eYW42HovVmxRFUJYGayNS2mNXAIkOakFVMQutHAVddXECyp0xKfBxyjWE4NTWTwiNZifgVv jgaschle@LFR010770
generate_ssh_key: true
sudo_user: true
passwordless_sudo: false
# Same password as other machines
password: $6$4x0Qf/oe3mXAJkYD$.4YyNeoIjbDuA60y0zjjMHhbLv7ZvpYxYLfrYqCZZ5XK7ehBkzIz8YcghxSHuB16CFNEsUPIJ0BB5yOBqAonz/
docker_user: true
deploy_environment: testing
virt_user: gcharbon
virt_group: kvm
sudoers_group: sudo
emulator: /usr/bin/kvm
virt_dependencies:
- qemu-kvm
- qemu-utils
- libvirt-bin
- virtinst
- genisoimage
- python3-lxml
- python3-libvirt
- virt-manager
docker_gpg_key:
url: https://download.docker.com/linux/ubuntu/gpg
docker_repository:
repo: "deb [arch=amd64] https://download.docker.com/linux/ubuntu {{ansible_distribution_release}} stable"
state: present
remove_docker_dependencies:
- docker
- docker-engine
- docker.io
docker_dependencies:
- apt-transport-https
- curl
- ca-certificates
- software-properties-common
docker_package: docker-ce
libvirtd_group: libvirtd
ansible_python_interpreter: /usr/bin/python3
virt_user: qemu
virt_group: libvirt-qemu
local_users:
-
name: gcharbon
gecos: Guillaume Charbonnier
createhome: yes
homedir: "/home/gcharbon"
shell: "/bin/bash"
ssh_authorized_keys:
# gcharbon/bash
- ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDG3Ql48TplqV5FWrZvImXhd3qHW3E9Dwqr38h3oP6fF50RVOzSR2AkqlQ8F0e8cAWNh1xN17+XLDiIc7ZsfJle0fwFy3SEaNGIMg+mvfcgDEbuzFk/YecfpWP7LiUSedScvExQ9Hz9bQiCCBqRn7XCipdXIx+yQLnlrtl0mgGIGMfRgmWWZOgSFbqLJ5bQKHa3JKZJKN1RAxLceADEgXUZBS9c1X/l8LFdfYOFhyvg23qUOCVBpBHmhzxqq91M72nVIaiU73edy+qZbF2u34Zs1VO+36yrE3FL1E19B5fmGwMyDtOe6NnDAPJ//5bYaUSf35NgYB0slBEQaKdDlAwt gcharbon@LFR020106
# gcharbon/mobaxterm
- ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDDGzMtuijGAzxasmDZqqoAeoaz7e4Ys+xlW6duu2tK34Wi85uHRREkrqrL9yRdL8I9QGRuw4puwL9mI2sYOx7BpqAlhTeZx44FJaAidbb9cxYruCwz55Pvi2ebsm7LktVwCqiTckwS53yzfJZvdssYkJKCYNzL/7f1HN7xrugUqtyoM6M/KcdxYBSBDM9qQU0HbOdmS11xVrfsl8f39IjxsmKIq89sYk2ng6KF/0EsDQQzprB5D8QqVDyFcvT7fC3HXlot0nsW+4/9Q/xkkMet3kkQTXd/marwDZI1jPS6GXPQy3drGkGTI/5151/rzPwOZsOZRR+vfpvSHpiDNKvr gcharbon@LFR020106
generate_ssh_key: true
sudo_user: true
passwordless_sudo: true
password_lock: true
# Do not set password for better security. No one can log into this user without ssh keys being authorized.
deploy_environment: demo
virt_user: gcharbon
virt_group: kvm
# ansible_ssh_common_args: '-F .ssh/ssh.cfg -o StrictHostKeyChecking=no ProxyCommand="ssh -W %h:%p gcharbon@10.68.150.240"'
ansible_ssh_common_args: '-F .ssh/ssh.cfg -o StrictHostKeyChecking=no'
ansible_user: ansible_user
- hosts: hypervisors:&centos_hosts
tasks:
    - name: ACTION | Upgrade dist and install common packages on hypervisor hosts
import_role:
name: yum
vars:
update_before_install: true
upgrade_dist: true
GPG_keys: []
yum_repositories: []
remove_packages: []
upgrade_packages: []
packages:
- vim
- dos2unix
- curl
- wget
tags:
- common
- package
    - name: ACTION | Install libvirt dependencies on hypervisor hosts
import_role:
name: yum
tasks_from: install_packages.yml
vars:
update_before_install: true
packages: "{{ virt_dependencies }}"
tags:
- virtualization
- package
- hosts: hypervisors:&debian_hosts
tasks:
    - name: ACTION | Upgrade dist and install common packages on hypervisor hosts
import_role:
name: apt
vars:
update_before_install: true
upgrade_dist: true
        apt_keys: []
        apt_repositories: []
remove_packages: []
upgrade_packages: []
packages:
- vim
- dos2unix
- curl
- wget
tags:
- common
- package
    - name: ACTION | Install libvirt dependencies on hypervisor hosts
import_role:
name: apt
tasks_from: install_packages.yml
vars:
update_before_install: false
packages: "{{ virt_dependencies }}"
tags:
- virtualization
- packages
- hosts: hypervisors
tasks:
    - name: ROLE | Ensure users exist
import_role:
name: users
vars:
users: "{{ local_users }}"
tags:
- virtualization
- user
- hosts: hypervisors
tasks:
- name: Create virtual machines
import_role:
name: virtualization
tasks_from: domains.yml
vars:
overwrite: true
os_images:
centos7:
name: Centos7_x86_64_cloud_image.qcow2
url: https://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud-1808.qcow2
virtual_machines:
-
hostname: test-centos-01
volume:
name: test-centos-01.img
format: qcow2
size: 10G
pool: test_pool
os_image: centos7
memory:
value: 2
unit: GB
current_memory: 1
hugepages:
value: 2048
unit: kB
cores:
value: 1
cpu:
mode: host-passthrough
model:
fallback: allow
state: active
autostart: yes
on_poweroff: destroy
on_reboot: restart
on_crash: restart
os:
arch: x86_64
type: hvm
boot:
devices:
- hd
- cdrom
- network
bootmenu_enabled: no
bios:
userserial: yes
rebootTimeout: 0
---
- hosts: nfs
tasks:
- include_role:
name: nfs
vars:
nfs_exports: "/nfs/test_share"
- hosts: docker
tasks:
- name: Install docker python package
include_role:
name: pip
tasks_from: install_packages.yml
vars:
pip_install_packages:
- docker
- docker-compose
virtualenv: ~/.docker_env
ansible_user: gcharbon
- name: Leave current swarm cluster on all docker nodes
docker_swarm:
state: absent
force: true
vars:
ansible_python_interpreter: ~/.docker_env/bin/python
ansible_user: gcharbon
- name: Create docker zone with firewalld
command: firewall-cmd --permanent --new-zone docker
become: true
register: create_zone_result
changed_when: |
("Error: NAME_CONFLICT: new_zone(): 'docker'" not in create_zone_result.stderr
and
create_zone_result.rc != 1)
or
create_zone_result.rc == 0
failed_when: |
"Error: NAME_CONFLICT: new_zone(): 'docker'" not in create_zone_result.stderr
and
create_zone_result.rc != 0
    - name: Add IP addresses of docker group members as sources to the docker zone on centos managers
include_role:
name: firewalld
tasks_from: sources.yml
vars:
firewall:
sources:
- subnet: "{{ hostvars[item].ansible_host }}"
zone: docker
permanent: true
state: enabled
with_items: "{{ groups['docker'] }}"
- name: Enable port 2377 on docker zone on centos managers
include_role:
name: firewalld
tasks_from: ports.yml
vars:
firewall:
ports:
- port: 2377
proto: tcp
permanent: true
state: enabled
zone: docker
when: "groups['centos_hosts'] is defined
and
inventory_hostname in groups['centos_hosts']
and
inventory_hostname in groups['manager']"
- name: Prepare rules for ufw on debian managers
set_fact:
ufw_rules: "{{ ufw_rules|default([]) | union( [{ 'rule': 'allow', 'src': hostvars[item].ansible_host, 'port': '2377', 'proto': 'tcp'}] ) }}"
with_items: "{{ groups['docker'] }}"
when: "groups['debian_hosts'] is defined
and
inventory_hostname in groups['debian_hosts']
and
inventory_hostname in groups['manager']"
- name: Allow port 2377 for members of docker group with ufw on debian managers
include_role:
name: ufw
tasks_from: rules.yml
vars:
firewall:
rules: "{{ ufw_rules }}"
when: "groups['debian_hosts'] is defined
and
inventory_hostname in groups['debian_hosts']
and
inventory_hostname in groups['debian_hosts']"
- name: Reload firewallcmd
include_role:
name: firewalld
tasks_from: reload.yml
when: "groups['centos_hosts'] is defined
and
inventory_hostname in groups['centos_hosts']"
- name: Reload UFW
ufw:
state: reloaded
when: "groups['debian_hosts'] is defined
and
inventory_hostname in groups['centos_hosts']"
- name: "Init a new swarm with default parameters on node {{ groups['manager'][0] }}"
docker_swarm:
state: present
advertise_addr: "{{ hostvars[groups['manager'][0]].ansible_host }}"
register: swarm_facts
vars:
ansible_python_interpreter: ~/.docker_env/bin/python
ansible_user: gcharbon
run_once: true
delegate_to: "{{ groups['manager'][0] }}"
    - name: Gather facts from the first manager node
setup:
delegate_to: "{{ groups['manager'][0] }}"
delegate_facts: True
- name: Register join token on all pending workers
set_fact:
join_token: "{{ swarm_facts.swarm_facts.JoinTokens.Worker }}"
when: "inventory_hostname not in groups['manager']"
- name: Register join token on all pending managers
set_fact:
join_token: "{{ swarm_facts.swarm_facts.JoinTokens.Manager }}"
when: "inventory_hostname in groups['manager']
and
inventory_hostname != groups['manager'][0]"
- name: Join Swarm cluster
docker_swarm:
state: join
advertise_addr: "{{ ansible_host }}"
join_token: "{{ join_token }}"
remote_addrs: "{{ hostvars[groups['manager'][0]].ansible_host }}:2377"
when: "inventory_hostname != groups['manager'][0]"
vars:
ansible_python_interpreter: ~/.docker_env/bin/python
ansible_user: gcharbon
---
- hosts: manager
tasks:
- name: Ensure work directory exists
become: true
file:
path: /work
state: directory
mode: 0775
owner: admin
group: docker
- name: Ensure portainer directory exists
become: true
file:
path: /work/portainer
state: directory
mode: 0775
owner: admin
group: docker
- name: Copy stack file to remote host
copy:
src: ../deploy/portainer/deploy-stack.yml
dest: /work/portainer/deploy-stack.yml
# - name: deploy portainer stack from file
# docker_stack:
# state: present
# name: portainer
# compose:
# - "/work/portainer/deploy-stack.yml"
# run_once: true
# vars:
# ansible_become_user: admin
# ansible_become: true
- hosts: all
tasks:
- name: Configure users and ssh keys
include_role:
name: users
- hosts: hypervisors:&centos_hosts
tasks:
    - name: ACTION | Upgrade dist and install common packages on hypervisor hosts
import_role:
name: yum
vars:
update_before_install: true
upgrade_dist: true
GPG_keys: []
yum_repositories: []
remove_packages: []
upgrade_packages: []
packages:
- vim
- dos2unix
- curl
- wget
tags:
- common
- package
    - name: ACTION | Install libvirt dependencies on hypervisor hosts
import_role:
name: yum
tasks_from: install_packages.yml
vars:
update_before_install: true
packages: "{{ virt_dependencies }}"
tags:
- virtualization
- package
- hosts: hypervisors:&debian_hosts
tasks:
    - name: ACTION | Upgrade dist and install common packages on hypervisor hosts
import_role:
name: apt
vars:
update_before_install: true
upgrade_dist: true
        apt_keys: []
        apt_repositories: []
remove_packages: []
upgrade_packages: []
packages:
- vim
- dos2unix
- curl
- wget
tags:
- common
- package
    - name: ACTION | Install libvirt dependencies on hypervisor hosts
import_role:
name: apt
tasks_from: install_packages.yml
vars:
update_before_install: false
packages: "{{ virt_dependencies }}"
tags:
- virtualization
- packages
- hosts: hypervisors
tasks:
    - name: ROLE | Ensure users exist
import_role:
name: users
vars:
users: "{{ local_users }}"
tags:
- virtualization
- user
- name: INCLUDE | Set permissions for virtualization
include_role:
name: virtualization
tasks_from: set_permissions.yml
vars:
virt_user: gcharbon
- name: ACTION | Restart and enable the libvirtd service
service:
name: libvirtd
state: restarted
enabled: true
- name: ROLE | Setup virtualization infrastructure
include_role:
name: virtualization
vars:
virt_user: gcharbon
environments_path: ../virtual_infrastructure/environments
networks_path: ../virtual_infrastructure/networks
- name: INCLUDE | Forward ports of created virtual machine to host port with iptable
include_role:
name: virtualization
tasks_from: forward_ports.yml
vars:
virtual_networks: "{{ networks }}"
virtual_machines: "{{ deployment_configuration }}"
- name: INCLUDE | Ensure that virtual machines are reachable from SSH
include_role:
name: virtualization
tasks_from: wait_for_vms.yml
vars:
virtual_machines: "{{ deployment_configuration }}"
ssh_timeout: 30
ssh_port: 22
    # We still need a way to add the SSH keys of newly created VMs to the local known_hosts file; see the commented task and the sketch below.
# - name: ACTION | Write the new virtual instance host key to known hosts
# connection: local
# shell: "ssh-keyscan -H {{ item.ipv4 }} >> ~/.ssh/known_hosts"
# with_items:
# - "{{ deployment_configuration }}"
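    # A possible alternative (an untested sketch, not the author's method): the
    # known_hosts module can record the scanned key instead of appending to the
    # file with a shell task.
    # - name: ACTION | Register the new virtual instance host key in local known_hosts
    #   connection: local
    #   known_hosts:
    #     name: "{{ item.ipv4 }}"
    #     key: "{{ lookup('pipe', 'ssh-keyscan -H ' + item.ipv4) }}"
    #   with_items:
    #     - "{{ deployment_configuration }}"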
- hosts: hypervisors
tasks:
- name: ROLE | Setup virtualization infrastructure
include_role:
name: virtualization
vars:
virt_user: gcharbon
environments_path: ../virtual_infrastructure/environments
networks_path: ../virtual_infrastructure/networks
- name: INCLUDE | Forward ports of created virtual machine to host port with iptable
include_role:
name: virtualization
tasks_from: forward_ports.yml
vars:
virtual_networks: "{{ networks }}"
virtual_machines: "{{ deployment_configuration }}"
- name: INCLUDE | Ensure that virtual machines are reachable from SSH
include_role:
name: virtualization
tasks_from: wait_for_vms.yml
vars:
virtual_machines: "{{ deployment_configuration }}"
ssh_timeout: 30
ssh_port: 22
---
- hosts: centos_hosts
tasks:
- name: "Ensure epel repo is installed"
import_role:
name: yum
tasks_from: add_repositories.yml
vars:
yum_repositories:
- name: epel
description: EPEL YUM repo
baseurl: https://download.fedoraproject.org/pub/epel/$releasever/$basearch/
enabled: 1
gpgcheck: 1
gpgkey: "https://dl.fedoraproject.org/pub/epel/RPM-GPG-KEY-EPEL-{{ ansible_distribution_major_version }}"
---
- import_playbook: enable_epel.yml
vars:
ansible_become: true
- hosts: all
tasks:
- name: "Ensure python36 is installed"
import_role:
name: yum
tasks_from: install_packages.yml
vars:
packages:
- python36
update_before_install: false
become: true
- name: Copy python requirements
import_role:
name: pip
tasks_from: copy_requirements.yml
- name: "Ensure global pip is installed with extra args"
import_role:
name: get-pip
vars:
python_interpreter: /usr/bin/python36
pip_extra_args: --user
- name: "Install python requirements"
import_role:
name: pip
tasks_from: install_requirements.yml
vars:
virtualenv: "{{ virtual_env_path }}-epel"
ansible_python_interpreter: /usr/bin/python36
---
- hosts: datalab
tasks:
- name: Ensure centos-release-scl is installed
import_role:
name: yum
tasks_from: install_packages.yml
vars:
packages:
- centos-release-scl
update_before_install: true
become: true
- name: Enable centos-sclo-rh-testing repository
import_role:
name: yum
tasks_from: enable_repositories.yml
vars:
yum_repositories:
- centos-sclo-rh-testing
- name: "Ensure {{ rh_python_package }} is installed"
import_role:
name: yum
tasks_from: install_packages.yml
vars:
packages:
- "{{ rh_python_package }}"
update_before_install: true
become: true
- name: "Optionally add line in .bashrc to enable {{ rh_python_package }} with scl"
lineinfile:
path: "/home/{{ ansible_user }}/.bashrc"
line: "source scl_source enable {{ rh_python_package }}"
insertafter: EOF
state: present
when: "scn_enable_python|default(true) == true"
- name: Copy python requirements
import_role:
name: pip
tasks_from: copy_requirements.yml
- name: "Install python requirements with pip from {{ rh_python_interpreter }}"
import_role:
name: pip
tasks_from: install_requirements.yml
vars:
virtualenv: "{{ virtual_env_path }}-rh"
ansible_python_interpreter: "{{ rh_python_interpreter }}"
---
- hosts: all
tasks:
- name: "Ensure {{ rh_python_package }} is installed"
import_role:
name: yum
tasks_from: install_packages.yml
vars:
packages:
- "{{ rh_python_package }}"
update_before_install: false
become: true
- name: "Optionally add line in .bashrc to enable {{ rh_python_package }} with scl"
lineinfile:
path: "/home/{{ ansible_user }}/.bashrc"
line: "source scl_source enable {{ rh_python_package }}"
insertafter: EOF
state: present
when: "scn_enable_python|default(true) == true"
- name: Copy python requirements
import_role:
name: pip
tasks_from: copy_requirements.yml
- name: "Install python requirements with pip from {{ rh_python_interpreter }}"
import_role:
name: pip
tasks_from: install_requirements.yml
vars:
virtualenv: "{{ virtual_env_path }}-rh"
ansible_python_interpreter: "{{ rh_python_interpreter }}"
---
packages: []
remove_packages: []
upgrade_packages: []
install_recommends: no
dpkg_upgrade_options: 'force-confold,force-confdef'
update_before_install: true
---
- name: "Add GPG keys"
apt_key:
state: present
url: "{{ item.url | default(omit) }}"
id: "{{ item.id | default(omit) }}"
keyserver: "{{ item.keyserver | default(omit) }}"
keyring: "{{ item.keyring | default(omit) }}"
with_items:
- "{{ apt_keys | default([]) }}"
---
- name: "Add source repositories"
apt_repository:
repo: "{{ item.repo }}"
filename: "{{ item.filename | default(omit) }}"
state: present
with_items:
- "{{ apt_repositories | default([]) }}"
---
- name: "Install {{ packages | list | join(', ') }} with apt"
apt:
name: "{{ item }}"
state: present
    update_cache: "{{ update_before_install|bool }}"
install_recommends: "{{ install_recommends }}"
with_items: "{{ packages }}"
---
- include: add_repositories.yml
- include: add_GPG_keys.yml
- include: remove_packages.yml
- include: upgrade_dist.yml
- include: upgrade_packages.yml
- include: install_packages.yml
---
- name: Remove {{ remove_packages | join(', ') }} with apt
apt:
name: "{{ item }}"
state: absent
with_items: "{{ remove_packages }}"
- name: Upgrade dist with apt dist-upgrade
apt:
upgrade: dist
update_cache: yes
dpkg_options: "{{ dpkg_upgrade_options }}"
---
- name: Upgrade {{ upgrade_packages | list | join(', ') }} with apt
apt:
name: "{{ item }}"
state: latest
    update_cache: "{{ update_before_install|bool }}"
with_items: "{{ upgrade_packages }}"
---
firewall:
services:
-
name: ssh
permanent: true
state: enabled
zone: public
-
name: http
permanent: true
state: enabled
zone: public
-
name: https
permanent: true
state: enabled
zone: public
ports:
-
port: 2375
proto: tcp
zone: internal
permanent: true
sources:
-
subnet: 192.168.0.0/16
zone: internal
permanent: true
---
- name: Enable firewalld service at boot
service:
name: firewalld
enabled: yes
become: true
# /roles/firewalld/tasks/main.yml
# Configure firewall
---
- include: services.yml
- include: sources.yml
- include: ports.yml
- include: reload.yml
- include: enable.yml
# /roles/firewalld/tasks/ports.yml
# Configure firewall ports with firewalld on RHEL/CentOS systems
---
- name: Change registered ports with firewalld
firewalld:
port: "{{ port.port}}/{{ port.proto }}"
permanent: "{{ port.permanent }}"
state: "{{ port.state | default('enabled') }}"
zone: "{{ port.zone }}"
with_items:
- "{{ firewall.ports }}"
loop_control:
loop_var: port
become: true
---
- name: Reload firewalld service
service:
name: firewalld
state: reloaded
become: true
---
- name: Change registered services with firewalld
firewalld:
service: "{{ service.name }}"
permanent: "{{ service.permanent }}"
state: "{{ service.state | default('enabled') }}"
zone: "{{ service.zone }}"
with_items:
- "{{ firewall.services }}"
loop_control:
loop_var: service
become: true
---
- name: Change registered sources with firewalld
firewalld:
source : "{{ source.subnet }}"
zone: "{{ source.zone }}"
permanent: "{{ source.permanent }}"
state: "{{ source.state | default('enabled') }}"
with_items:
- "{{ firewall.sources }}"
loop_control:
loop_var: source
become: true
---
- name: Install certbot
include_role:
name: pip
tasks_from: install_packages.yml
vars:
pip_install_packages:
- pyopenssl
- certbot
#!/bin/bash
# Install virtualenv with pip if not already installed.
# pip must be installed before executing this script
pip freeze | grep virtualenv > /dev/null 2>&1
is_installed=$?
if [ "$is_installed" -eq 0 ]; then
echo 'virtualenv is already installed'
exit 0
else
pip install virtualenv > /dev/null 2>&1
echo 'virtualenv has been installed'
exit 0
fi
---
# file: roles/docker/meta/main.yml
dependencies:
# Ubuntu and Debian dependencies
-
role: apt
update_dist: true
upgrade_dist: true
remove_packages: "{{ remove_docker_dependencies }}"
packages: "{{ docker_dependencies }}"
apt_keys: "{{ docker_gpg_key }}"
apt_repositories:
- "{{ docker_repository }}"
when: ansible_distribution == 'Debian' or ansible_distribution == 'Ubuntu'
become: true
-
role: yum
update_dist: true
upgrade_dist: true
remove_packages: "{{ remove_docker_dependencies }}"
packages: "{{ docker_dependencies }}"
GPG_keys: "{{ docker_gpg_key }}"
yum_repositories:
- "{{ docker_repository }}"
    when: ansible_distribution == 'CentOS' or ansible_distribution == 'RedHat'
become: true
-
role: apt
upgrade_dist: false
update_dist: true
remove_packages: []
    apt_keys: []
apt_repositories: []
packages:
- "{{ docker_package }}"
when: ansible_distribution == 'Debian' or ansible_distribution == 'Ubuntu'
become: true
-
role: yum
upgrade_dist: false
update_dist: true
remove_packages: []
GPG_keys: []
yum_repositories: []
packages:
- "{{ docker_package }}"
    when: ansible_distribution == 'CentOS' or ansible_distribution == 'RedHat'
become: true
---
# file: roles/docker/tasks/add_user_to_docker_group.yml
- name: Add the docker users into docker group
user:
name: "{{ item }}"
groups: docker
with_items: "{{ docker_users | default([]) }}"
become: true
---
# file: roles/docker/tasks/configure_docker_daemon.yml
- name: Configure docker daemon
debug:
msg: "NOT IMPLEMENTED YET"
---
# file: roles/docker/tasks/install_docker.yml
- name: Ensure docker is installed
  debug:
    msg: "NOT IMPLEMENTED YET (docker is installed through the role dependencies in meta/main.yml)"
---
# file: roles/docker/tasks/main.yml
- include: add_user_to_docker_group.yml
- include: configure_docker_daemon.yml
- include: start_docker.yml
---
# file: roles/docker/tasks/start_docker.yml
- name: Ensure docker daemon is running
service:
name: docker
state: started
become: true
python_interpreter: "{{ ansible_python_interpreter }}"
# pip_extra_args: '--user'
#!/bin/bash
# Install virtualenv with pip if not already installed.
# pip must be installed before executing this script
if [[ $# -ne 1 ]] && [[ $# -ne 2 ]]; then
echo "Wrong number of arguments. Usage: $0 <PYTHON_INTERPRETER> [<PIP_EXTRA_ARGS>]"
exit 1
fi
PIP_COMMAND="$1 -m pip"
PIP_EXTRA_ARGS="$2"
$PIP_COMMAND freeze | grep virtualenv > /dev/null 2>&1
is_installed=$?
if [ "$is_installed" -eq 0 ]; then
echo 'virtualenv is already installed'
exit 0
fi
$PIP_COMMAND install virtualenv $PIP_EXTRA_ARGS > /dev/null 2> /tmp/virtualenv.error
is_installed=$?
if [ "$is_installed" -eq 0 ]; then
echo 'virtualenv has been installed'
exit 0
else
    cat /tmp/virtualenv.error >&2
exit 1
fi
---
# This tasks file installs pip using the get-pip.py script
# from https://bootstrap.pypa.io/get-pip.py
- name: Download pip installer
get_url:
url: https://bootstrap.pypa.io/get-pip.py
dest: /tmp/get-pip.py
- name: Execute the get-pip.py installer
become: true
shell: "{{ python_interpreter }} /tmp/get-pip.py {{ pip_extra_args | default('') }}"
  register: successful_install
- name: Remove the get-pip.py installer
file:
path: /tmp/get-pip.py
state: absent
  when: successful_install.rc == 0
---
# This tasks file installs virtualenv.
# The official documentation states that virtualenv is a required dependency for
# the ansible pip module.
- name: "Install virtualenv if it is not installed yet with interpreter {{ python_interpreter }}"
become: true
script: >
install_virtualenv.sh
{{ python_interpreter }}
{{ pip_extra_args |default('') }}
register: virtualenv_result
changed_when: "virtualenv_result.rc == 0
and
virtualenv_result.stdout_lines[0] != 'virtualenv is already installed'"
---
# This tasks file checks whether pip is already installed for the
# {{ python_interpreter }} interpreter.
- name: "Checking if pip is already installed with interpreter {{ python_interpreter }}"
command: "{{ python_interpreter }} -m pip --version"
ignore_errors: true
register: pip_is_installed
changed_when: pip_is_installed.rc != 0
---
- include_tasks: is_pip_installed.yml
- include_tasks: install_pip.yml
when: pip_is_installed.rc != 0
- include_tasks: install_virtualenv.yml
- name: "change hostname to {{ hostname }}"
hostname:
name: "{{ hostname }}"
- name: add hostname to /etc/hosts
lineinfile:
dest: /etc/hosts
regexp: '^127\.0\.0\.1[ \t]+localhost'
line: "127.0.0.1 localhost {{ hostname }}"
state: present
# Ansible Role: NFS
[![Build Status](https://travis-ci.org/geerlingguy/ansible-role-nfs.svg?branch=master)](https://travis-ci.org/geerlingguy/ansible-role-nfs)
Installs NFS utilities on RedHat/CentOS or Debian/Ubuntu.
## Requirements
None.
## Role Variables
Available variables are listed below, along with default values (see `defaults/main.yml`):
nfs_exports: []
A list of exports which will be placed in the `/etc/exports` file. See Ubuntu's simple [Network File System (NFS)](https://help.ubuntu.com/14.04/serverguide/network-file-system.html) guide for more info and examples. (Simple example: `nfs_exports: [ "/home/public *(rw,sync,no_root_squash)" ]`).
nfs_rpcbind_state: started
nfs_rpcbind_enabled: true
(RedHat/CentOS/Fedora only) The state of the `rpcbind` service, and whether it should be enabled at system boot.
## Dependencies
None.
## Example Playbook
- hosts: db-servers
roles:
- { role: geerlingguy.nfs }
## License
MIT / BSD
## Author Information
This role was created in 2014 by [Jeff Geerling](https://www.jeffgeerling.com/), author of [Ansible for DevOps](https://www.ansiblefordevops.com/).
---
nfs_exports: []
nfs_rpcbind_state: started
nfs_rpcbind_enabled: true
---
- name: reload nfs
command: 'exportfs -ra'
---
# Include variables and define needed variables.
- name: Include OS-specific variables.
include_vars: "{{ ansible_os_family }}.yml"
- name: Include overrides specific to RHEL 7.
include_vars: RedHat-7.yml
when:
- ansible_os_family == 'RedHat'
- ansible_distribution_major_version == "7"
- name: Include overrides specific to Fedora.
include_vars: Fedora.yml
when:
- ansible_os_family == 'RedHat'
- ansible_distribution == "Fedora"
# Setup/install tasks.
- include_tasks: setup-RedHat.yml
when: ansible_os_family == 'RedHat'
- include_tasks: setup-Debian.yml
when: ansible_os_family == 'Debian'
- name: Ensure directories to export exist
file: 'path="{{ item.strip().split()[0] }}" state=directory'
with_items: "{{ nfs_exports }}"
- name: Copy exports file.
template:
src: exports.j2
dest: /etc/exports
owner: root
group: root
mode: 0644
notify: reload nfs
- name: Ensure nfs is running.
service: "name={{ nfs_server_daemon }} state=started enabled=yes"
when: nfs_exports|length
---
- name: Ensure NFS utilities are installed.
apt:
name:
- nfs-common
- nfs-kernel-server
state: present
---
- name: Ensure NFS utilities are installed.
package: name=nfs-utils state=present
- name: Ensure rpcbind is running as configured.
service:
name: rpcbind
state: "{{ nfs_rpcbind_state }}"
enabled: "{{ nfs_rpcbind_enabled }}"
# /etc/exports: the access control list for filesystems which may be exported
# to NFS clients. See exports(5).
#
# Example for NFSv2 and NFSv3:
# /srv/homes hostname1(rw,sync,no_subtree_check) hostname2(ro,sync,no_subtree_check)
#
# Example for NFSv4:
# /srv/nfs4 gss/krb5i(rw,sync,fsid=0,crossmnt,no_subtree_check)
# /srv/nfs4/homes gss/krb5i(rw,sync,no_subtree_check)
#
{% for export in nfs_exports %}
{{ export }}
{% endfor %}
---
nfs_server_daemon: nfs-kernel-server
---
nfs_server_daemon: nfs-server
---
nfs_server_daemon: nfs-server
---
nfs_server_daemon: nfs
pip_requirements_remote_path: "/tmp/requirements_demo.txt"
pip_requirements_owner: "{{ ansible_user }}"
pip_requirements_mode: "0600"
coverage==4.5.1
cycler==0.10.0
decorator==4.3.0
entrypoints==0.2.3
ipykernel==4.8.2
ipython==6.5.0
ipython-genutils==0.2.0
jsonschema==2.6.0
jupyter-client==5.2.3
jupyter-core==4.4.0
matplotlib==2.2.2
mistune==0.8.3
nbconvert==5.3.1
nbformat==4.4.0
numpy==1.15.0
pandas==0.23.4
prompt-toolkit==1.0.15
pymongo==3.7.1
pyparsing==2.2.0
python-dateutil==2.7.3
PyYAML==3.13
scikit-learn==0.19.2
scipy==1.1.0
seaborn==0.9.0
sklearn==0.0
statsmodels==0.9.0
---
# The requirements path passed to pip must be local to the remote file system, hence this copy.
- name: Copy requirements.txt file to remote file system
copy:
src: "{{ pip_requirements_path }}"
dest: "{{ pip_requirements_remote_path }}"
owner: "{{ pip_requirements_owner | default(omit) }}"
group: "{{ pip_requirements_group | default(omit) }}"
mode: "{{ pip_requirements_mode | default(omit) }}"
---
- name: "Install {{ pip_install_packages|join(', ') }} with pip (extra_args: {{ pip_extra_args|default('') }})"
pip:
name: "{{ item.name | default(item) }}"
virtualenv: "{{ virtualenv | default(omit) }}"
version: "{{ item.version | default(omit) }}"
extra_args: "{{pip_extra_args | default(omit) }}"
with_items:
- "{{ pip_install_packages }}"
---
# File can be specified as a relative path if using the chdir option.
- name: "Install python requirements with interpreter {{ ansible_python_interpreter }}"
pip:
requirements: "{{ pip_requirements_remote_path }}"
virtualenv: "{{ virtualenv | default(omit) }}"
virtualenv_site_packages: "{{ virtualenv_site_packages | default(omit) }}"
extra_args: "{{ pip_extra_args | default(omit) }}"
---
- name: Remove packages with pip
pip:
name: "{{ item.name }}"
state: absent
virtualenv: "{{ virtualenv | default(omit) }}"
extra_args: "{{pip_extra_args | default(omit) }}"
with_items: "{{ pip_remove_packages }}"
---
- name: Upgrade packages with pip
pip:
name: "{{ item.name }}"
state: latest
virtualenv: "{{ virtualenv | default(omit) }}"
extra_args: "{{pip_extra_args | default(omit) }}"
with_items: "{{ pip_upgrade_packages }}"
version: '3.2'
services:
agent:
image: portainer/agent
environment:
# REQUIRED: Should be equal to the service name prefixed by "tasks." when
# deployed inside an overlay network
AGENT_CLUSTER_ADDR: tasks.agent
# AGENT_PORT: 9001
# LOG_LEVEL: debug
volumes:
- /var/run/docker.sock:/var/run/docker.sock
- /var/lib/docker/volumes:/var/lib/docker/volumes
networks:
- agent_network
deploy:
mode: global
placement:
constraints: [node.platform.os == linux]
portainer:
image: portainer/portainer
command: -H tcp://tasks.agent:9001 --tlsskipverify
ports:
- "9000:9000"
volumes:
- portainer_data:/data
networks:
- agent_network
deploy:
mode: replicated
replicas: 1
placement:
constraints: [node.role == manager]
networks:
agent_network:
driver: overlay
attachable: true
volumes:
portainer_data:
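# This stack targets Docker Swarm (it uses deploy/placement constraints). A
# typical deployment command, run on a manager node (file and stack names are
# illustrative):
#   docker stack deploy --compose-file portainer-agent-stack.yml portainer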
---
firewall:
state: enabled
policy: deny
logging: "on"
rules:
-
rule: limit
port: ssh
proto: tcp
-
rule: allow
port: 80
proto: tcp
-
rule: allow
port: 443
proto: tcp
- rule: allow
src: 192.168.0.0/24
---
- name: Enable UFW and set default policy ({{ firewall.policy }})
ufw:
state: "{{ firewall.state }}"
policy: "{{ firewall.policy }}"
become: true
# - name: Define forwarding rules with ufw for specific interfaces
# ufw:
# rule: "{{ rule.rule }}"
# interface: "{{ rule.interface }}"
# direction: "{{ rule.direction | default('in') }}"
# proto: "{{ rule.proto | default('tcp') }}"
# src: "{{ rule.src }}"
# from_port: "{{ rule.from_port }}"
# dest: "{{ rule.dest }}"
# to_port: "{{ rule.to_port | default(22) }}"
# when: rule.interface is defined
# with_items: "{{ ufw_conf.port_forwarding }}"
# loop_control:
# loop_var: rule
#
# - name: Define forwarding rules with ufw for any interfaces
# ufw:
# rule: "{{ rule.rule }}"
# direction: "{{ rule.direction | default('in') }}"
# proto: "{{ rule.proto | default('tcp') }}"
# src: "{{ rule.src }}"
# from_port: "{{ rule.from_port }}"
# dest: "{{ rule.dest }}"
# to_port: "{{ rule.to_port | default(22) }}"
# when: rule.interface is not defined
#
# with_items: "{{ ufw_conf.port_forwarding }}"
# loop_control:
# loop_var: rule
#
# - name: Define forwarded/routed traffic rules for any interfaces
# ufw:
# rule: "{{ rule.rule }}"
# route: "{{ rule.route }}"
# src: "{{ rule.src }}"
# dest: "{{ rule.dest }}"
# when: "rule.route is defined
# and
# rule.interface is not defined"
---
- name: Set ufw logging policy to "{{ firewall.logging }}"
ufw:
logging: "{{ firewall.logging }}"
when: "firewall.logging is defined"
become: true
- include: logging.yml
- include: rules.yml
- include: enable.yml
- name: Define rules with ufw
ufw:
rule: "{{ rule.rule }}"
src: "{{ rule.src | default(omit) }}"
comment: "{{ rule.comment | default(omit) }}"
proto: "{{ rule.proto | default(omit) }}"
port: "{{ rule.port | default(omit) }}"
delete: "{{ rule.delete | default(omit) }}"
with_items: "{{ firewall.rules }}"
loop_control:
loop_var: rule
become: true
---
# file: roles/users/tasks/main.yml
- name: INCLUDE | Create users
import_tasks: users.yml
- name: INCLUDE | Configure ssh keys
import_tasks: ssh_keys.yml
- name: INCLUDE | Update sudoers
import_tasks: sudoers.yml
---
# file: roles/users/tasks/ssh_keys.yml
- name: ACTION | Create users ssh-keys when needed
user:
name: "{{ user.name }}"
generate_ssh_key: yes
ssh_key_bits: 2048
ssh_key_comment: "{{ user.name }}@{{ inventory_hostname }}"
when: "user.generate_ssh_key is defined and user.generate_ssh_key|bool == true"
with_items: "{{ users }}"
loop_control:
loop_var: user
- name: ACTION | Add authorized SSH keys for each user so they can log into the target host
authorized_key:
user: "{{ item.0.name }}"
key: "{{ item.1 }}"
with_subelements:
- "{{ users }}"
- ssh_authorized_keys
- flags:
skip_missing: true
- name: ACTION | Generate ssh public key of current user if it does not exist
user:
name: "{{ ansible_user }}"
generate_ssh_key: yes
ssh_key_bits: 2048
ssh_key_comment: "{{ ansible_user }}@{{ inventory_hostname }}"
# - name: COLLECT | Get ssh public key of current user
# slurp:
# src: .ssh/id_rsa.pub
# register: slurp_public_key
#
# - name: ACTION | Set ssh public key of current user as authorized key for all users
# authorized_key:
# user: "{{ item.name }}"
# state: present
# key: "{{ slurp_public_key['content'] | b64decode }}"
# with_items:
# - "{{ users }}"
---
# file: roles/users/tasks/sudoers.yml
- name: ACTION | Ensure users with sudo_user set to true belong to sudoers group
user:
name: "{{ item.name }}"
groups: "{{ sudoers_group }}"
when: "item.sudo_user is defined and item.sudo_user|bool == true"
with_items: "{{ users }}"
- name: ACTION | Update sudoers file and validate
lineinfile:
dest: /etc/sudoers
insertafter: EOF
line: "{{ item.name }} ALL=(ALL) NOPASSWD: ALL"
regexp: "^{{ item.name }} .*"
state: present
validate: 'visudo -cf %s'
when: item.passwordless_sudo is defined and item.passwordless_sudo|bool == true
with_items: "{{ users }}"
---
# file: roles/users/tasks/users.yml
- name: ACTION | Configure users
user:
name: "{{ user.name }}"
comment: "{{ user.gecos | default(omit) }}"
create_home: "{{ user.create_home | default(true) }}"
shell: "{{ user.shell | default('/bin/bash') }}"
home: "{{ user.homedir | default(omit) }}"
state: "{{ user.state | default('present') }}"
password: "{{ user.password | default('omit') }}"
with_items: "{{ users }}"
loop_control:
loop_var: user
# Lock the password of the named account.
# This option disables a password by changing it to a value which matches
# no possible encrypted value (it adds a '!' at the beginning of the password).
# The user may still be able to login using another authentication token (e.g. an SSH key).
# Users with a locked password are not allowed to change their password.
- name: ACTION | Lock passwords of users not supposed to have one
user:
name: "{{ user.name }}"
password_lock: true
when: "user.password_lock is defined and user.password_lock|bool == true"
with_items: "{{ users }}"
loop_control:
loop_var: user
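# The `users` list drives every task in this role. A minimal illustrative entry
# (all keys besides `name` are optional; values are hypothetical):
# users:
#   - name: alice
#     gecos: Alice Example
#     generate_ssh_key: true
#     sudo_user: true
#     passwordless_sudo: true
#     password_lock: true
#     ssh_authorized_keys:
#       - ssh-rsa AAAA... alice@workstation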
deploy_environment: testing
environments_path: ../virtual_infrastructure/environments
networks_path: ../virtual_infrastructure/networks
virt_base_dir: /work/virt
libvirt_image_cache_path: "{{ virt_base_dir }}/images"
storage_pool_root_path: "{{ virt_base_dir }}/pools"
mac_address_inventory: "{{ virt_base_dir }}/interfaces/mac_address_inventory.json"
cloud_init_base_dir: "{{ virt_base_dir }}/cloud-init"
libvirt_volume_default_format: qcow2
qemu_uri: qemu:///system
from __future__ import print_function
import json
import random
import argparse
def read_json_or_get_empty_dict(file, mode="r"):
try:
with open(file, mode) as f:
content = json.load(f)
return content
except IOError:
return dict(mac_addresses=dict())
def write_json(content, file, mode="w+"):
with open(file, mode) as f:
f.write(json.dumps(content))
def randomMAC():
mac = [ 0x00, 0x16, 0x3e,
random.randint(0x00, 0x7f),
random.randint(0x00, 0xff),
random.randint(0x00, 0xff) ]
return ':'.join(map(lambda x: "%02x" % x, mac))
def uniqueMAC(existing_mac_addresses):
mac = randomMAC()
while mac in existing_mac_addresses:
mac = randomMAC()
return mac
def get_address_from_file(hostname, file):
virt_hardware_config = read_json_or_get_empty_dict(file)
# Try to get the mac address for the given hostname
try:
new_mac_address = virt_hardware_config["mac_addresses"][hostname]
# Generate a new mac address when it does not exist yet
except KeyError:
existing_addresses = set(virt_hardware_config["mac_addresses"].values())
new_mac_address = virt_hardware_config["mac_addresses"][hostname] = uniqueMAC(existing_mac_addresses=existing_addresses)
write_json(virt_hardware_config, file)
status = "CHANGED"
else:
status = "OK"
msg = dict()
msg[hostname] = new_mac_address
stdout_lines = [json.dumps(msg), status]
return("\n".join(stdout_lines))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--hostname",
required=True,
help="Hostname of the virtual machine")
parser.add_argument("--file",
required=True,
help="Path to file with all mac addresses")
args = parser.parse_args()
msg = get_address_from_file(args.hostname, args.file)
print(msg)
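# Example invocation (illustrative hostname, path and generated address). The
# calling Ansible task parses the first stdout line with from_json and compares
# the second line to "CHANGED":
#   python get_mac_address.py --hostname demo-manager-11 \
#       --file /work/virt/interfaces/mac_address_inventory.json
#   {"demo-manager-11": "00:16:3e:5d:a1:42"}
#   CHANGED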
#!/bin/bash
if [[ $# -ne 1 ]] ; then
echo "Wrong number of arguments. Usage: $0 <output> "
exit 1
fi
OUTPUT=$1
genisoimage -output "$OUTPUT" -volid cidata -joliet -rock user-data meta-data
#!/bin/bash
# Ensure that a libvirt volume exists, optionally uploading an image.
# On success, output a JSON object with a 'changed' item.
if [[ $# -lt 5 ]] || [[ $# -gt 8 ]]; then
echo "Wrong number of arguments. Usage: $0 <name> <pool> <capacity> <format> <image> [<overwrite>] [<qemu_uri>] [<qemu_group>]"
exit 1
fi
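# Example invocation (illustrative values; the calling Ansible task parses the
# JSON printed on success with from_json to compute changed_when):
#   ./prepare_volume.sh demo-manager-11_img demo_managers 10G qcow2 \
#       /work/virt/images/Centos7_x86_64_cloud_image.qcow2 True qemu:///system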
DEFAULT_FORCE_OVERWRITE="False"
DEFAULT_QEMU_URI="qemu:///system"
DEFAULT_QEMU_GROUP="qemu"
NAME=$1
POOL=$2
CAPACITY=$3
FORMAT=$4
IMAGE=$5
FORCE_OVERWRITE=$6
QEMU_URI=$7
QEMU_GROUP=$8
if [ -z "$FORCE_OVERWRITE" ]; then
FORCE_OVERWRITE=$DEFAULT_FORCE_OVERWRITE
fi
if [ -z "$QEMU_URI" ]; then
QEMU_URI=$DEFAULT_QEMU_URI
fi
if [ -z "QEMU_GROUP" ]; then
QEMU_GROUP=$DEFAULT_QEMU_GROUP
fi
# Check whether a volume with this name exists.
output=$(virsh --connect "$QEMU_URI" vol-info --pool $POOL --vol $NAME 2>&1)
result=$?
if [[ $result -eq 0 ]]; then
if [[ $FORCE_OVERWRITE == "False" ]]; then
echo '{"changed": false}'
exit 0
else
output_delete=$(virsh --connect "$QEMU_URI" vol-delete --pool $POOL $NAME > /dev/null)
result_delete=$?
if [[ $result_delete != "0" ]]; then
echo "Unexpected error occured. Check stderr_lines"
exit $result_delete
fi
fi
elif ! echo "$output" | grep 'Storage volume not found' >/dev/null 2>&1; then
echo "Unexpected error while getting volume info"
echo "$output"
exit $result
fi
# Create the volume.
output=$(virsh --connect "$QEMU_URI" vol-create-as --pool $POOL --name $NAME --capacity $CAPACITY --format $FORMAT 2>&1)
result=$?
if [[ $result -ne 0 ]]; then
echo "Failed to create volume"
echo "$output"
exit $result
fi
# Determine the path to the volume file.
output=$(virsh --connect "$QEMU_URI" vol-key --pool $POOL --vol $NAME 2>&1)
result=$?
if [[ $result -ne 0 ]]; then
echo "Failed to get volume file path"
echo "$output"
virsh --connect "$QEMU_URI" vol-delete --pool $POOL --vol $NAME
exit $result
fi
# Change the ownership of the volume to qemu. Without doing this libvirt cannot
# access the volume.
# output=$(chown $USER:$QEMU_GROUP $output 2>&1)
# result=$?
# if [[ $result -ne 0 ]]; then
# echo "Failed to change ownership of the volume to qemu"
# echo "$output"
# virsh --connect "$QEMU_URI" vol-delete --pool $POOL --vol $NAME
# exit $result
# fi
if [[ -n $IMAGE ]]; then
# Upload an image to the volume.
output=$(virsh --connect "$QEMU_URI" vol-upload --pool $POOL --vol $NAME --file $IMAGE 2>&1)
result=$?
if [[ $result -ne 0 ]]; then
echo "Failed to upload image $IMAGE to volume $NAME"
echo "$output"
virsh --connect "$QEMU_URI" vol-delete --pool $POOL --vol $NAME
exit $result
fi
# Resize the volume to the requested capacity.
output=$(virsh --connect "$QEMU_URI" vol-resize --pool $POOL --vol $NAME --capacity $CAPACITY 2>&1)
result=$?
if [[ $result -ne 0 ]]; then
echo "Failed to resize volume $VOLUME to $CAPACITY"
echo "$output"
virsh --connect "$QEMU_URI" vol-delete --pool $POOL --vol $NAME
exit $result
fi
fi
echo '{"changed": true}'
exit 0
#!/bin/bash
set -e
# Update virtual network dhcp section in live and in its definition
# On success, output a JSON object with a 'changed' item.
if [[ $# -ne 4 ]] && [[ $# -ne 5 ]]; then
echo "Wrong number of arguments. Usage: $0 <network> <hostname> <mac_address> <ipv4> <qemu_uri>"
exit 1
fi
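# Example invocation (illustrative values; the calling task compares the first
# stdout line to "CHANGED" to report changes):
#   ./update_dhcp.sh demo_datalab demo-manager-11 00:16:3e:5d:a1:42 192.168.100.11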
NETWORK=$1
HOSTNAME=$2
MAC_ADDRESS=$3
IPV4=$4
QEMU_URI=$5
if [ -z "$QEMU_URI" ]; then
QEMU_URI=qemu:///system
fi
status=$(virsh --connect "$QEMU_URI" net-dumpxml $NETWORK | grep $HOSTNAME && echo "OK" || echo "CHANGED")
if [ "$status" == "CHANGED" ]; then
virsh --connect "$QEMU_URI" net-update $NETWORK add ip-dhcp-host "<host mac='$MAC_ADDRESS' name='$HOSTNAME' ip='$IPV4' />" --live --config > /dev/null
echo "CHANGED"
exit 0
fi
echo "OK"
exit 0
- name: Ensure cloud-init base dir exists
file:
path: "{{ cloud_init_base_dir }}"
state: directory
mode: 0770
- name: Ensure that a cloud-init directory exists for each VM we're going to create
file:
path: "{{ cloud_init_base_dir }}/{{ vm.hostname }}"
state: directory
mode: 0770
with_items: "{{ virtual_machines }}"
loop_control:
loop_var: vm
- name: Generate meta-data based on templates for each vm
template:
src: cloud-init/meta-data.j2
dest: "{{ cloud_init_base_dir }}/{{ vm.hostname }}/meta-data"
with_items: "{{ virtual_machines }}"
loop_control:
loop_var: vm
- name: Generate user-data based on templates for each vm
template:
src: cloud-init/user-data.j2
dest: "{{ cloud_init_base_dir }}/{{ vm.hostname }}/user-data"
with_items: "{{ virtual_machines }}"
loop_control:
loop_var: vm
- name: Generate cloud-init ISO seed for each VM
script: >
prepare_cloud-init_seed.sh
{{ cloud_init_base_dir }}/{{ vm.hostname }}/seed.iso
args:
chdir: "{{ cloud_init_base_dir }}/{{ vm.hostname }}"
with_items: "{{ virtual_machines }}"
loop_control:
loop_var: vm
# - name: "ACTION | Set each volume ownership to libvirt-qemu on debian hosts"
# file:
# dest: "{{ cloud_init_base_dir }/{{ item.hostname }}/seed.iso"
# owner: libvirt-qemu
# group: libvirt-qemu group=apache mode=u=rwX,g=rX,o=rX recurse=yes
- name: " COLLECT | Search for targeted environment with path: '{{ environments_path }}/{{ deploy_environment }}'"
stat:
path: "{{ environments_path }}/{{ deploy_environment }}"
follow: yes
register: stat_deploy_environment
delegate_to: localhost
- name: " ASSERT | Assert that configuration exists for {{ deploy_environment}}"
assert:
that:
- "stat_deploy_environment.stat.isdir is defined
and
stat_deploy_environment.stat.isdir"
- name: "COLLECT | Determine virtual groups that should be deployed"
find:
paths: "{{ environments_path }}/{{ deploy_environment }}"
recurse: false
file_type: directory
follow: true
register: virtual_groups_search
delegate_to: localhost
- name: 'COLLECT | Register virtual groups as fact for "{{ deploy_environment }}" environment'
set_fact:
virtual_groups: "{{ virtual_groups_search.files | map(attribute='path') | list }}"
- name: "DEBUG | Print virtual groups"
debug:
msg: "{{ item | basename }}"
with_items:
- "{{ virtual_groups }}"
tags:
- DEBUG
- name: "COLLECT | Store variables common to all virtual groups into all_group_variables fact"
set_fact:
all_group_variables: "{{ lookup('file', environments_path + '/' + deploy_environment + '/all.yml' )|from_yaml }}"
- name: "INCLUDE | Load configuration for each virtual group"
include_tasks: virtual_group.yml
with_items:
- "{{ virtual_groups }}"
loop_control:
loop_var: virtual_group
- name: "COLLECT | Store configuration fact into more flexible/readable format"
set_fact:
deployment_configuration: "{{ deployment_configuration|default([]) | union([vm.value]) }} "
with_dict:
- "{{ vm_conf }}"
loop_control:
loop_var: vm
- name: "COLLECT | Register names of networks to create or modify"
set_fact:
networks: "{{ networks|default({}) | combine( { item.network : { 'dhcp-leases' : {} } }) }}"
with_items:
- "{{ virtual_machines }}"
tags:
- virtualization
- network
- name: "COLLECT | Register gateway and dhcp leases for each network"
set_fact:
networks: "{{ networks|combine(
{ item.network: {
'dhcp-leases': {item.hostname: { 'mac_address': mac_addresses[item.hostname], 'ipv4': item.ipv4} }|combine(networks[item.network]['dhcp-leases'])
}
}
) }}"
with_items:
- "{{ virtual_machines }}"
tags:
- virtualization
- network
- name: "COLLECT | Merge network information (gateway, dns, netmask,...) with dhcp leases informations"
set_fact:
networks: "{{
networks | combine(
{
item.network: lookup('file', networks_path + '/' + item.network + '.yml')|from_yaml|combine(
{
'dhcp-leases': networks[item.network]['dhcp-leases']
}
)
}
)
}}"
with_items:
- "{{ virtual_machines }}"
tags:
- virtualization
- network
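# After the three set_fact tasks above, `networks` maps each network name to its
# static definition merged with the generated DHCP leases, roughly (illustrative
# values based on the demo_datalab network file):
# networks:
#   demo_datalab:
#     id: virbr100
#     gateway_ipv4: 192.168.100.1
#     dhcp-leases:
#       demo-manager-11:
#         mac_address: "00:16:3e:5d:a1:42"
#         ipv4: 192.168.100.11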
- name: "DEBUG | Print networks"
debug:
msg: "{{ item.value }}"
with_dict:
- "{{ networks }}"
tags:
- DEBUG
- virtualization
- network
- name: "FACTS | Register pools to create"
set_fact:
virtual_pools: "{{ virtual_pools|default([]) | union([item.image.volume.pool]) | unique }}"
with_items:
- "{{ virtual_machines }}"
tags:
- virtualization
- storage
- pool
- name: "DEBUG | Print pools to create"
debug:
msg: "{{ item }}"
with_items:
- "{{ virtual_pools }}"
tags:
- virtualization
- storage
- pool
- DEBUG
- name: "Template xml domain and define it for each virtual machine"
virt:
uri: "{{ qemu_uri}}"
name: "{{ vm.hostname }}"
command: define
xml: "{{ lookup('template', 'libvirt/domain.xml.j2') }}"
with_items: "{{ virtual_machines }}"
loop_control:
loop_var: vm
- name: Start the VM with libvirt
virt:
uri: "{{ qemu_uri}}"
name: "{{ vm.hostname }}"
state: running
when: "vm.state == 'active'"
with_items: "{{ virtual_machines }}"
loop_control:
loop_var: vm
- name: Ensure autostart policy is defined
virt:
uri: "{{ qemu_uri}}"
name: "{{ vm.hostname }}"
state: running
autostart: "{{ vm.autostart }}"
when: "vm.autostart is defined"
with_items: "{{ virtual_machines }}"
loop_control:
loop_var: vm
- name: Add iptables rules to allow forwarded traffic on virtual networks
command: "iptables -I FORWARD -m state -d {{ item.value.gateway_ipv4 }}/24 --state NEW,RELATED,ESTABLISHED -j ACCEPT"
with_dict:
- "{{ virtual_networks }}"
- name: Add forwarding rule for each VM in order to SSH directly from host port
command: "iptables -t nat -I PREROUTING -p tcp -d {{ ansible_host }} --dport {{ vm.ssh_host_port }} -j DNAT --to-destination {{ vm.ipv4 }}:22"
with_items: "{{ virtual_machines }}"
loop_control:
loop_var: vm
- name: "INCLUDE | Load task to detect networks"
include_tasks: detect_networks.yml
tags:
- network
- virtualization
- name: "INCLUDE | Load task to detect pools"
include_tasks: detect_pools.yml
tags:
- storage
- virtualization
- name: "ACTION | Ensure virtual mac address registry file exists"
file:
path: "{{ mac_address_inventory | dirname }}"
state: directory
mode: 0770
owner: "{{ virt_user }}"
group: "{{ virt_group }}"
recurse: true
tags:
- virtualization
- network
- name: "ACTION | Generate virtual mac address for each virtual machine"
script: >
get_mac_address.py
--hostname={{ item.hostname }}
--file={{ mac_address_inventory }}
args:
executable: "{{ ansible_python_interpreter }}"
with_items:
- "{{ virtual_machines }}"
register: mac_addresses_result
changed_when:
- mac_addresses_result is success
- mac_addresses_result.stdout_lines[1] == "CHANGED"
tags:
- virtualization
- network
- name: "COLLECT | Register virtual mac address for each virtual machine"
set_fact:
mac_addresses: "{{ mac_addresses|default({}) | combine( item.stdout_lines[0] | from_json )}}"
with_items:
- "{{ mac_addresses_result.results }}"
tags:
- virtualization
- network
- name: "DEBUG | Print registered result (mac address generation)"
debug:
msg: "{{ mac_addresses[item.hostname] }}"
with_items:
- "{{ virtual_machines }}"
tags:
- DEBUG
- virtualization
- network
---
- name: COLLECT | Collect some facts locally before taking further actions
include_tasks: collect_facts.yml
- name: DEBUG | We now have a variable called `deployment_configuration` holding each virtual machine's configuration.
debug:
msg: "{{ deployment_configuration }}"
tags:
- DEBUG
- name: INCLUDE | Register ssh key of hypervisor hosts for later use
include_tasks: ssh_keys.yml
vars:
ansible_user: "{{ virt_user }}"
- name: INCLUDE | Assign mac addresses to each virtual machine
include_tasks: mac_addresses.yml
vars:
virtual_machines: "{{ deployment_configuration }}"
ansible_user: "{{ virt_user }}"
- name: INCLUDE | List interfaces to define (networks, pools, volumes)
include_tasks: interfaces.yml
vars:
virtual_machines: "{{ deployment_configuration }}"
- name: INCLUDE | Prepare images for virtual machines
include_tasks: os_images.yml
vars:
virtual_machines: "{{ deployment_configuration }}"
ansible_user: "{{ virt_user }}"
- name: INCLUDE | Prepare networks for virtual machines
include_tasks: networks.yml
vars:
virtual_machines: "{{ deployment_configuration }}"
ansible_user: "{{ virt_user }}"
- name: INCLUDE | Prepare pools for virtual machines
include_tasks: pools.yml
vars:
virtual_machines: "{{ deployment_configuration }}"
ansible_user: "{{ virt_user }}"
- name: INCLUDE | Prepare volumes for virtual machines
include_tasks: volumes.yml
vars:
virtual_machines: "{{ deployment_configuration }}"
ansible_user: "{{ virt_user }}"
- name: INCLUDE | Prepare virtual machines
include_tasks: domains.yml
vars:
virtual_machines: "{{ deployment_configuration }}"
ansible_user: "{{ virt_user }}"
- name: "ACTION | Destroy virtual networks listed in destroy_network variable"
virt_net:
command: destroy
uri: "{{ qemu_uri}}"
name: "{{ item }}"
with_items:
- "{{ destroy_networks | default([]) }}"
tags:
- virtualization
- network
- name: "ACTION | Undefine virtual networks listed in remove_networks variable"
virt_net:
command: undefine
name: "{{ item }}"
uri: "{{ qemu_uri}}"
with_items:
- "{{ remove_networks | default([]) }}"
tags:
- virtualization
- network
- name: "ACTION | Define virtual networks with virt_net (Be careful, previously defined networks will not be overriden)"
virt_net:
command: define
name: "{{ item.key }}"
xml: '{{ lookup("template", "libvirt/bridges.xml.j2") }}'
uri: "{{ qemu_uri}}"
with_dict:
- "{{ networks }}"
when: "item.value.state is not defined or item.value.state == 'active' or item.value.state == 'present'"
tags:
- virtualization
- network
- name: "COLLECT | Register informations on existing networks"
# Gather facts about networks
# Facts will be available as 'ansible_libvirt_networks'
virt_net:
uri: "{{ qemu_uri}}"
command: facts
tags:
- virtualization
- network
- name: "ACTION | Start virtual networks currently stopped with virt_net"
virt_net:
command: create
name: "{{ item.key }}"
uri: "{{ qemu_uri}}"
when: "(item.value.state is not defined or item.value.state == 'active') and (ansible_libvirt_networks[item.key] is defined and ansible_libvirt_networks[item.key].state != 'active')"
with_dict:
- "{{ networks }}"
tags:
- virtualization
- network
- name: "ACTION | Add virtual machines created after virtual network definition to dhcp-leases entries (network doesn't need to be restarted)"
script: >
update_dhcp.sh
"{{ item.network }}"
"{{ item.hostname }}"
"{{ mac_addresses[item.hostname] }}"
"{{ item.ipv4 }}"
tags:
- virtualization
- network
register: update_dhcp_results
with_items:
- "{{ virtual_machines }}"
changed_when:
- update_dhcp_results is success
- update_dhcp_results.stdout_lines[0] == "CHANGED"
- name: "ACTION | Ensure network autostart policy is defined"
virt_net:
uri: "{{ qemu_uri}}"
autostart: yes
name: "{{ item.key }}"
when: "item.value.autostart|bool == True and (item.value.state is not defined or item.value.state == 'active' or item.value.state == 'present')"
with_dict:
- "{{ networks }}"
tags:
- virtualization
- network
- name: "ACTION | Ensure OS image cache path exists"
file:
path: "{{ libvirt_image_cache_path }}"
state: directory
mode: 0770
owner: "{{ virt_user }}"
group: "{{ virt_group }}"
recurse: true
tags:
- virtualization
- image
- name: "ACTION | Ensure OS images are downloaded"
get_url:
url: "{{ vm.image.url }}"
dest: "{{ libvirt_image_cache_path }}/{{ vm.image.name }}"
with_items: "{{ virtual_machines }}"
loop_control:
loop_var: vm
tags:
- virtualization
- image
- name: "INCLUDE | Create cloud-init seed for each virtual machine"
include_tasks: cloud-init.yml
tags:
- virtualization
- cloud-init
---
- name: "ACTION | Ensure storage pool root directory exist"
file:
path: "{{ storage_pool_root_path }}"
state: directory
mode: 0770
owner: "{{ virt_user }}"
group: "{{ virt_group }}"
recurse: true
tags:
- virtualization
- storage
- pool
become: true
- name: "COLLECT | Register information on existing pools"
virt_pool:
uri: "{{ qemu_uri}}"
command: info
register: pools_info
tags:
- virtualization
- storage
- pool
- name: "COLLECT | List existing pools (slightly different than collecting info)"
virt_pool:
uri: "{{ qemu_uri}}"
command: list_pools
register: available_pools
tags:
- virtualization
- storage
- pool
- name: "ACTION | Define storage pools that were not defined yet"
virt_pool:
uri: "{{ qemu_uri}}"
command: define
name: "{{ item.name }}"
xml: '{{ lookup("template", "libvirt/dir.xml.j2") }}'
when: "item.state != 'absent'
and
item.name not in available_pools.list_pools
"
with_items:
- "{{ virtual_pools }}"
tags:
- virtualization
- storage
- pool
- name: "ACTION | Build storage pools that are not active yet"
virt_pool:
uri: "{{ qemu_uri}}"
command: build
name: "{{ item.name }}"
when: "(
item.state == 'defined'
or
item.state == 'active'
)
and
(
item.name not in available_pools.list_pools
or
(
pools_info.pools[item.name] is defined
and
pools_info.pools[item.name].state == 'inactive'
))"
with_items:
- "{{ virtual_pools }}"
tags:
- virtualization
- storage
- pool
# - name: Start active storage pools that were not active yet.
# virt_pool:
# command: create
# name: "{{ storage_pool.name }}"
# when: "storage_pool.state == 'active'
# and
# storage_pool.name not in available_pools.list_pools"
# with_items:
# - "{{ storage_pools }}"
# loop_control:
# loop_var: storage_pool
- name: "ACTION | Ensure that pools with state set to 'active' are started"
virt_pool:
uri: "{{ qemu_uri}}"
state: active
name: "{{ item.name }}"
when: "item.state == 'active'"
with_items:
- "{{ virtual_pools }}"
tags:
- virtualization
- storage
- pool
- name: "ACTION | Define autostart policy for each pool"
virt_pool:
uri: "{{ qemu_uri}}"
autostart: "{{ item.autostart }}"
name: "{{ item.name }}"
with_items:
- "{{ virtual_pools }}"
tags:
- virtualization
- storage
- pool
- name: "ACTION | Ensure virt_user ({{ virt_user }}) belongs to libvirtd_group ({{ libvirtd_group }}) and {{ virt_group }}"
user:
name: "{{ virt_user }}"
groups: "{{ virt_groups }}"
vars:
virt_groups:
- "{{ libvirtd_group }}"
- "{{ virt_group }}"
tags:
- init
- virtualization
- name: "ACTION | Ensure virt_base_dir ({{ virt_base_dir }}) exists and belongs to {{ virt_user }}}}"
file:
path: "{{ virt_base_dir }}"
state: directory
mode: 0770
owner: "{{ virt_user }}"
group: "{{ virt_group }}"
recurse: true
tags:
- init
- virtualization
- name: ACTION | Uncomment lines in /etc/libvirt/qemu.conf to set the qemu user and group to virt_user and virt_group
lineinfile:
path: /etc/libvirt/qemu.conf
regexp: "{{ item.regexp }}"
line: "{{ item.line }}"
with_items:
-
regexp: '^#user = .*$'
line: 'user = "{{ virt_user }}"'
-
regexp: '^#group = .*$'
line: 'group = "{{ virt_group}}"'
-
regexp: '^user = .*$'
line: 'user = "{{ virt_user }}"'
-
regexp: '^group = .*$'
line: 'group = "{{ virt_group }}"'
tags:
- init
- virtualization
- name: Add special rule in polkit rules.d to allow members of virt_group to use the libvirtd daemon
blockinfile:
path: /etc/polkit-1/rules.d/49-org.libvirt.unix.manager.rules
create: true
block: |
/* Allow users in {{ virt_group }} group to manage the libvirt
daemon without authentication */
polkit.addRule(function(action, subject) {
if (action.id == "org.libvirt.unix.manage" &&
subject.isInGroup("{{ virt_group }}")) {
return polkit.Result.YES;
}
});
- name: ACTION | Generate ssh public key of current user if it does not exist
user:
name: "{{ ansible_user }}"
generate_ssh_key: yes
ssh_key_bits: 2048
ssh_key_comment: "{{ ansible_user }}@{{ inventory_hostname }}"
- name: COLLECT | Get ssh public key of current user
slurp:
src: ~/.ssh/id_rsa.pub
register: slurp_public_key
- name: COLLECT | Set the hypervisor ssh public key as a fact
set_fact:
hypervisor_public_key: "{{ slurp_public_key['content'] | b64decode }}"
- name: "DEBUG | Print virtual group"
debug:
msg: "{{ virtual_group }}"
tags:
- DEBUG
- name: 'COLLECT | Determine "{{ virtual_group }}" group configuration'
find:
paths: "{{ virtual_group }}/hosts"
excludes: 'group.yml'
recurse: true
file_type: file
follow: true
register: virtual_group_configuration_search
delegate_to: localhost
- name: "COLLECT | Set path of files holding configurations to empty list"
set_fact:
vm_conf_paths: []
- name: "COLLECT | Set path of files holding configurations as fact"
set_fact:
vm_conf_paths: "{{ [ item.path ] | union(vm_conf_paths|default([])) }}"
with_items:
- "{{ virtual_group_configuration_search.files }}"
- name: "DEBUG | Print virtual group configurations paths"
debug:
msg: "{{ vm_conf_paths }}"
tags:
- DEBUG
- name: "COLLECT | Add all variables first to configurations fact"
set_fact:
vm_conf: "{{ vm_conf | default({}) | combine( { item|basename : all_group_variables } ) }}"
with_items:
- "{{ vm_conf_paths }}"
- name: "COLLECT | Add group variables to configurations fact"
set_fact:
vm_conf: "{{ vm_conf | combine( { item|basename : lookup('file', virtual_group + '/group.yml')|from_yaml|combine (vm_conf[item|basename]) }) }}"
with_items:
- "{{ vm_conf_paths }}"
- name: DEBUG | Print virtual group configurations
debug:
msg: "{{ vm_conf }}"
tags:
- DEBUG
# Really ugly but works, so let's keep it like that for the moment
- name: "COLLECT | Add host variables to configurations fact"
set_fact:
vm_conf: "{{ vm_conf | combine( { item|basename : lookup('file', item)|from_yaml|combine (vm_conf[item|basename]) }) }}"
with_items:
- "{{ vm_conf_paths }}"
- name: "DEBUG | Print virtual groups configuration"
debug:
msg: "{{ vm_conf }}"
tags:
- DEBUG
- name: "ACTION | Ensure volumes exists for each virtual machine"
script: >
prepare_volume.sh
{{ item.hostname }}_img
{{ item.image.volume.pool.name }}
{{ item.image.volume.size }}
{{ item.image.volume.format | default(libvirt_volume_default_format) }}
{{ libvirt_image_cache_path }}/{{ item.image.name | basename }}
{{ item.image.volume.overwrite }}
with_items: "{{ virtual_machines }}"
register: volume_result
changed_when:
- volume_result is success
- (volume_result.stdout | from_json).changed | default(True)
tags:
- virtualization
- storage
- volume
- name: Check whether /etc/group contains "libvirt-qemu"
command: grep -Fq "libvirt-qemu" /etc/group
register: check_qemu_user
check_mode: no
ignore_errors: yes
changed_when: no
# - name: "ACTION | Set each volume ownership to libvirt-qemu on debian hosts"
# file:
# dest: "{{ storage_pool_root_path }}/{{ item.image.volume.pool.name }}"
# owner: libvirt-qemu
# group: libvirt-qemu
# mode: 0770
# recurse: true
# become: true
# when: "check_qemu_user.rc == 0"
- name: "Wait {{ ssh_timeout }} seconds for port {{ ssh_port }} to become open on each virtual machine"
wait_for:
timeout: "{{ ssh_timeout|default(30) }}"
port: "{{ ssh_port }}"
host: "{{ item.ipv4 }}"
with_items: "{{ virtual_machines }}"
instance-id: {{ vm.hostname }}
local-hostname: {{ vm.hostname }}
#cloud-config
users:
{% for user in vm.cloud_init.users %}
- name: {{ user.name }}
{% if user.gecos is defined %}
gecos: {{ user.gecos }}
{% endif %}
{% if user.lockpasswd is defined %}
lock_passwd: {{ user.lockpasswd }}
{% endif %}
{% if user.groups is defined %}
groups: {{ user.groups }}
{% endif %}
{% if user.passwd is defined %}
passwd: {{ user.passwd }}
{% endif %}
sudo: {{ user.sudo }}
ssh_authorized_keys:
- {{ hypervisor_public_key }}
{% for pubkey in user.ssh_authorized_keys %} - {{ pubkey }}
{% endfor %}
{% endfor %}
# manage_resolv_conf: true
#
# resolv_conf:
# nameservers: ['208.67.222.222', '208.67.220.220']
# options:
# rotate: true
# timeout: 1
datasource:
NoCloud:
fs_label: cidata
# TODO: Change default values in /etc/ssh/sshd_config
# "GSSAPIAuthentication yes" => "GSSAPIAuthentication no"
# "#UseDNS yes" => "UseDNS no"
runcmd:
- "echo 'Disabling GSSAPIAuthentication on SSH server'"
- "sed -i 's/^GSSAPIAuthentication yes/GSSAPIAuthentication no/g' /etc/ssh/sshd_config"
- "echo 'Disabling DNS lookup on SSH server'"
- "sed -i 's/^#UseDNS yes/UseDNS no/g' /etc/ssh/sshd_config"
- "echo 'Restarting SSH server'"
- "systemctl reload sshd"
- "systemctl restart sshd"
final_message: {{ vm.cloud_init.final_message }}
<network>
<name>{{ item.key }}</name>
<bridge name='{{ item.value.id }}' />
<forward mode='{{ item.value.mode }}' />
<domain name='{{ item.value.domain }}' />
<dns>
{% for address in item.value.dns %}
<forwarder addr='{{ address }}' />
{% endfor %}
</dns>
<ip address='{{ item.value.gateway_ipv4 }}' netmask='{{ item.value.netmask }}'>
<dhcp>
<range start='{{ item.value.dhcp.start }}' end='{{ item.value.dhcp.end }}' />
{% for vm in item.value['dhcp-leases'] %}
<host mac='{{ item.value['dhcp-leases'][vm]['mac_address'] }}' name='{{ vm }}' ip='{{ item.value['dhcp-leases'][vm]['ipv4'] }}'/>
{% endfor %}
</dhcp>
</ip>
</network>
<pool type='dir'>
<name>{{ item.name }}</name>
<target>
<path>{{ storage_pool_root_path }}/{{ item.name }}</path>
</target>
</pool>
<domain type='{{ vm.type }}'>
<name>{{ vm.hostname }}</name>
<memory unit='{{ vm.memory.unit }}'>{{ vm.memory.value }}</memory>
{% if vm.memory.current_memory is defined %}
<currentMemory unit='{{ vm.memory.unit }}'>{{ vm.memory.current_memory }}</currentMemory>
{% endif %}
{% if vm.hugepages is defined %}
<memoryBacking>
<hugepages>
<page size='{{ vm.hugepages.value }}' unit='{{ vm.hugepages.unit }}' nodeset='{{ vm.hugepages.nodeset }}'/>
</hugepages>
</memoryBacking>
{% endif %}
<vcpu>{{ vm.cores.value }}</vcpu>
<clock sync="localtime"/>
<on_poweroff>{{ vm.on_poweroff }}</on_poweroff>
<on_reboot>{{ vm.on_reboot }}</on_reboot>
<on_crash>{{ vm.on_crash }}</on_crash>
<os>
<type arch='{{ vm.os.arch }}'>{{ vm.os.type }}</type>
<bootmenu enable='{{ vm.os.boot.bootmenu_enabled }}'/>
{% for dev in vm.os.boot.devices %}
<boot dev='{{ dev }}'/>
{% endfor %}
<bios useserial='{{ vm.bios.userserial }}' rebootTimeout='{{ vm.bios.rebootTimeout }}'/>
</os>
<cpu mode='{{ vm.cpu.mode }}'>
<model fallback='{{ vm.cpu.model.fallback }}'/>
</cpu>
<devices>
<emulator>{{ emulator }}</emulator>
<disk type='volume' device='disk'>
<driver name='qemu' type='{{ vm.image.volume.format | default(libvirt_volume_default_format) }}'/>
<source pool='{{ vm.image.volume.pool.name }}' volume='{{ vm.hostname }}_img'/>
<target dev='vda'/>
</disk>
<disk type='file' device='cdrom'>
<driver name='qemu' type='raw'/>
<source file='{{ cloud_init_base_dir }}/{{ vm.hostname }}/seed.iso'/>
<target dev='hda'/>
</disk>
<interface type='network'>
<source network='{{ vm.network }}'/>
<mac address='{{ mac_addresses[vm.hostname] }}'/>
<model type='virtio'/>
</interface>
<serial type='pty'>
<target port='0'/>
</serial>
<console type='pty'>
<target type='serial' port='0'/>
</console>
</devices>
</domain>
---
packages: []
remove_packages: []
upgrade_packages: []
upgrade_dist: false
update_before_install: true
---
- name: "Add GPG keys"
rpm_key:
state: present
key: "{{ item }}"
with_items:
- "{{ GPG_keys | default([]) }}"
---
- name: "Add yum repositories"
yum_repository:
name: "{{ item.name }}"
description: "{{ item.description | default('') }}"
baseurl: "{{ item.baseurl }}"
enabled: "{{ item.enabled }}"
gpgcheck: "{{ item.gpgcheck }}"
gpgkey: "{{ item.gpgkey | default(omit) }}"
with_items:
- "{{ yum_repositories | default([]) }}"
---
- name: Check whether yum repository is disabled
  shell: "yum-config-manager {{ item.name }} | grep 'enabled = False'"
register: is_repo_enabled
failed_when: "is_repo_enabled.rc != 0 and
is_repo_enabled.rc != 1"
changed_when: "is_repo_enabled.rc == 0"
loop: "{{ yum_repositories }}"
- name: Enable yum repository
shell: "yum-config-manager --enable {{ item.0 }}"
vars:
ansible_become: true
when: "item.1.rc == 0"
loop: "{{ yum_repositories|zip(is_repo_enabled.results)|list }}"
---
- name: "Install {{ packages | list | join(', ') }} with yum"
yum:
name: "{{ item }}"
state: present
update_cache: "{{ update_before_install|bool }}"
with_items: "{{ packages }}"
---
- include: add_repositories.yml
- include: add_GPG_keys.yml
- include: remove_packages.yml
- include: upgrade_dist.yml
- include: upgrade_packages.yml
- include: install_packages.yml
---
- name: Remove {{ remove_packages | join(', ') }} with yum
yum:
name: "{{ item }}"
state: absent
with_items: "{{ remove_packages }}"
- name: Upgrade dist with yum update
yum:
name: '*'
state: latest
when: upgrade_dist|bool == true
---
- name: Upgrade {{ upgrade_packages | list | join(', ') }} with yum
yum:
name: "{{ item }}"
state: latest
update_cache: "{{ update_before_install|bool }}"
with_items: "{{ upgrade_packages }}"
cloud_init:
users:
-
name: ansible_user
gecos: Ansible Remote User (admin)
lockpasswd: true
sudo: ALL=(ALL) NOPASSWD:ALL
ssh_authorized_keys:
# Bash gcharbon
- ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDG3Ql48TplqV5FWrZvImXhd3qHW3E9Dwqr38h3oP6fF50RVOzSR2AkqlQ8F0e8cAWNh1xN17+XLDiIc7ZsfJle0fwFy3SEaNGIMg+mvfcgDEbuzFk/YecfpWP7LiUSedScvExQ9Hz9bQiCCBqRn7XCipdXIx+yQLnlrtl0mgGIGMfRgmWWZOgSFbqLJ5bQKHa3JKZJKN1RAxLceADEgXUZBS9c1X/l8LFdfYOFhyvg23qUOCVBpBHmhzxqq91M72nVIaiU73edy+qZbF2u34Zs1VO+36yrE3FL1E19B5fmGwMyDtOe6NnDAPJ//5bYaUSf35NgYB0slBEQaKdDlAwt gcharbon@LFR020106
# MobaXterm gcharbon
- ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDDGzMtuijGAzxasmDZqqoAeoaz7e4Ys+xlW6duu2tK34Wi85uHRREkrqrL9yRdL8I9QGRuw4puwL9mI2sYOx7BpqAlhTeZx44FJaAidbb9cxYruCwz55Pvi2ebsm7LktVwCqiTckwS53yzfJZvdssYkJKCYNzL/7f1HN7xrugUqtyoM6M/KcdxYBSBDM9qQU0HbOdmS11xVrfsl8f39IjxsmKIq89sYk2ng6KF/0EsDQQzprB5D8QqVDyFcvT7fC3HXlot0nsW+4/9Q/xkkMet3kkQTXd/marwDZI1jPS6GXPQy3drGkGTI/5151/rzPwOZsOZRR+vfpvSHpiDNKvr gcharbon@LFR020106
final_message: "The system is finally up, after $UPTIME seconds"
type: kvm
on_poweroff: destroy
on_reboot: restart
on_crash: restart
os:
arch: x86_64
type: hvm
boot:
devices:
- hd
- cdrom
- network
bootmenu_enabled: no
bios:
userserial: yes
rebootTimeout: 0
state: active
autostart: yes
cpu:
mode: host-passthrough
model:
fallback: allow
memory:
value: 1
unit: GiB
# Maximum can be undefined. In this case resource allocation is fixed.
# maximum:
# value: 14
# unit: GiB
cores:
value: 1
# Maximum can be undefined. In this case resource allocation is fixed.
# maximum:
# value: 6
image:
name: Centos7_x86_64_cloud_image.qcow2
url: https://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud-1808.qcow2
volume:
overwrite: true
pool:
name: demo_managers
state: active
autostart: yes
format: qcow2
size: 10G
overwrite_definition: true
hostname: demo-manager-11
network: demo_datalab
ipv4: 192.168.100.11
ssh_host_port: 10011
memory:
value: 1
unit: GiB
# Maximum can be undefined. In this case resource allocation is fixed.
# maximum:
# value: 14
# unit: GiB
cores:
value: 1
# Maximum can be undefined. In this case resource allocation is fixed.
# maximum:
# value: 6
image:
name: Centos7_x86_64_cloud_image.qcow2
url: https://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud-1808.qcow2
volume:
overwrite: true
pool:
name: demo_webservers
state: active
autostart: yes
format: qcow2
size: 10G
overwrite_definition: true
hostname: demo-webserver-12
network: demo_datalab
ipv4: 192.168.100.12
ssh_host_port: 10012
cloud_init:
users:
-
name: ansible_user
gecos: Ansible Remote User (admin)
lockpasswd: true
sudo: ALL=(ALL) NOPASSWD:ALL
ssh_authorized_keys:
# Bash gcharbon
- ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDG3Ql48TplqV5FWrZvImXhd3qHW3E9Dwqr38h3oP6fF50RVOzSR2AkqlQ8F0e8cAWNh1xN17+XLDiIc7ZsfJle0fwFy3SEaNGIMg+mvfcgDEbuzFk/YecfpWP7LiUSedScvExQ9Hz9bQiCCBqRn7XCipdXIx+yQLnlrtl0mgGIGMfRgmWWZOgSFbqLJ5bQKHa3JKZJKN1RAxLceADEgXUZBS9c1X/l8LFdfYOFhyvg23qUOCVBpBHmhzxqq91M72nVIaiU73edy+qZbF2u34Zs1VO+36yrE3FL1E19B5fmGwMyDtOe6NnDAPJ//5bYaUSf35NgYB0slBEQaKdDlAwt gcharbon@LFR020106
# MobaXterm gcharbon
- ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDDGzMtuijGAzxasmDZqqoAeoaz7e4Ys+xlW6duu2tK34Wi85uHRREkrqrL9yRdL8I9QGRuw4puwL9mI2sYOx7BpqAlhTeZx44FJaAidbb9cxYruCwz55Pvi2ebsm7LktVwCqiTckwS53yzfJZvdssYkJKCYNzL/7f1HN7xrugUqtyoM6M/KcdxYBSBDM9qQU0HbOdmS11xVrfsl8f39IjxsmKIq89sYk2ng6KF/0EsDQQzprB5D8QqVDyFcvT7fC3HXlot0nsW+4/9Q/xkkMet3kkQTXd/marwDZI1jPS6GXPQy3drGkGTI/5151/rzPwOZsOZRR+vfpvSHpiDNKvr gcharbon@LFR020106
final_message: "The system is finally up, after $UPTIME seconds"
type: kvm
on_poweroff: destroy
on_reboot: restart
on_crash: restart
os:
arch: x86_64
type: hvm
boot:
devices:
- hd
- cdrom
- network
bootmenu_enabled: no
bios:
userserial: yes
rebootTimeout: 0
state: active
autostart: yes
memory:
value: 12
unit: GiB
# Maximum can be undefined. In this case resource allocation is fixed.
# maximum:
# value: 14
# unit: GiB
cores:
value: 4
# Maximum can be undefined. In this case resource allocation is fixed.
# maximum:
# value: 6
image:
name: Centos7_x86_64_cloud_image.qcow2
url: https://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud-1808.qcow2
volume:
overwrite: false
pool:
name: demo_kairosdb
state: active
autostart: yes
format: qcow2
size: 200G
overwrite_definition: false
hostname: prod-kairos-221
network: production_datalab
ipv4: 192.168.10.221
ssh_host_port: ''
hostname: prod-kairos-222
network: production_datalab
ipv4: 192.168.10.222
ssh_host_port: ''
memory:
value: 8
unit: GiB
# Maximum can be undefined. In this case resource allocation is fixed.
# maximum:
# value: 14
# unit: GiB
cores:
value: 4
# Maximum can be undefined. In this case resource allocation is fixed.
# maximum:
# value: 6
image:
name: Centos7_x86_64_cloud_image.qcow2
url: https://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud-1808.qcow2
volume:
overwrite: false
pool:
name: demo_managers
state: active
autostart: yes
format: qcow2
size: 20G
overwrite_definition: false
hostname: prod-manager-101
network: production_datalab
ipv4: 192.168.10.101
ssh_host_port: 10101
hostname: prod-manager-102
network: production_datalab
ipv4: 192.168.10.102
ssh_host_port: 10102
cloud_init:
users:
-
name: ansible_user
gecos: Ansible Remote User (admin)
lockpasswd: true
sudo: ALL=(ALL) NOPASSWD:ALL
ssh_authorized_keys:
# Bash gcharbon
- ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDG3Ql48TplqV5FWrZvImXhd3qHW3E9Dwqr38h3oP6fF50RVOzSR2AkqlQ8F0e8cAWNh1xN17+XLDiIc7ZsfJle0fwFy3SEaNGIMg+mvfcgDEbuzFk/YecfpWP7LiUSedScvExQ9Hz9bQiCCBqRn7XCipdXIx+yQLnlrtl0mgGIGMfRgmWWZOgSFbqLJ5bQKHa3JKZJKN1RAxLceADEgXUZBS9c1X/l8LFdfYOFhyvg23qUOCVBpBHmhzxqq91M72nVIaiU73edy+qZbF2u34Zs1VO+36yrE3FL1E19B5fmGwMyDtOe6NnDAPJ//5bYaUSf35NgYB0slBEQaKdDlAwt gcharbon@LFR020106
# MobaXterm gcharbon
- ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDDGzMtuijGAzxasmDZqqoAeoaz7e4Ys+xlW6duu2tK34Wi85uHRREkrqrL9yRdL8I9QGRuw4puwL9mI2sYOx7BpqAlhTeZx44FJaAidbb9cxYruCwz55Pvi2ebsm7LktVwCqiTckwS53yzfJZvdssYkJKCYNzL/7f1HN7xrugUqtyoM6M/KcdxYBSBDM9qQU0HbOdmS11xVrfsl8f39IjxsmKIq89sYk2ng6KF/0EsDQQzprB5D8QqVDyFcvT7fC3HXlot0nsW+4/9Q/xkkMet3kkQTXd/marwDZI1jPS6GXPQy3drGkGTI/5151/rzPwOZsOZRR+vfpvSHpiDNKvr gcharbon@LFR020106
final_message: "The system is finally up, after $UPTIME seconds"
type: kvm
on_poweroff: destroy
on_reboot: restart
on_crash: restart
os:
arch: x86_64
type: hvm
boot:
devices:
- hd
- cdrom
- network
bootmenu_enabled: no
bios:
userserial: true
rebootTimeout: 0
state: active
autostart: true
cpu:
mode: host-passthrough
model:
fallback: allow
memory:
value: 2
unit: GiB
# Maximum can be undefined. In this case resource allocation is fixed.
# maximum:
# value: 14
# unit: GiB
cores:
value: 2
# Maximum can be undefined. In this case resource allocation is fixed.
# maximum:
# value: 6
image:
name: Centos7_x86_64_cloud_image.qcow2
url: https://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud-1808.qcow2
volume:
overwrite: true
pool:
name: test_graphic
state: active
autostart: yes
format: qcow2
size: 20G
overwrite_definition: true
hostname: test_graphic_241
network: testing_datalab
ipv4: 192.168.20.241
ssh_host_port: 20241
hostname: test_graphic_242
network: testing_datalab
ipv4: 192.168.20.242
ssh_host_port: 20242
memory:
value: 2
unit: GiB
# Maximum can be undefined. In this case resource allocation is fixed.
# maximum:
# value: 14
# unit: GiB
cores:
value: 2
# Maximum can be undefined. In this case resource allocation is fixed.
# maximum:
# value: 6
image:
name: Centos7_x86_64_cloud_image.qcow2
url: https://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud-1808.qcow2
volume:
overwrite: true
pool:
name: test_kairosdb
state: active
autostart: yes
format: qcow2
size: 20G
overwrite_definition: true
hostname: test_kairos_221
network: testing_datalab
ipv4: 192.168.20.221
ssh_host_port: 20221
hostname: test_kairos_222
network: testing_datalab
ipv4: 192.168.20.222
ssh_host_port: 20222
memory:
value: 1
unit: GiB
# Maximum can be undefined. In this case resource allocation is fixed.
# maximum:
# value: 14
# unit: GiB
cores:
value: 1
# Maximum can be undefined. In this case resource allocation is fixed.
# maximum:
# value: 6
image:
name: Centos7_x86_64_cloud_image.qcow2
url: https://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud-1808.qcow2
volume:
overwrite: true
pool:
name: test_managers
state: active
autostart: yes
format: qcow2
size: 10G
overwrite_definition: true
hostname: test_manager_101
network: testing_datalab
ipv4: 192.168.20.101
ssh_host_port: 20101
hostname: test_manager_102
network: testing_datalab
ipv4: 192.168.20.102
ssh_host_port: 20102
memory:
value: 2
unit: GiB
# Maximum can be undefined. In this case resource allocation is fixed.
# maximum:
# value: 14
# unit: GiB
cores:
value: 1
# Maximum can be undefined. In this case resource allocation is fixed.
# maximum:
# value: 6
image:
name: Centos7_x86_64_cloud_image.qcow2
url: https://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud-1808.qcow2
volume:
overwrite: true
pool:
name: test_mongodb
state: active
autostart: yes
format: qcow2
size: 20G
overwrite_definition: true
hostname: test_mongo_231
network: testing_datalab
ipv4: 192.168.20.231
ssh_host_port: 20231
hostname: test_mongo_232
network: testing_datalab
ipv4: 192.168.20.232
ssh_host_port: 20232
memory:
value: 16
unit: GiB
# Maximum can be undefined. In this case resource allocation is fixed.
# maximum:
# value: 20
# unit: GiB
cores:
value: 2
# Maximum can be undefined. In this case resource allocation is fixed.
# maximum:
# value: 6
image:
name: Centos7_x86_64_cloud_image.qcow2
url: https://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud-1808.qcow2
volume:
overwrite: true
pool:
name: test_nifi
state: active
autostart: yes
format: qcow2
size: 20G
overwrite_definition: true
hostname: test_nifi_211
network: testing_datalab
ipv4: 192.168.20.211
ssh_host_port: 20211
hostname: test_nifi_212
network: testing_datalab
ipv4: 192.168.20.212
ssh_host_port: 20212
memory:
value: 2
unit: GiB
# Maximum can be undefined. In this case resource allocation is fixed.
# maximum:
# value: 14
# unit: GiB
cores:
value: 2
# Maximum can be undefined. In this case resource allocation is fixed.
# maximum:
# value: 6
image:
name: Centos7_x86_64_cloud_image.qcow2
url: https://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud-1808.qcow2
volume:
overwrite: true
pool:
name: test_registry
state: active
autostart: yes
format: qcow2
size: 20G
overwrite_definition: true
hostname: test_registry_201
network: testing_datalab
ipv4: 192.168.20.201
ssh_host_port: 20201
hostname: test_registry_202
network: testing_datalab
ipv4: 192.168.20.202
ssh_host_port: 20202
memory:
value: 2
unit: GiB
# Maximum can be undefined. In this case resource allocation is fixed.
# maximum:
# value: 14
# unit: GiB
cores:
value: 2
# Maximum can be undefined. In this case resource allocation is fixed.
# maximum:
# value: 6
image:
name: Centos7_x86_64_cloud_image.qcow2
url: https://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud-1808.qcow2
volume:
overwrite: true
pool:
name: test_security
state: active
autostart: yes
format: qcow2
size: 20G
overwrite_definition: true
hostname: test_security_251
network: testing_datalab
ipv4: 192.168.20.251
ssh_host_port: 20251
hostname: test_security_252
network: testing_datalab
ipv4: 192.168.20.252
ssh_host_port: 20252
id: virbr100
name: demo_datalab
mode: nat
gateway_ipv4: 192.168.100.1
netmask: 255.255.255.0
dns:
- 208.67.222.222 # OpenDNS first IP
- 208.67.220.220 # OpenDNS second IP
- 8.8.8.8 # Google DNS
- 10.70.195.20 # Capgemini DNS
dhcp:
start: 192.168.100.1
end: 192.168.100.254
state: active
autostart: true
domain: demo-datalab.local
id: virbr10
name: production_datalab
mode: nat
gateway_ipv4: 192.168.10.1
netmask: 255.255.255.0
dns:
- 10.70.195.20 # Capgemini DNS
- 208.67.222.222 # OpenDNS first IP
- 208.67.220.220 # OpenDNS second IP
- 8.8.8.8 # Google DNS
dhcp:
start: 192.168.10.1
end: 192.168.10.254
state: active
autostart: true
domain: datalab.local
id: virbr20
name: testing_datalab
mode: nat
gateway_ipv4: 192.168.20.1
netmask: 255.255.255.0
dns:
- 208.67.222.222 # OpenDNS first IP
- 208.67.220.220 # OpenDNS second IP
- 8.8.8.8 # Google DNS
- 10.70.195.20 # Capgemini DNS
dhcp:
start: 192.168.20.1
end: 192.168.20.254
state: active
autostart: true
domain: testing-datalab.local