Commit ee21491a: first commit
playbooks/*
#!/bin/bash
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd )"
export ANSIBLE_ROLES_PATH="$DIR/roles"
*.retry
.vaultpassword
*.log
inventories/*.gcharbon.yml
Host 192.168.20.*
ProxyCommand ssh -W %h:%p LFR028762
User ansible_user
IdentityFile ~/.ssh/id_rsa
GSSAPIAuthentication no
Host 192.168.100.*
ProxyCommand ssh -W %h:%p LFR028762
User ansible_user
IdentityFile ~/.ssh/id_rsa
GSSAPIAuthentication no
Host 192.168.10.*
ProxyCommand ssh -W %h:%p LFR028762
User ansible_user
IdentityFile ~/.ssh/id_rsa
GSSAPIAuthentication no
Host LFR028762
Hostname 10.68.150.240
User gcharbon
ControlMaster auto
ControlPath ~/.ssh/ansible-%r@%h:%p
ControlPersist 5m
GSSAPIAuthentication no
MIT License
Copyright (c) 2018 Guillaume Charbonnier
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
.DEFAULT_GOAL := setup_demo
VERSION := 1.0
ROOT_DIR := $(shell dirname $(realpath $(lastword $(MAKEFILE_LIST))))
CUR_DATE := $(shell date +"%s")
PLAYBOOK_FOLDER := playbooks
INVENTORY_FOLDER := inventories
HYPERVISOR_INVENTORY := LFR028762.yml
GCHARBON_INVENTORY := demo.gcharbon.yml
VIRTUAL_INVENTORY := dynamic_inventories/demo_datalab.py
HYPERVISOR_INVENTORY_PATH := "$(INVENTORY_FOLDER)/$(HYPERVISOR_INVENTORY)"
VIRTUAL_INVENTORY_PATH := "$(INVENTORY_FOLDER)/$(VIRTUAL_INVENTORY)"
GCHARBON_INVENTORY_PATH := "$(INVENTORY_FOLDER)/$(GCHARBON_INVENTORY)"
VAULT_ID := --vault-id @prompt
setup_demo:
ANSIBLE_ROLES_PATH=$(ROOT_DIR)/roles ansible-playbook -i $(HYPERVISOR_INVENTORY_PATH) \
$(PLAYBOOK_FOLDER)/demo_init_virt_setup.yml
demo:
ANSIBLE_ROLES_PATH=$(ROOT_DIR)/roles ansible-playbook -i $(GCHARBON_INVENTORY_PATH) \
$(PLAYBOOK_FOLDER)/demo_init_virt_setup.yml
update_demo:
ansible-playbook -i $(HYPERVISOR_INVENTORY_PATH) \
$(PLAYBOOK_FOLDER)/demo_update_virt_setup.yml
provision_demo:
ansible-playbook -i $(VIRTUAL_INVENTORY_PATH) \
$(PLAYBOOK_FOLDER)/demo_vm_provisioning.yml
# Setup your Python environment with Ansible
Table of Contents:
* [Getting started](#getting_started)
* [Available ansible roles](#ansible_roles)
* [Available ansible playbooks](#ansible_playbooks)
* [install_pip.yml](#install_pip)
* [prepare_rh-python36](#demo_nodes)
* [prepare_rh-python36_centos](#centos_nodes)
* [Authentication](#authentication)
* [How to generate a new value for the `ansible_become_pass` variable](#ansible_become_pass)
* [Providing Ansible vault secrets](#vault_secrets)
## Getting started
<a name="getting_started"></a>
This project uses Ansible to configure remote or local Python environments.
You must use `ansible>=2.6`.
You can install it directly with `pip install -r requirements.txt` after cloning the repository.
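For example (the repository URL below is a placeholder; substitute the actual clone URL):
```
# <repository_url> is a placeholder for the actual clone URL
git clone <repository_url> ansible-python-env
cd ansible-python-env
pip install -r requirements.txt
ansible --version  # should report 2.6 or later
```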
#### Available ansible roles
<a name="ansible_roles"></a>
This repository is composed of 3 [ansible roles](https://docs.ansible.com/ansible/2.6/user_guide/playbooks_reuse_roles.html):
- **get-pip**: Can be used to install pip and virtualenv
- **pip**: Can be used to perform operations with pip
- **yum**: Can be used to perform actions with yum
#### Available ansible playbooks
<a name="ansible_playbooks"></a>
Three [major playbooks](https://docs.ansible.com/ansible/devel/user_guide/playbooks.html) are available for direct usage:
> Note: You must be inside the playbooks directory to run example commands.
- _**install_pip.yml**_:
<a name="install_pip"></a>
This playbook installs `pip` and `virtualenv` using the default `ansible_python_interpreter`. Any other interpreter can be used if the `python_interpreter` variable is defined.
###### Workflow:
1) Check if pip is installed
If it is not installed:
1.1) Download get-pip installer
1.2) Execute get-pip installer
1.3) Remove get-pip installer
2) Install virtualenv if it is not installed yet
###### Example usage:
- `python_interpreter` is not defined in the inventory nor in any other variable.
The default interpreter (`ansible_python_interpreter`) is used:
```
ansible-playbook -i inventories/test_centos.yml \
--vault-id @prompt \
install_pip.yml
```
- `python_interpreter` is set to `/opt/rh/rh-python36/root/bin/python`
```
ansible-playbook -i inventories/test_centos.yml \
--vault-id @prompt \
--extra-vars "python_interpreter=/opt/rh/rh-python36/root/bin/python"
install_pip.yml
```
- _**prepare_rh-python36.yml**_
<a name="demo_nodes"></a>
This playbook assumes that RHEL repositories have already been enabled and that the `rh-python36` package is available for download. It installs `rh-python36` and its dependencies based on a requirements file.
> Note: The playbook installs `rh-python36` because `ansible_python_interpreter` is set to `rh_python_interpreter`, and `rh_python_interpreter` is set to `rh-python36` in the demo_nodes group of the inventory. You can change it to any other package.
> Warning: If you want to install a package not managed by scl, set `scn_enable_python` to `false`.
###### Workflow:
1) Ensure `rh-python36` is installed
2) Optionally add line in .bashrc to enable rh-python36 with scl at startup
3) Copy python requirements to remote host
4) Install python requirements with pip from `rh-python36`
###### Example usages:
- `rh_python_package` is set to `rh-python36` by default:
```
ansible-playbook -i inventories/test_centos.yml \
--vault-id @prompt \
prepare_rh-python36.yml
```
- Install another version of Python:
```
ansible-playbook -i inventories/test_centos.yml \
--ask-vault-pass \
--extra-vars "rh_python_package=rh-python35 python_interpreter=/opt/rh/rh-python35/root/bin/python" \
prepare_rh-python36.yml
```
- Install with another requirements file:
```
ansible-playbook -i inventories/test_centos.yml \
--vault-id @prompt \
--extra-vars "pip_requirements_path=~/some_folder/requirements.txt"
prepare_rh-python36.yml
```
- _**prepare_rh-python36_centos.yml**_:
<a name="centos_nodes"></a>
This playbook enables the `centos-sclo-rh-testing` repository and
downloads `rh-python36` before installing python dependencies with this
interpreter.
###### Playbook workflow:
1) Ensure centos-release-scl is installed
2) Enable centos-sclo-rh-testing repository
3) Install `rh-python36`
4) Add line to `.bashrc` to enable rh-python36 by default at startup
5) Copy python requirements to remote host
6) Install python requirements
It can be used the same way as `prepare_rh-python36` is used.
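For example, mirroring the invocations shown above:
```
ansible-playbook -i inventories/test_centos.yml \
    --vault-id @prompt \
    prepare_rh-python36_centos.yml
```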
## Authentication
<a name="authentification"></a>
- Logging in to remote hosts relies on SSH key-based authentication.
> Use `ssh-copy-id <ansible_user@ansible_remote_host>` to ensure your own ssh key is authorized by the remote host.
- [Privilege escalation method](https://docs.ansible.com/ansible/latest/user_guide/become.html) used in playbook can be configured with `ansible_become_method` variable.
Default value is `su`. The root user's password is expected to be present as a vault-encrypted variable named `ansible_become_pass`. It can be accessed like any other vault secret once the vault password is given to playbooks.
> List of available values: https://docs.ansible.com/ansible/latest/user_guide/become.html#command-line-options
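For example, to switch privilege escalation to `sudo` for a single run (shown as an illustration; any method from the list above can be substituted):
```
# ansible_become_method accepts any supported become method
ansible-playbook -i inventories/test_centos.yml \
    --vault-id @prompt \
    --extra-vars "ansible_become_method=sudo" \
    prepare_rh-python36.yml
```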
## How to generate a new value for the `ansible_become_pass` variable?
<a name="ansible_become_pass"></a>
- Run the following command:
```
ansible-vault encrypt_string "PASSWORD"
```
- Before returning the encrypted string, it will ask you for a password (you will provide this password at runtime to decrypt the secret). Store the result in a variables file.
You should get something like:
```
ansible_become_pass: !vault |
$ANSIBLE_VAULT;1.1;AES256
34306464383862303338666336306239306335393366656136313362643334383264326530333136
3831326639343639643063643664666331356236346239640a346531326465333330363761373831
61353139323635333461313732386538366361326163613865333462353161623039356433643032
3962303266363532330a616432653534333431363938386531373864616635393462356337336334
3834
```
> See [official documentation](https://docs.ansible.com/ansible/2.4/vault.html#use-encrypt-string-to-create-encrypted-variables-to-embed-in-yaml) for more information.
## Providing vault secrets
<a name="vault_secrets"></a>
You can choose several options to [provide vault password](https://docs.ansible.com/ansible/2.4/vault.html#providing-vault-passwords) to playbooks at runtime:
- Using `--vault-id @prompt`.
Example:
```
ansible-playbook -i inventories/test_centos.yml \
--vault-id @prompt \
prepare_rh-python36.yml
```
- Using a file or an executable:
Examples:
- Assuming `get-vault-password.py` is an existing python script:
```
ansible-playbook -i inventories/test_centos.yml \
--vault-id get-vault-password.py \
prepare_rh-python36.yml
```
- Assuming `.vaultpassword` is an existing file:
```
ansible-playbook -i inventories/test_centos.yml \
--vault-id .vaultpassword \
prepare_rh-python36.yml
```
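For instance, a minimal way to create such a file (`.vaultpassword` is already listed in `.gitignore`, so it will not be committed):
```
echo 'PASSWORD' > .vaultpassword
chmod 600 .vaultpassword  # keep the vault password readable only by you
```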
[defaults]
roles_path = $ANSIBLE_ROLES_PATH
[ssh_connection]
ssh_args = -F .ssh/ssh.cfg -o ControlMaster=auto -o ControlPersist=30m
control_path = ~/.ssh/ansible-%%r@%%h:%%p
version: '3.2'
services:
agent:
image: portainer/agent
environment:
# REQUIRED: Should be equal to the service name prefixed by "tasks." when
# deployed inside an overlay network
AGENT_CLUSTER_ADDR: tasks.agent
# AGENT_PORT: 9001
# LOG_LEVEL: debug
volumes:
- /var/run/docker.sock:/var/run/docker.sock
- /var/lib/docker/volumes:/var/lib/docker/volumes
networks:
- agent_network
deploy:
mode: global
placement:
constraints: [node.platform.os == linux]
portainer:
image: portainer/portainer
command: -H tcp://tasks.agent:9001 --tlsskipverify
ports:
- "9000:9000"
volumes:
- portainer_data:/data
networks:
- agent_network
deploy:
mode: replicated
replicas: 1
placement:
constraints: [node.role == manager]
networks:
agent_network:
driver: overlay
attachable: true
volumes:
portainer_data:
#!/usr/bin/env python
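# Minimal vault password client: prompt for the vault password on the
# terminal and print it to stdout, so this script can be passed to
# ansible-playbook through --vault-id.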
from __future__ import print_function
import getpass
if __name__ == "__main__":
pswd = getpass.getpass('Vault Password:')
print(pswd)
---
all:
vars:
ansible_user: root
ansible_python_interpreter: /usr/bin/python
children:
centos_hosts:
hosts:
LFR028762:
ansible_host: 10.68.150.240
datalab:
hosts:
LFR028762:
hypervisors:
hosts:
LFR028762:
---
all:
vars:
ansible_user: root
children:
centos_hosts:
hosts:
demo-centos-01:
ansible_host: 159.65.207.209
local_ipv4: 10.133.55.212
demo-centos-02:
ansible_host: 188.166.122.69
local_ipv4: 10.133.49.170
demo-centos-03:
ansible_host: 174.138.11.9
local_ipv4: 10.133.53.14
debian_hosts:
hosts:
demo-ubuntu-01:
ansible_host: 159.65.196.145
local_ipv4: 10.133.67.137
demo-ubuntu-02:
ansible_host: 188.166.45.196
local_ipv4: 10.133.41.182
demo-ubuntu-03:
ansible_host: 174.138.13.10
local_ipv4: 10.133.48.63
datalab:
hosts:
demo-centos-01:
demo-centos-02:
demo-centos-03:
demo-ubuntu-01:
demo-ubuntu-02:
demo-ubuntu-03:
docker:
hosts:
demo-centos-01:
demo-centos-02:
demo-centos-03:
demo-ubuntu-01:
demo-ubuntu-02:
demo-ubuntu-03:
swarm_managers:
hosts:
demo-centos-01:
demo-ubuntu-01:
swarm_workers:
hosts:
demo-centos-02:
demo-centos-03:
demo-ubuntu-02:
demo-ubuntu-03:
# hypervisors:
#   hosts:
#     demo_node-01:
#     demo_node-02:
#!/usr/bin/env python
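# Dynamic inventory wrapper for the demo environment: it delegates to
# scripts/libvirt_inventory.py with the demo libvirt URI, virtual network
# and common groups defined below.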
from __future__ import print_function
import argparse
import subprocess
import os
dir_path = os.path.dirname(os.path.realpath(__file__))
LIBVIRT_URI="qemu+tcp://root@10.68.150.240:16509/system"
VIRT_NETWORK="demo_datalab"
COMMON_GROUPS="demo,virtual_machines,centos_hosts,docker"
INVENTORY_SCRIPT_RELATIVE_PATH="scripts/libvirt_inventory.py"
cmd = "python {0} --libvirt_uri '{1}' --virt_network {2} --common_groups {3}"
inventory_script = os.path.join(dir_path,
INVENTORY_SCRIPT_RELATIVE_PATH)
cmd = cmd.format(inventory_script,
LIBVIRT_URI,
VIRT_NETWORK,
COMMON_GROUPS)
if __name__ == "__main__" :
parser = argparse.ArgumentParser()
parser.add_argument("--list",
"-l",
action='store_true',
required=False)
    args = parser.parse_args()
    p = subprocess.Popen(cmd.split(" "), stdout=subprocess.PIPE)
    output = p.stdout.read()
    print(output.decode())
exit(0)
#!/usr/bin/env python
from __future__ import print_function
import argparse
import subprocess
import os
dir_path = os.path.dirname(os.path.realpath(__file__))
inventory_script = dir_path + "/scripts/libvirt_inventory.py"
cmd = 'python {0}'.format(inventory_script)
if __name__ == "__main__" :
parser = argparse.ArgumentParser()
parser.add_argument("--list",
"-l",
action='store_true',
required=False)
    args = parser.parse_args()
    p = subprocess.Popen(cmd.split(" "), stdout=subprocess.PIPE)
    output = p.stdout.read()
    print(output.decode())
exit(0)
#!/usr/bin/env python
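# Dynamic Ansible inventory backed by libvirt: connects to a libvirt
# daemon, reads the DHCP leases of a virtual network, and groups the
# running guests by a token extracted from their hostnames.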
from __future__ import print_function
import argparse
import libvirt
import json
import logging
logger = logging.getLogger(__name__)
LIBVIRT_DEFAULT_URI = 'qemu:///system'
LIBVIRT_DEFAULT_NETWORK = 'testing_datalab'
DEFAULT_SPLIT_HOSTNAME = '_'
DEFAULT_SPLIT_GROUP_POS = 1
DEFAULT_COMMON_GROUP = 'docker_hosts,centos_hosts,virtual_machines'
class LibvirtInventory(object):
def __init__(self,
target_network=LIBVIRT_DEFAULT_NETWORK,
uri=LIBVIRT_DEFAULT_URI,
group_split=DEFAULT_SPLIT_HOSTNAME,
group_pos=DEFAULT_SPLIT_GROUP_POS,
additional_group=DEFAULT_COMMON_GROUP):
self.uri = uri.replace("'","").replace('"','')
self.open_libvirt_connection(self.uri)
self.parser = {
"split": group_split,
"pos": group_pos,
}
self.common_groups = additional_group.split(",")
self.set_target_network(target_network)
self.inventory = {}
self.set_all_hosts_and_meta()
self.add_all_hosts_to_groups()
self.add_all_common_groups()
self.close_libvirt_connection()
def open_libvirt_connection(self, uri):
self.conn = libvirt.openReadOnly(uri)
        if self.conn is None:
logger.error('Failed to open connection to {0}'.format(uri))
exit(1)
else:
logger.info("Connected to libvirt on remote host: {0}".format(uri))
def close_libvirt_connection(self):
self.conn.close()
def set_target_network(self, target_network):
self._networks = {network.name(): network for network in self.conn.listAllNetworks()}
try:
self._network = self._networks[target_network]
except KeyError:
available_networks = ", ".join(self._networks.keys())
raise ValueError("Target network ({0}) does not exist. Available networks: {1}".format(target_network, available_networks))
@property
def network(self):
return self._network
def set_all_hosts_and_meta(self):
self._hosts = { lease["hostname"]: { "ansible_host": lease["ipaddr"] } for lease in self.network.DHCPLeases() }
self.add_group("all")
self.inventory["all"]["hosts"] = self.hosts = list(self._hosts.keys())
self.inventory["_meta"]= {"hostvars": self._hosts}
def add_group(self, group):
""" Add a group to inventory """
if group not in self.inventory:
self.inventory[group] = dict()
def get_group(self, group):
""" Return a group as dictionary from inventory """
return self.inventory[group]
def add_child(self, child, group):
""" Add a child group (string) to group in inventory"""
if not "children" in self.get_group(group):
self.inventory[group]["children"] = [child]
else:
self.inventory[group]["children"] = list(set([child] + self.inventory[group]["children"]))
def add_childgroup(self, child_group, parent_group="all"):
""" Add a group and mark it as child of another group in inventory """
self.add_group(child_group)
self.add_child(child_group, parent_group)
def add_children(self, children, group):
""" Add list of children to group in inventory"""
if not "children" in self.get_group(group):
self.inventory[group]["children"] = list(children)
else:
self.inventory[group]["children"] = list(set(children + self.inventory[group]["children"]))
def add_groupvars(self, vars_dict, group):
""" Takes a dictionary as argument and add the keys and values as group variables"""
if "vars" not in self.get_group(group):
self.inventory[group]["vars"] = vars_dict
else:
self.inventory[group]["vars"].update(vars_dict)
def add_host(self, hostname, group, create=True):
""" Add one host (string) to a group.
If create is True (default) then the group will be automatically created if it does not exist yet.
Else it will fail on KeyError
"""
if create:
self.add_group(group)
if "hosts" not in self.inventory[group]:
self.inventory[group]["hosts"] = [hostname]
else:
self.inventory[group]["hosts"] = list(set([hostname] + self.inventory[group]["hosts"]))
def add_hosts(self, hostnames, group, create=True):
""" Add several hosts (list) to a group.
If create is True (default) then the group will be automatically created if it does not exist yet.
Else it will fail on KeyError
"""
        if create:
            self.add_group(group)
        if "hosts" not in self.inventory[group]:
            self.inventory[group]["hosts"] = list(hostnames)
        else:
            self.inventory[group]["hosts"] = list(set(hostnames + self.inventory[group]["hosts"]))
def add_all_hosts_to_groups(self):
        for hostname in self.hosts:
            # Honor the configured split pattern and group position
            group = hostname.split(self.parser["split"])[self.parser["pos"]]
self.add_host(hostname, group)
def add_all_common_groups(self):
for group in self.common_groups:
self.add_common_group(group)
def add_common_group(self, common_group):
self.add_childgroup(common_group)
for group in self.inventory:
if group in ["_meta", "all"] + self.common_groups:
continue
self.add_child(group, common_group)
def json_dump(self, indent=None):
return json.dumps(self.inventory, indent=indent)
def dump_inventory(libvirt_uri, virt_network, split_hostname, group_pos, common_groups):
inventory = LibvirtInventory(target_network=virt_network,
uri=libvirt_uri,
group_split=split_hostname,
group_pos=group_pos,
additional_group=common_groups)
return inventory.json_dump(2)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--libvirt_uri",
"-l",
default=LIBVIRT_DEFAULT_URI,
type=str,
required=False,
help="""URI that python should use to connect to libvirt daemon.
Default to '{0}'.
Can use several protocols:
- qemu:/// for local socket connections
- qemu+tcp:// for raw tcp connections
- qemu+tls:// for tcp connections over tls encryption
More information available at https://libvirt.org/remote.html
""".format(LIBVIRT_DEFAULT_URI))
parser.add_argument("--virt_network",
"-n",
default=LIBVIRT_DEFAULT_NETWORK,
type=str,
required=False,
help="""Virtual network from which we want to determine running guests.
This script can only list virtual machines located in same virtual network.
Default to '{0}'.
""".format(LIBVIRT_DEFAULT_NETWORK))
parser.add_argument("--split_hostname",
default=DEFAULT_SPLIT_HOSTNAME,
required=False,
help="""Which pattern should be used to split the found hostnames.
Default to '{0}'.
Hostnames are split in order to retrieve group they belong to.
This option must be used with `split_group_pos` option.
""".format(DEFAULT_SPLIT_HOSTNAME)
)
parser.add_argument("--split_group_pos",
default=DEFAULT_SPLIT_GROUP_POS,
required=False,
type=int,
help="""After splitting hostname, what is the index of the group name.
Default to '{0}'.
This option must be used with `split_hostname` option.
""".format(DEFAULT_SPLIT_GROUP_POS)
)
parser.add_argument("--common_groups",
"-g",
default=DEFAULT_COMMON_GROUP,
required=False,
type=str,
help="""Comma delimited list of group names to which all guests will belong.
Default to '{0}'.
This option must be used with `split_hostname` option.
""".format(DEFAULT_COMMON_GROUP)
)
args = parser.parse_args()
json_inv = dump_inventory(args.libvirt_uri,
args.virt_network,
args.split_hostname,
args.split_group_pos,
args.common_groups)
print(json_inv)
if __name__ == "__main__":
main()
#!/usr/bin/env python
from __future__ import print_function
import argparse
import subprocess
import os
dir_path = os.path.dirname(os.path.realpath(__file__))
LIBVIRT_URI="qemu+tcp://root@10.68.150.240:16509/system"
VIRT_NETWORK="testing_datalab"
COMMON_GROUPS="testing,datalab,virtual_machines,centos_hosts,docker"
INVENTORY_SCRIPT_RELATIVE_PATH="scripts/libvirt_inventory.py"
cmd = "python {0} --libvirt_uri '{1}' --virt_network {2} --common_groups {3}"
inventory_script = os.path.join(dir_path,
INVENTORY_SCRIPT_RELATIVE_PATH)
cmd = cmd.format(inventory_script,
LIBVIRT_URI,
VIRT_NETWORK,
COMMON_GROUPS)
if __name__ == "__main__" :
parser = argparse.ArgumentParser()
parser.add_argument("--list",
"-l",
action='store_true',
required=False)
    args = parser.parse_args()
    p = subprocess.Popen(cmd.split(" "), stdout=subprocess.PIPE)
    output = p.stdout.read()
    print(output.decode())
exit(0)
{
"cells": [],
"metadata": {},
"nbformat": 4,
"nbformat_minor": 2
}
- hosts: all
tasks:
- name: "Ensure local users exist and setup ssh configuration for each users"
import_role:
name: users
vars:
users: "{{ local_users }}"
become: true
- hosts: centos_hosts
tasks:
- name: Install common packages with yum
import_role:
name: yum
vars:
update_dist: true
upgrade_dist: true
packages:
- vim
- dos2unix
- git
- curl
- wget
- git
remove_packages: []
upgrade_packages: []
GPG_keys: []
yum_repositories: []
become: true
- hosts: debian_hosts
tasks:
- name: Install common packages with apt
import_role:
name: apt
vars:
update_dist: true
upgrade_dist: true
packages:
- vim
- dos2unix
- git
- curl
- wget
- git
remove_packages: []
upgrade_packages: []
GPG_keys: []
yum_repositories: []
become: true
- hosts: centos_hosts
tasks:
- name: "Ensure firewalld is installed"
import_role:
name: yum
tasks_from: install_packages.yml
vars:
packages:
- firewalld
become: true
- name: "Ensure firewalld is started"
service:
name: firewalld
state: started
become: true
- name: "Set default firewalld policy"
import_role:
name: firewalld
become: true
- hosts: debian_hosts
tasks:
- name: "Ensure ufw is installed"
import_role:
name: apt
tasks_from: install_packages.yml
vars:
packages:
- ufw
become: true
- name: "Ensure ufw is started"
service:
name: ufw
state: started
become: true
- name: "Set default firewalld policy"
import_role:
name: ufw
become: true
- hosts: all
tasks:
- name: "Ensure pip is installed"
include_role:
name: get-pip
become: true
- name: "Ensure docker is installed"
include_role:
name: get-docker
vars:
docker_users:
- gcharbon
- jgaschler
- ansible_user
become: true
# - name: "Ensure certbot is installed"
# include_role:
# name: get-certbot
# vars:
# virtualenv: ~/certbot_env
# ansible_user: gcharbon
- name: Include virt_update_setup.yml playbook
import_playbook: virt_init_setup.yml
vars:
deploy_environment: demo
virt_group: kvm
virt_user: gcharbon
- name: Include virt_update_setup.yml playbook
import_playbook: virt_update_setup.yml
vars:
deploy_environment: demo
virt_group: kvm
virt_user: gcharbon
- name: Include virt_update_setup.yml playbook
import_playbook: virt_update_setup.yml
vars:
deploy_environment: production
virt_group: kvm
virt_user: gcharbon
- name: Include virt_update_setup.yml playbook
import_playbook: virt_init_setup.yml
vars:
deploy_environment: production
virt_group: kvm
virt_user: gcharbon
- name: Include virt_update_setup.yml playbook
import_playbook: virt_update_setup.yml
vars:
deploy_environment: testing
virt_group: kvm
virt_user: gcharbon
- name: Include virt_update_setup.yml playbook
import_playbook: virt_init_setup.yml
vars:
deploy_environment: testing
virt_group: kvm
virt_user: gcharbon
- hosts: centos_hosts
tasks:
- name: "Ensure firewalld is installed"
import_role:
name: yum
tasks_from: install_packages.yml
vars:
packages:
- firewalld
- name: "Ensure firewalld is started"
service:
name: firewalld
state: started
- name: "Set default firewalld policy"
import_role:
name: firewalld
- hosts: debian_hosts
tasks:
- name: "Ensure ufw is installed"
import_role:
name: apt
tasks_from: install_packages.yml
vars:
packages:
- ufw
- name: "Ensure ufw is started"
service:
name: ufw
state: started
- name: "Set default firewalld policy"
import_role:
name: ufw
---
- hosts: all
tasks:
- name: "Ensure certbot is installed"
import_role:
name: get-certbot
---
- hosts: all
tasks:
- name: "Ensure docker is installed"
import_role:
name: get-docker
---
- hosts: all
tasks:
- name: "Ensure global pip is installed with extra args: {{ pip_extra_args | default('') }}"
import_role:
name: get-pip
sudoers_group: wheel
emulator: /usr/libexec/qemu-kvm
virt_dependencies:
- qemu-kvm
- libvirt
- virt-install
- genisoimage
- python-lxml
- libvirt-python
- virt-manager
docker_gpg_key: https://download.docker.com/linux/centos/gpg
docker_repository:
name: docker-ce.repo
baseurl: https://download.docker.com/linux/centos/7/$basearch/stable
enabled: 1
gpgcheck: 1
remove_docker_dependencies:
- docker
- docker-client
- docker-client-latest
- docker-common
- docker-latest-logrotate
- docker-selinux
- docker-engine-selinux
- docker-engine
docker_dependencies:
- yum-utils
- device-mapper-persistent-data
- lvm2
docker_package: docker-ce
libvirtd_group: libvirt
ansible_python_interpreter: /usr/bin/python
local_users:
-
name: gcharbon
gecos: Guillaume Charbonnier
createhome: yes
homedir: "/home/gcharbon"
shell: "/bin/bash"
ssh_authorized_keys:
# gcharbon/bash
- ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDG3Ql48TplqV5FWrZvImXhd3qHW3E9Dwqr38h3oP6fF50RVOzSR2AkqlQ8F0e8cAWNh1xN17+XLDiIc7ZsfJle0fwFy3SEaNGIMg+mvfcgDEbuzFk/YecfpWP7LiUSedScvExQ9Hz9bQiCCBqRn7XCipdXIx+yQLnlrtl0mgGIGMfRgmWWZOgSFbqLJ5bQKHa3JKZJKN1RAxLceADEgXUZBS9c1X/l8LFdfYOFhyvg23qUOCVBpBHmhzxqq91M72nVIaiU73edy+qZbF2u34Zs1VO+36yrE3FL1E19B5fmGwMyDtOe6NnDAPJ//5bYaUSf35NgYB0slBEQaKdDlAwt gcharbon@LFR020106
# gcharbon/mobaxterm
- ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDDGzMtuijGAzxasmDZqqoAeoaz7e4Ys+xlW6duu2tK34Wi85uHRREkrqrL9yRdL8I9QGRuw4puwL9mI2sYOx7BpqAlhTeZx44FJaAidbb9cxYruCwz55Pvi2ebsm7LktVwCqiTckwS53yzfJZvdssYkJKCYNzL/7f1HN7xrugUqtyoM6M/KcdxYBSBDM9qQU0HbOdmS11xVrfsl8f39IjxsmKIq89sYk2ng6KF/0EsDQQzprB5D8QqVDyFcvT7fC3HXlot0nsW+4/9Q/xkkMet3kkQTXd/marwDZI1jPS6GXPQy3drGkGTI/5151/rzPwOZsOZRR+vfpvSHpiDNKvr gcharbon@LFR020106
generate_ssh_key: true
sudo_user: true
passwordless_sudo: true
password_lock: true
# Do not set password for better security. No one can log into this user without ssh keys being authorized.
-
name: jgaschler
gecos: Jean Gaschler
homedir: "/home/jgaschler"
shell: "/bin/bash"
ssh_authorized_keys:
# Git bash key (Jean)
- ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDWi5wpJjHvfbAY9ekYJ2G042JF3VTGyXoYB99Z2deEuU/hDBYK4wTocJq2bCv7cU88S+a6uVewrO37SnVo0X1Vxehf9TMsiwNVNQXa+iuIvnSSzpnFzBSO0xWyP6bd4UJJsuCviZmlNjUSEHAaEH4NxAzFa+Jfz65jDBbSycHVcANcka9tVcIVazvCVGqalUKpMfcwrSnEf2u8GpmOfb5GQJ80Ax3r83NuJVjmQLDmBx98EgbhefXgP3Xt5oUWk1IInKaiOnbT6Vuo4eYW42HovVmxRFUJYGayNS2mNXAIkOakFVMQutHAVddXECyp0xKfBxyjWE4NTWTwiNZifgVv jgaschle@LFR010770
# MobaXterm (Jean)
- ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDkRKXqZnDpW+p1jxIHUTo0ZbjoOzgVtM6lAoIC+KHN7WfF0IUIoUYi7q0ioPUBTp0RVuE1n5EhSzB6zjYo4vJHm4v1UG9/hhZUbJeG4Ob4cF281aSrvPYMMZ3DHEf2IIedtlVjoergogXdR3fVz0XmHq4CjzNHITlGBfglIz98MwK4j7pRVpAgff/C/zPILd2omJX1pJJ9vrmdHvITpfTbKWLDrB4SBL9Mh7NMZ+nnQNAuNDMUhHnUVFwS69Fl5ziBDO0Ce5dvk0ooyd6LqRomGUXy3l+F010IKSiljsESAYNXC9fMK93uuYZ3fBpNY9FDN3ZtA6bHdsR+5nvCIFFr jgaschle@LFR010770
sudo_user: true
passwordless_sudo: true
# Do not set password for better security. No one can log into this user without ssh keys being authorized.
-
name: admin
gecos: Capgemini Admin User
createhome: true
homedir: "/home/admin"
shell: "/bin/bash"
ssh_authorized_keys:
# gcharbon/bash
- ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDG3Ql48TplqV5FWrZvImXhd3qHW3E9Dwqr38h3oP6fF50RVOzSR2AkqlQ8F0e8cAWNh1xN17+XLDiIc7ZsfJle0fwFy3SEaNGIMg+mvfcgDEbuzFk/YecfpWP7LiUSedScvExQ9Hz9bQiCCBqRn7XCipdXIx+yQLnlrtl0mgGIGMfRgmWWZOgSFbqLJ5bQKHa3JKZJKN1RAxLceADEgXUZBS9c1X/l8LFdfYOFhyvg23qUOCVBpBHmhzxqq91M72nVIaiU73edy+qZbF2u34Zs1VO+36yrE3FL1E19B5fmGwMyDtOe6NnDAPJ//5bYaUSf35NgYB0slBEQaKdDlAwt gcharbon@LFR020106
# gcharbon/mobaxterm
- ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDDGzMtuijGAzxasmDZqqoAeoaz7e4Ys+xlW6duu2tK34Wi85uHRREkrqrL9yRdL8I9QGRuw4puwL9mI2sYOx7BpqAlhTeZx44FJaAidbb9cxYruCwz55Pvi2ebsm7LktVwCqiTckwS53yzfJZvdssYkJKCYNzL/7f1HN7xrugUqtyoM6M/KcdxYBSBDM9qQU0HbOdmS11xVrfsl8f39IjxsmKIq89sYk2ng6KF/0EsDQQzprB5D8QqVDyFcvT7fC3HXlot0nsW+4/9Q/xkkMet3kkQTXd/marwDZI1jPS6GXPQy3drGkGTI/5151/rzPwOZsOZRR+vfpvSHpiDNKvr gcharbon@LFR020106
# jgaschler
- ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDWi5wpJjHvfbAY9ekYJ2G042JF3VTGyXoYB99Z2deEuU/hDBYK4wTocJq2bCv7cU88S+a6uVewrO37SnVo0X1Vxehf9TMsiwNVNQXa+iuIvnSSzpnFzBSO0xWyP6bd4UJJsuCviZmlNjUSEHAaEH4NxAzFa+Jfz65jDBbSycHVcANcka9tVcIVazvCVGqalUKpMfcwrSnEf2u8GpmOfb5GQJ80Ax3r83NuJVjmQLDmBx98EgbhefXgP3Xt5oUWk1IInKaiOnbT6Vuo4eYW42HovVmxRFUJYGayNS2mNXAIkOakFVMQutHAVddXECyp0xKfBxyjWE4NTWTwiNZifgVv jgaschle@LFR010770
generate_ssh_key: true
sudo_user: true
passwordless_sudo: false
# Same password as other machines
password: $6$4x0Qf/oe3mXAJkYD$.4YyNeoIjbDuA60y0zjjMHhbLv7ZvpYxYLfrYqCZZ5XK7ehBkzIz8YcghxSHuB16CFNEsUPIJ0BB5yOBqAonz/
docker_user: true
deploy_environment: testing
virt_user: gcharbon
virt_group: kvm
sudoers_group: sudo
emulator: /usr/bin/kvm
virt_dependencies:
- qemu-kvm
- qemu-utils
- libvirt-bin
- virtinst
- genisoimage
- python3-lxml
- python3-libvirt
- virt-manager
docker_gpg_key:
url: https://download.docker.com/linux/ubuntu/gpg
docker_repository:
repo: "deb [arch=amd64] https://download.docker.com/linux/ubuntu {{ansible_distribution_release}} stable"
state: present
remove_docker_dependencies:
- docker
- docker-engine
- docker.io
docker_dependencies:
- apt-transport-https
- curl
- ca-certificates
- software-properties-common
docker_package: docker-ce
libvirtd_group: libvirtd
ansible_python_interpreter: /usr/bin/python3
virt_user: qemu
virt_group: libvirt-qemu
local_users:
-
name: gcharbon
gecos: Guillaume Charbonnier
createhome: yes
homedir: "/home/gcharbon"
shell: "/bin/bash"
ssh_authorized_keys:
# gcharbon/bash
- ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDG3Ql48TplqV5FWrZvImXhd3qHW3E9Dwqr38h3oP6fF50RVOzSR2AkqlQ8F0e8cAWNh1xN17+XLDiIc7ZsfJle0fwFy3SEaNGIMg+mvfcgDEbuzFk/YecfpWP7LiUSedScvExQ9Hz9bQiCCBqRn7XCipdXIx+yQLnlrtl0mgGIGMfRgmWWZOgSFbqLJ5bQKHa3JKZJKN1RAxLceADEgXUZBS9c1X/l8LFdfYOFhyvg23qUOCVBpBHmhzxqq91M72nVIaiU73edy+qZbF2u34Zs1VO+36yrE3FL1E19B5fmGwMyDtOe6NnDAPJ//5bYaUSf35NgYB0slBEQaKdDlAwt gcharbon@LFR020106
# gcharbon/mobaxterm
- ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDDGzMtuijGAzxasmDZqqoAeoaz7e4Ys+xlW6duu2tK34Wi85uHRREkrqrL9yRdL8I9QGRuw4puwL9mI2sYOx7BpqAlhTeZx44FJaAidbb9cxYruCwz55Pvi2ebsm7LktVwCqiTckwS53yzfJZvdssYkJKCYNzL/7f1HN7xrugUqtyoM6M/KcdxYBSBDM9qQU0HbOdmS11xVrfsl8f39IjxsmKIq89sYk2ng6KF/0EsDQQzprB5D8QqVDyFcvT7fC3HXlot0nsW+4/9Q/xkkMet3kkQTXd/marwDZI1jPS6GXPQy3drGkGTI/5151/rzPwOZsOZRR+vfpvSHpiDNKvr gcharbon@LFR020106
generate_ssh_key: true
sudo_user: true
passwordless_sudo: true
password_lock: true
# Do not set password for better security. No one can log into this user without ssh keys being authorized.
deploy_environment: demo
virt_user: gcharbon
virt_group: kvm
# ansible_ssh_common_args: '-F .ssh/ssh.cfg -o StrictHostKeyChecking=no ProxyCommand="ssh -W %h:%p gcharbon@10.68.150.240"'
ansible_ssh_common_args: '-F .ssh/ssh.cfg -o StrictHostKeyChecking=no'
ansible_user: ansible_user
- hosts: hypervisors:&centos_hosts
tasks:
- name: ACTION | Upgrade dist and install common packages on hypervisors hosts
import_role:
name: yum
vars:
update_before_install: true
upgrade_dist: true
GPG_keys: []
yum_repositories: []
remove_packages: []
upgrade_packages: []
packages:
- vim
- dos2unix
- curl
- wget
tags:
- common
- package
- name: ACTION | Install libvirt dependencies on hypervisors hosts
import_role:
name: yum
tasks_from: install_packages.yml
vars:
update_before_install: true
packages: "{{ virt_dependencies }}"
tags:
- virtualization
- package
- hosts: hypervisors:&debian_hosts
tasks:
- name: ACTION | Upgrade dist and install common packages on hypervisors hosts
import_role:
name: apt
vars:
update_before_install: true
upgrade_dist: true
GPG_keys: []
yum_repositories: []
remove_packages: []
upgrade_packages: []
packages:
- vim
- dos2unix
- curl
- wget
tags:
- common
- package
- name: ACTION | Install libvirt dependencies on hypervisors hosts
import_role:
name: apt
tasks_from: install_packages.yml
vars:
update_before_install: false
packages: "{{ virt_dependencies }}"
tags:
- virtualization
- packages
- hosts: hypervisors
tasks:
- name: ROLE | Ensure users exist
import_role:
name: users
vars:
users: "{{ local_users }}"
tags:
- virtualization
- user
- hosts: hypervisors
tasks:
- name: Create virtual machines
import_role:
name: virtualization
tasks_from: domains.yml
vars:
overwrite: true
os_images:
centos7:
name: Centos7_x86_64_cloud_image.qcow2
url: https://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud-1808.qcow2
virtual_machines:
-
hostname: test-centos-01
volume:
name: test-centos-01.img
format: qcow2
size: 10G
pool: test_pool
os_image: centos7
memory:
value: 2
unit: GB
current_memory: 1
hugepages:
value: 2048
unit: kB
cores:
value: 1
cpu:
mode: host-passthrough
model:
fallback: allow
state: active
autostart: yes
on_poweroff: destroy
on_reboot: restart
on_crash: restart
os:
arch: x86_64
type: hvm
boot:
devices:
- hd
- cdrom
- network
bootmenu_enabled: no
bios:
userserial: yes
rebootTimeout: 0
---
- hosts: nfs
tasks:
- include_role:
name: nfs
vars:
nfs_exports: "/nfs/test_share"
- hosts: docker
tasks:
- name: Install docker python package
include_role:
name: pip
tasks_from: install_packages.yml
vars:
pip_install_packages:
- docker
- docker-compose
virtualenv: ~/.docker_env
ansible_user: gcharbon
- name: Leave current swarm cluster on all docker nodes
docker_swarm:
state: absent
force: true
vars:
ansible_python_interpreter: ~/.docker_env/bin/python
ansible_user: gcharbon
- name: Create docker zone with firewalld
command: firewall-cmd --permanent --new-zone docker
become: true
register: create_zone_result
changed_when: |
("Error: NAME_CONFLICT: new_zone(): 'docker'" not in create_zone_result.stderr
and
create_zone_result.rc != 1)
or
create_zone_result.rc == 0
failed_when: |
"Error: NAME_CONFLICT: new_zone(): 'docker'" not in create_zone_result.stderr
and
create_zone_result.rc != 0
- name: Add IP addresses of members of docker group as source to docker zone on centos managers
include_role:
name: firewalld
tasks_from: sources.yml
vars:
firewall:
sources:
- subnet: "{{ hostvars[item].ansible_host }}"
zone: docker
permanent: true
state: enabled
with_items: "{{ groups['docker'] }}"
- name: Enable port 2377 on docker zone on centos managers
include_role:
name: firewalld
tasks_from: ports.yml
vars:
firewall:
ports:
- port: 2377
proto: tcp
permanent: true
state: enabled
zone: docker
when: "groups['centos_hosts'] is defined
and
inventory_hostname in groups['centos_hosts']
and
inventory_hostname in groups['manager']"
- name: Prepare rules for ufw on debian managers
set_fact:
ufw_rules: "{{ ufw_rules|default([]) | union( [{ 'rule': 'allow', 'src': hostvars[item].ansible_host, 'port': '2377', 'proto': 'tcp'}] ) }}"
with_items: "{{ groups['docker'] }}"
when: "groups['debian_hosts'] is defined
and
inventory_hostname in groups['debian_hosts']
and
inventory_hostname in groups['manager']"
- name: Allow port 2377 for members of docker group with ufw on debian managers
include_role:
name: ufw
tasks_from: rules.yml
vars:
firewall:
rules: "{{ ufw_rules }}"
when: "groups['debian_hosts'] is defined
and
inventory_hostname in groups['debian_hosts']
and
inventory_hostname in groups['debian_hosts']"
- name: Reload firewallcmd
include_role:
name: firewalld
tasks_from: reload.yml
when: "groups['centos_hosts'] is defined
and
inventory_hostname in groups['centos_hosts']"
- name: Reload UFW
ufw:
state: reloaded
when: "groups['debian_hosts'] is defined
and
inventory_hostname in groups['centos_hosts']"
- name: "Init a new swarm with default parameters on node {{ groups['manager'][0] }}"
docker_swarm:
state: present
advertise_addr: "{{ hostvars[groups['manager'][0]].ansible_host }}"
register: swarm_facts
vars:
ansible_python_interpreter: ~/.docker_env/bin/python
ansible_user: gcharbon
run_once: true
delegate_to: "{{ groups['manager'][0] }}"
- name: Gather facts from the first swarm manager
setup:
delegate_to: "{{ groups['manager'][0] }}"
delegate_facts: True
- name: Register join token on all pending workers
set_fact:
join_token: "{{ swarm_facts.swarm_facts.JoinTokens.Worker }}"
when: "inventory_hostname not in groups['manager']"
- name: Register join token on all pending managers
set_fact:
join_token: "{{ swarm_facts.swarm_facts.JoinTokens.Manager }}"
when: "inventory_hostname in groups['manager']
and
inventory_hostname != groups['manager'][0]"
- name: Join Swarm cluster
docker_swarm:
state: join
advertise_addr: "{{ ansible_host }}"
join_token: "{{ join_token }}"
remote_addrs: "{{ hostvars[groups['manager'][0]].ansible_host }}:2377"
when: "inventory_hostname != groups['manager'][0]"
vars:
ansible_python_interpreter: ~/.docker_env/bin/python
ansible_user: gcharbon
---
- hosts: manager
tasks:
- name: Ensure work directory exists
become: true
file:
path: /work
state: directory
mode: 0775
owner: admin
group: docker
- name: Ensure portainer directory exists
become: true
file:
path: /work/portainer
state: directory
mode: 0775
owner: admin
group: docker
- name: Copy stack file to remote host
copy:
src: ../deploy/portainer/deploy-stack.yml
dest: /work/portainer/deploy-stack.yml
# - name: deploy portainer stack from file
# docker_stack:
# state: present
# name: portainer
# compose:
# - "/work/portainer/deploy-stack.yml"
# run_once: true
# vars:
# ansible_become_user: admin
# ansible_become: true
- hosts: all
tasks:
- name: Configure users and ssh keys
include_role:
name: users
- hosts: hypervisors:&centos_hosts
tasks:
- name: ACTION | Upgrade dist and install common packages on hypervisors hosts
import_role:
name: yum
vars:
update_before_install: true
upgrade_dist: true
GPG_keys: []
yum_repositories: []
remove_packages: []
upgrade_packages: []
packages:
- vim
- dos2unix
- curl
- wget
tags:
- common
- package
- name: ACTION | Install libvirt dependencies on hypervisors hosts
import_role:
name: yum
tasks_from: install_packages.yml
vars:
update_before_install: true
packages: "{{ virt_dependencies }}"
tags:
- virtualization
- package
- hosts: hypervisors:&debian_hosts
tasks:
- name: ACTION | Upgrade dist and install common packages on hypervisors hosts
import_role:
name: apt
vars:
update_before_install: true
upgrade_dist: true
GPG_keys: []
yum_repositories: []
remove_packages: []
upgrade_packages: []
packages:
- vim
- dos2unix
- curl
- wget
tags:
- common
- package
- name: ACTION | Install libvirt dependencies on hypervisors hosts
import_role:
name: apt
tasks_from: install_packages.yml
vars:
update_before_install: false
packages: "{{ virt_dependencies }}"
tags:
- virtualization
- packages
- hosts: hypervisors
tasks:
- name: ROLE | Ensure users exist
import_role:
name: users
vars:
users: "{{ local_users }}"
tags:
- virtualization
- user
- name: INCLUDE | Set permissions for virtualization
include_role:
name: virtualization
tasks_from: set_permissions.yml
vars:
virt_user: gcharbon
- name: ACTION | Restart and enable the libvirtd service
service:
name: libvirtd
state: restarted
enabled: true
- name: ROLE | Setup virtualization infrastructure
include_role:
name: virtualization
vars:
virt_user: gcharbon
environments_path: ../virtual_infrastructure/environments
networks_path: ../virtual_infrastructure/networks
- name: INCLUDE | Forward ports of created virtual machine to host port with iptable
include_role:
name: virtualization
tasks_from: forward_ports.yml
vars:
virtual_networks: "{{ networks }}"
virtual_machines: "{{ deployment_configuration }}"
- name: INCLUDE | Ensure that virtual machines are reachable from SSH
include_role:
name: virtualization
tasks_from: wait_for_vms.yml
vars:
virtual_machines: "{{ deployment_configuration }}"
ssh_timeout: 30
ssh_port: 22
# We need to find something similar to add SSH keys of newly created VM to known_hosts file of local computer
# - name: ACTION | Write the new virtual instance host key to known hosts
# connection: local
# shell: "ssh-keyscan -H {{ item.ipv4 }} >> ~/.ssh/known_hosts"
# with_items:
# - "{{ deployment_configuration }}"
- hosts: hypervisors
tasks:
- name: ROLE | Setup virtualization infrastructure
include_role:
name: virtualization
vars:
virt_user: gcharbon
environments_path: ../virtual_infrastructure/environments
networks_path: ../virtual_infrastructure/networks
- name: INCLUDE | Forward ports of created virtual machine to host port with iptable
include_role:
name: virtualization
tasks_from: forward_ports.yml
vars:
virtual_networks: "{{ networks }}"
virtual_machines: "{{ deployment_configuration }}"
- name: INCLUDE | Ensure that virtual machines are reachable from SSH
include_role:
name: virtualization
tasks_from: wait_for_vms.yml
vars:
virtual_machines: "{{ deployment_configuration }}"
ssh_timeout: 30
ssh_port: 22
---
- hosts: centos_hosts
tasks:
- name: "Ensure epel repo is installed"
import_role:
name: yum
tasks_from: add_repositories.yml
vars:
yum_repositories:
- name: epel
description: EPEL YUM repo
baseurl: https://download.fedoraproject.org/pub/epel/$releasever/$basearch/
enabled: 1
gpgcheck: 1
gpgkey: "https://dl.fedoraproject.org/pub/epel/RPM-GPG-KEY-EPEL-{{ ansible_distribution_major_version }}"
---
- import_playbook: enable_epel.yml
vars:
ansible_become: true
- hosts: all
tasks:
- name: "Ensure python36 is installed"
import_role:
name: yum
tasks_from: install_packages.yml
vars:
packages:
- python36
update_before_install: false
become: true
- name: Copy python requirements
import_role:
name: pip
tasks_from: copy_requirements.yml
- name: "Ensure global pip is installed with extra args"
import_role:
name: get-pip
vars:
python_interpreter: /usr/bin/python36
pip_extra_args: --user
- name: "Install python requirements"
import_role:
name: pip
tasks_from: install_requirements.yml
vars:
virtualenv: "{{ virtual_env_path }}-epel"
ansible_python_interpreter: /usr/bin/python36
---
- hosts: datalab
tasks:
- name: Ensure centos-release-scl is installed
import_role:
name: yum
tasks_from: install_packages.yml
vars:
packages:
- centos-release-scl
update_before_install: true
become: true
- name: Enable centos-sclo-rh-testing repository
import_role:
name: yum
tasks_from: enable_repositories.yml
vars:
yum_repositories:
- centos-sclo-rh-testing
- name: "Ensure {{ rh_python_package }} is installed"
import_role:
name: yum
tasks_from: install_packages.yml
vars:
packages:
- "{{ rh_python_package }}"
update_before_install: true
become: true
- name: "Optionally add line in .bashrc to enable {{ rh_python_package }} with scl"
lineinfile:
path: "/home/{{ ansible_user }}/.bashrc"
line: "source scl_source enable {{ rh_python_package }}"
insertafter: EOF
state: present
when: "scn_enable_python|default(true) == true"
- name: Copy python requirements
import_role:
name: pip
tasks_from: copy_requirements.yml
- name: "Install python requirements with pip from {{ rh_python_interpreter }}"
import_role:
name: pip
tasks_from: install_requirements.yml
vars:
virtualenv: "{{ virtual_env_path }}-rh"
ansible_python_interpreter: "{{ rh_python_interpreter }}"
---
- hosts: all
tasks:
- name: "Ensure {{ rh_python_package }} is installed"
import_role:
name: yum
tasks_from: install_packages.yml
vars:
packages:
- "{{ rh_python_package }}"
update_before_install: false
become: true
- name: "Optionally add line in .bashrc to enable {{ rh_python_package }} with scl"
lineinfile:
path: "/home/{{ ansible_user }}/.bashrc"
line: "source scl_source enable {{ rh_python_package }}"
insertafter: EOF
state: present
when: "scn_enable_python|default(true) == true"
- name: Copy python requirements
import_role:
name: pip
tasks_from: copy_requirements.yml
- name: "Install python requirements with pip from {{ rh_python_interpreter }}"
import_role:
name: pip
tasks_from: install_requirements.yml
vars:
virtualenv: "{{ virtual_env_path }}-rh"
ansible_python_interpreter: "{{ rh_python_interpreter }}"
---
packages: []
remove_packages: []
upgrade_packages: []
install_recommends: no
dpkg_upgrade_options: 'force-confold,force-confdef'
update_before_install: true
---
- name: "Add GPG keys"
apt_key:
state: present
url: "{{ item.url | default(omit) }}"
id: "{{ item.id | default(omit) }}"
keyserver: "{{ item.keyserver | default(omit) }}"
keyring: "{{ item.keyring | default(omit) }}"
with_items:
- "{{ apt_keys | default([]) }}"
---
- name: "Add source repositories"
apt_repository:
repo: "{{ item.repo }}"
filename: "{{ item.filename | default(omit) }}"
state: present
with_items:
- "{{ apt_repositories | default([]) }}"
---
- name: "Install {{ packages | list | join(', ') }} with apt"
apt:
name: "{{ item }}"
state: present
update_cache : "{{ update_before_install|bool }}"
install_recommends: "{{ install_recommends }}"
with_items: "{{ packages }}"
---
- include: add_repositories.yml
- include: add_GPG_keys.yml
- include: remove_packages.yml
- include: upgrade_dist.yml
- include: upgrade_packages.yml
- include: install_packages.yml
---
- name: Remove {{ remove_packages | join(', ') }} with apt
apt:
name: "{{ item }}"
state: absent
with_items: "{{ remove_packages }}"
- name: Upgrade dist with apt dist-upgrade
apt:
upgrade: dist
update_cache: yes
dpkg_options: "{{ dpkg_upgrade_options }}"
---
- name: Upgrade {{ upgrade_packages | list | join(', ') }} with apt
apt:
name: "{{ item }}"
state: latest
update_cache : "{{ update_before_install|bool }}"
with_items: "{{ upgrade_packages }}"
---
firewall:
services:
-
name: ssh
permanent: true
state: enabled
zone: public
-
name: http
permanent: true
state: enabled
zone: public
-
name: https
permanent: true
state: enabled
zone: public
ports:
-
port: 2375
proto: tcp
zone: internal
permanent: true
sources:
-
subnet: 192.168.0.0/16
zone: internal
permanent: true
---
- name: Enable firewalld service at boot
service:
name: firewalld
enabled: yes
become: true
# /roles/firewall/tasks/main.yml
# Configure firewall
---
- include: services.yml
- include: sources.yml
- include: ports.yml
- include: reload.yml
- include: enable.yml
# /roles/firewall/tasks/ports.yml
# Configure ports with firewalld on RHEL/CentOS systems
---
- name: Change registered ports with firewalld
firewalld:
port: "{{ port.port}}/{{ port.proto }}"
permanent: "{{ port.permanent }}"
state: "{{ port.state | default('enabled') }}"
zone: "{{ port.zone }}"
with_items:
- "{{ firewall.ports }}"
loop_control:
loop_var: port
become: true
---
- name: Reload firewalld service
service:
name: firewalld
state: reloaded
become: true
---
- name: Change registered services with firewalld
firewalld:
service: "{{ service.name }}"
permanent: "{{ service.permanent }}"
state: "{{ service.state | default('enabled') }}"
zone: "{{ service.zone }}"
with_items:
- "{{ firewall.services }}"
loop_control:
loop_var: service
become: true
---
- name: Change registered sources with firewalld
firewalld:
source : "{{ source.subnet }}"
zone: "{{ source.zone }}"
permanent: "{{ source.permanent }}"
state: "{{ source.state | default('enabled') }}"
with_items:
- "{{ firewall.sources }}"
loop_control:
loop_var: source
become: true
---
- name: Install certbot
include_role:
name: pip
tasks_from: install_packages.yml
vars:
pip_install_packages:
- pyopenssl
- certbot
#!/bin/bash
# Install virtualenv with pip if not already installed.
# pip must be installed before executing this script
pip freeze | grep virtualenv > /dev/null 2>&1
is_installed=$?
if [ "$is_installed" -eq 0 ]; then
echo 'virtualenv is already installed'
exit 0
else
pip install virtualenv > /dev/null 2>&1
echo 'virtualenv has been installed'
exit 0
fi
---
# file: roles/docker/meta/main.yml
dependencies:
# Ubuntu and Debian dependencies
-
role: apt
update_dist: true
upgrade_dist: true
remove_packages: "{{ remove_docker_dependencies }}"
packages: "{{ docker_dependencies }}"
apt_keys: "{{ docker_gpg_key }}"
apt_repositories:
- "{{ docker_repository }}"
when: ansible_distribution == 'Debian' or ansible_distribution == 'Ubuntu'
become: true
-
role: yum
update_dist: true
upgrade_dist: true
remove_packages: "{{ remove_docker_dependencies }}"
packages: "{{ docker_dependencies }}"
GPG_keys: "{{ docker_gpg_key }}"
yum_repositories:
- "{{ docker_repository }}"
when: ansible_distribution == 'CentOS' or ansible_distribution == 'RedHat'
become: true
-
role: apt
upgrade_dist: false
update_dist: true
remove_packages: []
GPG_keys: []
apt_repositories: []
packages:
- "{{ docker_package }}"
when: ansible_distribution == 'Debian' or ansible_distribution == 'Ubuntu'
become: true
-
role: yum
upgrade_dist: false
update_dist: true
remove_packages: []
GPG_keys: []
yum_repositories: []
packages:
- "{{ docker_package }}"
when: ansible_distribution == 'CentOS' or ansible_distribution == 'RedHat'
become: true
---
# file: roles/docker/tasks/add_user_to_docker_group.yml
- name: Add the docker users into docker group
user:
name: "{{ item }}"
groups: docker
with_items: "{{ docker_users | default([]) }}"
become: true
---
# file: roles/docker/tasks/configure_docker_daemon.yml
- name: Configure docker daemon
debug:
msg: "NOT IMPLEMENTED YET"
---
# file: roles/docker/tasks/install_docker.yml
- name: Ensure docker daemon is installed
  debug:
    msg: "NOT IMPLEMENTED YET"
---
# file: roles/docker/tasks/main.yml
- include: add_user_to_docker_group.yml
- include: configure_docker_daemon.yml
- include: start_docker.yml
---
# file: roles/docker/tasks/start_docker.yml
- name: Ensure docker daemon is running
service:
name: docker
state: started
become: true
python_interpreter: "{{ ansible_python_interpreter }}"
# pip_extra_args: '--user'
#!/bin/bash
# Install virtualenv with pip if not already installed.
# pip must be installed before executing this script
if [[ $# -ne 1 ]] && [[ $# -ne 2 ]]; then
echo "Wrong number of arguments. Usage: $0 <PYTHON_INTERPRETER> [<PIP_EXTRA_ARGS>]"
exit 1
fi
PIP_COMMAND="$1 -m pip"
PIP_EXTRA_ARGS="$2"
$PIP_COMMAND freeze | grep virtualenv > /dev/null 2>&1
is_installed=$?
if [ "$is_installed" -eq 0 ]; then
echo 'virtualenv is already installed'
exit 0
fi
$PIP_COMMAND install virtualenv $PIP_EXTRA_ARGS > /dev/null 2> /tmp/virtualenv.error
is_installed=$?
if [ "$is_installed" -eq 0 ]; then
echo 'virtualenv has been installed'
exit 0
else
>&2 cat /tmp/virtualenv.error
exit 1
fi
---
# This tasks file installs pip using the get-pip.py script
# from https://bootstrap.pypa.io/get-pip.py
- name: Download pip installer
get_url:
url: https://bootstrap.pypa.io/get-pip.py
dest: /tmp/get-pip.py
- name: Execute the get-pip.py installer
become: true
shell: "{{ python_interpreter }} /tmp/get-pip.py {{ pip_extra_args | default('') }}"
register: successful_install
- name: Remove the get-pip.py installer
file:
path: /tmp/get-pip.py
state: absent
when: successful_install.rc == 0
---
# This tasks file installs virtualenv.
# The official documentation states that virtualenv is a required dependency
# for the ansible pip module.
- name: "Install virtualenv if it is not installed yet with interpreter {{ python_interpreter }}"
become: true
script: >
install_virtualenv.sh
{{ python_interpreter }}
{{ pip_extra_args |default('') }}
register: virtualenv_result
changed_when: "virtualenv_result.rc == 0
and
virtualenv_result.stdout_lines[0] != 'virtualenv is already installed'"
---
# This tasks file checks whether pip is already installed with the
# {{ python_interpreter }} interpreter.
- name: "Checking if pip is already installed with interpreter {{ python_interpreter }}"
command: "{{ python_interpreter }} -m pip --version"
ignore_errors: true
register: pip_is_installed
changed_when: pip_is_installed.rc != 0
---
- include_tasks: is_pip_installed.yml
- include_tasks: install_pip.yml
when: pip_is_installed.rc != 0
- include_tasks: install_virtualenv.yml
- name: "change hostname to {{ hostname }}"
hostname:
name: "{{ hostname }}"
- name: add hostname to /etc/hosts
lineinfile:
dest: /etc/hosts
regexp: '^127\.0\.0\.1[ \t]+localhost'
line: "127.0.0.1 localhost {{ hostname }}"
state: present
# Ansible Role: NFS
[![Build Status](https://travis-ci.org/geerlingguy/ansible-role-nfs.svg?branch=master)](https://travis-ci.org/geerlingguy/ansible-role-nfs)
Installs NFS utilities on RedHat/CentOS or Debian/Ubuntu.
## Requirements
None.
## Role Variables
Available variables are listed below, along with default values (see `defaults/main.yml`):
    nfs_exports: []
A list of exports which will be placed in the `/etc/exports` file. See Ubuntu's simple [Network File System (NFS)](https://help.ubuntu.com/14.04/serverguide/network-file-system.html) guide for more info and examples. (Simple example: `nfs_exports: [ "/home/public *(rw,sync,no_root_squash)" ]`).
    nfs_rpcbind_state: started
    nfs_rpcbind_enabled: true
(RedHat/CentOS/Fedora only) The state of the `rpcbind` service, and whether it should be enabled at system boot.
## Dependencies
None.
## Example Playbook
    - hosts: db-servers
      roles:
        - { role: geerlingguy.nfs }
## License
MIT / BSD
## Author Information
This role was created in 2014 by [Jeff Geerling](https://www.jeffgeerling.com/), author of [Ansible for DevOps](https://www.ansiblefordevops.com/).
---
nfs_exports: []
nfs_rpcbind_state: started
nfs_rpcbind_enabled: true
---
- name: reload nfs
command: 'exportfs -ra'
---
# Include variables and define needed variables.
- name: Include OS-specific variables.
include_vars: "{{ ansible_os_family }}.yml"
- name: Include overrides specific to RHEL 7.
include_vars: RedHat-7.yml
when:
- ansible_os_family == 'RedHat'
- ansible_distribution_major_version == "7"
- name: Include overrides specific to Fedora.
include_vars: Fedora.yml
when:
- ansible_os_family == 'RedHat'
- ansible_distribution == "Fedora"
# Setup/install tasks.
- include_tasks: setup-RedHat.yml
when: ansible_os_family == 'RedHat'
- include_tasks: setup-Debian.yml
when: ansible_os_family == 'Debian'
- name: Ensure directories to export exist
file: 'path="{{ item.strip().split()[0] }}" state=directory'
with_items: "{{ nfs_exports }}"
- name: Copy exports file.
template:
src: exports.j2
dest: /etc/exports
owner: root
group: root
mode: 0644
notify: reload nfs
- name: Ensure nfs is running.
service: "name={{ nfs_server_daemon }} state=started enabled=yes"
when: nfs_exports|length
---
- name: Ensure NFS utilities are installed.
apt:
name:
- nfs-common
- nfs-kernel-server
state: present
---
- name: Ensure NFS utilities are installed.
package: name=nfs-utils state=present
- name: Ensure rpcbind is running as configured.
service:
name: rpcbind
state: "{{ nfs_rpcbind_state }}"
enabled: "{{ nfs_rpcbind_enabled }}"
# /etc/exports: the access control list for filesystems which may be exported
# to NFS clients. See exports(5).
#
# Example for NFSv2 and NFSv3:
# /srv/homes hostname1(rw,sync,no_subtree_check) hostname2(ro,sync,no_subtree_check)
#
# Example for NFSv4:
# /srv/nfs4 gss/krb5i(rw,sync,fsid=0,crossmnt,no_subtree_check)
# /srv/nfs4/homes gss/krb5i(rw,sync,no_subtree_check)
#
{% for export in nfs_exports %}
{{ export }}
{% endfor %}
---
nfs_server_daemon: nfs-kernel-server
---
nfs_server_daemon: nfs-server
---
nfs_server_daemon: nfs-server
---
nfs_server_daemon: nfs
pip_requirements_remote_path: "/tmp/requirements_demo.txt"
pip_requirements_owner: "{{ ansible_user }}"
pip_requirements_mode: 600
coverage==4.5.1
cycler==0.10.0
decorator==4.3.0
entrypoints==0.2.3
ipykernel==4.8.2
ipython==6.5.0
ipython-genutils==0.2.0
jsonschema==2.6.0
jupyter-client==5.2.3
jupyter-core==4.4.0
matplotlib==2.2.2
mistune==0.8.3
nbconvert==5.3.1
nbformat==4.4.0
numpy==1.15.0
pandas==0.23.4
prompt-toolkit==1.0.15
pymongo==3.7.1
pyparsing==2.2.0
python-dateutil==2.7.3
PyYAML==3.13
scikit-learn==0.19.2
scipy==1.1.0
seaborn==0.9.0
sklearn==0.0
statsmodels==0.9.0
---
# Requirements path should be local to remote file system.
- name: Copy requirements.txt file to remote file system
copy:
src: "{{ pip_requirements_path }}"
dest: "{{ pip_requirements_remote_path }}"
owner: "{{ pip_requirements_owner | default(omit) }}"
group: "{{ pip_requirements_group | default(omit) }}"
mode: "{{ pip_requirements_mode | default(omit) }}"
---
- name: "Install {{ pip_install_packages|join(', ') }} with pip (extra_args: {{ pip_extra_args|default('') }})"
pip:
name: "{{ item.name | default(item) }}"
virtualenv: "{{ virtualenv | default(omit) }}"
version: "{{ item.version | default(omit) }}"
extra_args: "{{pip_extra_args | default(omit) }}"
with_items:
- "{{ pip_install_packages }}"
---
# File can be specified as a relative path if using the chdir option.
- name: "Install python requirements with interpreter {{ ansible_python_interpreter }}"
pip:
requirements: "{{ pip_requirements_remote_path }}"
virtualenv: "{{ virtualenv | default(omit) }}"
virtualenv_site_packages: "{{ virtualenv_site_packages | default(omit) }}"
extra_args: "{{ pip_extra_args | default(omit) }}"
---
- name: Remove packages with pip
pip:
name: "{{ item.name }}"
state: absent
virtualenv: "{{ virtualenv | default(omit) }}"
extra_args: "{{pip_extra_args | default(omit) }}"
with_items: "{{ pip_remove_packages }}"