# Note: When target == "cluster" the installer is
# running to install voltha on the cluster hosts.
# When target == "installer" the installer itself
# is being created.
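# The next few tasks prepare passwordless ssh for the voltha user: a .ssh
# directory, no stale known_hosts file, host key checking disabled via
# files/ssh_config, and the cluster host keys from files/.keys copied into
# the voltha home directory on every host.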
- name: A .ssh directory for the voltha user exists
file:
#path: "{{ ansible_env['HOME'] }}/.ssh"
path: "{{ target_voltha_home }}/.ssh"
state: directory
owner: voltha
group: voltha
tags: [cluster_host]
- name: known_hosts file is absent for the voltha user
file:
path: "{{ target_voltha_home }}/.ssh/known_hosts"
state: absent
tags: [cluster_host]
- name: Known host checking is disabled
copy:
src: files/ssh_config
dest: "{{ target_voltha_home }}/.ssh/config"
owner: voltha
group: voltha
mode: 0600
tags: [cluster_host]
- name: Cluster host keys are propagated to all hosts in the cluster
copy:
src: files/.keys
dest: "{{ target_voltha_home }}"
owner: voltha
group: voltha
mode: 0600
tags: [cluster_host]
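# The directories below (docker-py, netifaces, deb_files) are staged on the
# installer under /home/vinstall and pushed to each cluster host with
# synchronize (rsync), presumably so the hosts can be provisioned without
# direct Internet access. The disabled copy-based variant is kept for reference.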
#- name: Required configuration directories are copied
# copy:
# src: "/home/vinstall/{{ item }}"
# dest: "{{ target_voltha_home }}"
# owner: voltha
# group: voltha
# with_items:
# - docker-py
# - netifaces
# - deb_files
# when: target == "cluster"
# tags: [cluster_host]
- name: Required configuration directories are copied
synchronize:
src: "/home/vinstall/{{ item }}"
dest: "{{ target_voltha_home }}"
archive: no
owner: no
perms: no
recursive: yes
links: yes
with_items:
- docker-py
- netifaces
- deb_files
tags: [cluster_host]
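# The installer's apt package lists are rsync'd to the target so apt can
# resolve dependencies against the same package index; lock and partial are
# excluded, presumably to avoid interfering with any apt run in progress.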
- name: apt lists are up-to-date
synchronize:
src: "/var/lib/apt/lists"
dest: "/var/lib/apt/lists"
archive: no
owner: no
perms: no
rsync_opts:
- "--exclude=lock"
- "--exclude=partial"
recursive: yes
links: yes
tags: [cluster_host]
#- name: upstart barrier filesystem loop mount script is installed
# copy:
# src: "/home/vinstall/losetup.conf"
# dest: /etc/init
# owner: root
# group: root
# mode: 0644
# when: target == "cluster"
# tags: [cluster_host]
#- name: pre-emptive strike to avoid errors during package installation
# apt:
# name: "{{ item }}"
# state: absent
# with_items:
# - ubuntu-core-launcher
# - snapd
# tags: [cluster_host]
- name: A voltha directory under /var/log for voltha logs exists
file:
path: "/var/log/voltha"
state: directory
tags: [cluster_host]
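# Dependent packages are installed entirely from the files staged above:
# dpkg -R -i installs every .deb under deb_files recursively, and pip installs
# docker-py and netifaces from the local directories (--no-index --find-links)
# rather than from PyPI.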
- name: Dependent software is installed (this can take about 10 minutes, DON'T PANIC, go for coffee instead)
command: dpkg -R -i "{{ target_voltha_home }}/deb_files"
# ignore_errors: true
when: target == "cluster"
tags: [cluster_host]
#- name: Dependent software is initialized
# command: apt-get -y -f install
# when: target == "cluster"
# tags: [cluster_host]
- name: Python packages are installed
command: pip install {{ item }} --no-index --find-links "file://{{ target_voltha_home }}/{{ item }}"
with_items:
- docker-py
- netifaces
when: target == "cluster"
tags: [cluster_host]
- name: Configuration directories are deleted
file:
path: "{{ target_voltha_home }}/{{ item }}"
state: absent
with_items:
- docker-py
- netifaces
- deb_files
when: target == "cluster"
tags: [cluster_host]
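# Barrier files: fixed-size backing files are created with dd (bs=1G, so the
# *_volume_size variables are counts of gigabytes), attached as loop devices,
# formatted, and later mounted, which caps the disk space the registry, logger
# and consul data can consume. Note that cbs= only affects conv=block/unblock,
# so it appears to be a no-op in these commands.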
- name: Logging barrier file is created
command: "dd if=/dev/zero of={{ barrier_fs_dir }}/.logger-barrier-file cbs=100M bs=1G count={{ logger_volume_size }}"
when: target == "cluster"
tags: [cluster_host]
- name: Registry barrier file is created
command: "dd if=/dev/zero of={{ barrier_fs_dir }}/.registry-barrier-file cbs=100M bs=1G count={{ registry_volume_size }}"
when: target == "cluster"
tags: [cluster_host]
- name: Consul barrier file is created
command: "dd if=/dev/zero of={{ barrier_fs_dir }}/.consul-barrier-file cbs=100M bs=1G count={{ consul_volume_size }}"
when: target == "cluster"
tags: [cluster_host]
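# Each barrier file is attached to a fixed loop device (loop0-loop2) only long
# enough to create a filesystem on it; losetup -D below detaches them all, and
# the mount tasks further down re-attach them via the 'loop' mount option.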
- name: The logging barrier file is set up as a loop device
command: "losetup /dev/loop0 {{ barrier_fs_dir }}/.logger-barrier-file"
when: target == "cluster"
tags: [cluster_host]
- name: The registry barrier file is set up as a loop device
command: "losetup /dev/loop1 {{ barrier_fs_dir }}/.registry-barrier-file"
when: target == "cluster"
tags: [cluster_host]
- name: The consul barrier file is set up as a loop device
command: "losetup /dev/loop2 {{ barrier_fs_dir }}/.consul-barrier-file"
when: target == "cluster"
tags: [cluster_host]
- name: The xfs filesystem is created on the replicated barrier filesystems
filesystem:
fstype: xfs
dev: "{{ item }}"
opts: -i size=512
with_items:
- /dev/loop0
- /dev/loop1
when: target == "cluster"
tags: [cluster_host]
- name: The ext4 filesystem is created on the consul barrier volume
filesystem:
fstype: ext4
dev: /dev/loop2
when: target == "cluster"
tags: [cluster_host]
- name: The loop devices that are no longer needed are removed
command: "losetup -D"
when: target == "cluster"
tags: [cluster_host]
- name: The barrier filesystem files are owned by voltha
file:
path: "{{ barrier_fs_dir }}/{{ item }}"
mode: 0755
owner: voltha
group: voltha
with_items:
- ".registry-barrier-file"
- ".logger-barrier-file"
- ".consul-barrier-file"
when: target == "cluster"
tags: [cluster_host]
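# reg_brick1 and log_brick1 are the local glusterfs bricks; the gluster volumes
# built on top of them (registry_volume and logging_volume, mounted at the end
# of this file) are assumed to be created by a separate play.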
- name: A mountpoint for the glusterfs registry brick is created
file:
path: "{{ barrier_fs_dir }}/reg_brick1"
state: directory
mode: 0755
owner: voltha
group: voltha
when: target == "cluster"
tags: [cluster_host]
- name: A mountpoint for the glusterfs logging brick is created
file:
path: "{{ barrier_fs_dir }}/log_brick1"
state: directory
mode: 0755
owner: voltha
group: voltha
when: target == "cluster"
tags: [cluster_host]
- name: The registry brick filesystem is mounted on boot
mount:
path: "{{ barrier_fs_dir }}/reg_brick1"
src: "{{ barrier_fs_dir }}/.registry-barrier-file"
fstype: xfs
opts: loop
state: mounted
when: target == "cluster"
tags: [cluster_host]
- name: The logger brick filesystem is mounted on boot
mount:
path: "{{ barrier_fs_dir }}/log_brick1"
src: "{{ barrier_fs_dir }}/.logger-barrier-file"
fstype: xfs
opts: loop
state: mounted
when: target == "cluster"
tags: [cluster_host]
- name: A directory for the registry glusterfs volume is created
file:
path: "{{ barrier_fs_dir }}/reg_brick1/registry_volume"
state: directory
mode: 0755
owner: voltha
group: voltha
when: target == "cluster"
tags: [cluster_host]
- name: A directory for the logging glusterfs volume is created
file:
path: "{{ barrier_fs_dir }}/log_brick1/logging_volume"
state: directory
mode: 0755
owner: voltha
group: voltha
when: target == "cluster"
tags: [cluster_host]
- name: Directories for voltha processes are created
file:
path: "{{ target_voltha_dir }}/{{ item }}"
state: directory
mode: 0755
owner: voltha
group: voltha
with_items:
- registry_data
- consul/data
- consul/config
when: target == "cluster"
tags: [cluster_host]
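# Consul's data directory gets the ext4 barrier filesystem loop-mounted onto it,
# so consul state is size-bounded in the same way as the registry and log data.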
- name: The consul data filesystem is mounted on boot
mount:
path: "{{ target_voltha_dir }}/consul/data"
src: "{{ barrier_fs_dir }}/.consul-barrier-file"
fstype: ext4
opts: loop
state: mounted
when: target == "cluster"
tags: [cluster_host]
- name: The glusterfs service is started
service:
name: glusterfs-server
enabled: yes
state: started
when: target == "cluster"
tags: [cluster_host]
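# The replicated gluster volumes are mounted from this host's own brick server
# ({{ inventory_hostname }}). With state: present the entries are only added to
# fstab; noauto,x-systemd.automount defers the actual mount until first access,
# presumably so it is not attempted before glusterfs-server is ready.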
- name: The replicated registry filesystem is mounted on boot
mount:
path: "{{ target_voltha_dir }}/registry_data"
src: "{{ inventory_hostname }}:/registry_volume"
fstype: glusterfs
opts: "defaults,_netdev,noauto,x-systemd.automount"
state: present
when: target == "cluster"
tags: [cluster_host]
- name: The replicated logging filesystem is mounted on boot
mount:
path: "/var/log/voltha"
src: "{{ inventory_hostname }}:/logging_volume"
fstype: glusterfs
opts: "defaults,_netdev,noauto,x-systemd.automount"
state: present
when: target == "cluster"
tags: [cluster_host]