---
# Note: When the target == "cluster" the installer
# is running to install voltha in the cluster hosts.
# When the target == "installer" the installer is being
# created.
# Ensure the voltha user has a ~/.ssh directory to receive the ssh
# client config and cluster keys installed by the tasks that follow.
- name: A .ssh directory for the voltha user exists
  file:
    # path: "{{ ansible_env['HOME'] }}/.ssh"
    path: "{{ target_voltha_home }}/.ssh"
    state: directory
    owner: voltha
    group: voltha
  tags: [cluster_host]
# Remove any stale known_hosts so hosts that were re-installed with new
# host keys do not trigger ssh host-key mismatch failures.
- name: known_hosts file is absent for the voltha user
  file:
    path: "{{ target_voltha_home }}/.ssh/known_hosts"
    state: absent
  tags: [cluster_host]
# Install an ssh client config (files/ssh_config) for the voltha user so
# non-interactive intra-cluster ssh is possible without host-key prompts.
- name: Known host checking is disabled
  copy:
    src: files/ssh_config
    dest: "{{ target_voltha_home }}/.ssh/config"
    owner: voltha
    group: voltha
    # Quoted: an unquoted 0600 is parsed by YAML as the octal integer
    # literal (decimal 384) rather than the intended mode string.
    mode: "0600"
  tags: [cluster_host]
# Distribute the pre-generated key material in files/.keys to every
# cluster host under the voltha home directory.
- name: Cluster host keys are propagated to all hosts in the cluster
  copy:
    src: files/.keys
    dest: "{{ target_voltha_home }}"
    owner: voltha
    group: voltha
    # Quoted: an unquoted 0600 is parsed by YAML as octal int 384.
    mode: "0600"
  tags: [cluster_host]
#- name: Required configuration directories are copied
# copy:
# src: "/home/vinstall/{{ item }}"
# dest: "{{ target_voltha_home }}"
# owner: voltha
# group: voltha
# with_items:
# - docker-py
# - netifaces
# - deb_files
# when: target == "cluster"
# tags: [cluster_host]
# Copy the package/config payload directories from the installer host to
# the target. synchronize (rsync) is used instead of copy (see the
# commented-out variant above) for speed on large directory trees.
- name: Required configuration directories are copied
  synchronize:
    src: "/home/vinstall/{{ item }}"
    dest: "{{ target_voltha_home }}"
    archive: false
    owner: false
    perms: false
    recursive: true
    links: true
  with_items:
    - docker-py
    - netifaces
    - deb_files
  # Fixed tag: was "cluster-host", which every other task spells
  # "cluster_host" — a run with --tags cluster_host silently skipped
  # this task.
  tags: [cluster_host]
# Copy the helper scripts used later for package installation
# (install_packages.sh and the sort_packages pair).
- name: Required configuration scripts are copied
  synchronize:
    src: "/home/vinstall/{{ item }}"
    dest: "{{ target_voltha_home }}"
    archive: false
    owner: false
    perms: false
    # Non-recursive: each item is a single file, not a directory.
    recursive: false
    links: true
  with_items:
    - sort_packages.sh
    - sort_packages.py
    - install_packages.sh
  # Fixed tag: was "cluster-host"; the rest of the file uses
  # "cluster_host", so tag-filtered runs missed this task.
  tags: [cluster_host]
# Make the copied helper scripts executable for the owner (0744).
# NOTE(review): this task is tagged [installer] while the tasks that copy
# these scripts are tagged for the cluster hosts — confirm the tag is
# intentional.
- name: The installer scripts are made executable
  file:
    # Fixed Jinja spacing: was "{{target_voltha_home }}".
    path: "{{ target_voltha_home }}/{{ item }}"
    # Quoted so the octal literal isn't parsed as decimal 484.
    mode: "0744"
  with_items:
    - sort_packages.sh
    - sort_packages.py
    - install_packages.sh
  tags: [installer]
# Push the installer host's apt package lists to the target so package
# installation can proceed without network access. The lock and partial
# entries are excluded because they are transient apt runtime state.
- name: apt lists are up-to-date
  synchronize:
    src: "/var/lib/apt/lists"
    dest: "/var/lib/apt/lists"
    archive: false
    owner: false
    perms: false
    rsync_opts:
      - "--exclude=lock"
      - "--exclude=partial"
    recursive: true
    links: true
  tags: [cluster_host]
#- name: upstart barrier filesystem loop mount script is installed
# copy:
# src: "/home/vinstall/losetup.conf"
# dest: /etc/init
# owner: root
# group: root
# mode: 0644
# when: target == "cluster"
# tags: [cluster_host]
#- name: pre-emptive strike to avoid errors during package installation
# apt:
# name: "{{ item }}"
# state: absent
# with_items:
# - ubuntu-core-launcher
# - snapd
# tags: [cluster_host]
# Create the log directory that is later used as the mountpoint for the
# replicated glusterfs logging volume (see the mount task at the bottom
# of this file).
- name: A voltha directory under /var/log for voltha logs exists
  file:
    path: "/var/log/voltha"
    state: directory
  tags: [cluster_host]
# Run the previously-copied install_packages.sh to install all .deb
# dependencies offline. (Typo fixed in the displayed name: DONT'T.)
- name: Dependent software is installed (this can take about 10 Min, DON'T PANIC, go for coffee instead)
  command: "{{ target_voltha_home }}/install_packages.sh"
  # command: dpkg -R -i "{{ target_voltha_home }}/deb_files"
  when: target == "cluster"
  tags: [cluster_host]
#- name: Dependent software is initialized
# command: apt-get -y -f install
# when: target == "cluster"
# tags: [cluster_host]
# Install the python packages offline from the local wheel/sdist
# directories synced earlier (--no-index prevents any PyPI access).
- name: Python packages are installed
  command: pip install {{ item }} --no-index --find-links "file://{{ target_voltha_home }}/{{ item }}"
  with_items:
    - docker-py
    - netifaces
  when: target == "cluster"
  tags: [cluster_host]
# To debug package installation, comment this out to review the
# temporary interim files.
# Remove the interim installation payloads now that packages are
# installed.
# NOTE(review): the list deletes deb_files1/deb_files2 but not the
# deb_files directory that was synced earlier — presumably the install
# scripts split deb_files into those two; confirm deb_files itself
# should survive.
- name: Configuration directories and files are deleted
  file:
    path: "{{ target_voltha_home }}/{{ item }}"
    state: absent
  with_items:
    - docker-py
    - netifaces
    - sort_packages.sh
    - sort_packages.py
    - install_packages.sh
    - deb_files1
    - deb_files2
  when: target == "cluster"
  tags: [cluster_host]
# Pre-allocate fixed-size backing files for the loop-mounted "barrier"
# filesystems. NOTE(review): command/fallocate is not idempotent — a
# re-run re-allocates over the existing file; confirm that is acceptable.
- name: Logging barrier file is created
  command: "fallocate -l {{ logger_volume_size }}G {{ barrier_fs_dir }}/.logger-barrier-file"
  when: target == "cluster"
  tags: [cluster_host]
- name: Registry barrier file is created
  command: "fallocate -l {{ registry_volume_size }}G {{ barrier_fs_dir }}/.registry-barrier-file"
  when: target == "cluster"
  tags: [cluster_host]
#- name: Consul barrier file is created
# command: "fallocate -l {{ consul_volume_size }}G {{ barrier_fs_dir }}/.consul-barrier-file"
# when: target == "cluster"
# tags: [cluster_host]
# Attach the barrier files to loop devices so filesystems can be created
# on them. NOTE(review): /dev/loop0 and /dev/loop1 are hard-coded —
# assumes they are free on the target host; confirm.
- name: The logging barrier file is set up as a loop device
  command: "losetup /dev/loop0 {{ barrier_fs_dir }}/.logger-barrier-file"
  when: target == "cluster"
  tags: [cluster_host]
- name: The registry barrier file is set up as a loop device
  command: "losetup /dev/loop1 {{ barrier_fs_dir }}/.registry-barrier-file"
  when: target == "cluster"
  tags: [cluster_host]
# - name: The consul barrier file is set up as a loop device
#   command: "losetup /dev/loop2 {{ barrier_fs_dir }}/.consul-barrier-file"
#   when: target == "cluster"
#   tags: [cluster_host]
# Create xfs on the two loop devices. "-i size=512" is the inode size
# commonly required for glusterfs bricks (extended attributes).
- name: The xfs filesystem is created on the replicated barrier file systems
  filesystem:
    fstype: xfs
    dev: "{{ item }}"
    opts: -i size=512
  with_items:
    - /dev/loop0
    - /dev/loop1
  when: target == "cluster"
  tags: [cluster_host]
# - name: The ext4 filesystem is created on the consul barrier volume
#   filesystem:
#     fstype: ext4
#     dev: /dev/loop2
#   when: target == "cluster"
#   tags: [cluster_host]
# Detach the loop devices; the later mount tasks re-attach via the
# "loop" mount option. NOTE(review): losetup -D detaches ALL loop
# devices on the host, not just loop0/loop1 — confirm no other loop
# devices are in use.
- name: The loop devices that are no longer needed are removed
  command: "losetup -D"
  when: target == "cluster"
  tags: [cluster_host]
# Hand the barrier backing files to the voltha user. (Typo fixed in the
# displayed name: "fileystem".)
- name: The barrier filesystem files are owned by voltha
  file:
    path: "{{ barrier_fs_dir }}/{{ item }}"
    # Quoted so the octal literal isn't parsed as decimal 493.
    mode: "0755"
    owner: voltha
    group: voltha
  with_items:
    - ".registry-barrier-file"
    - ".logger-barrier-file"
    # - ".consul-barrier-file"
  when: target == "cluster"
  tags: [cluster_host]
# Create the mountpoints where the loop-backed barrier filesystems are
# mounted; these become the glusterfs bricks.
- name: A mountpoint for the glusterfs registry brick is created
  file:
    path: "{{ barrier_fs_dir }}/reg_brick1"
    state: directory
    # Quoted: unquoted 0755 parses as the octal int 493.
    mode: "0755"
    owner: voltha
    group: voltha
  when: target == "cluster"
  tags: [cluster_host]
- name: A mountpoint for the glusterfs logging brick is created
  file:
    path: "{{ barrier_fs_dir }}/log_brick1"
    state: directory
    mode: "0755"
    owner: voltha
    group: voltha
  when: target == "cluster"
  tags: [cluster_host]
# Loop-mount the barrier files onto the brick mountpoints and persist
# the entries in fstab (state: mounted = mount now + record for boot).
- name: The replicated registry filesystem is mounted on boot
  mount:
    path: "{{ barrier_fs_dir }}/reg_brick1"
    src: "{{ barrier_fs_dir }}/.registry-barrier-file"
    fstype: xfs
    opts: loop
    state: mounted
  when: target == "cluster"
  tags: [cluster_host]
- name: The replicated logger filesystem is mounted on boot
  mount:
    path: "{{ barrier_fs_dir }}/log_brick1"
    src: "{{ barrier_fs_dir }}/.logger-barrier-file"
    fstype: xfs
    opts: loop
    state: mounted
  when: target == "cluster"
  tags: [cluster_host]
# Create the per-brick subdirectories that back the glusterfs volumes
# (registry_volume and logging_volume referenced by the mounts below).
- name: A directory for the registry glusterfs volume is created
  file:
    path: "{{ barrier_fs_dir }}/reg_brick1/registry_volume"
    state: directory
    # Quoted: unquoted 0755 parses as the octal int 493.
    mode: "0755"
    owner: voltha
    group: voltha
  when: target == "cluster"
  tags: [cluster_host]
- name: A directory for the logging glusterfs volume is created
  file:
    path: "{{ barrier_fs_dir }}/log_brick1/logging_volume"
    state: directory
    mode: "0755"
    owner: voltha
    group: voltha
  when: target == "cluster"
  tags: [cluster_host]
# Create the runtime data/config directories used by the voltha
# containers (registry data, consul data and config).
- name: Directories for voltha processes are created
  file:
    path: "{{ target_voltha_dir }}/{{ item }}"
    state: directory
    # Quoted: unquoted 0755 parses as the octal int 493.
    mode: "0755"
    owner: voltha
    group: voltha
  with_items:
    - registry_data
    - consul/data
    - consul/config
  when: target == "cluster"
  tags: [cluster_host]
#- name: The consul data filesystem is mounted on boot
# mount:
# path: "{{ target_voltha_dir }}/consul/data"
# src: "{{ barrier_fs_dir }}/.consul-barrier-file"
# fstype: ext4
# opts: loop
# state: mounted
# when: target == "cluster"
# tags: [cluster_host]
# Start glusterfs now and enable it at boot; the mounts below depend on
# the gluster daemon being up.
- name: The glusterfs service is started
  service:
    name: glusterfs-server
    # Canonical boolean (was the YAML-1.1 truthy "yes").
    enabled: true
    state: started
  when: target == "cluster"
  tags: [cluster_host]
# Record fstab entries for the glusterfs volumes (state: present writes
# fstab only; noauto + x-systemd.automount defers mounting until first
# access, after the gluster daemon is up).
# Renamed: the original name duplicated the xfs-mount task above
# ("The replicated registry filesystem is mounted on boot"), which
# breaks --start-at-task and makes run logs ambiguous.
- name: The glusterfs registry volume is mounted on boot
  mount:
    path: "{{ target_voltha_dir }}/registry_data"
    src: "{{ inventory_hostname }}:/registry_volume"
    fstype: glusterfs
    opts: "defaults,_netdev,noauto,x-systemd.automount"
    state: present
  when: target == "cluster"
  tags: [cluster_host]
- name: The glusterfs logging volume is mounted on boot
  mount:
    path: "/var/log/voltha"
    src: "{{ inventory_hostname }}:/logging_volume"
    fstype: glusterfs
    opts: "defaults,_netdev,noauto,x-systemd.automount"
    state: present
  when: target == "cluster"
  tags: [cluster_host]