/*
* Copyright 2012 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import org.opencord.gradle.rules.*
import org.yaml.snakeyaml.Yaml
allprojects {
    apply plugin: 'base'
    apply plugin: 'de.gesellix.docker'
    //apply plugin: 'com.tmiyamon.config'

    docker {
        // dockerHost = System.env.DOCKER_HOST ?: 'unix:///var/run/docker.sock'
        // dockerHost = System.env.DOCKER_HOST ?: 'https://192.168.99.100:2376'
        // certPath = System.getProperty('docker.cert.path') ?: "${System.getProperty('user.home')}/.docker/machine/machines/default"
        // authConfigPlain = [
        //     "username" : "joe",
        //     "password" : "some-pw-as-needed",
        //     "email" : "joe@acme.com",
        //     "serveraddress" : "https://index.docker.io/v1/"
        // ]
    }
}
ext {
    // Upstream registry to simplify filling out the comps table below
    upstreamReg = project.hasProperty('upstreamReg') ? project.getProperty('upstreamReg') : 'docker.io'

    // Deployment target config file (YAML format); this can be overridden from the command line
    // using the -PdeployConfig=<file-path> syntax. See the illustrative layout sketched in the
    // comments after this block.
    deployConfig = project.hasProperty('deployConfig') ? project.getProperty('deployConfig') : './config/default.yml'

    println "Using deployment config: $deployConfig"
    File configFile = new File(deployConfig)
    def yaml = new Yaml()
    config = yaml.load(configFile.newReader())

    // Target registry used to publish the docker images needed for deployment
    targetReg = project.hasProperty('targetReg')
        ? project.getProperty('targetReg')
        : config.docker && config.docker.registry
            ? config.docker.registry
            : config.seedServer.ip
                ? config.seedServer.ip + ":5000"
                : 'localhost:5000'

    // The tag used to tag the docker images pushed to the target registry
    targetTag = project.hasProperty('targetTag')
        ? project.getProperty('targetTag')
        : config.docker && config.docker.imageVersion
            ? config.docker.imageVersion
            : 'candidate'

    comps = [
        'consul': [
            'type': 'image',
            'upstream': upstreamReg,
            'name': 'consul',
            'digest': 'sha256:0dc990ff3c44d5b5395475bcc5ebdae4fc8b67f69e17942a8b9793b3df74d290'
        ]
    ]
}
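
// A minimal sketch of the deployment config YAML, based on the keys this script reads
// (config.seedServer.*, config.docker.*, config.passwords.*, config.otherServers.*).
// All values below are illustrative assumptions, not project defaults:
//
//   seedServer:
//     ip: '10.6.0.1'
//     ansible_user: 'ubuntu'
//     fabric_ip: '10.6.1.1/24'
//     management_ip: '10.6.0.1/24'
//     management_iface: 'eth1'
//     external_iface: 'eth0'
//     domain: 'cord.lab'
//     skipTags: []
//   docker:
//     registry: '10.100.198.200:5000'
//     imageVersion: 'candidate'
//   passwords:
//     compute_node: 'ubuntu'
//     maas_admin: 'admin'
//     maas_user: 'cord'
//   otherServers:
//     location: '<provisioning-roles-repo-url>'
//     rolesPath: '<path-to-roles>'
//     role: 'compute-node'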
task fetchUpstreamImages {
    comps.each { name, spec -> if (spec.type == 'image') { dependsOn "fetch" + name } }
}
// Helpers that derive the build metadata (timestamp, commit, branch) used to label the docker images
def getBuildTimestamp() {
    def cal = Calendar.getInstance(TimeZone.getTimeZone("UTC"))
    def date = cal.getTime()
    def formattedDate = date.format("yyyy-MM-dd'T'HH:mm:ssZ")
    return formattedDate
}
def getCommitDate = { ->
    def stdOut = new ByteArrayOutputStream()
    exec {
        commandLine "git", "log", "--pretty=format:%cd", "--date=format:%FT%T%z", "-n", "1"
        standardOutput = stdOut
    }
    return stdOut.toString().trim()
}
def getCommitHash = { ->
    def hashStdOut = new ByteArrayOutputStream()
    exec {
        commandLine "git", "log", "--pretty=format:%H", "-n", "1"
        standardOutput = hashStdOut
    }
    return hashStdOut.toString().trim()
}
// Returns true when the local tree has uncommitted changes or any local branches, as reported by repo
def isModified = { ->
    def statusOut = new ByteArrayOutputStream()
    def branchesOut = new ByteArrayOutputStream()
    exec {
        commandLine "bash", "-c", "repo --color=never --no-pager status . | tail -n +2 | wc -l"
        standardOutput = statusOut
    }
    exec {
        commandLine "bash", "-c", "repo --color=never --no-pager branches | wc -l"
        standardOutput = branchesOut
    }
    def statusVal = statusOut.toString().trim()
    def branchesVal = branchesOut.toString().trim()
    return statusVal != "0" || branchesVal != "0"
}
def getBranchName = { ->
    def stdout = new ByteArrayOutputStream()
    exec {
        commandLine "bash", "-c", "repo --color=never --no-pager info -l -o | grep 'Manifest branch:' | awk '{print \$NF}'"
        standardOutput = stdout
    }
    def val = stdout.toString().trim()
    // isModified() returns a boolean, so use it directly rather than comparing against "0"
    if ( isModified() ) {
        val += '[modified]'
    }
    return val
}
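
// The helpers above supply the org.label-schema.* values attached to every image built below;
// an illustrative (made-up) expansion of one generated build command:
//   docker build \
//     --label org.label-schema.build-date=2016-01-01T00:00:00+0000 \
//     --label org.label-schema.vcs-ref=<commit-hash> \
//     --label org.label-schema.vcs-ref-date=<commit-date> \
//     --label org.label-schema.version=master[modified] \
//     -t cord-maas-switchq ./switchq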
// Switch Configuration Image
task buildSwitchqImage(type: Exec) {
    commandLine "docker", 'build', '--label', 'org.label-schema.build-date=' + getBuildTimestamp(), '--label', 'org.label-schema.vcs-ref=' + getCommitHash(), '--label', 'org.label-schema.vcs-ref-date=' + getCommitDate(), '--label', 'org.label-schema.version=' + getBranchName(), '-t', 'cord-maas-switchq', './switchq'
}

task tagSwitchqImage(type: Exec) {
    dependsOn buildSwitchqImage
    commandLine "docker", 'tag', 'cord-maas-switchq', "$targetReg/cord-maas-switchq:$targetTag"
}

task publishSwitchqImage(type: Exec) {
    dependsOn tagSwitchqImage
    commandLine "docker", 'push', "$targetReg/cord-maas-switchq:$targetTag"
}
// IP Allocator Image
task buildAllocationImage(type: Exec) {
    commandLine "docker", 'build', '--label', 'org.label-schema.build-date=' + getBuildTimestamp(), '--label', 'org.label-schema.vcs-ref=' + getCommitHash(), '--label', 'org.label-schema.vcs-ref-date=' + getCommitDate(), '--label', 'org.label-schema.version=' + getBranchName(), '-t', 'cord-ip-allocator', './ip-allocator'
}

task tagAllocationImage(type: Exec) {
    dependsOn buildAllocationImage
    commandLine "docker", 'tag', 'cord-ip-allocator', "$targetReg/cord-ip-allocator:$targetTag"
}

task publishAllocationImage(type: Exec) {
    dependsOn tagAllocationImage
    commandLine "docker", 'push', "$targetReg/cord-ip-allocator:$targetTag"
}
// Provisioner Image
task buildProvisionerImage(type: Exec) {
    commandLine "docker", 'build', '--label', 'org.label-schema.build-date=' + getBuildTimestamp(), '--label', 'org.label-schema.vcs-ref=' + getCommitHash(), '--label', 'org.label-schema.vcs-ref-date=' + getCommitDate(), '--label', 'org.label-schema.version=' + getBranchName(), '-t', 'cord-provisioner', './provisioner'
}

task tagProvisionerImage(type: Exec) {
    dependsOn buildProvisionerImage
    commandLine "docker", 'tag', 'cord-provisioner', "$targetReg/cord-provisioner:$targetTag"
}

task publishProvisionerImage(type: Exec) {
    dependsOn tagProvisionerImage
    commandLine "docker", 'push', "$targetReg/cord-provisioner:$targetTag"
}
// Config Generator Image
task buildConfigGeneratorImage(type: Exec) {
    commandLine "docker", 'build', '--label', 'org.label-schema.build-date=' + getBuildTimestamp(), '--label', 'org.label-schema.vcs-ref=' + getCommitHash(), '--label', 'org.label-schema.vcs-ref-date=' + getCommitDate(), '--label', 'org.label-schema.version=' + getBranchName(), '-t', 'config-generator', './config-generator'
}

task tagConfigGeneratorImage(type: Exec) {
    dependsOn buildConfigGeneratorImage
    commandLine "docker", 'tag', 'config-generator', "$targetReg/config-generator:$targetTag"
}

task publishConfigGeneratorImage(type: Exec) {
    dependsOn tagConfigGeneratorImage
    commandLine "docker", 'push', "$targetReg/config-generator:$targetTag"
}
// Automation Image
task buildAutomationImage(type: Exec) {
    commandLine "docker", 'build', '--label', 'org.label-schema.build-date=' + getBuildTimestamp(), '--label', 'org.label-schema.vcs-ref=' + getCommitHash(), '--label', 'org.label-schema.vcs-ref-date=' + getCommitDate(), '--label', 'org.label-schema.version=' + getBranchName(), '-t', "cord-maas-automation", "-f", "./automation/Dockerfile", "./automation"
}

task tagAutomationImage(type: Exec) {
    dependsOn buildAutomationImage
    commandLine "docker", 'tag', 'cord-maas-automation', "$targetReg/cord-maas-automation:$targetTag"
}

task publishAutomationImage(type: Exec) {
    dependsOn tagAutomationImage
    commandLine "docker", 'push', "$targetReg/cord-maas-automation:$targetTag"
}
// DHCP Harvester Image
task buildHarvesterImage(type: Exec) {
    commandLine "docker", 'build', '--label', 'org.label-schema.build-date=' + getBuildTimestamp(), '--label', 'org.label-schema.vcs-ref=' + getCommitHash(), '--label', 'org.label-schema.vcs-ref-date=' + getCommitDate(), '--label', 'org.label-schema.version=' + getBranchName(), '-t', "cord-dhcp-harvester", "./harvester"
}

task tagHarvesterImage(type: Exec) {
    dependsOn buildHarvesterImage
    commandLine "docker", 'tag', 'cord-dhcp-harvester', "$targetReg/cord-dhcp-harvester:$targetTag"
}

task publishHarvesterImage(type: Exec) {
    dependsOn tagHarvesterImage
    commandLine "docker", 'push', "$targetReg/cord-dhcp-harvester:$targetTag"
}
// ~~~~~~~~~~~~~~~~~~~ Global tasks ~~~~~~~~~~~~~~~~~~~~~~~
task updateDocker (type: Exec) {
    commandLine 'sudo', 'utils/enable-remote-docker-registry', "$targetReg"
}
// To be used to fetch upstream binaries, clone repos, etc.
task fetch {
    // This is where we fetch upstream artifacts so that internet access is not needed
    // during the build phase.
    // Placeholder example: pull the base images used by the Dockerfiles.
    dependsOn fetchUpstreamImages
    doLast {
        // On an Exec task a second commandLine assignment would replace the first,
        // so each pull runs in its own exec block.
        exec { commandLine "docker", "pull", "golang:alpine" }
        exec { commandLine "docker", "pull", "python:2.7-alpine" }
    }
}
// To be used to generate all of the binaries that need to be present on the target
// as docker images in the local docker runner.
task buildImages {
    dependsOn buildHarvesterImage
    dependsOn buildAutomationImage
    dependsOn buildAllocationImage
    dependsOn buildProvisionerImage
    dependsOn buildConfigGeneratorImage
    dependsOn buildSwitchqImage
}
task tagImages {
    dependsOn tagHarvesterImage
    dependsOn tagAutomationImage
    dependsOn tagAllocationImage
    dependsOn tagProvisionerImage
    dependsOn tagConfigGeneratorImage
    dependsOn tagSwitchqImage
}
task publish {
    //FIXME: This works because the upstream project primes the nodes before running this.
    comps.each { name, spec -> if (spec.type == 'image') { dependsOn "publish" + name } }
    dependsOn publishHarvesterImage
    dependsOn publishAutomationImage
    dependsOn publishAllocationImage
    dependsOn publishProvisionerImage
    dependsOn publishConfigGeneratorImage
    dependsOn publishSwitchqImage
}
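
// Illustrative invocation of the aggregate tasks above (registry address and tag are assumptions,
// normally taken from the deployment config):
//   ./gradlew -PdeployConfig=./config/default.yml -PtargetReg=10.100.198.200:5000 \
//       -PtargetTag=candidate buildImages tagImages publish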
// ~~~~~~~~~~~~~~~~~~~ Deployment / Test Tasks ~~~~~~~~~~~~~~~~~~~~~~~
// Render the list as a single "--<prefix>=<v1><sep><v2>..." command-line argument;
// an empty list renders as an empty string.
List.metaClass.asParam = { prefix, sep ->
    if (delegate.size() == 0) {
        return ""
    }
    String result = "--" + prefix + "="
    String p = ""
    delegate.each {
        result += p + "${it}"
        p = sep
    }
    result
}
// Append "name=value" to the list when the value is set; otherwise return the list unchanged.
List.metaClass.p = { value, name ->
    if (value != null && value != "") {
        delegate << name + "=" + value
    } else {
        delegate
    }
}

// Append the contents of another spec (e.g. a list of extra vars) when it is set;
// otherwise return the list unchanged.
List.metaClass.p = { spec ->
    if (spec != null && spec != "") {
        delegate += spec
    } else {
        delegate
    }
}
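
// Illustrative behaviour of the helpers above (made-up values):
//   [].p("10.6.1.1", "fabric_ip")             -> ["fabric_ip=10.6.1.1"]
//   [].p(null, "fabric_ip")                   -> []
//   ["a=1", "b=2"].asParam("extra-vars", " ") -> "--extra-vars=a=1 b=2"
//   [].asParam("skip-tags", ",")              -> ""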
task prime (type: Exec) {
    executable = "ansible-playbook"
    args = ["-i", config.seedServer.ip + ',']

    if ( config.seedServer.ansible_user != null && config.seedServer.ansible_user != "" ) {
        args = args << "--user=$config.seedServer.ansible_user"
    }

    if ( config.debug ) {
        args = args << "-vvvv"
    }
    def extraVars = []
    if (config.seedServer) {
        extraVars = extraVars.p(config.seedServer.extraVars)
            .p(config.seedServer.ansible_ssh_pass, "ansible_ssh_pass")
            .p(config.seedServer.ansible_become_pass, "ansible_become_pass")
            .p(config.seedServer.fabric_ip, "fabric_ip")
            .p(config.seedServer.fabric_range_low, "fabric_range_low")
            .p(config.seedServer.fabric_range_high, "fabric_range_high")
            .p(config.seedServer.management_ip, "management_ip")
            .p(config.seedServer.management_range_low, "management_range_low")
            .p(config.seedServer.management_range_high, "management_range_high")
            .p(config.seedServer.management_gw, "management_gw")
            .p(config.seedServer.management_bc, "management_bc")
            .p(config.seedServer.management_network, "management_network")
            .p(config.seedServer.management_iface, "management_iface")
            .p(config.seedServer.external_ip, "external_ip")
            .p(config.seedServer.external_gw, "external_gw")
            .p(config.seedServer.external_bc, "external_bc")
            .p(config.seedServer.external_network, "external_network")
            .p(config.seedServer.external_iface, "external_iface")
            .p(config.seedServer.fabric_iface, "fabric_iface")
            .p(config.seedServer.domain, "domain")
            .p(config.seedServer.virtualbox_support, "virtualbox_support")
            .p(config.seedServer.power_helper_user, "power_helper_user")
            .p(config.seedServer.power_helper_host, "power_helper_host")
            .p(config.seedServer.port, "ansible_ssh_port")
    }
    if (config.passwords) {
        extraVars = extraVars.p(config.passwords.compute_node, "password_compute_node")
            .p(config.passwords.maas_admin, "password_maas_admin")
            .p(config.passwords.maas_user, "password_maas_user")
    }

    if (config.otherServers) {
        extraVars = extraVars.p(config.otherServers.location, "prov_location")
            .p(config.otherServers.rolesPath, "prov_role_path")
            .p(config.otherServers.role, "prov_role")
    }

    extraVars = extraVars.p("$targetReg", "deploy_docker_registry")
        .p("$targetTag", "deploy_docker_tag")
    // Setting the compute node password is skipped because this playbook runs against the
    // head node, and we don't want to change the head node password since that node was
    // manually set up.
    def skipTags = [].p(config.seedServer.skipTags).p('set_compute_node_password')

    args = args.p(skipTags.asParam("skip-tags", ",")).p(extraVars.asParam("extra-vars", " ")) << "prime-node.yml"
}
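
// Roughly, the prime task assembles an ansible-playbook command of the form shown below
// (values are illustrative; shell quoting added for readability):
//   ansible-playbook -i 10.6.0.1, --user=ubuntu \
//       --skip-tags=set_compute_node_password \
//       --extra-vars="fabric_ip=10.6.1.1/24 deploy_docker_registry=10.100.198.200:5000 deploy_docker_tag=candidate" \
//       prime-node.yml
// The deployBase task below builds the same command line but runs head-node.yml instead.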
task deployBase(type: Exec) {
    executable = "ansible-playbook"
    args = ["-i", config.seedServer.ip + ',']

    if ( config.seedServer.ansible_user != null && config.seedServer.ansible_user != "" ) {
        args = args << "--user=$config.seedServer.ansible_user"
    }

    if ( config.debug ) {
        args = args << "-vvvv"
    }
    def extraVars = []
    if (config.seedServer) {
        extraVars = extraVars.p(config.seedServer.extraVars)
            .p(config.seedServer.ansible_ssh_pass, "ansible_ssh_pass")
            .p(config.seedServer.ansible_become_pass, "ansible_become_pass")
            .p(config.seedServer.fabric_ip, "fabric_ip")
            .p(config.seedServer.fabric_range_low, "fabric_range_low")
            .p(config.seedServer.fabric_range_high, "fabric_range_high")
            .p(config.seedServer.management_ip, "management_ip")
            .p(config.seedServer.management_range_low, "management_range_low")
            .p(config.seedServer.management_range_high, "management_range_high")
            .p(config.seedServer.management_gw, "management_gw")
            .p(config.seedServer.management_network, "management_network")
            .p(config.seedServer.management_iface, "management_iface")
            .p(config.seedServer.external_ip, "external_ip")
            .p(config.seedServer.external_gw, "external_gw")
            .p(config.seedServer.external_network, "external_network")
            .p(config.seedServer.external_iface, "external_iface")
            .p(config.seedServer.fabric_iface, "fabric_iface")
            .p(config.seedServer.domain, "domain")
            .p(config.seedServer.virtualbox_support, "virtualbox_support")
            .p(config.seedServer.power_helper_user, "power_helper_user")
            .p(config.seedServer.power_helper_host, "power_helper_host")
            .p(config.seedServer.port, "ansible_ssh_port")
    }
    if (config.passwords) {
        extraVars = extraVars.p(config.passwords.compute_node, "password_compute_node")
            .p(config.passwords.maas_admin, "password_maas_admin")
            .p(config.passwords.maas_user, "password_maas_user")
    }

    if (config.otherServers) {
        extraVars = extraVars.p(config.otherServers.location, "prov_location")
            .p(config.otherServers.rolesPath, "prov_role_path")
            .p(config.otherServers.role, "prov_role")
    }

    extraVars = extraVars.p("$targetReg", "deploy_docker_registry")
        .p("$targetTag", "deploy_docker_tag")
    // Setting the compute node password is skipped because this playbook runs against the
    // head node, and we don't want to change the head node password since that node was
    // manually set up.
    def skipTags = [].p(config.seedServer.skipTags).p('set_compute_node_password')

    args = args.p(skipTags.asParam("skip-tags", ",")).p(extraVars.asParam("extra-vars", " ")) << "head-node.yml"
}
prime.dependsOn {
    updateDocker
}
tasks.addRule(new DockerFetchRule(project))
tasks.addRule(new DockerPublishRule(project, project(':maas').prime))
tasks.addRule(new DockerTagRule(project))
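
// The rules registered above synthesize the per-component docker tasks referenced earlier
// (e.g. "fetch" + name and "publish" + name for each image entry in comps, such as "fetchconsul"
// and "publishconsul"); the exact naming and behaviour are defined by the rule classes in
// org.opencord.gradle.rules.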