Add persistence to provisioning and roll it through the rest of the services
Change-Id: Ia0d5a49dc0d88dbe6780c76483fd2247ad631bdf
diff --git a/automation/Dockerfile.ansible b/automation/Dockerfile.ansible
deleted file mode 100644
index ac3c520..0000000
--- a/automation/Dockerfile.ansible
+++ /dev/null
@@ -1,44 +0,0 @@
-FROM ubuntu:14.04
-
-# Base image information borrowed by official golang wheezy Dockerfile
-RUN apt-get update && apt-get install -y --no-install-recommends \
- g++ \
- gcc \
- libc6-dev \
- make \
- curl \
- && rm -rf /var/lib/apt/lists/*
-
-ENV GOLANG_VERSION 1.6.2
-ENV GOLANG_DOWNLOAD_URL https://golang.org/dl/go$GOLANG_VERSION.linux-amd64.tar.gz
-ENV GOLANG_DOWNLOAD_SHA256 e40c36ae71756198478624ed1bb4ce17597b3c19d243f3f0899bb5740d56212a
-
-RUN curl -kfsSL "$GOLANG_DOWNLOAD_URL" -o golang.tar.gz \
- && echo "$GOLANG_DOWNLOAD_SHA256 golang.tar.gz" | sha256sum -c - \
- && tar -C /usr/local -xzf golang.tar.gz \
- && rm golang.tar.gz
-
-ENV GOPATH /go
-ENV PATH $GOPATH/bin:/usr/local/go/bin:$PATH
-
-RUN mkdir -p "$GOPATH/src" "$GOPATH/bin" && chmod -R 777 "$GOPATH"
-
-# CORD Automation Dockerfile
-WORKDIR $GOPATH
-
-RUN apt-get update && \
- apt-get install -y software-properties-common && \
- apt-add-repository ppa:ansible/ansible && \
- apt-get update -y -m && \
- apt-get install -y git ansible
-
-RUN go get github.com/tools/godep
-ADD . $GOPATH/src/gerrit.opencord.org/maas/cord-maas-automation
-
-WORKDIR $GOPATH/src/gerrit.opencord.org/maas/cord-maas-automation
-RUN $GOPATH/bin/godep restore
-
-WORKDIR $GOPATH
-RUN go install gerrit.opencord.org/maas/cord-maas-automation
-
-ENTRYPOINT ["$GOPATH/bin/cord-maas-automation"]
diff --git a/automation/Godeps/Godeps.json b/automation/Godeps/Godeps.json
index a267c3c..88da63d 100644
--- a/automation/Godeps/Godeps.json
+++ b/automation/Godeps/Godeps.json
@@ -7,11 +7,6 @@
],
"Deps": [
{
- "ImportPath": "github.com/fzzy/radix/redis",
- "Comment": "v0.5.6-2-g3528e87",
- "Rev": "3528e87a910c6730810cec1be187f989d7d9442a"
- },
- {
"ImportPath": "github.com/juju/gomaasapi",
"Rev": "e173bc8d8d3304ff11b0ded5f6d4eea0cb560a40"
},
diff --git a/automation/maas-flow.go b/automation/maas-flow.go
index 84337aa..be9459f 100644
--- a/automation/maas-flow.go
+++ b/automation/maas-flow.go
@@ -34,7 +34,7 @@
PowerHelperHost string `default:"127.0.0.1" envconfig:"POWER_HELPER_HOST"`
PowerHelperScript string `default:"" envconfig:"POWER_HELPER_SCRIPT"`
ProvisionUrl string `default:"" envconfig:"PROVISION_URL"`
- ProvisionTtl string `default:"30m" envconfig:"PROVISION_TTL"`
+ ProvisionTtl string `default:"1h" envconfig:"PROVISION_TTL"`
}
var apiKey = flag.String("apikey", "", "key with which to access MAAS server")
@@ -104,7 +104,7 @@
Preview: *preview,
Verbose: *verbose,
AlwaysRename: *always,
- ProvTracker: NewTracker(),
+ Provisioner: NewProvisioner(&ProvisionerConfig{Url: config.ProvisionUrl}),
ProvisionURL: config.ProvisionUrl,
PowerHelper: config.PowerHelperScript,
PowerHelperUser: config.PowerHelperUser,
diff --git a/automation/provisioner_api.go b/automation/provisioner_api.go
new file mode 100644
index 0000000..899cde5
--- /dev/null
+++ b/automation/provisioner_api.go
@@ -0,0 +1,121 @@
+package main
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "net/http"
+)
+
+type ProvisionStatus int
+
+const (
+ Pending ProvisionStatus = iota
+ Running
+ Complete
+ Failed
+)
+
+func (s ProvisionStatus) String() string {
+ switch s {
+ case Pending:
+ return "PENDING"
+ case Running:
+ return "RUNNING"
+ case Complete:
+ return "COMPLETE"
+ case Failed:
+ return "FAILED"
+ }
+ return "INVALID PROVISION STATUS"
+}
+
+type ProvisionRecord struct {
+ Status ProvisionStatus `json:"status"`
+ Timestamp int64
+}
+
+type ProvisionRequest struct {
+ Id string `json:"id"`
+ Name string `json:"name"`
+ Ip string `json:"ip"`
+ Mac string `json:"mac"`
+}
+
+type Provisioner interface {
+ Get(id string) (*ProvisionRecord, error)
+ Provision(prov *ProvisionRequest) error
+ Clear(id string) error
+}
+
+type ProvisionerConfig struct {
+ Url string
+}
+
+func NewProvisioner(config *ProvisionerConfig) Provisioner {
+ return config
+}
+
+func (p *ProvisionerConfig) Get(id string) (*ProvisionRecord, error) {
+ resp, err := http.Get(p.Url + "/" + id)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+ decoder := json.NewDecoder(resp.Body)
+
+ var record ProvisionRecord
+
+ switch resp.StatusCode {
+ case http.StatusOK, http.StatusAccepted:
+ err = decoder.Decode(&record)
+ if err != nil {
+ return nil, err
+ }
+ return &record, nil
+ case http.StatusNotFound:
+ return nil, nil
+ default:
+ return nil, fmt.Errorf("%s", resp.Status)
+ }
+}
+
+func (p *ProvisionerConfig) Provision(prov *ProvisionRequest) error {
+ hc := http.Client{}
+ data, err := json.Marshal(prov)
+ if err != nil {
+ return err
+ }
+ req, err := http.NewRequest("POST", p.Url, bytes.NewReader(data))
+ if err != nil {
+ return err
+ }
+ req.Header.Add("Content-Type", "application/json")
+ resp, err := hc.Do(req)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+ if resp.StatusCode != http.StatusAccepted {
+ return fmt.Errorf("unexpected response : %s", resp.Status)
+ }
+ return nil
+}
+
+func (p *ProvisionerConfig) Clear(id string) error {
+ hc := http.Client{}
+ req, err := http.NewRequest("DELETE", p.Url+"/"+id, nil)
+ if err != nil {
+ return err
+ }
+
+ resp, err := hc.Do(req)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+ if resp.StatusCode != http.StatusOK {
+ return fmt.Errorf("unexpected response : %s", resp.Status)
+ }
+ return nil
+}
diff --git a/automation/state.go b/automation/state.go
index 5890b31..7fe1760 100644
--- a/automation/state.go
+++ b/automation/state.go
@@ -1,11 +1,9 @@
package main
import (
- "bytes"
"encoding/json"
"fmt"
"log"
- "net/http"
"net/url"
"os/exec"
"regexp"
@@ -49,7 +47,7 @@
Verbose bool
Preview bool
AlwaysRename bool
- ProvTracker Tracker
+ Provisioner Provisioner
ProvisionURL string
ProvisionTTL time.Duration
PowerHelper string
@@ -153,7 +151,7 @@
updateNodeName(client, node, options)
}
- options.ProvTracker.Clear(node.ID())
+ options.Provisioner.Clear(node.ID())
return nil
}
@@ -168,186 +166,52 @@
updateNodeName(client, node, options)
}
- record, err := options.ProvTracker.Get(node.ID())
+ record, err := options.Provisioner.Get(node.ID())
if err != nil {
log.Printf("[warn] unable to retrieve provisioning state of node '%s' : %s", node.Hostname(), err)
- } else if record.State == Unprovisioned || record.State == ProvisionError {
+ } else if record == nil || record.Status == Failed {
+ var label string
+ if record == nil {
+ label = "NotFound"
+ } else {
+ label = record.Status.String()
+ }
if options.Verbose {
- log.Printf("[info] Current state of node '%s' is '%s'", node.Hostname(), record.State.String())
+ log.Printf("[info] Current state of node '%s' is '%s'", node.Hostname(), label)
}
- var err error = nil
- var callout *url.URL
- log.Printf("PROVISION '%s'", node.Hostname())
- if len(options.ProvisionURL) > 0 {
- if options.Verbose {
- log.Printf("[info] Provisioning callout to '%s'", options.ProvisionURL)
- }
- callout, err = url.Parse(options.ProvisionURL)
- if err != nil {
- log.Printf("[error] Failed to parse provisioning URL '%s' : %s", options.ProvisionURL, err)
- } else {
- ips := node.IPs()
- ip := ""
- if len(ips) > 0 {
- ip = ips[0]
- }
- macs := node.MACs()
- mac := ""
- if len(macs) > 0 {
- mac = macs[0]
- }
- switch callout.Scheme {
- // If the scheme is a file, then we will execute the refereced file
- case "", "file":
- if options.Verbose {
- log.Printf("[info] executing local script file '%s'", callout.Path)
- }
- record.State = Provisioning
- record.Timestamp = time.Now().Unix()
- options.ProvTracker.Set(node.ID(), record)
- err = exec.Command(callout.Path, node.ID(), node.Hostname(), ip, mac).Run()
- if err != nil {
- log.Printf("[error] Failed to execute '%s' : %s", options.ProvisionURL, err)
- } else {
- if options.Verbose {
- log.Printf("[info] Marking node '%s' with ID '%s' as provisioned",
- node.Hostname(), node.ID())
- }
- record.State = Provisioned
- options.ProvTracker.Set(node.ID(), record)
- }
-
- default:
- if options.Verbose {
- log.Printf("[info] POSTing to '%s'", options.ProvisionURL)
- }
- data := map[string]string{
- "id": node.ID(),
- "name": node.Hostname(),
- "ip": ip,
- "mac": mac,
- }
- hc := http.Client{}
- var b []byte
- b, err = json.Marshal(data)
- if err != nil {
- log.Printf("[error] Unable to marshal node data : %s", err)
- } else {
- var req *http.Request
- var resp *http.Response
- if options.Verbose {
- log.Printf("[debug] POSTing data '%s'", string(b))
- }
- req, err = http.NewRequest("POST", options.ProvisionURL, bytes.NewReader(b))
- if err != nil {
- log.Printf("[error] Unable to construct POST request to provisioner : %s",
- err)
- } else {
- req.Header.Add("Content-Type", "application/json")
- resp, err = hc.Do(req)
- if err != nil {
- log.Printf("[error] Unable to process POST request : %s",
- err)
- } else {
- defer resp.Body.Close()
- if resp.StatusCode == http.StatusAccepted {
- record.State = Provisioning
- } else {
- record.State = ProvisionError
- }
- record.Timestamp = time.Now().Unix()
- options.ProvTracker.Set(node.ID(), record)
- }
- }
- }
- }
- }
+ ips := node.IPs()
+ ip := ""
+ if len(ips) > 0 {
+ ip = ips[0]
}
+ macs := node.MACs()
+ mac := ""
+ if len(macs) > 0 {
+ mac = macs[0]
+ }
+ if options.Verbose {
+ log.Printf("[info] POSTing '%s' (%s) to '%s'", node.Hostname(),
+ node.ID(), options.ProvisionURL)
+ }
+ err = options.Provisioner.Provision(&ProvisionRequest{
+ Id: node.ID(),
+ Name: node.Hostname(),
+ Ip: ip,
+ Mac: mac,
+ })
if err != nil {
- if options.Verbose {
- log.Printf("[warn] Not marking node '%s' with ID '%s' as provisioned, because of error '%s'",
- node.Hostname(), node.ID(), err)
- record.State = ProvisionError
- options.ProvTracker.Set(node.ID(), record)
- }
+ log.Printf("[error] unable to provision '%s' (%s) : %s", node.Hostname(), node.ID(), err)
}
- } else if record.State == Provisioning && time.Since(time.Unix(record.Timestamp, 0)) > options.ProvisionTTL {
+
+ } else if options.ProvisionTTL > 0 &&
+ record.Status == Running && time.Since(time.Unix(record.Timestamp, 0)) > options.ProvisionTTL {
log.Printf("[error] Provisioning of node '%s' has passed provisioning TTL of '%v'",
node.Hostname(), options.ProvisionTTL)
- record.State = ProvisionError
- options.ProvTracker.Set(node.ID(), record)
- } else if record.State == Provisioning {
- callout, err := url.Parse(options.ProvisionURL)
- if err != nil {
- log.Printf("[error] Unable to parse provisioning URL '%s' : %s", options.ProvisionURL, err)
- } else if callout.Scheme != "file" {
- var req *http.Request
- var resp *http.Response
- if options.Verbose {
- log.Printf("[info] Fetching provisioning state for node '%s'", node.Hostname())
- }
- req, err = http.NewRequest("GET", options.ProvisionURL+"/"+node.ID(), nil)
- if err != nil {
- log.Printf("[error] Unable to construct GET request to provisioner : %s", err)
- } else {
- hc := http.Client{}
- resp, err = hc.Do(req)
- if err != nil {
- log.Printf("[error] Failed to quest provision state for node '%s' : %s",
- node.Hostname(), err)
- } else {
- defer resp.Body.Close()
- if options.Verbose {
- log.Printf("[debug] Got status '%s' for node '%s'", resp.Status, node.Hostname())
- }
- switch resp.StatusCode {
- case http.StatusOK: // provisioning completed or failed
- decoder := json.NewDecoder(resp.Body)
- var raw interface{}
- err = decoder.Decode(&raw)
- if err != nil {
- log.Printf("[error] Unable to unmarshal response from provisioner for '%s': %s",
- node.Hostname(), err)
- }
- status := raw.(map[string]interface{})
- switch int(status["status"].(float64)) {
- case 0, 1: // PENDING, RUNNING ... should never really get here
- // noop, already in this state
- case 2: // COMPLETE
- if options.Verbose {
- log.Printf("[info] Marking node '%s' with ID '%s' as provisioned",
- node.Hostname(), node.ID())
- }
- record.State = Provisioned
- options.ProvTracker.Set(node.ID(), record)
- case 3: // FAILED
- if options.Verbose {
- log.Printf("[info] Marking node '%s' with ID '%s' as failed provisioning",
- node.Hostname(), node.ID())
- }
- record.State = ProvisionError
- options.ProvTracker.Set(node.ID(), record)
- default:
- log.Printf("[error] unknown status state for node '%s' : %d",
- node.Hostname(), int(status["status"].(float64)))
- }
- case http.StatusAccepted: // in the provisioning state
- // Noop, presumably alread in this state
- case http.StatusNotFound:
- // Noop, but not an error
- default: // Consider anything else an erorr
- log.Printf("[warn] Node '%s' with ID '%s' failed provisioning, will retry",
- node.Hostname(), node.ID())
- record.State = ProvisionError
- options.ProvTracker.Set(node.ID(), record)
- }
- }
- }
- }
+ options.Provisioner.Clear(node.ID())
} else if options.Verbose {
log.Printf("[info] Not invoking provisioning for '%s', current state is '%s'", node.Hostname(),
- record.State.String())
+ record.Status.String())
}
return nil
diff --git a/automation/tracker.go b/automation/tracker.go
deleted file mode 100644
index e299a68..0000000
--- a/automation/tracker.go
+++ /dev/null
@@ -1,202 +0,0 @@
-package main
-
-import (
- "encoding/json"
- "github.com/fzzy/radix/redis"
- consul "github.com/hashicorp/consul/api"
- "log"
- "net/url"
- "os"
- "strings"
-)
-
-type ProvisionState int8
-
-const (
- Unprovisioned ProvisionState = iota
- ProvisionError
- Provisioning
- Provisioned
-)
-
-func (s *ProvisionState) String() string {
- switch *s {
- case Unprovisioned:
- return "UNPROVISIONED"
- case ProvisionError:
- return "PROVISIONERROR"
- case Provisioning:
- return "PROVISIONING"
- case Provisioned:
- return "PROVISIONED"
- default:
- return "UNKNOWN"
- }
-}
-
-// TrackerRecord state kept for each node to be provisioned
-type TrackerRecord struct {
- State ProvisionState
-
- // Timeestamp maintains the time the node started provisioning, eventually will be used to time out
- // provisinion states
- Timestamp int64
-}
-
-// Tracker used to track if a node has been post deployed provisioned
-type Tracker interface {
- Get(key string) (*TrackerRecord, error)
- Set(key string, record *TrackerRecord) error
- Clear(key string) error
-}
-
-type ConsulTracker struct {
- client *consul.Client
- kv *consul.KV
-}
-
-func (c *ConsulTracker) Get(key string) (*TrackerRecord, error) {
- pair, _, err := c.kv.Get(key, nil)
- if err != nil {
- return nil, err
- }
-
- if pair == nil {
- var record TrackerRecord
- record.State = Unprovisioned
- return &record, nil
- }
-
- var record TrackerRecord
- err = json.Unmarshal([]byte(pair.Value), &record)
- if err != nil {
- return nil, err
- }
- return &record, nil
-}
-
-func (c *ConsulTracker) Set(key string, record *TrackerRecord) error {
- data, err := json.Marshal(record)
- if err != nil {
- return err
- }
- pair := &consul.KVPair{Key: key, Value: data}
- _, err = c.kv.Put(pair, nil)
- return err
-}
-
-func (c *ConsulTracker) Clear(key string) error {
- _, err := c.kv.Delete(key, nil)
- return err
-}
-
-// RedisTracker redis implementation of the tracker interface
-type RedisTracker struct {
- client *redis.Client
-}
-
-func (t *RedisTracker) Get(key string) (*TrackerRecord, error) {
- reply := t.client.Cmd("get", key)
- if reply.Err != nil {
- return nil, reply.Err
- }
- if reply.Type == redis.NilReply {
- var record TrackerRecord
- record.State = Unprovisioned
- return &record, nil
- }
-
- value, err := reply.Str()
- if err != nil {
- return nil, err
- }
- var record TrackerRecord
- err = json.Unmarshal([]byte(value), &record)
- if err != nil {
- return nil, err
- }
- return &record, nil
-}
-
-func (t *RedisTracker) Set(key string, record *TrackerRecord) error {
- data, err := json.Marshal(record)
- if err != nil {
- return err
- }
- reply := t.client.Cmd("set", key, data)
- return reply.Err
-}
-
-func (t *RedisTracker) Clear(key string) error {
- reply := t.client.Cmd("del", key)
- return reply.Err
-}
-
-// MemoryTracker in memory implementation of the tracker interface
-type MemoryTracker struct {
- data map[string]TrackerRecord
-}
-
-func (m *MemoryTracker) Get(key string) (*TrackerRecord, error) {
- if value, ok := m.data[key]; ok {
- return &value, nil
- }
- var record TrackerRecord
- record.State = Unprovisioned
- return &record, nil
-}
-
-func (m *MemoryTracker) Set(key string, record *TrackerRecord) error {
- m.data[key] = *record
- return nil
-}
-
-func (m *MemoryTracker) Clear(key string) error {
- delete(m.data, key)
- return nil
-}
-
-// NetTracker constructs an implemetation of the Tracker interface. Which implementation selected
-// depends on the environment. If a link to a redis instance is defined then this will
-// be used, else an in memory version will be used.
-func NewTracker() Tracker {
- driver := os.Getenv("AUTODB_DRIVER")
- if driver == "" {
- log.Printf("[info] No driver specified, defaulting to in memeory persistence driver")
- driver = "MEMORY"
- }
-
- switch strings.ToUpper(driver) {
- case "REDIS":
- tracker := new(RedisTracker)
- if spec := os.Getenv("AUTODB_PORT"); spec != "" {
- port, err := url.Parse(spec)
- checkError(err, "[error] unable to lookup to redis database : %s", err)
- tracker.client, err = redis.Dial(port.Scheme, port.Host)
- checkError(err, "[error] unable to connect to redis database : '%s' : %s", port, err)
- log.Println("[info] Using REDIS to track provisioning status of nodes")
- return tracker
- }
- log.Fatalf("[error] No connection specified to REDIS server")
- case "CONSUL":
- var err error
- config := consul.Config{
- Address: "autodb:8500",
- Scheme: "http",
- }
- tracker := new(ConsulTracker)
- tracker.client, err = consul.NewClient(&config)
- checkError(err, "[error] unable to connect to redis server : 'autodb:8500' : %s", err)
- log.Println("[info] Using Consul to track provisioning status of nodes")
- tracker.kv = tracker.client.KV()
- return tracker
- case "MEMORY":
- tracker := new(MemoryTracker)
- tracker.data = make(map[string]TrackerRecord)
- log.Println("[info] Using memory based structures to track provisioning status of nodes")
- return tracker
- default:
- log.Fatalf("[error] Unknown persistance driver specified, '%s'", driver)
- }
- return nil
-}
diff --git a/provisioner/consul_storage.go b/provisioner/consul_storage.go
index 7592ac0..5701c72 100644
--- a/provisioner/consul_storage.go
+++ b/provisioner/consul_storage.go
@@ -51,6 +51,11 @@
return err
}
+func (s *ConsulStorage) Delete(id string) error {
+ _, err := s.kv.Delete(PREFIX+id, nil)
+ return err
+}
+
func (s *ConsulStorage) Get(id string) (*StatusMsg, error) {
pair, _, err := s.kv.Get(PREFIX+id, nil)
if err != nil {
diff --git a/provisioner/dispatcher.go b/provisioner/dispatcher.go
index fed57a4..d448e19 100644
--- a/provisioner/dispatcher.go
+++ b/provisioner/dispatcher.go
@@ -3,6 +3,7 @@
import (
"log"
"os/exec"
+ "time"
)
type WorkRequest struct {
@@ -20,10 +21,11 @@
}
type StatusMsg struct {
- Request *WorkRequest `json:"request"`
- Worker int `json:"worker"`
- Status TaskStatus `json:"status"`
- Message string `json:"message"`
+ Request *WorkRequest `json:"request"`
+ Worker int `json:"worker"`
+ Status TaskStatus `json:"status"`
+ Message string `json:"message"`
+ Timestamp int64 `json:"timestamp"`
}
func NewWorker(id int, workerQueue chan chan WorkRequest, statusChan chan StatusMsg) Worker {
@@ -48,16 +50,18 @@
select {
case work := <-w.Work:
// Receive a work request.
- w.StatusChan <- StatusMsg{&work, w.ID, Running, ""}
+ w.StatusChan <- StatusMsg{&work, w.ID, Running, "", time.Now().Unix()}
log.Printf("[debug] RUN: %s %s %s %s %s %s",
work.Script, work.Info.Id, work.Info.Name,
work.Info.Ip, work.Info.Mac, work.Role)
err := exec.Command(work.Script, work.Info.Id, work.Info.Name,
work.Info.Ip, work.Info.Mac, work.Role).Run()
if err != nil {
- w.StatusChan <- StatusMsg{&work, w.ID, Failed, err.Error()}
+ w.StatusChan <- StatusMsg{&work, w.ID, Failed, err.Error(),
+ time.Now().Unix()}
} else {
- w.StatusChan <- StatusMsg{&work, w.ID, Complete, ""}
+ w.StatusChan <- StatusMsg{&work, w.ID, Complete, "",
+ time.Now().Unix()}
}
case <-w.QuitChan:
// We have been asked to stop.
@@ -118,8 +122,8 @@
select {
case work := <-d.WorkQueue:
log.Println("[debug] Received work requeust")
+ d.StatusChan <- StatusMsg{&work, -1, Pending, "", time.Now().Unix()}
go func() {
- d.StatusChan <- StatusMsg{&work, -1, Pending, ""}
worker := <-d.WorkerQueue
log.Println("[debug] Dispatching work request")
diff --git a/provisioner/handlers.go b/provisioner/handlers.go
index 5bc6e57..7fd53fa 100644
--- a/provisioner/handlers.go
+++ b/provisioner/handlers.go
@@ -102,6 +102,24 @@
w.Write(bytes)
}
+func (c *Context) DeleteStatusHandler(w http.ResponseWriter, r *http.Request) {
+ vars := mux.Vars(r)
+ id, ok := vars["nodeid"]
+ if !ok || strings.TrimSpace(id) == "" {
+ w.WriteHeader(http.StatusBadRequest)
+ return
+ }
+
+ err := c.storage.Delete(id)
+ if err != nil {
+ log.Printf("[warn] Error while deleting status for '%s' from storage : %s", id, err)
+ http.Error(w, err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ w.WriteHeader(http.StatusOK)
+}
+
func (c *Context) QueryStatusHandler(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
id, ok := vars["nodeid"]
diff --git a/provisioner/provisioner.go b/provisioner/provisioner.go
index 559e23c..41240b5 100644
--- a/provisioner/provisioner.go
+++ b/provisioner/provisioner.go
@@ -52,6 +52,7 @@
router.HandleFunc("/provision/", context.ProvisionRequestHandler).Methods("POST")
router.HandleFunc("/provision/", context.ListRequestsHandler).Methods("GET")
router.HandleFunc("/provision/{nodeid}", context.QueryStatusHandler).Methods("GET")
+ router.HandleFunc("/provision/{nodeid}", context.DeleteStatusHandler).Methods("DELETE")
http.Handle("/", router)
// Start the dispatcher and workers
diff --git a/provisioner/storage.go b/provisioner/storage.go
index b12b7ef..2d2fb6d 100644
--- a/provisioner/storage.go
+++ b/provisioner/storage.go
@@ -9,6 +9,7 @@
type Storage interface {
Put(id string, update StatusMsg) error
Get(id string) (*StatusMsg, error)
+ Delete(id string) error
List() ([]StatusMsg, error)
}
@@ -51,6 +52,11 @@
return &m, nil
}
+func (s *MemoryStorage) Delete(id string) error {
+ delete(s.Data, id)
+ return nil
+}
+
func (s *MemoryStorage) List() ([]StatusMsg, error) {
r := make([]StatusMsg, len(s.Data))
i := 0
diff --git a/provisioner/task.go b/provisioner/task.go
index 4017268..ca0d430 100644
--- a/provisioner/task.go
+++ b/provisioner/task.go
@@ -22,17 +22,3 @@
}
return "INVALID TASK STATUS"
}
-
-type Task struct {
- nodeId string
- status TaskStatus
-}
-
-type TaskQueueEntry struct {
- previous *TaskQueueEntry
- next *TaskQueueEntry
- task *Task
-}
-
-type TaskQueue struct {
-}
diff --git a/roles/maas/tasks/main.yml b/roles/maas/tasks/main.yml
index 0a90da8..aabaf1a 100644
--- a/roles/maas/tasks/main.yml
+++ b/roles/maas/tasks/main.yml
@@ -76,6 +76,15 @@
mode: 0755
state: directory
+- name: MAAS Automation Storage
+ become: yes
+ file:
+ path: /etc/maas/automation/storage
+ owner: maas
+ group: maas
+ mode: 0777
+ state: directory
+
- name: Host Name Mapping File
become: yes
copy:
diff --git a/roles/maas/templates/automation-compose.yml.j2 b/roles/maas/templates/automation-compose.yml.j2
index 40d7f98..a7e9f49 100644
--- a/roles/maas/templates/automation-compose.yml.j2
+++ b/roles/maas/templates/automation-compose.yml.j2
@@ -1,79 +1,95 @@
-allocator:
- image: "docker-registry:5000/cord-ip-allocator:{{ docker.image_version }}"
- container_name: allocator
- labels:
- - "lab.solution=CORD"
- - "lab.component=allocator"
- environment:
- # need to explicitly set the resolver, else go will skip the /etc/hosts file
- - "GODEBUG=netdns=go"
- - "ALLOCATE_PORT=4242"
- - "ALLOCATE_LISTEN=0.0.0.0"
- - "ALLOCATE_NETWORK={{ networks.fabric }}"
- - "ALLOCATE_SKIP=2"
- restart: unless-stopped
+version: '2'
-provisioner:
- image: "docker-registry:5000/cord-provisioner:{{ docker.image_version }}"
- container_name: provisioner
- labels:
- - "lab.solution=CORD"
- - "lab.component=provisioner"
- links:
- - allocator
- environment:
- # need to explicitly set the resolver, else go will skip the /etc/hosts file
- - "GODEBUG=netdns=go"
- - "INTERFACE_CONFIG=1"
- - "PROVISION_PORT=4243"
- - "PROVISION_LISTEN=0.0.0.0"
- - "PROVISION_DEFAULT_ROLE=compute-node"
- - "PROVISION_SCRIPT=/etc/maas/ansible/do-ansible"
- volumes:
- - "/etc/maas/ansible:/etc/maas/ansible"
- restart: unless-stopped
+services:
+ storage:
+ image: "docker-registry:5000/consul:{{ docker.image_version }}"
+ container_name: storage
+ labels:
+ - "lab.solution=CORD"
+ - "lab.component=storage"
+ - "lab.implementation=consul"
+ volumes:
+ - "/etc/maas/automation/storage:/consul/data"
+ network_mode: host
+ command: agent --server --bind {{ mgmt_ip_address.stdout }} --client {{ mgmt_ip_address.stdout }} --bootstrap-expect=1
-switchq:
- image: "docker-registry:5000/cord-maas-switchq:{{ docker.image_version }}"
- container_name: switchq
- labels:
- - "lab.solution=CORD"
- - "lab.component=switchq"
- links:
- - provisioner
- environment:
- - "SWITCHQ_SCRIPT=/etc/maas/ansible/do-switch"
- - "SWITCHQ_PROVISION_URL=http://provisioner:4243/provision/"
- - "SWITCHQ_PROVISION_TTL=0s"
- - "SWITCHQ_DEFAULT_ROLE=fabric-switch"
- - "SWITCHQ_ADDRESS_URL=file:///switchq/dhcp/dhcp_harvest.inc"
- volumes:
- - "/etc/bind/maas:/switchq/dhcp"
- restart: unless-stopped
+ allocator:
+ image: "docker-registry:5000/cord-ip-allocator:{{ docker.image_version }}"
+ container_name: allocator
+ labels:
+ - "lab.solution=CORD"
+ - "lab.component=allocator"
+ environment:
+ # need to explicitly set the resolver, else go will skip the /etc/hosts file
+ - "GODEBUG=netdns=go"
+ - "ALLOCATE_PORT=4242"
+ - "ALLOCATE_LISTEN=0.0.0.0"
+ - "ALLOCATE_NETWORK={{ networks.fabric }}"
+ - "ALLOCATE_SKIP=2"
+ restart: unless-stopped
-automation:
- image: "docker-registry:5000/cord-maas-automation:{{ docker.image_version }}"
- container_name: automation
- labels:
- - "lab.solution=CORD"
- - "lab.component=automation"
- links:
- - provisioner
- environment:
- # need to explicitly set the resolver, else go will skip the /etc/hosts file
- - "GODEBUG=netdns=go"
- - "AUTOMATION_PROVISION_URL=http://provisioner:4243/provision/"
- - "AUTOMATION_PROVISION_TTL=30m"
+ provisioner:
+ image: "docker-registry:5000/cord-provisioner:{{ docker.image_version }}"
+ container_name: provisioner
+ labels:
+ - "lab.solution=CORD"
+ - "lab.component=provisioner"
+ links:
+ - allocator
+ environment:
+ # need to explicitly set the resolver, else go will skip the /etc/hosts file
+ - "GODEBUG=netdns=go"
+ - "INTERFACE_CONFIG=1"
+ - "PROVISION_PORT=4243"
+ - "PROVISION_LISTEN=0.0.0.0"
+ - "PROVISION_DEFAULT_ROLE=compute-node"
+ - "PROVISION_SCRIPT=/etc/maas/ansible/do-ansible"
+ - "PROVISION_STORAGE_URL=consul://{{ mgmt_ip_address.stdout }}:8500"
+ volumes:
+ - "/etc/maas/ansible:/etc/maas/ansible"
+ restart: unless-stopped
+
+ switchq:
+ image: "docker-registry:5000/cord-maas-switchq:{{ docker.image_version }}"
+ container_name: switchq
+ labels:
+ - "lab.solution=CORD"
+ - "lab.component=switchq"
+ links:
+ - provisioner
+ environment:
+ - "SWITCHQ_SCRIPT=/etc/maas/ansible/do-switch"
+ - "SWITCHQ_PROVISION_URL=http://provisioner:4243/provision/"
+ - "SWITCHQ_PROVISION_TTL=0s"
+ - "SWITCHQ_DEFAULT_ROLE=fabric-switch"
+ - "SWITCHQ_ADDRESS_URL=file:///switchq/dhcp/dhcp_harvest.inc"
+ volumes:
+ - "/etc/bind/maas:/switchq/dhcp"
+ restart: unless-stopped
+
+ automation:
+ image: "docker-registry:5000/cord-maas-automation:{{ docker.image_version }}"
+ container_name: automation
+ labels:
+ - "lab.solution=CORD"
+ - "lab.component=automation"
+ links:
+ - provisioner
+ environment:
+ # need to explicitly set the resolver, else go will skip the /etc/hosts file
+ - "GODEBUG=netdns=go"
+ - "AUTOMATION_PROVISION_URL=http://provisioner:4243/provision/"
+ - "AUTOMATION_PROVISION_TTL=0s"
{% if virtualbox_support is defined and virtualbox_support == "1" %}
- - "AUTOMATION_POWER_HELPER_SCRIPT=/etc/maas/virtualbox/power_discovery"
- - "AUTOMATION_POWER_HELPER_USER={{ virtualbox.power_helper_user }}"
- - "AUTOMATION_POWER_HELPER_HOST={{ virtualbox_host }}"
+ - "AUTOMATION_POWER_HELPER_SCRIPT=/etc/maas/virtualbox/power_discovery"
+ - "AUTOMATION_POWER_HELPER_USER={{ virtualbox.power_helper_user }}"
+ - "AUTOMATION_POWER_HELPER_HOST={{ virtualbox_host }}"
{% endif %}
- volumes:
- - "/etc/maas:/mappings"
+ volumes:
+ - "/etc/maas:/mappings"
{% if virtualbox_support is defined and virtualbox_support == "1" %}
- - "/etc/maas/virtualbox:/etc/maas/virtualbox"
+ - "/etc/maas/virtualbox:/etc/maas/virtualbox"
{% endif %}
- command: [ "-apiVersion", "1.0", "-apikey", "{{ apikey.stdout }}", "-maas", "http://{{ mgmt_ip_address.stdout }}/MAAS", "-period", "30s", "-mappings", "@/mappings/mappings.json", "-always-rename" ]
- restart: unless-stopped
+ command: [ "-apiVersion", "1.0", "-apikey", "{{ apikey.stdout }}", "-maas", "http://{{ mgmt_ip_address.stdout }}/MAAS", "-period", "30s", "-mappings", "@/mappings/mappings.json", "-always-rename", "-verbose" ]
+ restart: unless-stopped
diff --git a/roles/maas/templates/harvest-compose.yml.j2 b/roles/maas/templates/harvest-compose.yml.j2
index 33ca119..07a816c 100644
--- a/roles/maas/templates/harvest-compose.yml.j2
+++ b/roles/maas/templates/harvest-compose.yml.j2
@@ -1,16 +1,19 @@
-harvester:
- image: "docker-registry:5000/cord-dhcp-harvester:{{ docker.image_version }}"
- container_name: harvester
- restart: always
- labels:
- - "lab.solution=cord"
- - "lab.component=harvester"
- volumes:
- - "/var/lib/maas/dhcp:/dhcp"
- - "/etc/bind/maas:/bind"
- - "/etc/bind/maas:/key"
- - "/etc/dhcp:/etc/dhcp"
- ports:
- - "8954:8954"
- command: [ "--server", "{{ mgmt_ip_address.stdout }}", "--port", "954", "--key", "/key/rndc.conf.maas", "--zone", "cord.lab", "--update", "--verify", "--timeout", "1s", "--repeat", "5m", "--quiet", "2s", "--workers", "10", "--filter", "^(?!cord)" ]
- restart: unless-stopped
+version: '2'
+
+services:
+ harvester:
+ image: "docker-registry:5000/cord-dhcp-harvester:{{ docker.image_version }}"
+ container_name: harvester
+ restart: always
+ labels:
+ - "lab.solution=cord"
+ - "lab.component=harvester"
+ volumes:
+ - "/var/lib/maas/dhcp:/dhcp"
+ - "/etc/bind/maas:/bind"
+ - "/etc/bind/maas:/key"
+ - "/etc/dhcp:/etc/dhcp"
+ ports:
+ - "8954:8954"
+ command: [ "--server", "{{ mgmt_ip_address.stdout }}", "--port", "954", "--key", "/key/rndc.conf.maas", "--zone", "cord.lab", "--update", "--verify", "--timeout", "1s", "--repeat", "5m", "--quiet", "2s", "--workers", "10", "--filter", "^(?!cord)" ]
+ restart: unless-stopped