#!/usr/bin/env python

# Copyright 2017-present Open Networking Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


# imagebuilder.py
# rebuilds/fetches docker container images per their git status in repo
# in addition to docker, needs `sudo apt-get install python-git`
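#
# Example invocation (a sketch only - the flags and values below are
# illustrative; see parse_args() for the authoritative list of options):
#
#   ./imagebuilder.py -c docker_images.yml -r .. -t candidate \
#       -g images.dot -a actions.yml -l build_logs -vv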

import argparse
import datetime
import git
import json
import logging
import os
import re
import string
import sys
import tarfile
import tempfile
import time
import xml.etree.ElementTree as ET
import yaml

global args
global conf
global build_tag
global buildable_images
global pull_only_images


def setup_logging(name=None, logfile=False):
    global args

    if name:
        log = logging.getLogger("-".join([__name__, name]))
    else:
        log = logging.getLogger(__name__)

    slh = logging.StreamHandler(sys.stdout)
    slh.setFormatter(logging.Formatter(logging.BASIC_FORMAT))
    slh.setLevel(logging.DEBUG)

    log.addHandler(slh)

    # secondary logging to a file, always DEBUG level
    if logfile:
        fn = os.path.join(conf.logdir, "%s.log" % name)
        flh = logging.FileHandler(fn)
        flh.setFormatter(logging.Formatter(logging.BASIC_FORMAT))
        flh.setLevel(logging.DEBUG)
        log.addHandler(flh)

    return log

LOG = setup_logging()


def parse_args():
    global args

    parser = argparse.ArgumentParser()

    parser.add_argument('-c', '--container_list', default='docker_images.yml',
                        type=argparse.FileType('r'),
                        help="YAML Config and master container list")

    # -f is optional, so using type=argparse.FileType is problematic
    parser.add_argument('-f', '--filter_images', default=None, action='store',
                        help="YAML file restricting images to build/fetch")

    parser.add_argument('-a', '--actions_taken', default=None,
                        help="Save a YAML file with actions taken during run")

    # FIXME - the -b and -p options are currently unimplemented
    group = parser.add_mutually_exclusive_group()

    group.add_argument('-b', '--build_force', action="store_true",
                       help="Build (don't fetch) all internal containers")

    group.add_argument('-p', '--pull_force', action="store_true",
                       help="Only pull containers, fail if build required")

    parser.add_argument('-d', '--dry_run', action="store_true",
                        help="Don't build/fetch anything")

    parser.add_argument('-g', '--graph', default=None,
                        help="Filename for DOT graph file of image dependency")

    parser.add_argument('-l', '--build_log_dir', action="store",
                        help="Log build output to this dir if set")

    parser.add_argument('-r', '--repo_root', default="..", action="store",
                        help="Repo root directory")

    parser.add_argument('-t', '--build_tag', default=None, action="store",
                        help="tag all images built/pulled using this tag")

    parser.add_argument('-v', '--verbosity', action='count', default=1,
                        help="Repeat to increase log level")

    args = parser.parse_args()

    if args.verbosity > 1:
        LOG.setLevel(logging.DEBUG)
    else:
        LOG.setLevel(logging.INFO)


def load_config():
    global args
    global conf
    global buildable_images
    global pull_only_images
    global build_tag

    try:
        cl_abs = os.path.abspath(args.container_list.name)
        LOG.info("Master container list file: %s" % cl_abs)

        conf = yaml.safe_load(args.container_list)
    except yaml.YAMLError:
        LOG.exception("Problem loading container list file")
        sys.exit(1)

    if args.build_tag:
        build_tag = args.build_tag
    else:
        build_tag = conf['docker_build_tag']

    if args.filter_images is None:
        buildable_images = conf['buildable_images']
        pull_only_images = conf['pull_only_images']
    else:
        fi_abs = os.path.abspath(args.filter_images)

        LOG.info("Filtering image list per 'docker_image_whitelist' in: %s" %
                 fi_abs)
        try:
            fi_fh = open(fi_abs, 'r')
            filter_list = yaml.safe_load(fi_fh)
            fi_fh.close()

            if 'docker_image_whitelist' not in filter_list:
                LOG.error("No 'docker_image_whitelist' defined in: %s" %
                          fi_abs)
                sys.exit(1)

            # fail if filter list specifies tags
            for f_i in filter_list['docker_image_whitelist']:
                (name, tag) = split_name(f_i)
                if tag:
                    LOG.error("filter list may not be tagged")
                    sys.exit(1)

            buildable_images = [img for img in conf['buildable_images']
                                if split_name(img['name'])[0]
                                in filter_list['docker_image_whitelist']]

            pull_only_images = [img for img in conf['pull_only_images']
                                if split_name(img)[0]
                                in filter_list['docker_image_whitelist']]

        except Exception:
            LOG.exception("Problem with filter list file")
            sys.exit(1)

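# A minimal sketch of the YAML consumed above, inferred from the keys that
# load_config() and DockerBuilder read (names and values are illustrative):
#
#   docker_build_tag: 'candidate'
#   buildable_images:
#     - name: 'example/built-image'
#       repo: 'example-repo'          # repo-tool project name
#       path: 'docker'                # optional, default '.'
#       context: '.'                  # optional, docker build context
#       dockerfile: 'Dockerfile'      # optional
#       components:                   # optional
#         - repo: 'other-repo'
#           path: '.'
#           dest: 'subdir/in/context'
#   pull_only_images:
#     - 'upstream/image:tag'
#
# The optional filter file passed with -f holds a single
# 'docker_image_whitelist' list of untagged image names.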

def split_name(input_name):
    """ split a docker image name in the 'name:tag' format into components """

    name = input_name
    tag = None

    # split name:tag if given in combined format
    name_tag_split = string.split(input_name, ":")

    if len(name_tag_split) > 1:  # has tag, return separated version
        name = name_tag_split[0]
        tag = name_tag_split[1]

    return (name, tag)

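# For example, split_name("nginx:1.13") returns ("nginx", "1.13") and
# split_name("nginx") returns ("nginx", None) - the names here are
# illustrative, any 'name' or 'name:tag' string behaves the same way.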

class RepoRepo():
    """ git repo managed by repo tool"""

    manifest_branch = ""

    def __init__(self, name, path, remote):

        self.name = name
        self.path = path
        self.remote = remote
        self.git_url = "%s%s" % (remote, name)

        try:
            self.git_repo_o = git.Repo(self.abspath())
            LOG.debug("Repo - %s, path: %s" % (name, path))

            self.head_commit = self.git_repo_o.head.commit.hexsha
            LOG.debug(" head commit: %s" % self.head_commit)

            commit_t = time.gmtime(self.git_repo_o.head.commit.committed_date)
            self.head_commit_t = time.strftime("%Y-%m-%dT%H:%M:%SZ", commit_t)
            LOG.debug(" commit date: %s" % self.head_commit_t)

            self.clean = not self.git_repo_o.is_dirty(untracked_files=True)
            LOG.debug(" clean: %s" % self.clean)

            # list of untracked files (expensive operation)
            self.untracked_files = self.git_repo_o.untracked_files
            for u_file in self.untracked_files:
                LOG.debug(" Untracked: %s" % u_file)

        except Exception:
            LOG.exception("Error with git repo: %s" % name)
            sys.exit(1)

    def abspath(self):
        global args
        return os.path.abspath(os.path.join(args.repo_root, self.path))

    def path_clean(self, test_path, branch=""):
        """ Is working tree on branch and no untracked files in path? """
        global conf

        if not branch:
            branch = self.manifest_branch

        LOG.debug(" Looking for changes in path: %s" % test_path)

        p_clean = True

        # diff between branch head and working tree (None)
        branch_head = self.git_repo_o.commit(branch)
        diff = branch_head.diff(None, paths=test_path)

        if diff:
            p_clean = False

            for diff_obj in diff:
                LOG.debug(" file not on branch: %s" % diff_obj)

        # remove . to compare paths using .startswith()
        if test_path == ".":
            test_path = ""

        for u_file in self.untracked_files:
            if u_file.startswith(test_path):
                LOG.debug(" untracked file in path: %s" % u_file)
                p_clean = False

        return p_clean


class RepoManifest():
    """ parses manifest XML file used by repo tool"""

    def __init__(self):
        global args
        global conf

        self.manifest_xml = {}
        self.repos = {}
        self.branch = ""

        self.manifest_file = os.path.abspath(
            os.path.join(args.repo_root,
                         ".repo/manifest.xml"))

        LOG.info("Loading manifest file: %s" % self.manifest_file)

        try:
            tree = ET.parse(self.manifest_file)
            self.manifest_xml = tree.getroot()
        except Exception:
            LOG.exception("Error loading repo manifest")
            sys.exit(1)

        # Find the default branch
        default = self.manifest_xml.find('default')
        self.branch = "%s/%s" % (default.attrib['remote'],
                                 default.attrib['revision'])

        # Find the remote URL for these repos
        remote = self.manifest_xml.find('remote')
        self.remote = remote.attrib['review']

        LOG.info("Manifest is on branch '%s' with remote '%s'" %
                 (self.branch, self.remote))

        project_repos = {}

        for project in self.manifest_xml.iter('project'):
            repo_name = project.attrib['name']
            rel_path = project.attrib['path']
            abs_path = os.path.abspath(os.path.join(args.repo_root,
                                                    project.attrib['path']))

            if os.path.isdir(abs_path):
                project_repos[repo_name] = rel_path
            else:
                LOG.debug("Repo in manifest but not checked out: %s" %
                          repo_name)

        for repo_name, repo_path in project_repos.iteritems():
            self.repos[repo_name] = RepoRepo(repo_name, repo_path, self.remote)
            self.repos[repo_name].manifest_branch = self.branch

    def get_repo(self, repo_name):
        return self.repos[repo_name]

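# The file parsed above is the standard repo-tool manifest; a rough sketch of
# the elements and attributes RepoManifest actually reads (values are
# illustrative, other attributes are ignored):
#
#   <manifest>
#     <remote name="..." review="https://gerrit.example.org/"/>
#     <default remote="..." revision="master"/>
#     <project name="some-repo" path="checkout/path"/>
#   </manifest>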

# DockerImage Status Constants

DI_UNKNOWN = 'unknown'  # unknown status
DI_EXISTS = 'exists'  # already exists in docker, has an image_id

DI_BUILD = 'build'  # needs to be built
DI_FETCH = 'fetch'  # needs to be fetched (pulled)
DI_ERROR = 'error'  # build or other fatal failure

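# Roughly, each image moves from DI_UNKNOWN to DI_EXISTS (already present),
# DI_FETCH (to be pulled) or DI_BUILD (to be built); a failed fetch of a
# buildable image falls back to DI_BUILD, and a failed build ends in
# DI_ERROR.  See DockerBuilder.process_images() below.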


class DockerImage():

    def __init__(self, name, repo_name=None, repo_d=None, path=".",
                 context=".", dockerfile='Dockerfile', labels=None,
                 tags=None, image_id=None, components=None, status=DI_UNKNOWN):

        LOG.debug("New DockerImage object from name: %s" % name)

        # name to pull as, usually what is provided on creation.
        # May be changed by create_tags
        self.raw_name = name

        # Python's mutable default arguments are a landmine
        if labels is None:
            self.labels = {}
        else:
            self.labels = labels

        self.repo_name = repo_name
        self.repo_d = repo_d
        self.path = path
        self.context = context
        self.dockerfile = dockerfile
        self.tags = []  # tags are added to this later in __init__
        self.image_id = image_id
        self.components = components
        self.status = status

        self.parent_name = None  # set by _find_parent_name()
        self.parent = None  # pointer to parent DockerImage object
        self.children = []  # list of child DockerImage objects

        # split name:tag if given in combined format
        (image_name, image_tag) = split_name(name)
        if image_tag:  # has tag
            self.name = image_name
            self.tags.append(image_tag)
        else:  # no tag
            self.name = image_name

        # Add the build tag if it isn't already present
        if build_tag not in self.tags:
            self.tags.append(build_tag)

        # split names from tag list
        if tags is not None:
            for tag in tags:
                thistag = ""
                (tag_name, tag_tag) = split_name(tag)
                if tag_tag:  # has name also, use just tag
                    thistag = tag_tag
                else:  # just a bare tag
                    thistag = tag_name

                if thistag not in self.tags:  # don't duplicate tags
                    self.tags.append(thistag)

        # self.clean only applies to this container
        self.clean = self._context_clean()
        self._find_parent_name()

    def __str__(self):
        return self.name

    def buildable(self):
        """ Can this image be built from a Dockerfile? """
        if self.repo_name:  # has a git repo to be built from
            return True
        return False

    def _context_clean(self):
        """ Determine if this image's repo and Docker context are clean """

        if self.buildable():

            # check if the repo working tree is clean
            repo_clean = self.repo_d.clean

            # only check the Docker context for cleanliness
            context_path = os.path.normpath(
                os.path.join(self.path, self.context))
            context_clean = self.repo_d.path_clean(context_path)

            # check if subcomponents are clean
            components_clean = self.components_clean()

            LOG.debug(" Build Context Cleanliness -")
            LOG.debug(" repo: %s, context: %s, components: %s" %
                      (repo_clean, context_clean, components_clean))

            if context_clean and repo_clean and components_clean:
                return True
            else:
                return False

        return True  # unbuildable images are clean

    def parent_clean(self):
        """ Returns True if this image and all of its parents are clean """

        if self.buildable():
            if self.clean and self.parent.parent_clean():
                return True
            else:
                return False

        return True  # unbuildable images are clean

    def compare_labels(self, other_labels):
        """ Returns True if image label-schema.org labels match dict """

        comparable_labels_re = [
            r".*name$",
            r".*vcs-url$",
            r".*vcs-ref$",
        ]

        for clr in comparable_labels_re:  # loop on all comparable labels
            for label in self.labels:  # loop on all labels
                if re.match(clr, label) is not None:  # if label matches re
                    # and label exists in other, and values are same
                    if label in other_labels and \
                            self.labels[label] == other_labels[label]:
                        pass  # continue through loop
                    else:
                        LOG.info("Non-matching label: %s" % label)
                        return False  # False when first difference found

        return True  # only when every label matches

    def same_name(self, other_name):
        """ compare image name (possibly with tag) against image name/tag """

        (o_name, o_tag) = split_name(other_name)

        if o_tag is None and self.name == o_name:
            return True
        elif self.name == o_name and o_tag in self.tags:
            return True

        return False

    def components_clean(self):

        if self.buildable() and self.components is not None:
            for component in self.components:
                if not component['repo_d'].clean or \
                        not component['repo_d'].path_clean(component['path']):
                    return False

        return True

    def component_labels(self):
        """ returns a dict of labels for subcomponents """

        if self.buildable() and self.components is not None:

            comp_l = {}

            for component in self.components:

                LOG.debug(" component %s generating child labels" %
                          component['repo_name'])

                prefix = "org.opencord.component.%s." % component['repo_name']

                comp_l[prefix + "vcs-url"] = component['repo_d'].git_url

                if component['repo_d'].clean and \
                        component['repo_d'].path_clean(component['path']):
                    clean = True
                else:
                    clean = False

                if clean:
                    comp_l[prefix + "version"] = self.repo_d.manifest_branch
                    comp_l[prefix + "vcs-ref"] = \
                        component['repo_d'].head_commit
                else:
                    comp_l[prefix + "version"] = "dirty"
                    comp_l[prefix + "vcs-ref"] = ""

            return comp_l

        return None

    def child_labels(self, repo_list=None):
        """ return a dict of labels to apply to child images """

        LOG.debug(" Parent image %s generating child labels" % self.name)

        # only create labels when they haven't already been created
        if repo_list is None:
            repo_list = []

        LOG.debug(" Parents already labeled with: %s" % ", ".join(repo_list))

        cl = {}

        if self.buildable() and self.repo_name not in repo_list:

            LOG.debug(" Adding parent labels from repo: %s" % self.repo_name)

            prefix = "org.opencord.component.%s." % self.repo_name

            cl[prefix + "vcs-url"] = self.repo_d.git_url

            if self.clean:
                cl[prefix + "version"] = self.repo_d.manifest_branch
                cl[prefix + "vcs-ref"] = self.repo_d.head_commit
            else:
                cl[prefix + "version"] = "dirty"
                cl[prefix + "vcs-ref"] = ""

            repo_list.append(self.repo_name)

        # include component labels if present
        if self.components is not None:
            cl.update(self.component_labels())

        # recursively find labels up the parent chain
        if self.parent is not None:
            cl.update(self.parent.child_labels(repo_list))

        return cl

    def create_labels(self):
        """ Create label-schema.org labels for image """

        if self.buildable():

            LOG.debug("Creating labels for: %s" % self.name)

            self.labels['org.label-schema.name'] = self.name
            self.labels['org.label-schema.schema-version'] = "1.0"

            # org.label-schema.build-date
            time_now = datetime.datetime.utcnow()
            build_date = time_now.strftime("%Y-%m-%dT%H:%M:%SZ")
            self.labels['org.label-schema.build-date'] = build_date

            # git version related labels
            self.labels['org.label-schema.vcs-url'] = self.repo_d.git_url

            if self.clean:
                self.labels['org.label-schema.version'] = \
                    self.repo_d.manifest_branch
                self.labels['org.label-schema.vcs-ref'] = \
                    self.repo_d.head_commit
                self.labels['org.opencord.vcs-commit-date'] = \
                    self.repo_d.head_commit_t
            else:
                self.labels['org.label-schema.version'] = "dirty"
                self.labels['org.label-schema.vcs-ref'] = ""

            # include component labels if present
            if self.components is not None:
                self.labels.update(self.component_labels())

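    # For reference, create_labels() above yields roughly the following for a
    # buildable image (all values here are illustrative):
    #
    #   {'org.label-schema.name': 'some/image',
    #    'org.label-schema.schema-version': '1.0',
    #    'org.label-schema.build-date': '2017-01-01T00:00:00Z',
    #    'org.label-schema.vcs-url': '<remote><repo>',
    #    'org.label-schema.version': '<manifest branch>' or 'dirty',
    #    'org.label-schema.vcs-ref': '<commit sha>' or '',
    #    'org.opencord.vcs-commit-date': '<commit date>',
    #    'org.opencord.component.<repo>.vcs-url': '...', ...}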

    def create_tags(self):
        """ Create docker tags as needed """

        if self.buildable():
            LOG.debug("Creating tags for image: %s" % self.name)

            # if clean and parents clean, add tags for branch/commit
            if self.parent_clean():
                if build_tag not in self.tags:
                    self.tags.append(build_tag)

                commit_tag = self.repo_d.head_commit
                if commit_tag not in self.tags:
                    self.tags.append(commit_tag)

                # pulling is done via raw_name, set tag to commit
                self.raw_name = "%s:%s" % (self.name, commit_tag)

            LOG.debug("All tags: %s" %
                      ", ".join(self.tags))

    def _find_parent_name(self):
        """ set self.parent_name using Dockerfile FROM line """

        if self.buildable():
            # read contents of Dockerfile into df
            with open(self.dockerfile_abspath()) as dfh:
                df = dfh.read()

            # find FROM line to determine image parent
            frompatt = re.compile(r'^FROM\s+(.*)$', re.MULTILINE)
            fromline = re.search(frompatt, df)

            self.parent_name = fromline.group(1)  # may have tag

    def dockerfile_abspath(self):
        """ returns absolute path to Dockerfile for this image """

        if self.buildable():
            return os.path.join(self.repo_d.abspath(),
                                self.path, self.dockerfile)
        else:
            return None

    def dockerfile_rel_path(self):
        """ returns the Dockerfile path relative to the build context """

        if self.buildable():
            if self.context == ".":
                return self.dockerfile
            else:
                return os.path.normpath(os.path.join(self.path,
                                                     self.dockerfile))
        else:
            return None

    def context_tarball(self):
        """ returns a filehandle to a tarball (tempfile) for the image """

        if self.buildable():

            context_path = os.path.normpath(
                os.path.join(self.repo_d.abspath(),
                             self.path, self.context))

            LOG.info("Creating context tarball of path: %s" % context_path)

            t_fh = tempfile.NamedTemporaryFile()
            t = tarfile.open(mode='w', fileobj=t_fh, dereference=True)

            # exclude files in this list
            exclusion_list = ['.git']

            docker_ignore = os.path.join(context_path, '.dockerignore')
            if os.path.exists(docker_ignore):
                for line in open(docker_ignore).readlines():
                    if line.strip() and line.strip()[0] != '#':
                        exclusion_list.append(line.strip().rstrip('\/'))
                LOG.info("Exclusion list: %s" % exclusion_list)

            # see docker-py source for context
            for path in sorted(
                    DockerUtils.exclude_paths(context_path, exclusion_list)):
                t.add(os.path.join(context_path, path),
                      arcname=path,
                      recursive=False)

            # add sub-components to tarball if required
            if self.components is not None:
                for component in self.components:
                    c_ctx_p = os.path.normpath(
                        os.path.join(component['repo_d'].abspath(),
                                     component['path']))

                    LOG.info("Adding component %s at context %s" %
                             (component['repo_name'], c_ctx_p))

                    # walk component source path
                    for path in sorted(
                            DockerUtils.exclude_paths(c_ctx_p,
                                                      exclusion_list)):

                        # path to where to put files in the archive
                        cf_dest = os.path.normpath(
                            os.path.join(component['dest'], path))

                        t.add(os.path.join(c_ctx_p, path),
                              arcname=cf_dest,
                              recursive=False)

            # t.list() # prints all files in tarball
            t.close()
            t_fh.seek(0)
            return t_fh

        else:
            return None

    def buildargs(self):
        """ returns a dict of labels in docker buildargs compliant format """
        ba_a = {}

        for label_k in self.labels:
            ba_re = re.compile(r'\W')  # non alpha/num/_ chars
            ba_label = ba_re.sub('_', label_k)
            ba_a[ba_label] = self.labels[label_k]

        return ba_a
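
    # buildargs() above just rewrites label keys into buildarg-safe names by
    # replacing non-word characters with '_', e.g. (illustrative value):
    #   'org.label-schema.vcs-url' -> 'org_label_schema_vcs_url'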


class DockerBuilder():

    def __init__(self, repo_manifest):

        global buildable_images
        global pull_only_images

        self.rm = repo_manifest
        self.dc = None  # Docker Client object

        self.images = []

        # lists of images, used for write_actions
        self.all = []
        self.preexisting = []
        self.obsolete = []
        self.pulled = []
        self.failed_pull = []
        self.obsolete_pull = []
        self.built = []
        self.failed_build = []

        # create list of images, setting defaults
        for image in buildable_images:

            repo_d = self.rm.get_repo(image['repo'])

            if "components" in image:
                components = []

                for component in image['components']:
                    comp = {}
                    comp['repo_name'] = component['repo']
                    comp['repo_d'] = self.rm.get_repo(component['repo'])
                    comp['dest'] = component['dest']
                    comp['path'] = component.get('path', '.')
                    components.append(comp)
            else:
                components = None

            # set the full name in case this is pulled
            full_name = "%s:%s" % (image['name'], build_tag)

            img_o = DockerImage(full_name, image['repo'], repo_d,
                                image.get('path', '.'),
                                image.get('context', '.'),
                                image.get('dockerfile', 'Dockerfile'),
                                components=components)

            self.images.append(img_o)

        # add misc images
        for misc_image in pull_only_images:
            img_o = DockerImage(misc_image)
            self.images.append(img_o)

        if not args.dry_run:
            self._docker_connect()

        self.create_dependency()
        self.find_preexisting()

        if args.graph is not None:
            self.dependency_graph(args.graph)

        self.process_images()

        if args.actions_taken is not None:
            self.write_actions_file(args.actions_taken)

    def _docker_connect(self):
        """ Connect to docker daemon """

        self.dc = DockerClient()

        if self.dc.ping():
            LOG.debug("Docker server is responding")
        else:
            LOG.error("Unable to ping docker server")
            sys.exit(1)

    def find_preexisting(self):
        """ find images that already exist in Docker and mark them """

        if self.dc:
            LOG.debug("Evaluating already built/fetched Docker images")

            # get list of images from docker
            pe_images = self.dc.images()

            for pe_image in pe_images:
                raw_tags = pe_image['RepoTags']

                self.all.append({
                    'id': pe_image['Id'],
                    'tags': raw_tags,
                })

                # ignoring all <none>:<none> images, reasonable?
                if raw_tags and "<none>:<none>" not in raw_tags:
                    LOG.debug(" Preexisting Image - ID: %s, tags: %s" %
                              (pe_image['Id'], ",".join(raw_tags)))

                    image = self.find_image(raw_tags[0])

                    if image is not None:
                        if image.compare_labels(pe_image['Labels']):
                            LOG.debug(" Image %s has up-to-date labels" %
                                      pe_image['Id'])

                            self.preexisting.append({
                                'id': pe_image['Id'],
                                'tags': raw_tags,
                                'base': image.name.split(":")[0],
                            })

                            image.image_id = pe_image['Id']
                            image.status = DI_EXISTS

                        else:
                            LOG.debug(" Image %s has obsolete labels" %
                                      pe_image['Id'])

                            self.obsolete.append({
                                'id': pe_image['Id'],
                                'tags': raw_tags,
                            })

    def find_image(self, image_name):
        """ return image object matching name """
        LOG.debug("attempting to find image for: %s" % image_name)

        for image in self.images:
            if image.same_name(image_name):
                return image
        return None

    def create_dependency(self):
        """ set parent/child links for images """

        # list of all parent image names, with dupes
        parents_with_dupes = [img.parent_name for img in self.images
                              if img.parent_name is not None]

        # remove duplicates
        parents = list(set(parents_with_dupes))

        LOG.info("All parent images: %s" % ", ".join(parents))

        # list of "external parents", ones not built internally
        external_parents = []

        for parent_name in parents:
            LOG.debug("Evaluating parent image: %s" % parent_name)
            internal_parent = False

            # match on p_name, without tag
            (p_name, p_tag) = split_name(parent_name)

            for image in self.images:
                if image.same_name(p_name):  # internal image is a parent
                    internal_parent = True
                    LOG.debug(" Internal parent: %s" % image.name)
                    break

            if not internal_parent:  # parent is external
                LOG.debug(" External parent: %s" % parent_name)
                external_parents.append(parent_name)

        # add unique external parents to image list
        for e_p_name in set(external_parents):
            LOG.debug(" Creating external parent image object: %s" % e_p_name)
            img_o = DockerImage(e_p_name)
            self.images.append(img_o)

        # now that all images (including parents) are in list, associate them
        for image in filter(lambda img: img.parent_name is not None,
                            self.images):

            LOG.debug("Associating image: %s" % image.name)

            parent = self.find_image(image.parent_name)
            image.parent = parent

            if parent is not None:
                LOG.debug(" internal image '%s' is parent of '%s'" %
                          (parent.name, image.name))
                parent.children.append(image)

            else:
                LOG.debug(" external image '%s' is parent of '%s'" %
                          (image.parent_name, image.name))

        # loop again now that parents are linked to create labels
        for image in self.images:
            image.create_labels()
            image.create_tags()

            # if image has parent, get labels from parent(s)
            if image.parent is not None:
                LOG.debug("Adding parent labels from %s to child %s" %
                          (image.parent.name, image.name))

                # don't create component labels for same repo as image
                repo_list = [image.repo_name]
                image.labels.update(image.parent.child_labels(repo_list))

    def dependency_graph(self, graph_fn):
        """ save a DOT dependency graph to a file """

        graph_fn_abs = os.path.abspath(graph_fn)

        LOG.info("Saving DOT dependency graph to: %s" % graph_fn_abs)

        try:
            import graphviz
        except ImportError:
            LOG.error('graphviz pip module not found')
            raise

        dg = graphviz.Digraph(comment='Image Dependency Graph',
                              graph_attr={'rankdir': 'LR'})

        component_nodes = []

        # Use raw names, so they match with what's in Dockerfiles
        # replace colons, as the python graphviz module breaks on them
        for image in self.images:
            name_g = image.raw_name.replace(':', '\n')
            dg.node(name_g)

            if image.parent is not None:
                name_p = image.parent.raw_name.replace(':', '\n')
                dg.edge(name_p, name_g)

            if image.components is not None:
                for component in image.components:
                    name_c = "component - %s" % component['repo_name']
                    if name_c not in component_nodes:
                        dg.node(name_c)
                        component_nodes.append(name_c)
                    dg.edge(name_c, name_g, "", {'style': 'dashed'})

        with open(graph_fn_abs, 'w') as g_fh:
            g_fh.write(dg.source)

    def write_actions_file(self, actions_fn):

        actions_fn_abs = os.path.abspath(actions_fn)

        LOG.info("Saving actions as YAML to: %s" % actions_fn_abs)

        actions = {
            "ib_pulled": self.pulled,
            "ib_built": self.built,
            "ib_preexisting_images": self.preexisting,
            "ib_obsolete_images": self.obsolete,
            "ib_failed_pull": self.failed_pull,
            "ib_obsolete_pull": self.obsolete_pull,
            "ib_failed_build": self.failed_build,
        }

        with open(actions_fn_abs, 'w') as a_fh:
            yaml.safe_dump(actions, a_fh)
            LOG.debug(yaml.safe_dump(actions))

    def process_images(self):
        """ determine whether to build/fetch images """

        # upstream images (have no parents), must be fetched
        must_fetch_a = filter(lambda img: img.parent is None, self.images)

        for image in must_fetch_a:
            if image.status is not DI_EXISTS:
                image.status = DI_FETCH

        # images that can be built or fetched (have parents)
        b_or_f_a = filter(lambda img: img.parent is not None, self.images)

        for image in b_or_f_a:
            if not image.parent_clean():
                # must be built if not clean
                image.status = DI_BUILD
            elif image.status is not DI_EXISTS:
                # try to fetch if clean and doesn't exist
                image.status = DI_FETCH
            # otherwise, image is clean and exists (image.status == DI_EXISTS)

        c_and_e_a = filter(lambda img: img.status is DI_EXISTS, self.images)
        LOG.info("Preexisting and clean images: %s" %
                 ", ".join(c.name for c in c_and_e_a))

        upstream_a = filter(lambda img: (img.status is DI_FETCH and
                                         img.parent is None), self.images)
        LOG.info("Upstream images that must be fetched: %s" %
                 ", ".join(u.raw_name for u in upstream_a))

        fetch_a = filter(lambda img: (img.status is DI_FETCH and
                                      img.parent is not None), self.images)
        LOG.info("Clean, buildable images to attempt to fetch: %s" %
                 ", ".join(f.raw_name for f in fetch_a))

        build_a = filter(lambda img: img.status is DI_BUILD, self.images)
        LOG.info("Buildable images, due to unclean context or parents: %s" %
                 ", ".join(b.raw_name for b in build_a))

        # OK to fetch upstream in any case as they should reduce number of
        # layers pulled/built later

        for image in upstream_a:
            if not self._fetch_image(image):
                LOG.info("Unable to fetch upstream image: %s" % image.raw_name)
                # FIXME: fail if the upstream image can't be fetched ?

        fetch_sort = sorted(fetch_a, key=(lambda img: len(img.children)),
                            reverse=True)

        for image in fetch_sort:
            if not self._fetch_image(image):
                # if didn't fetch, build
                image.status = DI_BUILD

        while True:
            buildable_images = self.get_buildable()
            if buildable_images:
                for image in buildable_images:
                    self._build_image(image)
            else:
                LOG.debug("No more images to build, ending build loop")
                break

    def get_buildable(self):
        """ Returns list of images that can be built"""

        buildable = []

        for image in filter(lambda img: img.status is DI_BUILD, self.images):
            if image.parent.status is DI_EXISTS:
                buildable.append(image)

        LOG.debug("Buildable images: %s" %
                  ', '.join(image.name for image in buildable))

        return buildable

    def tag_image(self, image):
        """ Applies tags to an image """

        for tag in image.tags:

            LOG.info("Tagging id: '%s', repo: '%s', tag: '%s'" %
                     (image.image_id, image.name, tag))

            if self.dc is not None:
                self.dc.tag(image.image_id, image.name, tag=tag)

    def _fetch_image(self, image):

        LOG.info("Attempting to fetch docker image: %s" % image.raw_name)

        if self.dc is not None:
            try:
                for stat_json in self.dc.pull(image.raw_name,
                                              stream=True):

                    # sometimes Docker's JSON is dirty, per:
                    # https://github.com/docker/docker-py/pull/1081/
                    stat_s = stat_json.strip()
                    stat_list = stat_s.split("\r\n")

                    for s_j in stat_list:
                        stat_d = json.loads(s_j)

                        if 'stream' in stat_d:
                            for stat_l in stat_d['stream'].split('\n'):
                                LOG.debug(stat_l)

                        if 'status' in stat_d:
                            for stat_l in stat_d['status'].split('\n'):
                                noisy = ["Extracting", "Downloading",
                                         "Waiting", "Download complete",
                                         "Pulling fs layer", "Pull complete",
                                         "Verifying Checksum",
                                         "Already exists"]
                                if stat_l in noisy:
                                    LOG.debug(stat_l)
                                else:
                                    LOG.info(stat_l)

                        if 'error' in stat_d:
                            LOG.error(stat_d['error'])
                            sys.exit(1)

            except Exception:
                LOG.exception("Error pulling docker image")

                self.failed_pull.append({
                    "tags": [image.raw_name, ],
                })

                return False

            # obtain the image_id by inspecting the pulled image. Seems
            # unusual that the Docker API `pull` method doesn't provide it
            # when the `build` method does
            pulled_image = self.dc.inspect_image(image.raw_name)

            # check to make sure that image that was downloaded has the labels
            # that we expect it to have, otherwise return false, trigger build
            if not image.compare_labels(
                    pulled_image['ContainerConfig']['Labels']):
                LOG.info("Tried fetching image %s, but labels didn't match" %
                         image.raw_name)

                self.obsolete_pull.append({
                    "id": pulled_image['Id'],
                    "tags": pulled_image['RepoTags'],
                })
                return False

            image.image_id = pulled_image['Id']
            LOG.info("Fetched image %s, id: %s" %
                     (image.raw_name, image.image_id))

            self.pulled.append({
                "id": pulled_image['Id'],
                "tags": pulled_image['RepoTags'],
                "base": image.name.split(":")[0],
            })

            self.tag_image(image)
            image.status = DI_EXISTS
            return True

    def _build_image(self, image):

        LOG.info("Building docker image for %s" % image.raw_name)

        if self.dc is not None:

            build_tag = "%s:%s" % (image.name, image.tags[0])

            buildargs = image.buildargs()
            context_tar = image.context_tarball()
            dockerfile = image.dockerfile_rel_path()

            for key, val in buildargs.iteritems():
                LOG.debug("Buildarg - %s : %s" % (key, val))

            bl_path = ""
            start_time = datetime.datetime.utcnow()

            if args.build_log_dir:
                bl_name = "%s_%s" % (start_time.strftime("%Y%m%dT%H%M%SZ"),
                                     re.sub(r'\W', '_', image.name))
                bl_path = os.path.abspath(
                    os.path.join(args.build_log_dir, bl_name))

                LOG.info("Build log: %s" % bl_path)
                bl_fh = open(bl_path, 'w+', 0)  # 0 = unbuffered writes
            else:
                bl_fh = None

            try:
                LOG.info("Building image: %s" % image)

                for stat_d in self.dc.build(tag=build_tag,
                                            buildargs=buildargs,
                                            custom_context=True,
                                            fileobj=context_tar,
                                            dockerfile=dockerfile,
                                            rm=True,
                                            forcerm=True,
                                            pull=False,
                                            stream=True,
                                            decode=True):

                    if 'stream' in stat_d:

                        if bl_fh:
                            bl_fh.write(stat_d['stream'].encode('utf-8'))

                        for stat_l in stat_d['stream'].split('\n'):
                            if stat_l:
                                LOG.debug(stat_l)
                        if stat_d['stream'].startswith("Successfully built "):
                            siid = stat_d['stream'].split(' ')[2]
                            short_image_id = siid.strip()
                            LOG.debug("Short Image ID: %s" % short_image_id)

                    if 'status' in stat_d:
                        for stat_l in stat_d['status'].split('\n'):
                            if stat_l:
                                LOG.info(stat_l)

                    if 'error' in stat_d:
                        LOG.error(stat_d['error'])
                        image.status = DI_ERROR
                        sys.exit(1)

            except Exception:
                LOG.exception("Error building docker image")

                self.failed_build.append({
                    "tags": [build_tag, ],
                })

                return

            finally:
                if bl_fh:
                    bl_fh.close()

            # the image ID given by output isn't the full SHA256 id, so find
            # and set it to the full one
            built_image = self.dc.inspect_image(short_image_id)
            image.image_id = built_image['Id']

            end_time = datetime.datetime.utcnow()
            duration = end_time - start_time  # duration is a timedelta

            LOG.info("Built Image: %s, duration: %s, id: %s" %
                     (image.name, duration, image.image_id))

            self.built.append({
                "id": image.image_id,
                "tags": [build_tag, ],
                "push_name": image.raw_name,
                "build_log": bl_path,
                "duration": duration.total_seconds(),
                "base": image.name.split(":")[0],
            })

            self.tag_image(image)
            image.status = DI_EXISTS


if __name__ == "__main__":
    parse_args()
    load_config()

    # only include docker module if not a dry run
    if not args.dry_run:
        try:
            from distutils.version import LooseVersion
            from docker import __version__ as docker_version
            if LooseVersion(docker_version) >= LooseVersion('2.0.0'):
                from docker import APIClient as DockerClient
                from docker import utils as DockerUtils
            else:
                from docker import Client as DockerClient
                from docker import utils as DockerUtils
        except ImportError:
            LOG.error("Unable to load python docker module (dry run?)")
            sys.exit(1)

    rm = RepoManifest()
    db = DockerBuilder(rm)