def volume_set_of(image):
    """
    :param image: Docker image name/ID to inspect
    :return: the set of data volume paths defined for the given Docker image,
             or None if the image defines no volumes
    """
    cmd = ['docker', 'inspect', image]
    # `docker inspect` emits JSON, which is a subset of YAML, so safe_load
    # parses it identically to yaml.load while avoiding arbitrary-object
    # construction (yaml.load without a Loader is deprecated in PyYAML >= 5.1).
    vols = yaml.safe_load(subprocess.check_output(print_args(cmd)))[0]['Config']['Volumes']
    return set(vols.keys()) if vols else None
def create_loader_container(loader_image, tag):
    """
    Create (but do not start) a loader container from *loader_image*, cloning
    the bind mounts and command line of the currently running loader.

    Idempotent: if a container with the expected name already exists it is
    left untouched.

    :param loader_image: full image name to create the new loader from
    :param tag: release tag, used to derive the container name
    :return: the name of the (possibly pre-existing) loader container
    """
    # N.B. this name must be consistent with the definition in cloud-config.yml.jinja
    container = "loader-{}".format(tag)
    if not has_container(container):
        # Retrieve runtime parameters from the current loader and reuse them for
        # the new loader. safe_load suffices: `docker inspect` emits JSON, and it
        # avoids yaml.load's unsafe object construction / deprecation warning.
        inspect = yaml.safe_load(subprocess.check_output(['docker', 'inspect', my_container_id()]))
        cmd = ['docker', 'create', '--name', container]
        # Replicate every bind mount of the current loader.
        for bind in inspect[0]['HostConfig']['Binds']:
            cmd.append('-v')
            cmd.append(bind)
        cmd.append(loader_image)
        # Reuse the current loader's command line verbatim.
        cmd.extend(inspect[0]['Config']['Cmd'])
        subprocess.check_call(print_args(cmd))
    return container
def gc():
    """Run /gc.sh to garbage-collect stale loader artifacts; returns ''."""
    # TODO currently GC don't perform cleaning for loader images of the same tag
    # but different repo names than the current loader.
    args = ['/gc.sh', my_image_name(), _tag]
    subprocess.check_call(print_args(args))
    return ''
def has_container(container):
    """Return True iff a Docker container named *container* exists (inspect succeeds)."""
    with open(devnull, 'w') as sink:
        # Discard docker's output; only the exit status matters here.
        status = subprocess.call(print_args(['docker', 'inspect', container]),
                                 stdout=sink, stderr=sink)
    return status == 0
def switch(repo, tag, target): kill_other_containers() print 'Computing data volumes to be copied from the old containers...' # List old containers # old: map of original-name => (old modified-name, image) old = {} with open(MODIFIED_YML_PATH) as f: for k, v in yaml.load(f)['containers'].iteritems(): old[v['original-name']] = (k, v['image']) # List the new containers that haven't been created and have shared volumes with their old counterparts. # new: map of new modified-name => (old modified-name, volumes shared by both old and new containers) new = {} new_loader_image = '{}/{}:{}'.format(repo, my_image_name(), tag) cmd = ['docker', 'run', '--rm', '-v', '/var/run/docker.sock:/var/run/docker.sock', new_loader_image, 'modified-yml', repo, tag] for new_container, new_container_props in yaml.load(subprocess.check_output(print_args(cmd)))['containers'].iteritems(): counterpart = old.get(new_container_props['original-name']) if not counterpart: print '{} has no counterpart'.format(new_container) continue volumes = volume_set_of(new_container_props['image']) if not volumes: print '{} has no data volumes'.format(new_container) continue counterpart_volumes = volume_set_of(counterpart[1]) if not counterpart_volumes: print '{}\'s counterpart has no data volumes'.format(new_container) continue intersected_volumes = volumes & counterpart_volumes if not counterpart_volumes: print '{} has no intersecting data volumes'.format(new_container) continue if has_container(new_container): print '{} already exists'.format(new_container) continue if not has_container(counterpart[0]): print '{} doesn\'t exist'.format(counterpart[0]) continue new[new_container] = (counterpart[0], intersected_volumes) print 'Containers for data copying, in the format of "target-container: (source-container, volumes)":' print new # Create the loader container first. 
Otherwise when launching the containers that we create below, the system would # fail if one of them links to the new loader -- the `create-containers` command doesn't create the loader. new_loader = create_loader_container(new_loader_image, tag) if new: # Create containers cmd = ['docker', 'run', '--rm', '-v', '/var/run/docker.sock:/var/run/docker.sock', new_loader_image, 'create-containers', repo, tag, new_loader] cmd.extend(new.keys()) subprocess.check_call(print_args(cmd)) # Copy data loader_image = my_full_image_name() for container, new_container_props in new.iteritems(): cmd = ['/copy-container-data.sh', loader_image, new_container_props[0], container] cmd.extend(list(new_container_props[1])) subprocess.check_call(print_args(cmd)) with open(_repo_file, 'w') as f: f.write(repo) with open(_tag_file, 'w') as f: f.write(tag) return restart_to(target)