def get_latest_tag(repo): if not repo: repo = _current_repo url = 'https://{}/v1/repositories/{}/tags'.format(repo, my_image_name()) print "Querying latest tag at {}...".format(url) r = requests.get(url) if 200 <= r.status_code < 300: print "Server {} returned: {}".format(repo, r.text) ret = r.json() for k, v in ret.iteritems(): if k != 'latest' and v == ret['latest']: return '"{}"'.format(k) return '"The latest tag does not correspond to a version."', 502 else: return r.text, r.status_code
def post_images_pull(repo, tag): if not exists(PULL_JSON): pulling = False else: with open(PULL_JSON) as f: pulling = json.load(f)['status'] == 'pulling' if pulling: return '"Pulling is already in progress."', 409 else: print "Pulling from {} tag {} ... The following output is from pull.sh:".format(repo, tag) # N.B. there is a small chance of race condition that this path is called again before # pull.sh updates PULL_JSON. Maintenance cost of the full solution due to its complexity # overweighs the benefit. subprocess.Popen(['/pull.sh', repo, my_image_name(), tag, PULL_JSON]) return '"Pulling started successfully."'
def modify_yaml(repo, tag):
    """Rewrite CRANE_YML_PATH into MODIFIED_YML_PATH for the given repo/tag.

    Removes this loader's own container entry, tags it, then rewrites image
    names, links, volumes-from references, and (optionally) groups.
    """
    with open(CRANE_YML_PATH) as src:
        y = yaml.load(src)
    containers = y['containers']
    image = my_image_name()
    container = my_container_name()
    # Drop the loader's own entry and derive its tagged name in one chain.
    tagged_loader_container = add_tag_to_container(
        remove_loader_container(containers, image), tag)
    add_repo_and_tag_to_images(containers, repo, tag)
    modify_links(containers, tagged_loader_container, container, tag)
    modify_volumes_from(containers, tagged_loader_container, container, tag)
    if 'groups' in y:
        modify_groups(y['groups'], tagged_loader_container, container, tag)
    with open(MODIFIED_YML_PATH, 'w') as dst:
        dst.write(yaml.dump(y, default_flow_style=False))
def modify_yaml(repo, tag, my_container=None, remove_loader_container=True):
    """Rewrite CRANE_YML_PATH into MODIFIED_YML_PATH for the given repo/tag.

    :param my_container: container name to rewrite references against;
                         defaults to this loader's own container name.
    :param remove_loader_container: when True, drop the loader's entry from
                                    the container map before rewriting.
    """
    my_container = my_container or my_container_name()
    my_image = my_image_name()
    with open(CRANE_YML_PATH) as source:
        y = yaml.load(source)
    containers = y["containers"]
    loader_container = get_loader_container(containers, my_image)
    if remove_loader_container:
        del containers[loader_container]
    tagged_loader_container = add_tag_to_container(loader_container, tag)
    add_repo_and_tag_to_images(containers, repo, tag)
    modify_links(containers, tagged_loader_container, my_container, tag)
    modify_volumes_from(containers, tagged_loader_container, my_container, tag)
    if "groups" in y:
        modify_groups(y["groups"], tagged_loader_container, my_container, tag)
    with open(MODIFIED_YML_PATH, "w") as sink:
        sink.write(yaml.dump(y, default_flow_style=False))
def modify_yaml(repo, tag, my_container=None, remove_loader_container=True):
    """Rewrite CRANE_YML_PATH into MODIFIED_YML_PATH for the given repo/tag.

    :param my_container: container name to rewrite references against;
                         defaults to this loader's own container name.
    :param remove_loader_container: when True, drop the loader's entry from
                                    the container map before rewriting.
    """
    if not my_container:
        my_container = my_container_name()
    my_image = my_image_name()
    with open(CRANE_YML_PATH) as f:
        y = yaml.load(f)
    containers = y['containers']
    # Find the loader's own entry; optionally remove it from the map.
    loader_container = get_loader_container(containers, my_image)
    if remove_loader_container:
        del containers[loader_container]
    tagged_loader_container = add_tag_to_container(loader_container, tag)
    # Rewrite every image reference, then fix up links / volumes-from /
    # groups so they point at the tagged names.
    add_repo_and_tag_to_images(containers, repo, tag)
    modify_links(containers, tagged_loader_container, my_container, tag)
    modify_volumes_from(containers, tagged_loader_container, my_container, tag)
    if 'groups' in y:
        modify_groups(y['groups'], tagged_loader_container, my_container, tag)
    with open(MODIFIED_YML_PATH, 'w') as f:
        f.write(yaml.dump(y, default_flow_style=False))
def gc():
    """Run /gc.sh to garbage-collect images for this loader image and tag.

    Returns an empty body (success) for the HTTP layer.
    """
    # NOTE(review): this function name shadows the stdlib `gc` module within
    # this module — presumably intentional as a route handler; confirm.
    # TODO currently GC doesn't clean loader images that have the same tag but
    # a different repo name than the current loader.
    subprocess.check_call(print_args(['/gc.sh', my_image_name(), _tag]))
    return ''
def switch(repo, tag, target): kill_other_containers() print 'Computing data volumes to be copied from the old containers...' # List old containers # old: map of original-name => (old modified-name, image) old = {} with open(MODIFIED_YML_PATH) as f: for k, v in yaml.load(f)['containers'].iteritems(): old[v['original-name']] = (k, v['image']) # List the new containers that haven't been created and have shared volumes with their old counterparts. # new: map of new modified-name => (old modified-name, volumes shared by both old and new containers) new = {} new_loader_image = '{}/{}:{}'.format(repo, my_image_name(), tag) cmd = ['docker', 'run', '--rm', '-v', '/var/run/docker.sock:/var/run/docker.sock', new_loader_image, 'modified-yml', repo, tag] for new_container, new_container_props in yaml.load(subprocess.check_output(print_args(cmd)))['containers'].iteritems(): counterpart = old.get(new_container_props['original-name']) if not counterpart: print '{} has no counterpart'.format(new_container) continue volumes = volume_set_of(new_container_props['image']) if not volumes: print '{} has no data volumes'.format(new_container) continue counterpart_volumes = volume_set_of(counterpart[1]) if not counterpart_volumes: print '{}\'s counterpart has no data volumes'.format(new_container) continue intersected_volumes = volumes & counterpart_volumes if not counterpart_volumes: print '{} has no intersecting data volumes'.format(new_container) continue if has_container(new_container): print '{} already exists'.format(new_container) continue if not has_container(counterpart[0]): print '{} doesn\'t exist'.format(counterpart[0]) continue new[new_container] = (counterpart[0], intersected_volumes) print 'Containers for data copying, in the format of "target-container: (source-container, volumes)":' print new # Create the loader container first. 
Otherwise when launching the containers that we create below, the system would # fail if one of them links to the new loader -- the `create-containers` command doesn't create the loader. new_loader = create_loader_container(new_loader_image, tag) if new: # Create containers cmd = ['docker', 'run', '--rm', '-v', '/var/run/docker.sock:/var/run/docker.sock', new_loader_image, 'create-containers', repo, tag, new_loader] cmd.extend(new.keys()) subprocess.check_call(print_args(cmd)) # Copy data loader_image = my_full_image_name() for container, new_container_props in new.iteritems(): cmd = ['/copy-container-data.sh', loader_image, new_container_props[0], container] cmd.extend(list(new_container_props[1])) subprocess.check_call(print_args(cmd)) with open(_repo_file, 'w') as f: f.write(repo) with open(_tag_file, 'w') as f: f.write(tag) return restart_to(target)