def WarmUp(self, load: pb2.WarmUpLoad, context) -> Generator[pb2.PullerStatus, None, None]:
    cfg, args = self.__cfg, self.__args
    for p in load.pulls:
        self.__puller.warm_up(p)
    space_used = util.get_dir_size(args.docker_home) - self.__init_size
    logging.info('Space: %s/%s' % (util.size(space_used), util.size(cfg.capacity)))
    yield pb2.PullerStatus(capacity=cfg.capacity, level=space_used)
def data_analysis(G):
    """Print basic size and degree statistics of a networkx graph."""
    nodes = G.number_of_nodes()
    edges = G.number_of_edges()
    degrees = [d for _, d in G.degree()]
    print('nodes: {}'.format(nodes))
    print('edges: {}'.format(edges))
    print('average degree: {}'.format(sum(degrees) / nodes))
    print('max degree: {}'.format(max(degrees)))
    print('min degree: {}'.format(min(degrees)))
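# A minimal usage sketch of data_analysis, assuming networkx is installed;
# the karate-club graph is just an illustration, not project data:
import networkx as nx

data_analysis(nx.karate_club_graph())
# -> nodes: 34, edges: 78, average degree ~4.59, max degree 17, min degree 1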
def Pull(self, pull_gen: Generator[pb2.ImagePull, None, None], context) -> Generator[pb2.ImagePullSummary, None, None]:
    cfg, args = self.__cfg, self.__args
    try:
        for p in pull_gen:
            s = self.__puller.pull_image(p)
            space_used = util.get_dir_size(args.docker_home) - self.__init_size
            logging.info('Space: %s/%s' % (util.size(space_used), util.size(cfg.capacity)))
            s.puller_status.capacity, s.puller_status.level = cfg.capacity, space_used
            yield s
    finally:
        self.__puller.prune_images()
        self._clean_up_docker_images()
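# A hypothetical client-side sketch of driving this bidirectional-streaming
# RPC; the stub class (pb2_grpc.PullerStub) and the channel target are
# assumptions, not taken from the code above:
import grpc

def pull_all(pulls):
    with grpc.insecure_channel('localhost:50051') as channel:
        stub = pb2_grpc.PullerStub(channel)  # hypothetical stub name
        for summary in stub.Pull(iter(pulls)):
            print('level/capacity: %d/%d' % (summary.puller_status.level,
                                             summary.puller_status.capacity))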
def _build_layers(self, layers: Iterable[pb2.Layer]) -> Dict[str, Layer]:
    layers = {l.digest: Layer(l.digest, l.size) for l in layers}
    n_parallel = int(mp.cpu_count() * self.__scaling_factor)
    sema = mp.Semaphore(n_parallel)
    build_dir = self.__build_dir

    def _build(l):
        # The semaphore caps how many layers are built concurrently.
        with sema:
            l.build(build_dir)

    total_size = int(sum(l.size for l in layers.values()))
    logging.info('Building %d layers in %s, total size: %s' % (len(layers), build_dir, util.size(total_size)))
    wait_q = list(layers.values())
    while len(wait_q) > 0:
        # Spawn at most 1000 processes per batch; the semaphore throttles
        # the actual work inside them.
        n_procs = min(1000, len(wait_q))
        procs = [mp.Process(target=_build, args=(l,)) for l in wait_q[:n_procs]]
        try:
            for p in procs:
                p.start()
            for p in procs:
                p.join()
        finally:
            for p in procs:
                if p.is_alive():
                    p.terminate()
        wait_q = wait_q[n_procs:]
    return layers
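# Self-contained sketch of the throttling pattern used above: processes are
# started eagerly, but the shared semaphore caps how many run their payload
# at once. All names below are illustrative, not from the project:
import multiprocessing as mp

def _work(sema, i):
    with sema:
        print('building %d' % i)

if __name__ == '__main__':
    sema = mp.Semaphore(4)  # at most 4 payloads execute concurrently
    procs = [mp.Process(target=_work, args=(sema, i)) for i in range(16)]
    for p in procs:
        p.start()
    for p in procs:
        p.join()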
def __init__(self, symbol_name, adress_string, data_type, comment=''):
    """
    :param symbol_name: the name of the IO
    :param adress_string: the address of the IO (example: I 4.0)
    :param data_type: the data type of the IO
    :param comment: the comment given in the symbol table
    """
    self._symbol_name = symbol_name
    self._type = adress_string[0:2].strip()
    self._adress_byte, self._adress_bit = util.convert_adress(adress_string)
    self._size = util.size(data_type)
    self._data_type = data_type
    self._comment = comment
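# Hedged usage sketch; the enclosing class name is hypothetical, and
# util.convert_adress / util.size are the project's own helpers:
#   s = Symbol('Motor_Start', 'I 4.0', 'BOOL', 'start push-button')
#   s._type         -> 'I'
#   s._adress_byte  -> 4 (assuming convert_adress splits on the dot)
#   s._adress_bit   -> 0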
def start_dockerd(self, cfg: pb2.Config, dockerd_path: str):
    logging.info('Killing existing dockerd ...')
    os.system('sudo killall dockerd')
    cmd = ['sudo', dockerd_path]
    if cfg.capacity > 0:
        cmd += ['--cache-capacity', util.size(cfg.capacity).lower().replace(' ', '')]
    if cfg.cache_policy:
        cmd += ['--cache-policy', cfg.cache_policy]
    if cfg.use_archive:
        cmd += ['--cache-archive']
    cmd += ['> /tmp/dockerd.log 2>&1', '&']
    logging.info('Starting new dockerd ...')
    os.system(' '.join(cmd))
    wait_time = 1
    logging.info('Wait %ds for dockerd to fully start ...' % wait_time)
    time.sleep(wait_time)
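# For illustration, a config like pb2.Config(capacity=10 * 2**30,
# cache_policy='lru', use_archive=True) would assemble roughly the command
# below ('lru' and the '10gb' rendering of util.size are assumptions; the
# flag names come from the code above):
#   sudo <dockerd_path> --cache-capacity 10gb --cache-policy lru \
#       --cache-archive > /tmp/dockerd.log 2>&1 &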
def build(self, build_dir: str, registry: str = None) -> docker.models.images.Image:
    base = '%s/image' % build_dir
    layer_base = '%s/layer' % build_dir
    if self.parent:
        # Qualify the parent image with the registry when one is given.
        base_img = '%s/%s' % (registry, self.parent) if registry else str(self.parent)
    else:
        base_img = 'scratch'
    logging.debug('Building %s ...' % self)
    docker_cli = docker.from_env()
    dockerfile = ['FROM %s' % base_img]
    img_dir = '%s/%s' % (base, self)
    if os.path.exists(img_dir):
        shutil.rmtree(img_dir)
    os.makedirs(img_dir, exist_ok=True)
    for l in self.layers:
        logging.debug('Generating layer %s, size %s' % (l.digest, util.size(l.size)))
        l.build(build_dir)
        # Hard-link the shared layer blob into this image's build context.
        os.link('%s/%s' % (layer_base, l.digest), '%s/%s' % (img_dir, l.digest))
        dockerfile.append('COPY %s /%s' % (l.digest, l.digest))
    df_path = '%s/Dockerfile' % img_dir
    with open(df_path, 'w') as f:
        f.write('\n'.join(dockerfile))
    tag = '%s/%s' % (registry, str(self)) if registry else str(self)
    img, _ = docker_cli.images.build(path=img_dir, tag=tag, rm=True, dockerfile=os.path.abspath(df_path))
    api = docker.APIClient()
    for a in self.aliases:
        repo, tag = a.split(':')
        if registry:
            repo = '%s/%s' % (registry, repo)
        api.tag(img.id, repo, tag)
    return img
def build(self, build_dir: str, density: float = .5):
    base = '%s/layer' % build_dir
    os.makedirs(base, exist_ok=True)
    layer_f = '%s/%s' % (base, self.digest)
    if os.path.exists(layer_f):
        return
    try:
        logging.debug('Creating layer %s, size: %s ...' % (self.digest, util.size(self.size)))
        with open(layer_f, 'wb') as f:
            if self.size == 0:
                return
            size = self.size - 1
            if size > 0:
                # Make the file sparse: seek past the empty region and write
                # only a `density` fraction of the bytes as random payload.
                load_size = int(size * density)
                empty_size = size - load_size
                f.seek(empty_size)
                f.write(rnd.bytes(load_size))
            # The trailing byte forces the file to its full length.
            f.write(b'\0')
    except Exception:
        logging.exception('error building layer %s' % self.digest)
        raise
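# Worked example of the split above: for self.size == 100 and density == 0.5,
# size == 99, load_size == 49 and empty_size == 50; the writer seeks past a
# 50-byte hole, writes 49 random bytes, and the trailing null byte brings the
# file to exactly 100 bytes (sparse on filesystems that support holes).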
def predict(G, node1, node2, mapping):
    """Adamic-Adar-style link-prediction score for (node1, node2)."""
    n1 = set(G.neighbors(node1))
    n2 = set(G.neighbors(node2))
    cn_set = n1.intersection(n2)
    # Epsilon keeps the denominator non-zero when a common neighbor has degree 1.
    epi = 0.01
    return sum(1 / math.log(G.degree(i) + epi) for i in cn_set)
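# Worked example, assuming networkx: on a 4-cycle, nodes 0 and 2 share the
# common neighbors {1, 3}, each of degree 2, so the score is
# 2 / log(2.01) ~= 2.86:
import networkx as nx

G = nx.cycle_graph(4)  # edges: 0-1, 1-2, 2-3, 3-0
print(predict(G, 0, 2, None))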
def resolve_image_dependencies(images):
    # Index images by the digests of the layers they contain.
    layer_map = {l.digest: () for img in images.values() for l in img.layers}
    for img in images.values():
        for l in img.layers:
            layer_map[l.digest] += img,
    dep_count, alias_count = 0, 0
    for img in images.values():
        for l in img.layers:
            for p in layer_map[l.digest]:
                if p == img:
                    continue
                if img.is_child(p):
                    print('%s -> %s, parent: %d, child: %d' % (p, img, len(p.layers), len(img.layers)))
                    img.parent = p
                    dep_count += 1
                elif img.has_alias(p):
                    print('%s == %s, # of layers: %d' % (p, img, len(p.layers)))
                    img.add_alias(p)
                    alias_count += 1
    print('Found %d dependencies among %d images' % (dep_count, len(images)))
    # Each alias pair is discovered twice, once from each side.
    print('Found %d aliases' % (alias_count // 2))
    return images


if __name__ == '__main__':
    images = parse_images(DATA_CENTER)
    resolve_image_dependencies(images)
    for i in images.values():
        i.squash_layers()
    save_images(images, DATA_CENTER)
    total_size = sum(l.size for i in images.values() for l in i.layers)
    print('Total size: %s' % util.size(total_size))