def main(): """Generate Python 2 only package list.""" parser = argparse.ArgumentParser() parser.add_argument('-n', '--num-package', type=int, default=50) parser.add_argument('-o', '--outfile', default=RESULT_PATH) parser.add_argument('-c', '--cache-dir', default=CACHE_PATH) parser.add_argument('--overrides', default=OVERRIDE_PATH) parser.add_argument('-v', '--verbose', help='increase output verbosity', action='store_true') args = parser.parse_args() if args.verbose: logging.basicConfig(level=logging.DEBUG) else: logging.basicConfig(level=logging.WARNING) cache = utils.get_cache(args.cache_dir) overrides = utils.get_overrides(args.overrides) top_py2_packages = generate(args.num_package, cache=cache, overrides=overrides) with open(args.outfile, 'w', encoding='utf8') as out: json.dump(top_py2_packages, out)
def test_get_cache(self):
    '''test_get_cache tests creation and retrieval of the Singularity
       cache directory under different settings
    '''
    print("Testing utils.get_cache...")
    from utils import get_cache

    # If there is no cache_base, we should get default
    print("Case 1: No cache base returns default")
    home = os.environ['HOME']
    cache = get_cache()
    self.assertEqual("%s/.singularity" % home, cache)
    self.assertTrue(os.path.exists(cache))

    # If we give a base, we should get that base instead
    print("Case 2: custom specification of cache base")
    cache_base = '%s/cache' % (home)
    cache = get_cache(cache_base=cache_base)
    self.assertEqual(cache_base, cache)
    self.assertTrue(os.path.exists(cache))

    # If we specify a subfolder, we should get that added
    print("Case 3: Ask for subfolder in cache base")
    subfolder = 'docker'
    cache = get_cache(subfolder=subfolder)
    self.assertEqual("%s/.singularity/%s" % (home, subfolder), cache)
    self.assertTrue(os.path.exists(cache))

    # If we disable the cache, we should get temporary directory
    print("Case 4: Disable the cache (uses /tmp)")
    cache = get_cache(disable_cache=True)
    self.assertTrue(os.path.exists(cache))
    self.assertTrue(re.search("tmp", cache) != None)

    # If environmental variable set, should use that
    print("Case 5: cache base obtained from environment")
    SINGULARITY_CACHEDIR = '%s/cache' % (home)
    os.environ['SINGULARITY_CACHEDIR'] = SINGULARITY_CACHEDIR
    cache = get_cache()
    self.assertEqual(SINGULARITY_CACHEDIR, cache)
    self.assertTrue(os.path.exists(cache))
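For reference, a minimal sketch of the get_cache helper exercised by the test above, reconstructed only from the behavior the assertions check (default $HOME/.singularity, an optional cache_base, an optional subfolder, SINGULARITY_CACHEDIR taking precedence over the default, and a temporary directory when the cache is disabled). This is an assumption-based illustration, not the actual utils.get_cache implementation.

# Hypothetical sketch of get_cache, inferred from the assertions in the test;
# the real utils.get_cache may differ in details.
import os
import tempfile


def get_cache(cache_base=None, subfolder=None, disable_cache=False):
    # A disabled cache falls back to a temporary directory (under /tmp)
    if disable_cache:
        return tempfile.mkdtemp()

    # Priority: explicit cache_base > SINGULARITY_CACHEDIR > $HOME/.singularity
    if cache_base is None:
        cache_base = os.environ.get("SINGULARITY_CACHEDIR",
                                    "%s/.singularity" % os.environ["HOME"])

    # Optional subfolder inside the cache base (e.g. "docker", "shub")
    cache = cache_base if subfolder is None else "%s/%s" % (cache_base, subfolder)

    # The test asserts the directory exists after the call
    if not os.path.exists(cache):
        os.makedirs(cache)
    return cache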
def get_new_ts(inotify_handle):
    cache = get_cache()
    for event in inotify_handle.event_gen():
        if event is not None:
            (header, type_names, watch_path, filename) = event
            if filename.endswith('.ts') and 'IN_MOVED_TO' == type_names[0]:
                container = time.strftime("{0}%Y%m%d".format(CONTAINER_PREFIX),
                                          time.localtime())
                ts_file = "%s/%s" % (watch_path.decode('utf-8'),
                                     filename.decode('utf-8'))
                channel = ts_file.split('/')[-2]
                ts_name = ts_file.split('/')[-1].split("-")[0]
                user_info = get_user_info()

                # Get the playback duration of the ts file
                result = get_ts_time_len(ts_file)
                if not result:
                    print("get ts time len error. ts: <{0}>".format(ts_file))
                    continue

                # Get the last entry of the channel list to determine the sequence number
                channel_last = cache.lrange(channel, -1, -1)

                # Sequence number of the ts file
                ts_order_no = 0
                if channel_last:
                    ts_obj = ConvertTsFileName(channel_last[0])
                    ts_order_no = ts_obj.ts_order_no
                ts_order_no += 1

                new_tsname = "{0}_{1}_{2}_01ws{3}.ts".format(
                    ts_name,
                    result.split(".")[0],
                    result.split(".")[1],
                    str(ts_order_no).zfill(6))
                obj_file = channel + "/" + new_tsname
                cache.rpush(channel, new_tsname)
                upload_ts2oss(user_info=user_info,
                              container=container,
                              obj_file=obj_file,
                              ts_file=ts_file)
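get_new_ts relies on a ConvertTsFileName helper to recover the running sequence number from the last segment name stored in the channel list. A plausible sketch, assuming the name follows the {name}_{seconds}_{fraction}_01ws{NNNNNN}.ts pattern built above; the class itself is not part of this snippet and the example file name is hypothetical.

# Hypothetical sketch: parse the 6-digit sequence number back out of a
# segment name such as "cctv1_10_040_01ws000042.ts". The real
# ConvertTsFileName class may differ.
class ConvertTsFileName(object):

    def __init__(self, ts_name):
        self.ts_name = ts_name

    @property
    def ts_order_no(self):
        # The order number is the zero-padded block between "01ws" and ".ts"
        stem = self.ts_name.rsplit('.', 1)[0]   # drop ".ts"
        order = stem.rsplit('01ws', 1)[-1]      # e.g. "000042"
        return int(order)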
def run(args):

    # Find root filesystem location
    if args.rootfs != None:
        singularity_rootfs = args.rootfs
    else:
        singularity_rootfs = os.environ.get("SINGULARITY_ROOTFS", None)
        if singularity_rootfs == None and args.shub == None:
            logger.error("root file system not specified OR defined as environmental variable, exiting!")
            sys.exit(1)

    if singularity_rootfs != None:
        logger.info("Root file system defined as %s", singularity_rootfs)

    # Does the registry require authentication?
    auth = None
    if args.username is not None and args.password is not None:
        auth = basic_auth_header(args.username, args.password)
        logger.info("Username for registry authentication: %s", args.username)

    # Does the user want to download a Singularity image?
    if args.shub != None:
        image = args.shub
        manifest = get_shub_manifest(image)
        if args.pull_folder == None:
            cache_base = get_cache(subfolder="shub",
                                   disable_cache=args.disable_cache)
        else:
            cache_base = args.pull_folder

        # The image name is the md5 hash, download if it's not there
        image_name = get_image_name(manifest)
        image_file = "%s/%s" % (cache_base, image_name)
        if not os.path.exists(image_file):
            image_file = download_image(manifest=manifest,
                                        download_folder=cache_base)
        else:
            print("Image already exists at %s, skipping download." % image_file)
        logger.info("Singularity Hub Image Download: %s", image_file)

        # If singularity_rootfs is provided, write metadata to it
        if singularity_rootfs != None:
            logger.debug("Writing SINGULARITY_RUNDIR and SINGULARITY_IMAGE to %s",
                         singularity_rootfs)
            write_file("%s/SINGULARITY_RUNDIR" % singularity_rootfs,
                       os.path.dirname(image_file))
            write_file("%s/SINGULARITY_IMAGE" % singularity_rootfs,
                       image_file)

    # Do we have a docker image specified?
    elif args.docker != None:

        # Does the user want to override default Entrypoint and use CMD as runscript?
        includecmd = args.includecmd
        logger.info("Including Docker command as Runscript? %s", includecmd)

        image = args.docker
        logger.info("Docker image: %s", image)

        # Input Parsing ----------------------------
        # Parse image name, repo name, and namespace
        image = parse_image_uri(image=image, uri="docker://")
        namespace = image['namespace']
        repo_name = image['repo_name']
        repo_tag = image['repo_tag']

        # Tell the user the namespace, repo name and tag
        logger.info("Docker image path: %s/%s:%s", namespace, repo_name, repo_tag)

        # IMAGE METADATA -------------------------------------------
        # Use Docker Registry API (version 2.0) to get images ids, manifest

        # Get an image manifest - has image ids to parse, and will be
        # used later to get Cmd
        manifest = get_manifest(repo_name=repo_name,
                                namespace=namespace,
                                repo_tag=repo_tag,
                                registry=args.registry,
                                auth=auth)

        # Get images from manifest using version 2.0 of Docker Registry API
        images = get_images(manifest=manifest)

        # DOWNLOAD LAYERS -------------------------------------------
        # Each is a .tar.gz file, obtained from registry with curl

        # Get the cache (or temporary one) for docker
        cache_base = get_cache(subfolder="docker",
                               disable_cache=args.disable_cache)

        layers = []
        for image_id in images:

            # Download the layer, if we don't have it
            targz = "%s/%s.tar.gz" % (cache_base, image_id)
            if not os.path.exists(targz):
                targz = get_layer(image_id=image_id,
                                  namespace=namespace,
                                  repo_name=repo_name,
                                  download_folder=cache_base,
                                  registry=args.registry,
                                  auth=auth)
            layers.append(targz)  # in case we want a list at the end

            # Extract image and remove tar
            output = extract_tar(targz, singularity_rootfs)
            if output is None:
                logger.error("Error extracting image: %s", targz)
                sys.exit(1)
            if args.disable_cache == True:
                os.remove(targz)

        # If the user wants to include the CMD as runscript, generate it here
        if includecmd == True:
            spec = "Cmd"
        else:
            spec = "Entrypoint"

        cmd = get_config(manifest, spec=spec)

        # Only add runscript if command is defined
        if cmd != None:
            print("Adding Docker %s as Singularity runscript..." % (spec.upper()))
            print(cmd)
            runscript = create_runscript(cmd=cmd,
                                         base_dir=singularity_rootfs)

        # When we finish, clean up images
        if args.disable_cache == True:
            shutil.rmtree(cache_base)

        logger.info("*** FINISHING DOCKER BOOTSTRAP PYTHON PORTION ****\n")
def run(args):

    # Find root filesystem location
    if args.rootfs != None:
        singularity_rootfs = args.rootfs
    else:
        singularity_rootfs = os.environ.get("SINGULARITY_ROOTFS", None)
        if singularity_rootfs == None and args.shub == None:
            logger.error("root file system not specified OR defined as environmental variable, exiting!")
            sys.exit(1)

    if singularity_rootfs != None:
        logger.info("Root file system defined as %s", singularity_rootfs)

    # Does the registry require authentication?
    auth = None
    if args.username is not None and args.password is not None:
        auth = basic_auth_header(args.username, args.password)
        logger.info("Username for registry authentication: %s", args.username)

    # Does the user want to download a Singularity image?
    if args.shub != None:
        image_id = int(args.shub)
        manifest = get_shub_manifest(image_id)
        cache_base = get_cache(subfolder="shub",
                               disable_cache=args.disable_cache)

        # The image name is the md5 hash, download if it's not there
        image_name = get_image_name(manifest)
        image_file = "%s/%s" % (cache_base, image_name)
        if not os.path.exists(image_file):
            image_file = download_image(manifest=manifest,
                                        download_folder=cache_base)
        else:
            print("Image already exists at %s, skipping download." % image_file)
        logger.info("Singularity Hub Image Download: %s", image_file)

        # If singularity_rootfs is provided, write metadata to it
        if singularity_rootfs != None:
            logger.debug("Writing SINGULARITY_RUNDIR and SINGULARITY_IMAGE to %s",
                         singularity_rootfs)
            write_file("%s/SINGULARITY_RUNDIR" % singularity_rootfs,
                       os.path.dirname(image_file))
            write_file("%s/SINGULARITY_IMAGE" % singularity_rootfs,
                       image_file)

    # Do we have a docker image specified?
    elif args.docker != None:

        # Does the user want to override default Entrypoint and use CMD as runscript?
        includecmd = args.includecmd
        logger.info("Including Docker command as Runscript? %s", includecmd)

        image = args.docker
        logger.info("Docker image: %s", image)

        # Input Parsing ----------------------------
        # Parse image name, repo name, and namespace

        # First split the docker image name by /
        image = image.split('/')

        # If there are two parts, we have namespace with repo (and maybe tag)
        if len(image) == 2:
            namespace = image[0]
            image = image[1]

        # Otherwise, we must be using library namespace
        else:
            namespace = "library"
            image = image[0]

        # Now split the docker image name by :
        image = image.split(':')
        if len(image) == 2:
            repo_name = image[0]
            repo_tag = image[1]

        # Otherwise, assume latest of an image
        else:
            repo_name = image[0]
            repo_tag = "latest"

        # Tell the user the namespace, repo name and tag
        logger.info("Docker image path: %s/%s:%s", namespace, repo_name, repo_tag)

        # IMAGE METADATA -------------------------------------------
        # Use Docker Registry API (version 2.0) to get images ids, manifest

        # Get an image manifest - has image ids to parse, and will be
        # used later to get Cmd
        manifest = get_manifest(repo_name=repo_name,
                                namespace=namespace,
                                repo_tag=repo_tag,
                                registry=args.registry,
                                auth=auth)

        # Get images from manifest using version 2.0 of Docker Registry API
        images = get_images(repo_name=repo_name,
                            namespace=namespace,
                            registry=args.registry,
                            auth=auth)

        # DOWNLOAD LAYERS -------------------------------------------
        # Each is a .tar.gz file, obtained from registry with curl

        # Get the cache (or temporary one) for docker
        cache_base = get_cache(subfolder="docker",
                               disable_cache=args.disable_cache)

        layers = []
        for image_id in images:

            # Download the layer, if we don't have it
            targz = "%s/%s.tar.gz" % (cache_base, image_id)
            if not os.path.exists(targz):
                targz = get_layer(image_id=image_id,
                                  namespace=namespace,
                                  repo_name=repo_name,
                                  download_folder=cache_base,
                                  registry=args.registry,
                                  auth=auth)
            layers.append(targz)  # in case we want a list at the end

            # Extract image and remove tar
            output = extract_tar(targz, singularity_rootfs)
            if output is None:
                logger.error("Error extracting image: %s", targz)
                sys.exit(1)
            if args.disable_cache == True:
                os.remove(targz)

        # If the user wants to include the CMD as runscript, generate it here
        if includecmd == True:
            spec = "Cmd"
        else:
            spec = "Entrypoint"

        cmd = get_config(manifest, spec=spec)

        # Only add runscript if command is defined
        if cmd != None:
            print("Adding Docker %s as Singularity runscript..." % (spec.upper()))
            print(cmd)
            runscript = create_runscript(cmd=cmd,
                                         base_dir=singularity_rootfs)

        # When we finish, clean up images
        if args.disable_cache == True:
            shutil.rmtree(cache_base)

        logger.info("*** FINISHING DOCKER BOOTSTRAP PYTHON PORTION ****\n")
def main():

    logger.info("\n*** STARTING DOCKER BOOTSTRAP PYTHON PORTION ****")

    parser = argparse.ArgumentParser(
        description="bootstrap Docker images for Singularity containers")

    # Name of the docker image, required
    parser.add_argument("--docker",
                        dest='docker',
                        help="name of Docker image to bootstrap, in format library/ubuntu:latest",
                        type=str,
                        default=None)

    # root file system of singularity image
    parser.add_argument("--rootfs",
                        dest='rootfs',
                        help="the path for the root filesystem to extract to",
                        type=str,
                        default=None)

    # Docker registry (default is registry-1.docker.io)
    parser.add_argument("--registry",
                        dest='registry',
                        help="the registry path to use, to replace registry-1.docker.io",
                        type=str,
                        default=None)

    # Flag to add the Docker CMD as a runscript
    parser.add_argument("--cmd",
                        dest='includecmd',
                        action="store_true",
                        help="boolean to specify that CMD should be used instead of ENTRYPOINT as the runscript.",
                        default=False)

    parser.add_argument("--username",
                        dest='username',
                        help="username for registry authentication",
                        default=None)

    parser.add_argument("--password",
                        dest='password',
                        help="password for registry authentication",
                        default=None)

    # Flag to disable cache
    parser.add_argument("--no-cache",
                        dest='disable_cache',
                        action="store_true",
                        help="boolean to specify disabling the cache.",
                        default=False)

    try:
        args = parser.parse_args()
    except:
        logger.error("Input args to %s improperly set, exiting.",
                     os.path.abspath(__file__))
        parser.print_help()
        sys.exit(0)

    # Find root filesystem location
    if args.rootfs != None:
        singularity_rootfs = args.rootfs
        logger.info("Root file system defined by command line variable as %s",
                    singularity_rootfs)
    else:
        singularity_rootfs = os.environ.get("SINGULARITY_ROOTFS", None)
        if singularity_rootfs == None:
            logger.error("root file system not specified OR defined as environmental variable, exiting!")
            sys.exit(1)
        logger.info("Root file system defined by env variable as %s",
                    singularity_rootfs)

    # Does the registry require authentication?
    auth = None
    if args.username is not None and args.password is not None:
        auth = basic_auth_header(args.username, args.password)
        logger.info("Username for registry authentication: %s", args.username)

    # Does the user want to override default Entrypoint and use CMD as runscript?
    includecmd = args.includecmd
    logger.info("Including Docker command as Runscript? %s", includecmd)

    # Do we have a docker image specified?
    if args.docker != None:

        image = args.docker
        logger.info("Docker image: %s", image)

        # INPUT PARSING -------------------------------------------
        # Parse image name, repo name, and namespace

        # First split the docker image name by /
        image = image.split('/')

        # If there are two parts, we have namespace with repo (and maybe tag)
        if len(image) == 2:
            namespace = image[0]
            image = image[1]

        # Otherwise, we must be using library namespace
        else:
            namespace = "library"
            image = image[0]

        # Now split the docker image name by :
        image = image.split(':')
        if len(image) == 2:
            repo_name = image[0]
            repo_tag = image[1]

        # Otherwise, assume latest of an image
        else:
            repo_name = image[0]
            repo_tag = "latest"

        # Tell the user the namespace, repo name and tag
        logger.info("Docker image path: %s/%s:%s", namespace, repo_name, repo_tag)

        # IMAGE METADATA -------------------------------------------
        # Use Docker Registry API (version 2.0) to get images ids, manifest

        # Get an image manifest - has image ids to parse, and will be
        # used later to get Cmd
        manifest = get_manifest(repo_name=repo_name,
                                namespace=namespace,
                                repo_tag=repo_tag,
                                registry=args.registry,
                                auth=auth)

        # Get images from manifest using version 2.0 of Docker Registry API
        images = get_images(manifest=manifest,
                            registry=args.registry,
                            auth=auth)

        # DOWNLOAD LAYERS -------------------------------------------
        # Each is a .tar.gz file, obtained from registry with curl

        # Get the cache (or temporary one) for docker
        cache_base = get_cache(subfolder="docker",
                               disable_cache=args.disable_cache)

        layers = []
        for image_id in images:

            # Download the layer, if we don't have it
            targz = "%s/%s.tar.gz" % (cache_base, image_id)
            if not os.path.exists(targz):
                targz = get_layer(image_id=image_id,
                                  namespace=namespace,
                                  repo_name=repo_name,
                                  download_folder=cache_base,
                                  registry=args.registry,
                                  auth=auth)
            layers.append(targz)  # in case we want a list at the end
            # @chrisfilo suggestion to try compiling into one tar.gz

            # Extract image and remove tar
            extract_tar(targz, singularity_rootfs)
            if args.disable_cache == True:
                os.remove(targz)

        # If the user wants to include the CMD as runscript, generate it here
        if includecmd == True:
            spec = "Cmd"
        else:
            spec = "Entrypoint"

        cmd = get_config(manifest, spec=spec)

        # Only add runscript if command is defined
        if cmd != None:
            print("Adding Docker %s as Singularity runscript..." % (spec.upper()))
            print(cmd)
            runscript = create_runscript(cmd=cmd,
                                         base_dir=singularity_rootfs)

        # When we finish, clean up images
        if args.disable_cache == True:
            shutil.rmtree(cache_base)

        logger.info("*** FINISHING DOCKER BOOTSTRAP PYTHON PORTION ****\n")
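All three entry points above build an auth value with basic_auth_header(username, password) and pass it through to the registry calls. A minimal sketch, assuming it produces a standard HTTP Basic Authorization header; the actual helper's return type is not visible in these snippets and may differ.

# Hypothetical sketch of basic_auth_header: encode "user:pass" as base64 for
# an HTTP Basic Authorization header. The real helper may return a dict,
# a bare string, or handle bytes differently.
import base64


def basic_auth_header(username, password):
    credentials = "%s:%s" % (username, password)
    encoded = base64.b64encode(credentials.encode('utf-8')).decode('ascii')
    return {"Authorization": "Basic %s" % encoded}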
def read_leap_motion_data(base_dir=base_dir, load_cache=True,
                          cache_name='leap_motion_data', workers=8):
    """
    Load leap motion data with ThreadPoolExecutor, use cache if it exists.

    Return:
        persons: A dictionary.
            persons[PERSON_ID][GESTURE_ID][ROUND_ID]
                'fingers_dis_direction': list(list(float))
                'fingers_dis_position': list(list(float))
                'fingers_int_direction': list(list(float))
                'fingers_int_position': list(list(float))
                'fingers_num': int
                'fingers_pro_direction': list(list(float))
                'fingers_pro_position': list(list(float))
                'fingers_tip_direction': list(list(float))
                'fingers_tip_position': list(list(float))
                'hand_confidence': float
                'hand_direction': list(float)
                'hand_sphere_center': list(float)
                'hand_sphere_radius': float
                'palm_normal': list(float)
                'palm_position': list(float)
                'palm_position_refined': list(float)

    Author:
        David
    """
    files = []
    users = set()
    for root, _, _files in os.walk(base_dir):
        if len(_files) <= 1:
            continue
        print(root)
        _, person, gesture = root.rsplit('\\', 2)
        files.append([person, gesture, _files])
        users.add(person)

    if load_cache:
        print_log('Trying fetch cache for persons')
        persons = get_cache(cache_name)
        if persons is None:
            print_log('No cache, first building')
            persons = defaultdict(partial(defaultdict, partial(defaultdict, defaultdict)))
        elif len(persons) == len(users):
            print_log('Using cache for persons')
            return persons
        else:
            print_log('Some cache is missing, rebuilding')
    else:
        persons = defaultdict(partial(defaultdict, partial(defaultdict, defaultdict)))

    _diff = users - set(persons.keys())
    files = [k for k in files if k[0] in _diff]
    errors = False

    def _read_datasets(dataset, target):
        nonlocal errors
        for person, gesture, files in dataset:
            print_log('Processing {:<3s} - {:<3s}'.format(person, gesture))
            for f in files:
                try:
                    e = f.find('_')
                    if e == -1:
                        e = f.find('.')
                    idx = int(f[1:e])
                    path = os.path.join(base_dir, person, gesture, f)
                    key, val = read(path)
                    target[person][gesture][idx][key] = val
                except Exception as e:
                    error(e)
                    errors = True
        return

    chunk_size = int(math.ceil(len(files) / workers))
    with ThreadPoolExecutor(max_workers=workers) as executor:
        for idx in range(workers + 1):
            chunk = files[idx * chunk_size:(idx + 1) * chunk_size]
            executor.submit(_read_datasets, chunk, persons)

    print_log('Setting cache for persons')
    if not errors:
        set_cache(persons, cache_name)
    return persons
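Note that the get_cache/set_cache pair used here has a different signature from the Singularity helper earlier in this section: it loads and stores an arbitrary Python object under a cache name. A minimal pickle-based sketch consistent with that usage; the actual helpers (and the CACHE_DIR location) are assumptions, defined elsewhere in the project.

# Hypothetical pickle-based cache helpers matching the calls
# get_cache(cache_name) / set_cache(persons, cache_name) above.
# CACHE_DIR is an assumed location; the real project may store caches elsewhere.
import os
import pickle

CACHE_DIR = 'cache'


def get_cache(cache_name):
    # Return the cached object, or None if no cache file exists yet
    path = os.path.join(CACHE_DIR, cache_name + '.pkl')
    if not os.path.exists(path):
        return None
    with open(path, 'rb') as f:
        return pickle.load(f)


def set_cache(obj, cache_name):
    # Persist the object so the next call to read_leap_motion_data can skip parsing
    os.makedirs(CACHE_DIR, exist_ok=True)
    path = os.path.join(CACHE_DIR, cache_name + '.pkl')
    with open(path, 'wb') as f:
        pickle.dump(obj, f)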