Example No. 1
import os
import sys
import tarfile


def command_create(working_dir: str, project_name, *argv):
    print('working-dir: ' + working_dir)
    pkg_root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
    project_path = os.path.abspath(os.path.join(working_dir, project_name))

    print('Creating restfx project "%s"' % project_name)

    if os.path.exists(project_path):
        print('[ERROR] Project path "%s" exists.' % project_path)
        sys.exit(1)

    # Sample archive bundled with the package.
    sample_file = os.path.abspath(
        os.path.join(pkg_root, 'internal_assets', 'sample.tar.gz'))
    print('Creating project structure')
    # Use a distinct name so the tarfile module is not shadowed.
    tar = tarfile.open(sample_file)
    tar.extractall(project_path)
    tar.close()

    command_genid(project_path)

    print("""Created "{project_name}"!

It is time to have fun with restfx.
""".format(project_name=project_name))
Example No. 2
import os
import tarfile
import zipfile


def extractfile(future_path):
    # Extract next to the archive itself (the original called extractall on
    # the modules rather than on archive objects, and its second `if` let
    # .zip files fall through to the "wrong format" branch).
    dest = os.path.dirname(future_path)
    if future_path.endswith('.zip'):
        print("Unzipping to: " + dest + ", patience will be needed.")
        with zipfile.ZipFile(future_path) as archive:
            archive.extractall(dest)
    elif future_path.endswith('.tgz'):
        print("Extracting file to: " + dest + ", patience will be needed.")
        with tarfile.open(future_path) as archive:
            archive.extractall(dest)
    else:
        print("Format seems wrong....")
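A usage sketch for the helper above; the archive paths are hypothetical:

extractfile('/downloads/dataset.tgz')  # extracts into /downloads
extractfile('/downloads/dataset.zip')  # same destination, via zipfile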
Example No. 3
import ntpath
import os
import tarfile


def extract_tar_files(tar_file, out_directory):
    """
    Extracts all files in a tar archive.
    """
    tar_name = ntpath.basename(tar_file)
    # Assumed destination: the archive's contents under out_directory (the
    # original joined the archive path with its own basename).
    un_tar_path = os.path.join(out_directory, tar_name)
    with tarfile.open(tar_file) as tar:
        tar.extractall(out_directory)

    return data_interpreter(un_tar_path, out_directory)
Example No. 4
def _analyze_tarfile_for_import(tarfile, project, schema, tmpdir):
    def read_sp_manifest_file(path):
        # Must use forward slashes, not os.path.sep.
        fn_manifest = _tarfile_path_join(path, project.Job.FN_MANIFEST)
        try:
            with closing(tarfile.extractfile(fn_manifest)) as file:
                if sys.version_info < (3, 6):
                    return json.loads(file.read().decode())
                else:
                    return json.loads(file.read())
        except KeyError:
            pass

    if schema is None:
        schema_function = read_sp_manifest_file
    elif callable(schema):
        schema_function = _with_consistency_check(schema,
                                                  read_sp_manifest_file)
    elif isinstance(schema, str):
        schema_function = _with_consistency_check(
            _make_path_based_schema_function(schema), read_sp_manifest_file)
    else:
        raise TypeError(
            "The schema variable must be None, callable, or a string.")

    mappings = dict()
    skip_subdirs = set()

    dirs = [member.name for member in tarfile.getmembers() if member.isdir()]
    for name in sorted(dirs):
        if os.path.dirname(
                name) in skip_subdirs:  # skip all sub-dirs of identified dirs
            skip_subdirs.add(name)
            continue

        sp = schema_function(name)
        if sp is not None:
            job = project.open_job(sp)
            if os.path.exists(job.workspace()):
                raise DestinationExistsError(job)
            mappings[name] = job
            skip_subdirs.add(name)

    # Check uniqueness
    if len(set(mappings.values())) != len(mappings):
        raise StatepointParsingError(
            "The jobs identified with the given schema function are not unique!"
        )

    tarfile.extractall(path=tmpdir)
    for path, job in mappings.items():
        src = os.path.join(tmpdir, path)
        assert os.path.isdir(tmpdir)
        assert os.path.isdir(src)
        yield src, _CopyFromTarFileExecutor(src, job)
Example No. 5
def extract_tar(target, current, zip=False):
    tar = None
    if not zip:
        import tarfile
        tar = tarfile.open(target)
    else:
        import zipfile
        tar = zipfile.ZipFile(target)
    try:
        tar.extractall(path=current)
    except Exception:
        return False
    return True
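A usage sketch for extract_tar; the paths here are hypothetical:

ok = extract_tar('/tmp/archive.tar.gz', '/tmp/out')         # tar branch
ok = extract_tar('/tmp/archive.zip', '/tmp/out', zip=True)  # zip branch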
Example No. 6
	def extract_arc(self, target, current, zip=False, rar=False):
		tar = None
		if not zip and not rar:
			import tarfile
			tar = tarfile.open(target)
		elif zip:
			import zipfile
			tar = zipfile.ZipFile(target)
		elif rar:
			import rarfile
			tar = rarfile.RarFile(target)
		try:
			tar.extractall(path=current)
		except Exception:
			return False
		return True
Example No. 7
import math
import os
import os.path as op
import shutil
from io import BytesIO
from lzma import LZMAFile
from tarfile import TarFile

import requests
from tqdm import tqdm

# _get_dataset_dir and LGR are defined elsewhere in this module.


def download_peaks2maps_model(data_dir=None, overwrite=False, verbose=1):
    """
    Download the trained Peaks2Maps model from OHBM 2018.
    """
    url = "https://zenodo.org/record/1257721/files/ohbm2018_model.tar.xz?download=1"

    temp_dataset_name = 'peaks2maps_model_ohbm2018__temp'
    temp_data_dir = _get_dataset_dir(temp_dataset_name, data_dir=data_dir, verbose=verbose)

    dataset_name = 'peaks2maps_model_ohbm2018'
    data_dir = temp_data_dir.replace(temp_dataset_name, dataset_name)

    desc_file = op.join(data_dir, 'description.txt')
    if op.isfile(desc_file) and overwrite is False:
        shutil.rmtree(temp_data_dir)
        return data_dir

    LGR.info('Downloading the model (this is a one-off operation)...')
    # Streaming, so we can iterate over the response.
    r = requests.get(url, stream=True)
    f = BytesIO()

    # Total size in bytes.
    total_size = int(r.headers.get('content-length', 0))
    block_size = 1024 * 1024
    wrote = 0
    # Use true division here; floor division followed by ceil() would
    # undercount the final partial block.
    for data in tqdm(r.iter_content(block_size), total=math.ceil(total_size / block_size),
                     unit='MB', unit_scale=True):
        wrote = wrote + len(data)
        f.write(data)
    if total_size != 0 and wrote != total_size:
        raise Exception("Download interrupted")

    f.seek(0)
    LGR.info('Uncompressing the model to {}...'.format(temp_data_dir))
    # Use a distinct name so the tarfile module is not shadowed.
    tar = TarFile(fileobj=LZMAFile(f), mode="r")
    tar.extractall(temp_data_dir)

    os.rename(op.join(temp_data_dir, 'ohbm2018_model'), data_dir)
    shutil.rmtree(temp_data_dir)

    with open(desc_file, 'w') as fo:
        fo.write('The trained Peaks2Maps model from OHBM 2018.')

    if verbose > 0:
        print('\nDataset moved to {}\n'.format(data_dir))

    return data_dir
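A side note on the decompression step above: on Python 3, tarfile.open can handle the LZMA layer itself, so the manual LZMAFile wrapper is optional. A minimal sketch, assuming f and temp_data_dir as in the function body:

import tarfile

f.seek(0)
with tarfile.open(fileobj=f, mode="r:xz") as tar:
    # tarfile decompresses the .tar.xz stream directly.
    tar.extractall(temp_data_dir)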
Example No. 8
import distutils.core
import os
import shutil
import tempfile

import setuptools.sandbox


def _run_setup_py(tarfile, setup_filename, data):
    # `tarfile` here is an already-open tarfile.TarFile, not the module.
    tempdir = tempfile.mkdtemp()
    setuptools.sandbox.DirectorySandbox(tempdir).run(lambda: tarfile.extractall(tempdir))

    setup_filename = os.path.join(tempdir, setup_filename)
    distutils.core._setup_stop_after = "config"
    setuptools.sandbox.run_setup(setup_filename, "")
    dist = distutils.core._setup_distribution
    shutil.rmtree(tempdir)

    if dist.ext_modules:
        data["is_extension"] = True
    if dist.scripts:
        data["scripts"] = dist.scripts
    if dist.test_suite:
        data["test_suite"] = dist.test_suite
    if dist.install_requires:
        data["install_requires"] = dist.install_requires
    if dist.extras_require:
        data["extras_require"] = dist.extras_require
    if dist.data_files:
        data["data_files"] = dist.data_files
    if dist.entry_points:
        data["entry_points"] = dist.entry_points
Example No. 9
    exit()
try:
    if console == False:
        from appJar import gui
        app = gui("Music Player")
except ImportError:
    LogErr()
    bcast("AppJar could not be imported")
    bcast("This is required for the GUI to work")
    option = raw_input("Install? (yes/no): ")
    if option.lower() == "yes" or option.lower() == "y":
        try:
            bcast("Installing AppJar...")
            # Keep the downloaded file name consistent with the one opened below.
            urllib.urlretrieve(url + "/appJar.tar.gz", "appJar.tar.gz")
            tar = tarfile.open("appJar.tar.gz")
            tar.extractall()
            tar.close()
            os.remove("appJar.tar.gz")
            from appJar import gui
            app = gui("Music Player")
        except Exception:
            LogErr()
            bcast("Unable to download and install AppJar")
            bcast("The GUI will now be disabled")
            console = True
    elif option.lower() == "no" or option.lower() == "n":
        bcast("GUI will now be disabled.")
        bcast("You will be prompted again on next run")
    else:
        bcast("Invalid option")
        bcast("AppJar will not be installed")
Example No. 10
def _analyze_tarfile_for_import(tarfile, project, schema, tmpdir):
    """Validate paths in tarfile.

    Parameters
    ----------
    tarfile : :class:`tarfile.TarFile`
        tarfile to analyze.
    project : :class:`~signac.Project`
        The project to import the data into.
    schema : str or callable
        An optional schema function, which is either a string or a function that accepts a
        path as its first and only argument and returns the corresponding state point as dict.
        (Default value = None).
    tmpdir : :class:`tempfile.TemporaryDirectory`
        Temporary directory, an instance of ``TemporaryDirectory``.

    Yields
    ------
    src : str
        Source path.
    copy_executor : callable
        A callable that uses a provided function to copy to a destination.

    Raises
    ------
    TypeError
        If the schema given is not None, callable, or a string.
    :class:`~signac.errors.DestinationExistsError`
        If a job is already initialized.
    :class:`~signac.errors.StatepointParsingError`
        If the jobs identified with the given schema function are not unique.
    AssertionError
        If ``tmpdir`` given is not a directory.

    """

    def read_sp_manifest_file(path):
        """Read state point from the manifest file.

        Parameters
        ----------
        path : str
            Path to manifest file.

        Returns
        -------
        dict
            state point.

        """
        # Must use forward slashes, not os.path.sep.
        fn_manifest = _tarfile_path_join(path, project.Job.FN_MANIFEST)
        try:
            with closing(tarfile.extractfile(fn_manifest)) as file:
                return json.loads(file.read())
        except KeyError:
            pass

    if schema is None:
        schema_function = read_sp_manifest_file
    elif callable(schema):
        schema_function = _with_consistency_check(schema, read_sp_manifest_file)
    elif isinstance(schema, str):
        schema_function = _with_consistency_check(
            _make_path_based_schema_function(schema), read_sp_manifest_file
        )
    else:
        raise TypeError("The schema variable must be None, callable, or a string.")

    mappings = {}
    skip_subdirs = set()

    dirs = [member.name for member in tarfile.getmembers() if member.isdir()]
    for name in sorted(dirs):
        if (
            os.path.dirname(name) in skip_subdirs
        ):  # skip all sub-dirs of identified dirs
            skip_subdirs.add(name)
            continue

        sp = schema_function(name)
        if sp is not None:
            job = project.open_job(sp)
            if os.path.exists(job.workspace()):
                raise DestinationExistsError(job)
            mappings[name] = job
            skip_subdirs.add(name)

    # Check uniqueness
    if len(set(mappings.values())) != len(mappings):
        raise StatepointParsingError(
            "The jobs identified with the given schema function are not unique!"
        )

    tarfile.extractall(path=tmpdir)
    for path, job in mappings.items():
        assert os.path.isdir(tmpdir)
        src = os.path.join(tmpdir, path)
        assert os.path.isdir(src)
        copy_executor = _CopyFromTarFileExecutor(src, job)
        yield src, copy_executor
Example No. 11
import os
import pathlib
import tarfile

print("start setting ORCA")
# Path() joins its arguments, so the stray "r" argument in the original
# produced /tmp/orca_tmp/r; an open-mode flag does not belong here.
tmp_orca_dir = pathlib.Path("/tmp/orca_tmp")
tar = tarfile.open(pathlib.Path.home() / "ORCA-303.tbz")
if not (tmp_orca_dir / "orca_3_0_3_linux_x86-64/ORCA").is_file():
    print("ORCA has not been copied to the remote node")
    tar.extractall(str(tmp_orca_dir))
    # os.system("export ...") only sets the variable in a throwaway subshell;
    # set it in this process's environment instead.
    os.environ["orca_dir"] = str(tmp_orca_dir / "orca_3_0_3_linux_x86-64")
    if (tmp_orca_dir / "orca_3_0_3_linux_x86-64/ORCA").is_file():
        print("ORCA was copied")
tar.close()
print("")
Example No. 12
    print "Data must be stored on CASTOR ..."
else:
    print "Data must be stored on AFS ..."

if (output_mode != "ROOTMODE"):
    print "Getting files for " + crabdir + " and putting in " + ntupdir
else:
    print "Getting files for " + input_root + " and putting in " + ntupdir

if (output_mode != "ROOTMODE"):
    # untar if required
    files = os.listdir(crabdir + "/res")
    for file in files:
        if re.search("tgz", file):
            # os.listdir returns bare names, so rebuild the full path.
            tar = tarfile.open(os.path.join(crabdir, "res", file), "r:gz")
            tar.extractall(crabdir + "/res")
            tar.close()

# hadd all the files together
if (output_mode != "ROOTMODE"):
    hadd = Popen("hadd -f " + ntupdir + "/" + crabdir + ".root " + crabdir +
                 "/res/*.root",
                 shell=True)
    hadd.wait()
else:
    result = commands.getoutput("nsls " + input_root + "/ > " + crabdir +
                                "/list.txt")
    result = commands.getoutput("awk '{print \"rfio:" + input_root +
                                "/\" $1}' " + crabdir + "/list.txt > " +
                                crabdir + "/list2.txt")
    hadd = Popen("hadd -f " + crabdir + ".root @" + crabdir + "/list2.txt",
                 shell=True)
Example No. 13
    def uncompress_tgz(self, path):
        # "tarfiles" in the original was a typo for the tarfile module.
        tar = tarfile.open(path)
        tar.extractall(self.path)
        tar.close()
Example No. 14
from connector.settings import ELFINDER_ROOT, ELFINDER_URL, ELFINDER_THUMB
Example No. 15
import os
import tarfile
from typing import List, Tuple

import requests

# CORD19_BASE and SOURCE_DIR are defined elsewhere in this script.

CORD19_FILES: List[Tuple[str, bool]] = [
    ("comm_use_subset.tar.gz", True),
    ("biorxiv_medrxiv.tar.gz", True),
    ("noncomm_use_subset.tar.gz", True),
    ("custom_license.tar.gz", True),
    ("metadata.csv", True)]


# Borrowed from https://stackoverflow.com/questions/16694907/download-large-file-in-python-with-requests
def download_file(fname: str) -> None:
    with requests.get(CORD19_BASE + fname, stream=True) as r:
        r.raise_for_status()
        with open(os.path.join(SOURCE_DIR, fname), 'wb') as f:
            for chunk in r.iter_content(chunk_size=8192):
                if chunk:  # filter out keep-alive new chunks
                    f.write(chunk)


for file, download in CORD19_FILES:
    if download:
        print(f"Downloading: {file}")
        download_file(file)
    else:
        print(f"Skipped: {file}")


for file, _ in CORD19_FILES:
    if file.endswith(".tar.gz"):
        # Rebinding the module name to the archive, as the original did,
        # would break the second loop iteration; use a context manager.
        with tarfile.open(os.path.join(SOURCE_DIR, file), "r:gz") as tar:
            tar.extractall(SOURCE_DIR)
Example No. 16
	def create_metafile(self, datafile):
		info = collections.defaultdict(lambda: "")

		# Extract datafile in temporary directory and scan for dependencies.
		tmpdir = self.mktemp(directory=True)

		if self.payload_compression == "xz":
			tarfile = tar.InnerTarFileXz.open(datafile)
		else:
			tarfile = tar.InnerTarFile.open(datafile)

		tarfile.extractall(path=tmpdir)
		tarfile.close()

		# Run the dependency tracker.
		self.pkg.track_dependencies(self.builder, tmpdir)

		# Generic package information including Pakfire information.
		info.update({
			"pakfire_version" : PAKFIRE_VERSION,
			"uuid"            : self.pkg.uuid,
			"type"            : "binary",
		})

		# Include distribution information.
		info.update(self.pakfire.distro.info)
		info.update(self.pkg.info)

		# Update package information for string formatting.
		info.update({
			"groups"      : " ".join(self.pkg.groups),
			"prerequires" : "\n".join([PACKAGE_INFO_DEPENDENCY_LINE % d \
				for d in self.pkg.prerequires]),
			"requires"    : "\n".join([PACKAGE_INFO_DEPENDENCY_LINE % d \
				for d in self.pkg.requires]),
			"provides"    : "\n".join([PACKAGE_INFO_DEPENDENCY_LINE % d \
				for d in self.pkg.provides]),
			"conflicts"   : "\n".join([PACKAGE_INFO_DEPENDENCY_LINE % d \
				for d in self.pkg.conflicts]),
			"obsoletes"   : "\n".join([PACKAGE_INFO_DEPENDENCY_LINE % d \
				for d in self.pkg.obsoletes]),
			"recommends"  : "\n".join([PACKAGE_INFO_DEPENDENCY_LINE % d \
				for d in self.pkg.recommends]),
			"suggests"    : "\n".join([PACKAGE_INFO_DEPENDENCY_LINE % d \
				for d in self.pkg.suggests]),
		})

		# Format description.
		description = [PACKAGE_INFO_DESCRIPTION_LINE % l \
			for l in util.text_wrap(self.pkg.description, length=80)]
		info["description"] = "\n".join(description)

		# Build information.
		info.update({
			# Package is built right now.
			"build_time" : int(time.time()),
			"build_id"   : uuid.uuid4(),
		})

		# Installed size (equals size of the uncompressed tarball).
		info.update({
			"inst_size" : self.getsize(datafile),
		})

		metafile = self.mktemp()

		f = open(metafile, "w")
		f.write(PACKAGE_INFO % info)
		f.close()

		return metafile
Example No. 17
    print "Data must be stored on CASTOR ..."
else :
    print "Data must be stored on AFS ..."

if (output_mode!="ROOTMODE"):
    print "Getting files for "+crabdir+" and putting in "+ntupdir
else:
    print "Getting files for "+input_root+" and putting in "+ntupdir
    
if (output_mode!="ROOTMODE"):
    # untar if required
    files=os.listdir(crabdir+"/res")
    for file in files:
        if re.search("tgz", file):
            # os.listdir returns bare names, so rebuild the full path.
            tar = tarfile.open(os.path.join(crabdir, "res", file), "r:gz")
            tar.extractall(crabdir+"/res")
            tar.close()

# hadd all the files together
if (output_mode!="ROOTMODE"):
    hadd=Popen("hadd -f "+ntupdir+"/"+crabdir+".root "+crabdir+"/res/*.root", shell=True)
    hadd.wait()
else:
    result=commands.getoutput("nsls "+input_root+"/ > "+crabdir+"/list.txt")
    result=commands.getoutput("awk '{print \"rfio:"+input_root+"/\" $1}' "+crabdir+"/list.txt > "+crabdir+"/list2.txt");
    hadd=Popen("hadd -f "+crabdir+".root @"+crabdir+"/list2.txt", shell=True)
    hadd.wait()
    print ""
    print "Move "+crabdir+".root to "+output_root+"/"
    result=commands.getoutput("rfcp "+crabdir+".root "+output_root)
    result=commands.getoutput("mv "+crabdir+".root "+ntupdir+"/") 
    print "------------------------------------------------------------"
Example No. 18
import tarfile
import tempfile


def provided_members_archive_handler(filename):
    tar = tarfile.open(filename)
    # extractall must be called on the archive object, not the module;
    # passing the member list via `members` extracts every entry.
    tar.extractall(path=tempfile.mkdtemp(), members=tar.getmembers())
    tar.close()
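The members argument is also the usual hook for guarding extractall against path traversal ("tar slip"), since archive entries can carry absolute paths or ".." components. A minimal sketch of such a filter, assuming Python 3; the function and paths are illustrative, not from any example above:

import os
import tarfile

def safe_members(tar, dest):
    # Yield only members that resolve to a path inside dest.
    dest = os.path.realpath(dest)
    for member in tar.getmembers():
        target = os.path.realpath(os.path.join(dest, member.name))
        if target == dest or target.startswith(dest + os.sep):
            yield member

with tarfile.open("archive.tar.gz") as tar:
    tar.extractall(path="out", members=safe_members(tar, "out"))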