Example #1
    def write(self, file_name):
        if not self.data or not os.path.isdir(self.data):
            raise Exception('Must set data before building')

        gzfile = GzipFile(file_name, 'w')
        tar = TarFile(fileobj=gzfile, mode='w')

        buff = BytesIO(json.dumps(self.control).encode())
        info = TarInfo(name='./CONTROL')
        info.size = buff.getbuffer().nbytes
        tar.addfile(tarinfo=info, fileobj=buff)

        if self.init is not None:
            buff = BytesIO(self.init.encode())
            info = TarInfo(name='./INIT')
            info.size = buff.getbuffer().nbytes
            tar.addfile(tarinfo=info, fileobj=buff)

        data = BytesIO()
        datatar = TarFile(fileobj=data, mode='w')
        datatar.add(self.data, '/')
        datatar.close()
        data.seek(0)

        info = TarInfo(name='./DATA')
        info.size = data.getbuffer().nbytes
        tar.addfile(tarinfo=info, fileobj=data)

        tar.close()
        gzfile.close()
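
The in-memory member pattern above (a TarInfo whose size is set from a BytesIO buffer before addfile) also works standalone. A minimal sketch with a hypothetical payload:

import json
from io import BytesIO
from tarfile import TarFile, TarInfo

payload = BytesIO(json.dumps({"name": "demo"}).encode())  # hypothetical control data
with TarFile("demo.tar", mode="w") as tar:
    info = TarInfo(name="./CONTROL")
    # addfile reads exactly info.size bytes, so the size must be set first
    info.size = payload.getbuffer().nbytes
    tar.addfile(tarinfo=info, fileobj=payload)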
Example #2
 def decompressFile(self, directory, fileName):
     filePath = path.join(directory, fileName)
     if filePath.endswith(".zip"):
         with ZipFile(filePath, "r") as zipObj:
             zipObj.extractall(path=directory)
     elif filePath.endswith("tar.gz"):
         # the TarFile constructor only accepts plain modes ("r", "a", "w", "x");
         # TarFile.open is needed for compressed modes such as "r:gz"
         with TarFile.open(filePath, "r:gz") as tar:
             tar.extractall(path=directory)
     elif filePath.endswith("tar"):
         with TarFile.open(filePath, "r:") as tar:
             tar.extractall(path=directory)
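
A note on read modes: tarfile can also sniff the compression itself, which would collapse the two tar branches above into one. A minimal sketch, assuming a hypothetical archive path:

import tarfile

archive_path = "archive.tar.gz"  # hypothetical; a plain .tar works the same way
# mode "r" is an alias for "r:*", which auto-detects gzip/bz2/xz compression
with tarfile.open(archive_path, "r") as tar:
    tar.extractall(path="output_dir")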
Example #3
    def __init__(self, filename):
        MultiThreadDump.__init__(self)

        if filename[-2:] == 'gz':
            # compressed modes require TarFile.open rather than the constructor
            tar_file = TarFile.open(filename, 'r:gz')
        else:
            tar_file = TarFile(filename, 'r')

        for member in tar_file.getmembers():
            if member.isfile():
                tar_member = tar_file.extractfile(member)
                child_name = tar_member.name

                lines = tar_member.readlines()
                self.thread_dumps[child_name] = ThreadDump(lines)
Example #4
    def _createScriptExtensionTarArchive(self, sourceDirectory,
                                         scriptExtensionName):
        """ Creates a TAR archive for the given script extension. """

        tarFileName = scriptExtensionName + ".tar"
        tarFilePath = os.path.join(self.__buildConfiguration.distDirectory,
                                   tarFileName)
        tarFile = TarFile(tarFilePath, "w")

        for inputDirectory in ["lib", "src"]:
            baseDirectory = os.path.join(sourceDirectory, inputDirectory)
            if os.path.exists(baseDirectory):
                for packageDirName in os.listdir(baseDirectory):
                    pythonModulesToAddList = list()
                    packageDirectory = os.path.join(baseDirectory,
                                                    packageDirName)
                    if os.path.exists(packageDirectory):
                        for walkTuple in os.walk(packageDirectory):
                            directoryPath = walkTuple[0]
                            fileNameList = walkTuple[2]
                            for fileName in fileNameList:
                                if fileName.endswith(
                                        ".py") or fileName == "SCRIPTS":
                                    filePath = os.path.join(
                                        directoryPath, fileName)
                                    pythonModulesToAddList.append(filePath)

                    for pythonModule in pythonModulesToAddList:
                        startPosition = pythonModule.find(baseDirectory) + len(
                            baseDirectory) + 1
                        archiveName = pythonModule[startPosition:]
                        tarFile.add(pythonModule, archiveName)
        tarFile.close()
        if self.verbose:
            print("Created tar archive '%s'." % tarFilePath)
Example #5
def step_impl(context):
    client = docker.from_env()
    csvlint = client.containers.create(
        'gsscogs/csvlint',
        command=f'csvlint -s /tmp/{context.schema_filename}'
    )
    archive = BytesIO()
    # encode first so the TarInfo sizes reflect byte counts rather than
    # text-stream offsets (tell() on a StringIO is not a byte length)
    schema_bytes = context.schema_io.getvalue().encode('utf-8')
    csv_bytes = context.csv_io.getvalue().encode('utf-8')
    with TarFile(fileobj=archive, mode='w') as t:
        tis = TarInfo(str(context.schema_filename))
        tis.size = len(schema_bytes)
        tis.mtime = time.time()
        t.addfile(tis, BytesIO(schema_bytes))
        tic = TarInfo(str(context.csv_filename))
        tic.size = len(csv_bytes)
        tic.mtime = time.time()
        t.addfile(tic, BytesIO(csv_bytes))
    archive.seek(0)
    csvlint.put_archive('/tmp/', archive)
    csvlint.start()
    response = csvlint.wait()
    sys.stdout.write(csvlint.logs().decode('utf-8'))
    assert_equal(response['StatusCode'], 0)
Example #6
def pack_archive(request_staging_dir, archive, pr):
    """Create a tar file containing the files that are in the
       MigrationArchive object"""

    # tar_file_path and migration_paths are not defined in this excerpt; they
    # are assumed to be derived from request_staging_dir, the archive and its
    # MigrationFiles in the surrounding (elided) code
    # if the file exists then delete it!
    try:
        os.unlink(tar_file_path)
    except FileNotFoundError:
        pass
    # create the tar file
    tar_file = TarFile(tar_file_path, mode='w')
    logging.debug("Created TarFile archive file: {}".format(tar_file_path))

    # get the MigrationFiles belonging to this archive
    migration_files = archive.migrationfile_set.all()
    # loop over the (path, arcname) pairs built from those MigrationFiles
    for mp in migration_paths:
        # don't add if it's a directory - files under the directory will
        # be added
        if not (os.path.isdir(mp[0])):
            tar_file.add(mp[0], arcname=mp[1])
            logging.debug(
                ("    Adding file to TarFile archive: {}").format(mp[0]))

    tar_file.close()

    ### end of parallelisation

    return tar_file_path
Example #7
 def decompress(file, password=None):
     if is_zipfile(file):
         try:
             # ZipFile handles each member's compression itself; extractall
             # accepts an optional password for encrypted archives
             with ZipFile(file) as zf:
                 zf.extractall(pwd=password.encode() if password else None)
             print("The file was extracted successfully!")
             print("You can open the desired file with the Ctrl-C shortcut or Menu->File->Open.")
         except RuntimeError:
             print("This password does not match.")
     elif is_tarfile(file):
         # tar archives are not password protected; TarFile.open auto-detects
         # the compression in use (extractall has no pwd parameter)
         with TarFile.open(file) as tf:
             tf.extractall()
         print("The file was extracted successfully!")
         print("You can open the desired file with the Ctrl-C shortcut or Menu->File->Open.")
     else:
         print("This file is neither a tar file nor a zip file.")
Example #8
    def open_zip(self, archive_path):

        if self.archive:
            self.archive.close()

        self.archive_path = archive_path

        filename, file_extension = os.path.splitext(archive_path)

        if file_extension == ".zip" or file_extension == ".cbz":
            self.archive = ZipFile(archive_path, 'r')
            self.archive_type = "zip"
            namelist = self.archive.namelist()
        elif file_extension == ".tar" or file_extension == ".cbt":
            self.archive = TarFile(archive_path, 'r')
            self.archive_type = "tar"
            namelist = self.archive.getnames()
        else:
            raise ValueError("archive not supported")

        # sort the files naturally by any numbers found, excluding directories
        self.listfile = sorted([x for x in namelist if not x.endswith('/')],
                               key=lambda name: alphanum_key(name))

        self.archive_length = len(self.listfile)
        self.listfile_index = 0
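
alphanum_key is defined elsewhere; a common natural-sort implementation, which this code presumably relies on, splits digit runs so that "page10" sorts after "page2":

import re

def alphanum_key(name):
    # split "page10.png" into ['page', 10, '.png'] so digit runs compare numerically
    return [int(part) if part.isdigit() else part.lower()
            for part in re.split(r'(\d+)', name)]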
Example #9
def getDataFromTarfile(tarfile):
    tf = TarFile(tarfile)
    members = [m.name for m in tf.getmembers()]
    # extract only if any member is missing on disk
    if not all(os.path.exists(x) for x in members):
        tf.extractall()
    tf.close()
    return members
Example #10
    def test_rawtar(self):
        """Create a normal tar archive and restore it"""
        raw = BytesIO()
        tarfile = TarFile(mode='w', fileobj=raw)

        testdata = (rand_str(20) * 5000).encode()  # BytesIO and md5 need bytes

        inf = TarInfo("simpletar")
        fileraw = BytesIO()
        fileraw.write(testdata)
        inf.size = len(testdata)
        fileraw.seek(0)

        tarfile.addfile(inf, fileobj=fileraw)
        tarfile.close()

        raw.seek(0)
        data = raw.read()

        cnt = rand_str(20)
        ret = requests.put(self.make_uri("restore", container=cnt), data=data)

        self.assertEqual(ret.status_code, 201)
        meta, stream = self.conn.object_fetch(self.account, cnt, "simpletar")
        self.assertEqual(
            md5(b"".join(stream)).hexdigest(),
            md5(testdata).hexdigest())
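
rand_str is an external helper; a plausible stand-in, hypothetical but matching how it is used above, is:

import random
import string

def rand_str(length):
    # hypothetical helper: a random ASCII string of the given length
    return ''.join(random.choices(string.ascii_lowercase + string.digits, k=length))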
Example #11
    def repos_install(self, mess, args):
        """ install a plugin repository from the given source or a known public repo (see !repos to find those).
        for example from a known repo: !install err-codebot
        for example a git url: [email protected]:gbin/plugin.git
        or a URL to a tar.gz archive: http://www.gootz.net/plugin-latest.tar.gz
        """
        if not args.strip():
            return "You should pass a URL or a known repo name as an argument"
        if args in KNOWN_PUBLIC_REPOS:
            args = KNOWN_PUBLIC_REPOS[args][0]  # replace it by the url
        git_path = which('git')

        if not git_path:
            return 'git command not found: you need git installed on your system to be able to install git-based plugins.'

        if args.endswith('tar.gz'):
            # stream mode "r|gz" reads the gzipped tar straight from the
            # non-seekable HTTP response
            tar = TarFile.open(fileobj=urlopen(args), mode='r|gz')
            tar.extractall(path=self.plugin_dir)
            human_name = args.split('/')[-1][:-len('.tar.gz')]
        else:
            human_name = human_name_for_git_url(args)
            p = subprocess.Popen([git_path, 'clone', args, human_name], cwd=self.plugin_dir, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            feedback = p.stdout.read().decode('utf-8')
            error_feedback = p.stderr.read().decode('utf-8')
            if p.wait():
                return "Could not load this plugin:\n%s\n---\n%s" % (feedback, error_feedback)
        self.add_plugin_repo(human_name, args)
        errors = self.update_dynamic_plugins()
        if errors:
            self.send(mess.getFrom(), 'Some plugins are generating errors:\n' + '\n'.join(errors), message_type=mess.getType())
        else:
            self.send(mess.getFrom(), "A new plugin repository named %s has been installed correctly from %s. Refreshing the plugins commands..." % (human_name, args), message_type=mess.getType())
        self.activate_non_started_plugins()
        return "Plugin reload done."
Example #12
    def _prepare_dataset(cls):
        tar = TarFile(cls.dataset_tar_local)
        tar.extractall(cls.dataset_dir)

        # Get rid of non-image files
        for f in os.listdir(cls.dataset_dir):
            if f[-3:] != 'jpg':
                os.remove(os.path.join(cls.dataset_dir, f))

        images = {
            'up': [],
            'down': os.listdir(cls.dataset_dir),
            'left': [],
            'right': []
        }

        for rot_deg, orientation in zip((90, 180, 270),
                                        ('left', 'up', 'right')):
            for img_file in images['down']:
                img = cv2.imread(os.path.join(cls.dataset_dir, img_file))
                new_img_file = orientation + '_' + img_file
                cv2.imwrite(os.path.join(cls.dataset_dir, new_img_file),
                            rotate_image(img, rot_deg))
                images[orientation].append(new_img_file)

        return images
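
rotate_image is also defined elsewhere. Assuming it rotates by a multiple of 90 degrees (the mapping of degrees to direction below is an assumption), a minimal OpenCV sketch:

import cv2

def rotate_image(img, deg):
    # hypothetical helper covering only the 90/180/270 cases used above
    codes = {90: cv2.ROTATE_90_CLOCKWISE,
             180: cv2.ROTATE_180,
             270: cv2.ROTATE_90_COUNTERCLOCKWISE}
    return cv2.rotate(img, codes[deg])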
Example #13
    def _get_hash_tar(self, image_id, indexes, top_most_layer):
        target_file = HashedBytesIo()

        with TarFile(mode='w', fileobj=target_file) as tar_file:
            layers_dict = inspectlayers(self._dc, image_id)

            if isinstance(indexes, slice):
                layers = layers_dict[':layers'][indexes]
            else:
                if indexes:
                    self.assertLess(top_most_layer,
                                    len(indexes),
                                    msg='image: {}; indexes: {}'.format(
                                        image_id, indexes))
                    self.assertGreaterEqual(
                        top_most_layer,
                        -len(indexes),
                        msg='image: {}; indexes: {}'.format(image_id, indexes))
                    self.assertEqual(indexes[top_most_layer],
                                     min(indexes),
                                     msg='image: {}; indexes: {}'.format(
                                         image_id, indexes))

                layers = [layers_dict[':layers'][i] for i in indexes]

            extractlayers(self._dc, layers, tar_file, top_most_layer)

        target_file.seek(0)

        return target_file
Example #14
def extract_files_from(archive_path, output_path=".", open_mode='r'):
    if not os.path.exists(archive_path):
        raise FileNotFoundError(archive_path)

    if not tarfile.is_tarfile(archive_path):
        raise tarfile.TarError

    with TarFile(archive_path, open_mode) as wiki_archive:
        temp_path = output_path + "/temp"
        all_image_files = wiki_archive.getmembers()
        all_image_files_output_paths = [
            output_path + "/" + image.path.split('/')[-1]
            for image in all_image_files if not image.isdir()
        ]
        all_image_files_temp_paths = [
            temp_path + "/" + image.path for image in all_image_files
            if not image.isdir()
        ]

        print("Extracting " + archive_path)

        wiki_archive.extractall(temp_path)

        for temp_file_path, dest_path in zip(all_image_files_temp_paths,
                                             all_image_files_output_paths):
            try:
                if 'wiki.mat' in temp_file_path:
                    shutil.move(temp_file_path,
                                Constants.METADATA_OUTPUT_PATH + 'wiki.mat')
                else:
                    shutil.move(temp_file_path, dest_path)
            except FileNotFoundError:
                continue

        shutil.rmtree(temp_path)
Example #15
    def install_repo(self, repo):
        if repo in KNOWN_PUBLIC_REPOS:
            repo = KNOWN_PUBLIC_REPOS[repo]['path']  # replace it by the url
        git_path = which('git')

        if not git_path:
            return ('git command not found: You need to have git installed on '
                    'your system to be able to install git based plugins.', )

        # TODO: Update download path of plugin.
        if repo.endswith('tar.gz'):
            # stream mode "r|gz" reads the gzipped tar straight from the
            # non-seekable HTTP response
            tar = TarFile.open(fileobj=urlopen(repo), mode='r|gz')
            tar.extractall(path=self.plugin_dir)
            s = repo.split(':')[-1].split('/')[-2:]
            # rstrip('.tar.gz') strips characters, not the suffix; slice instead
            human_name = '/'.join(s)[:-len('.tar.gz')]
        else:
            human_name = human_name_for_git_url(repo)
            p = subprocess.Popen([git_path, 'clone', repo, human_name],
                                 cwd=self.plugin_dir,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE)
            feedback = p.stdout.read().decode('utf-8')
            error_feedback = p.stderr.read().decode('utf-8')
            if p.wait():
                return "Could not load this plugin: \n\n%s\n\n---\n\n%s" % (
                    feedback, error_feedback),
        self.add_plugin_repo(human_name, repo)
        return self.update_dynamic_plugins()
Example #16
File: csvw.py Project: GSS-Cogs/gss-utils
def run_csvlint(context):
    client = docker.from_env()
    csvlint = client.containers.create(
        'gsscogs/csvlint',
        command=f'csvlint -s /tmp/{context.metadata_filename}')
    archive = BytesIO()
    # encode first so the TarInfo sizes reflect byte counts rather than
    # text-stream offsets (tell() on a StringIO is not a byte length)
    context.metadata_io.seek(0)
    metadata_bytes = context.metadata_io.read().encode('utf-8')
    context.csv_io.seek(0)
    csv_bytes = context.csv_io.read().encode('utf-8')
    with TarFile(fileobj=archive, mode='w') as t:
        tis = TarInfo(str(context.metadata_filename))
        tis.size = len(metadata_bytes)
        tis.mtime = time.time()
        t.addfile(tis, BytesIO(metadata_bytes))
        tic = TarInfo(str(context.csv_filename))
        tic.size = len(csv_bytes)
        tic.mtime = time.time()
        t.addfile(tic, BytesIO(csv_bytes))
        if hasattr(context, 'codelists'):
            t.add(Path('features') / 'fixtures' / context.codelists,
                  arcname=context.codelists)
    archive.seek(0)
    csvlint.put_archive('/tmp/', archive)
    csvlint.start()
    response = csvlint.wait()
    sys.stdout.write(csvlint.logs().decode('utf-8'))
    return (response, csvlint.logs().decode('utf-8'))
Example #17
 def _package_model(self, model, key, tmpfn, **kwargs):
     # package the model using corresponding save or save_model method
     model_path = os.path.join(self.model_store.tmppath, key + '.mlflow')
     if isinstance(model, mlflow.models.Model):
         model.save(model, tmpfn)
     elif isinstance(model, mlflow.pyfunc.PythonModel):
         mlflow.pyfunc.save_model(model_path,
                                  python_model=model,
                                  artifacts=kwargs.get('artifacts'))
     elif isinstance(model, str) and (self._is_path(model) or
                                      model.startswith(self.MLFLOW_PREFIX)):
         # a mlflow model local storage
         # https://www.mlflow.org/docs/latest/models.html#storage-format
         if model.startswith(self.MLFLOW_PREFIX):
             model = model.replace(self.MLFLOW_PREFIX, '')
         if model.lower().endswith('mlmodel'):
             model_path = os.path.dirname(model)
         else:
             model_path = model
     else:
         # some supported model flavor perhaps?
         flavor = self._infer_model_flavor(model)
         flavor.save_model(model, model_path, **kwargs)
     with TarFile(tmpfn, mode='w') as tarf:
         tarf.add(model_path, recursive=True)
     return tmpfn
Example #18
def read_tar_tags(tar_path, show_matching_files=False):
    """
    Read the dicom from a tar file
    :param tar_path:
    :return: dictionary of key, value pairs
    >>> read_tar_tags(os.path.join(_res_dir,'dicom.tar'),True)
    ['dicom/10-060.dcm', 'dicom/subdir/subdir/1-051.dcm', 'dicom/subdir/subdir/10-060.dcm']
    >>> all_tags=read_tar_tags(os.path.join(_res_dir,'dicom.tar'))
    >>> len(all_tags)
    179
    >>> all_tags['path']
    '/Users/mader/Dropbox/Informatics/pyqae-master/test/resources/dicom.tar#dicom/10-060.dcm'
    >>> all_tags['Columns']
    512
    """
    with TarFile(tar_path, 'r') as c_tar:
        all_info = c_tar.getmembers()
        all_files = [
            tar_info for tar_info in all_info
            if not tar_info.isdir() and (tar_info.name.endswith('.dcm') and (
                not os.path.basename(tar_info.name).startswith('.')))
        ]
        if show_matching_files:
            return [cfile.name for cfile in all_files]
        if len(all_files) > 0:
            dcm_data = read_dicom_tags(c_tar.extractfile(all_files[0]))
            f_path = '{}#{}'.format(tar_path, all_files[0].name)
        else:
            warn("Must have at least one file", RuntimeWarning)
            dcm_data = None
            f_path = tar_path
        dcm_list = list(dcm_data.items()) if dcm_data is not None else []
        return dict(dcm_list + [('path', f_path)])
Example #19
def _get_checkpoint_dir():
    from appdirs import AppDirs
    dirs = AppDirs(appname="nimare", appauthor="neurostuff", version="1.0")
    checkpoint_dir = os.path.join(dirs.user_data_dir, "ohbm2018_model")
    if not os.path.exists(checkpoint_dir):
        LGR.info('Downloading the model (this is a one-off operation)...')
        url = "https://zenodo.org/record/1257721/files/ohbm2018_model.tar.xz?download=1"
        # Streaming, so we can iterate over the response.
        r = requests.get(url, stream=True)
        f = BytesIO()

        # Total size in bytes.
        total_size = int(r.headers.get('content-length', 0))
        block_size = 1024 * 1024
        wrote = 0
        for data in tqdm(r.iter_content(block_size), total=math.ceil(total_size / block_size),
                         unit='MB', unit_scale=True):
            wrote = wrote + len(data)
            f.write(data)
        if total_size != 0 and wrote != total_size:
            raise Exception("Download interrupted")

        f.seek(0)
        LGR.info('Uncompressing the model to {}...'.format(checkpoint_dir))
        tarfile = TarFile(fileobj=LZMAFile(f), mode="r")
        tarfile.extractall(dirs.user_data_dir)
    return checkpoint_dir
Example #20
def analyze():
    try:
        fn = 'temp/{}.tar'.format(
            md5(request.remote_addr.encode()).hexdigest())

        if request.method == 'POST':
            fp = request.files['file']
            fp.save(fn)

            if not is_tarfile(fn):
                return '<script>alert("Uploaded file is not \'tar\' file.");history.back(-1);</script>'

            tf = TarFile(fn)
            tf.extractall(fn.split('.')[0])
            bd1 = fn.split('/')[1].split('.')[0]
            bd2 = fn.split('/')[1]

            return render_template('analyze',
                                   path=bd1,
                                   fn=bd1,
                                   files=tf.getnames())

    except Exception as e:
        return response('Error', 500)

    finally:
        try:
            os.remove(fn)
        except OSError:
            return response('Error', 500)
Example #21
def tarball_images(
    images: List[Image.Image],
    *,
    name: str = None,
    animated: bool = False,
    format: str = "png",
    extras: List[Tuple[str, BytesIO]],
) -> BytesIO:
    fp = BytesIO()
    tar = TarFile(mode="w", fileobj=fp)

    for idx, image in enumerate(images):
        f = BytesIO()
        if animated:
            image[0].save(f, format, append_images=image[1:], save_all=True, loop=0)
        else:
            image.save(f, format)

        f.seek(0)
        if name:
            info = TarInfo(f"{name}_{idx}.{format}")
        else:
            info = TarInfo(f"{idx}.{format}")
        info.size = len(f.getbuffer())
        tar.addfile(info, fileobj=f)

    for extra in extras:
        info = TarInfo(extra[0] or "_.txt")
        info.size = len(extra[1].getbuffer())
        tar.addfile(info, fileobj=extra[1])

    fp.seek(0)
    return fp
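
A usage sketch for the function above, with hypothetical images (extras is a required keyword argument, so an empty list is passed):

from PIL import Image

frames = [Image.new("RGB", (64, 64), color) for color in ("red", "green", "blue")]
archive = tarball_images(frames, name="frame", extras=[])
with open("frames.tar", "wb") as out:
    out.write(archive.read())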
Example #22
def create_tar(filepaths: Iterable[str], target_file: str):
    """Create tar file using given filename containing the directory to_compress
	and all of its files.
	"""
    with TarFile(target_file, mode="w") as tar_file:
        for filepath in filepaths:
            tar_file.add(filepath)
Example #23
 def test_tar_experiment_download(self):
     self.assertTrue(all(df.verified for df in self.dfs))
     response = self.client.get(reverse(
         'tardis.tardis_portal.download.streaming_download_experiment',
         args=(self.exp.id, 'tar')))
     with NamedTemporaryFile('wb') as tarfile:
         for c in response.streaming_content:
             tarfile.write(c)
         tarfile.flush()
         self.assertEqual(int(response['Content-Length']),
                          os.stat(tarfile.name).st_size)
         tf = TarFile(tarfile.name)
         if settings.EXP_SPACES_TO_UNDERSCORES:
             exp_title = self.exp.title.replace(' ', '_')
         else:
             exp_title = self.exp.title
         exp_title = urllib.parse.quote(exp_title,
                           safe=settings.SAFE_FILESYSTEM_CHARACTERS)
         for df in self.dfs:
             full_path = os.path.join(
                 exp_title,
                 urllib.parse.quote(self.ds.description,
                       safe=settings.SAFE_FILESYSTEM_CHARACTERS),
                 df.directory, df.filename)
             # docker has a file path limit of ~240 characters
             if os.environ.get('DOCKER_BUILD', 'false') != 'true':
                 tf.extract(full_path, '/tmp')
                 self.assertEqual(
                     os.stat(os.path.join('/tmp', full_path)).st_size,
                     int(df.size))
Example #24
  def detect( cls, target_file, magic_type ):
    filename = os.path.basename( target_file.name )

    if not filename.endswith( '.tar.gz'):
      return None

    if not magic_type.startswith( 'gzip compressed data' ):
      return None

    ( filename, _, _ ) = filename.rsplit( '.', 2 )

    gzfile = GzipFile( fileobj=target_file.file, mode='r' )
    tarfile = TarFile( fileobj=gzfile, mode='r' )

    try:
      manifest = json.loads( tarfile.extractfile( 'MANIFEST.json' ).read() )
    except ( KeyError, TypeError, json.JSONDecodeError ):
      return None

    tarfile.close()
    gzfile.close()

    if 'collection_info' not in manifest:
      return None

    try:
      ( namespace, name, version ) = filename.split( '-' )
    except ValueError:
      raise ValueError( 'Unrecognized Galaxy file name Format' )

    return cls( filename, '{0}-{1}'.format( namespace, name ), 'all', version, 'galaxy' )
Example #25
def compress(packagename):
    tar = BytesIO()
    with TarFile(fileobj=tar, mode='w') as tf:
        tf.add(packagename, packagename)
    #TODO: This was originally gzipped, but the gzipped value seems to change on repeated compressions, breaking hashing.
    # Looks like the issue is a timestamp that can be overridden with a parameter, but let's leave it uncompressed for now.
    return tar.getvalue()
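
On the TODO above: gzip stores a timestamp in its header, which is what changes between runs. Pinning mtime makes the gzipped output deterministic, as long as the packaged files themselves are unchanged (tar records their mtimes). A sketch of that variant:

import gzip
from io import BytesIO
from tarfile import TarFile

def compress_deterministic(packagename):
    tar = BytesIO()
    with TarFile(fileobj=tar, mode='w') as tf:
        tf.add(packagename, packagename)
    out = BytesIO()
    # mtime=0 pins the gzip header timestamp, so identical input hashes identically
    with gzip.GzipFile(fileobj=out, mode='wb', mtime=0) as gz:
        gz.write(tar.getvalue())
    return out.getvalue()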
Example #26
def pack_archives(archive_list, q):
    """Pack the files in the archive_list into tar files"""
    for archive_info in archive_list:
        # first element is tarfile path / archive location
        tar_file_path = archive_info[0]
        try:
            os.unlink(tar_file_path)
        except FileNotFoundError:
            pass
        # create the tar file
        tar_file = TarFile(tar_file_path, mode='w')
        logging.debug(
            ("Created TarFile archive file: {}").format(tar_file_path))

        # second element contains the MigrationFiles for this archive
        migration_paths = archive_info[1]
        # loop over the MigrationFiles in the MigrationArchive
        for mp in migration_paths:
            # don't add if it's a directory - files under the directory will
            # be added
            if not (os.path.isdir(mp[0])):
                tar_file.add(mp[0], arcname=mp[1])
                logging.debug(
                    ("    Adding file to TarFile archive: {}").format(mp[0]))
        tar_file.close()
        # calculate digest (element 2), digest format (element 3)
        # and size (element 4) and add to archive
        archive_info[2] = calculate_digest_adler32(tar_file_path)
        archive_info[3] = "ADLER32"
        archive_info[4] = os.stat(tar_file_path).st_size

    q.put(archive_list)
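
calculate_digest_adler32 is external to this snippet. Assuming its contract is simply the Adler-32 of the file's bytes, a streaming sketch on top of zlib:

import zlib

def calculate_digest_adler32(path, chunk_size=1 << 20):
    checksum = zlib.adler32(b'')  # the initial Adler-32 value (1)
    with open(path, 'rb') as f:
        for chunk in iter(lambda: f.read(chunk_size), b''):
            checksum = zlib.adler32(chunk, checksum)
    return checksum & 0xffffffff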
Example #27
  def detect( cls, target_file, magic_type ):
    filename = os.path.basename( target_file.name )

    if not filename.endswith( '.tar.gz'):
      return None

    if not magic_type.startswith( 'gzip compressed data' ):
      return None

    ( filename, _, _ ) = filename.rsplit( '.', 2 )

    try:
      ( package, version ) = filename.rsplit( '-', 1 )  # ie: cinp-0.9.2.tar.gz
    except ValueError:
      return None

    gzfile = GzipFile( fileobj=target_file.file, mode='r' )
    tarfile = TarFile( fileobj=gzfile, mode='r' )

    try:
      info = tarfile.extractfile( '{0}/PKG-INFO'.format( filename ) )
    except KeyError:
      return None

    tarfile.close()
    gzfile.close()

    if info is None:
      return None

    return cls( filename, package, 'all', version, 'python' )
Example #28
def run_ics(group: str, turtle: bytes, extra_files: List[str] = (), extra_data: List[str] = ()):
    client = docker.from_env()
    files = ['data.ttl']
    if len(extra_files) > 0:
        files.extend(extra_files)
    tests = client.containers.create(
        'gsscogs/gdp-sparql-tests',
        command=f'''sparql-test-runner -t /usr/local/tests/{group} -p dsgraph='<urn:x-arq:DefaultGraph>' '''
                f'''{" ".join('/tmp/' + f for f in files)}'''
    )
    archive = BytesIO()
    with TarFile(fileobj=archive, mode='w') as t:
        ttl = TarInfo('data.ttl')
        ttl.size = len(turtle)
        ttl.mtime = time.time()
        t.addfile(ttl, BytesIO(turtle))
        for filename in extra_files:
            actual_path = Path('features') / 'fixtures' / 'extra' / filename
            with actual_path.open('rb') as actual_file:
                extra_file = t.gettarinfo(arcname=filename, fileobj=actual_file)
                t.addfile(extra_file, actual_file)
        for i, add_turtle in enumerate(extra_data):
            filename = f'extra_{i}.ttl'
            add_ttl = TarInfo(filename)
            add_ttl.size = len(add_turtle)
            add_ttl.mtime = time.time()
            t.addfile(add_ttl, BytesIO(add_turtle.encode('utf-8')))
            files.append(filename)
    archive.seek(0)
    tests.put_archive('/tmp/', archive)
    tests.start()
    response = tests.wait()
    sys.stdout.write(tests.logs().decode('utf-8'))
    return response['StatusCode']
Example #29
def copy_to_container(container: "Container", source_path: str,
                      target_path: str) -> None:
    """
    Copy a file into a Docker container

    :param container: Container object
    :param source_path: Source file path
    :param target_path: Target file path (in the container)
    :return:
    """
    # https://github.com/docker/docker-py/issues/1771
    with open(source_path, "rb") as f:
        data = f.read()

    tarinfo = TarInfo(name=os.path.basename(target_path))
    tarinfo.size = len(data)
    tarinfo.mtime = int(time.time())

    stream = BytesIO()
    tar = TarFile(fileobj=stream, mode="w")
    tar.addfile(tarinfo, BytesIO(data))
    tar.close()

    stream.seek(0)
    container.put_archive(path=os.path.dirname(target_path),
                          data=stream.read())
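
A usage sketch (the container name and paths are hypothetical):

import docker

client = docker.from_env()
container = client.containers.get("my-app")  # hypothetical container name
copy_to_container(container, "local.conf", "/etc/app/local.conf")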
Example #30
 def create_archive(self):
     (handle, path) = mkstemp(dir=self.temp_dir)
     os.close(handle)
     archive = TarFile(path, mode='w')
     archive.add(os.path.join(_common.RSRC, 'full.mp3'), 'full.mp3')
     archive.close()
     return path