Example #1
def _factory(version, factory):
    for nick1, nick2, name, vers, package in factories:
        if version == vers and (factory == nick1 or factory == nick2):
            print 'downloading factory archive:'
            chunk_read(package, urlopen('%sarchives/%s' % (url, package)), report_hook=chunk_report)
            dir_name = '%s-%s' % (nick1, version)
            rmtree(dir_name, True)
            with taropen(package, 'r:gz') as tar:
                tar.extractall()
            img_name = '%s/image-%s-%s.zip' % (dir_name, nick1, version)
            print 'creating factory image:'
            with zipopen(img_name, 'w') as img:
                for f in ('system.img', 'userdata.img', 'android-info.txt', 'cache.img', 'recovery.img', 'boot.img'):
                    try:
                        img.write('out/target/product/%s/%s' % (nick2, f), f)
                    except OSError:
                        pass
            with taropen(package, 'w:gz') as tar:
                tar.add(dir_name)
            rmtree(dir_name, True)
            print 'factory image: %s' % package
            break
    else:
        print 'no factory available for the version "%s" and the device "%s"' % (version, factory)
        exit(1)
    exit(0)
Example #2
    def stream(self, fileobj, callback=None, sleeptime=5):
        """
        Stream the bundle to the fileobj.

        The fileobj should be a file-like object opened in 'wb' mode.
        If a callback is given, it is periodically passed the percentage
        of the bundle streamed so far as its first argument.
        """
        notifythread = None
        if callable(callback):
            self._save_total_size()
            notifythread = self._setup_notify_thread(callback, sleeptime)

        tarfile = taropen(None, 'w|', fileobj)
        for file_data in self.file_data:
            tarinfo, fileobj = self._tarinfo_from_file_data(file_data)
            tarfile.addfile(tarinfo, fileobj)
            self.md_obj.append(
                self._build_file_info(file_data, fileobj.hashdigest()))
        md_txt = metadata_encode(self.md_obj)
        md_txt = md_txt if PY2 else bytes(md_txt, 'UTF-8')
        # tarfile needs a byte stream; StringIO(bytes) raises TypeError on
        # Python 3, so fall back to BytesIO there (assumed to be imported)
        md_fd = StringIO(md_txt) if PY2 else BytesIO(md_txt)
        md_tinfo = TarInfo('metadata.txt')
        md_tinfo.size = len(md_txt)
        tarfile.addfile(md_tinfo, md_fd)
        tarfile.close()
        self._complete = True

        if callable(callback):
            notifythread.join()
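A minimal usage sketch for the stream() method above. Everything here is hypothetical: `bundle` stands for an instance of the class this method belongs to, and the file name and callback are illustrative.

def report_progress(percent):
    # invoked periodically by the notify thread set up in stream()
    print('bundle is %d%% streamed' % percent)

with open('bundle.tar', 'wb') as out:  # 'w|' writes an uncompressed tar stream
    bundle.stream(out, callback=report_progress, sleeptime=2)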
Example #3
def get_module_meta_path(module_description):
    """Returns the finder to be appended to sys.meta_path
    module_description is a tuple of 2 elements:
        format: either 'zip', 'tar', 'tar:gz', 'tar:bz' or a string to be used as module name
        content: a base64 encoded string of a zip archive, a tar(gz/bz2) archive or a plain python module
    """
    raw_format = module_description[0].split(':')
    if raw_format[0] in ('zip', 'tar'):
        f = BytesIO()
        f.write(decodestring(module_description[1]))
        f.seek(0)
        if raw_format[0] == 'zip':
            zipf = PyZipFile(f)
            module_dict = dict((splitext(z.filename)[0].replace('/', '.'),
                                zipf.open(z.filename).read())
                               for z in zipf.infolist()
                               if splitext(z.filename)[1] == ".py")
        elif raw_format[0] == 'tar':
            compression = raw_format[1] if len(raw_format) > 1 else ''
            tarf = taropen(fileobj=f, mode="r:" + compression)
            module_dict = dict((splitext(t.name)[0].replace('/', '.'),
                                tarf.extractfile(t.name).read())
                               for t in tarf.getmembers()
                               if splitext(t.name)[1] == ".py")
    else:
        module_dict = {
            module_description[0]: decodestring(module_description[1])
        }
    return Finder(module_dict)
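A hedged usage sketch for get_module_meta_path(). The module name and source are invented, and Finder is assumed to implement the import finder protocol as the docstring implies.

import sys
from base64 import encodebytes  # modern name for the deprecated encodestring

source = b"GREETING = 'hello'\n"
# plain-module form: first element is the module name, second the base64 source
sys.meta_path.append(get_module_meta_path(('mymodule', encodebytes(source))))

import mymodule  # resolved through the Finder appended above
print(mymodule.GREETING)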
Example #4
def download_and_untar(url, tarName, dirName):
    """
    **Description**
        open a url, expecting a tar file and downloads it
        to 'tarName'. Then untar the file inside 'dirName'

    **Parameters**
        url:
            valid URL link
        tarName:
            name of the tar file on the local machine to
            store the archive (full path)
        dirName:
            name of the directory into which one wants to
            untar the archive (full path)

    **Returns**
        nothing
    """
    #-- download part
    page = urlopen(url)
    tar_file = open(tarName, "wb")
    # size of the download unit (here 2**15 = 32768 Bytes)
    block_size = 32768
    dl_size = 0
    file_size = -1
    try:
        file_size = int(page.info()['content-length'])
    except:
        print "could not determine size of tarball so"
        print "no progress bar  on download displayed"
    if file_size > 0:
        while True:
            Buffer = page.read(block_size)
            if not Buffer:
                break
            dl_size += len(Buffer)  # last chunk may be shorter than block_size
            tar_file.write(Buffer)
            status = r"Downloaded : %20d Bytes [%4.1f%%]" % (dl_size,
                     dl_size * 100. / file_size)
            status = status + chr(8) * (len(status) + 1)
            print status,
        print ''
    else:
        tar_file.write(page.read())
    tar_file.close()
    #-- untar part
    tar = taropen(tarName)
    file_list = tar.getmembers()
    untar_size = 0
    tar_size = len(file_list)
    for item in file_list:
        tar.extract(item, path=dirName)
        untar_size += 1
        status = r"Untared    : %20i Files [%4.1f%%]" % (untar_size,
                 untar_size * 100. / tar_size)
        status = status + chr(8) * (len(status) + 1)
        print status,
    print ''
    tar.close()
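A hypothetical call of download_and_untar(); the URL and paths are illustrative only.

download_and_untar(
    'https://example.org/data/archive.tar.gz',  # any URL serving a tarball
    '/tmp/archive.tar.gz',                      # where the tarball is saved
    '/tmp/archive')                             # where its members are extracted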
Example #5
    def generate_tgz(self):
        fps_ids = self.all_associated_filepath_ids
        with qdb.sql_connection.TRN:
            sql = """SELECT filepath, data_directory_id FROM qiita.filepath
                        WHERE filepath_id IN %s"""
            qdb.sql_connection.TRN.add(sql, [tuple(fps_ids)])

            full_fps = [join(qdb.util.get_mountpoint_path_by_id(mid), f)
                        for f, mid in
                        qdb.sql_connection.TRN.execute_fetchindex()]

            _, analysis_mp = qdb.util.get_mountpoint('analysis')[0]
            tgz = join(analysis_mp, '%d_files.tgz' % self.id)
            try:
                with taropen(tgz, "w:gz") as tar:
                    for f in full_fps:
                        tar.add(f, arcname=basename(f))
                error_txt = ''
                return_value = 0
            except Exception as e:
                error_txt = str(e)
                return_value = 1

            if return_value == 0:
                self._add_file(tgz, 'tgz')

        return '', error_txt, return_value
Example #6
def col2python(fn):
    m = re.match(DATFILE_PATTERN, fn)
    if m is None:
        raise Exception(
            "It should be .tar.gz, .tgz or .dat file, got: {}.".format(fn))

    base, ext = m.groups()

    m = re.match(DATSQNC_PATTERN, base)
    if m:
        base, nr = m.groups()
        nr = int(nr)
        fde = '.{:03d}.dat'.format(nr)
    else:
        fde = '.dat'

    # print base + fde
    if ext == 'tar.gz' or ext == 'tgz':
        from tarfile import open as taropen
        with taropen(fn, "r:gz") as tar:
            fc = tar.extractfile(base + '.col')
            fd = tar.extractfile(base + fde)
            ft = tar.extractfile(base + '.txt')
            return readcd(fc, fd), pyminiconf(ft)
    elif ext == 'dat':
        with open(base + fde, 'r') as fd, \
                open(base + '.col', 'r') as fc, \
                open(base + '.txt', 'r') as ft:
            return readcd(fc, fd), pyminiconf(ft)
    else:
        raise Exception("I cannot read {} extension.".format(ext))
Example #7
    def generate_tgz(self):
        fps_ids = self.all_associated_filepath_ids
        with qdb.sql_connection.TRN:
            sql = """SELECT filepath, data_directory_id FROM qiita.filepath
                        WHERE filepath_id IN %s"""
            qdb.sql_connection.TRN.add(sql, [tuple(fps_ids)])

            full_fps = [
                join(qdb.util.get_mountpoint_path_by_id(mid), f)
                for f, mid in qdb.sql_connection.TRN.execute_fetchindex()
            ]

            _, analysis_mp = qdb.util.get_mountpoint('analysis')[0]
            tgz = join(analysis_mp, '%d_files.tgz' % self.id)
            try:
                with taropen(tgz, "w:gz") as tar:
                    for f in full_fps:
                        tar.add(f, arcname=basename(f))
                error_txt = ''
                return_value = 0
            except Exception as e:
                error_txt = str(e)
                return_value = 1

            if return_value == 0:
                self._add_file(tgz, 'tgz')

        return '', error_txt, return_value
Example #8
 def check(self, theme_path):
     try:
         tar = taropen(theme_path)
     except:
         return None
     try:
         config = tar.extractfile("theme")
     except:
         tar.close()
         return None
     name = None
     for line in config.readlines():
         # Split at "=" and clean up the key and value
         if not "=" in line:
             continue
         key, value = line.split("=", 1)
         key = key.strip().lstrip().lower()
         value = value.strip().lstrip()
         # Remove comments
         if "#" in key:
             continue
         # If there is a trailing comment, remove it
         # But avoid removing # if it's in a quote
         sharp = value.find("#")
         if sharp != -1 and value.count("\"", 0, sharp) % 2 == 0 and \
            value.count("'", 0, sharp) % 2 == 0:
                value = value.split("#", 1)[0].strip()
         # Remove quote signs
         if value[0] in ("\"", "'") and value[-1] in ("\"", "'"):
             value = value[1:-1]
         if key == "name":
             name = value
             break
     tar.close()
     return name
Example #9
def downloadCifar100(filename):
    url_cifar100 = 'https://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz'
    tmp_cifar100 = 'cifar-100-python.tar.gz'

    print 'Downloading Cifar-100 dataset...'
    urlretrieve(url_cifar100, tmp_cifar100)
    
    print 'Uncompressing tar.gz files...'
    tar = taropen(tmp_cifar100, 'r:gz')
    tar.extract('cifar-100-python/train')
    tar.extract('cifar-100-python/test')
    tar.close()

    data_train = load_pickled('cifar-100-python/train')
    data_test = load_pickled('cifar-100-python/test')
    
    x_tr = np.stack(data_train['data'], axis=0)[:45000]
    t_tr = np.asarray(data_train['fine_labels'])[:45000]
    x_va = np.stack(data_train['data'], axis=0)[45000:]
    t_va = np.asarray(data_train['fine_labels'])[45000:]
    x_te = np.stack(data_test['data'], axis=0)
    t_te = np.asarray(data_test['fine_labels'])
    
    remove('cifar-100-python/train')
    remove('cifar-100-python/test')
    rmdir('cifar-100-python')
    remove(tmp_cifar100)
    
    print 'Storing Cifar-100 data to "%s"' % filename
    np.savez_compressed(filename,
                        x_tr=x_tr, t_tr=t_tr,
                        x_va=x_va, t_va=t_va,
                        x_te=x_te, t_te=t_te)
    print 'Cifar-100 is now ready'
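Hypothetical call; the file name is arbitrary (np.savez_compressed adds the .npz suffix if missing).

downloadCifar100('cifar100_data')  # writes cifar100_data.npz with the six arrays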
Example #10
def downloadCifar10(filename):
    url_cifar10 = 'https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz'
    tmp_cifar10 = 'cifar-10-python.tar.gz'

    print 'Downloading Cifar-10 dataset...'
    urlretrieve(url_cifar10, tmp_cifar10)

    print 'Uncompressing tar.gz files...'
    tar = taropen(tmp_cifar10, 'r:gz')
    tar.extract('cifar-10-batches-py/data_batch_1')
    tar.extract('cifar-10-batches-py/data_batch_2')
    tar.extract('cifar-10-batches-py/data_batch_3')
    tar.extract('cifar-10-batches-py/data_batch_4')
    tar.extract('cifar-10-batches-py/data_batch_5')
    tar.extract('cifar-10-batches-py/test_batch')
    tar.close()

    data_batch_1 = load_pickled('cifar-10-batches-py/data_batch_1')
    data_batch_2 = load_pickled('cifar-10-batches-py/data_batch_2')
    data_batch_3 = load_pickled('cifar-10-batches-py/data_batch_3')
    data_batch_4 = load_pickled('cifar-10-batches-py/data_batch_4')
    data_batch_5 = load_pickled('cifar-10-batches-py/data_batch_5')
    data_test = load_pickled('cifar-10-batches-py/test_batch')

    x_tr = np.concatenate(
        (np.stack(data_batch_1['data'],
                  axis=0), np.stack(data_batch_2['data'], axis=0),
         np.stack(data_batch_3['data'],
                  axis=0), np.stack(data_batch_4['data'], axis=0),
         np.stack(data_batch_5['data'], axis=0)[:5000]),
        axis=0)
    t_tr = np.concatenate(
        (np.asarray(data_batch_1['labels']), np.asarray(
            data_batch_2['labels']), np.asarray(data_batch_3['labels']),
         np.asarray(data_batch_4['labels']), np.asarray(
             data_batch_5['labels'])[:5000]),
        axis=0)
    x_va = np.stack(data_batch_5['data'], axis=0)[5000:]
    t_va = np.asarray(data_batch_5['labels'])[5000:]
    x_te = np.stack(data_test['data'], axis=0)
    t_te = np.asarray(data_test['labels'])

    remove('cifar-10-batches-py/data_batch_1')
    remove('cifar-10-batches-py/data_batch_2')
    remove('cifar-10-batches-py/data_batch_3')
    remove('cifar-10-batches-py/data_batch_4')
    remove('cifar-10-batches-py/data_batch_5')
    remove('cifar-10-batches-py/test_batch')
    rmdir('cifar-10-batches-py')
    remove(tmp_cifar10)

    print 'Storing Cifar-10 data to "%s"' % filename
    np.savez_compressed(filename,
                        x_tr=x_tr,
                        t_tr=t_tr,
                        x_va=x_va,
                        t_va=t_va,
                        x_te=x_te,
                        t_te=t_te)
    print 'Cifar-10 is now ready'
Example #11
    def stream(self, fileobj, callback=None, sleeptime=5):
        """
        Stream the bundle to the fileobj.

        This method is a blocking I/O operation.
        The ``fileobj`` should be an open file like object with 'wb' options.
        An asynchronous callback method MAY be provided via the optional ``callback``
        keyword argument. Periodically, the callback method is provided with the current
        percentage of completion.
        """
        notifythread = None
        if callable(callback):
            self._save_total_size()
            notifythread = self._setup_notify_thread(callback, sleeptime)

        tarfile = taropen(None, 'w|', fileobj)
        for file_data in self.file_data:
            tarinfo, fileobj = self._tarinfo_from_file_data(file_data)
            tarfile.addfile(tarinfo, fileobj)
            self.md_obj.append(
                self._build_file_info(file_data, fileobj.hashdigest()))
        md_txt = bytes(metadata_encode(self.md_obj), 'utf8')
        # tarfile needs a byte stream, so use BytesIO rather than StringIO,
        # which would raise TypeError for bytes input on Python 3
        md_fd = BytesIO(md_txt)
        md_tinfo = TarInfo('metadata.txt')
        md_tinfo.size = len(md_txt)
        tarfile.addfile(md_tinfo, md_fd)
        tarfile.close()
        self._complete = True

        if callable(callback):
            notifythread.join()
Example #12
def load_context():
    global _animal_sizes
    tar = taropen(PATH_ANIMALS, 'r|gz')
    _animal_sizes = {
        m.name.rsplit('/')[-1]: m.size
        for m in tar.getmembers() if not m.name.endswith('.')
    }
    tar.close()
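A hedged usage note for load_context(): PATH_ANIMALS is assumed to point at a gzipped tar whose member names end in the animal name.

load_context()
print(_animal_sizes.get('cat'))  # size in bytes of the archived 'cat' entry, if present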
Example #13
 def test_build(self):
     formula_file = join(FORMULA_DIR, 'foo/foo-1.0.py')
     formula_cls = Formula.from_file(formula_file)
     formula = formula_cls()
     package_file = formula.build(self.tmpdir)
     f = taropen(package_file)
     meta = json.load(f.extractfile('.ipkg.meta'))
     self.assertEqual(meta['name'], 'foo')
Example #14
def submit_VAMPS(preprocessed_data_id):
    """Submit preprocessed data to VAMPS

    Parameters
    ----------
    preprocessed_data_id : int
        The preprocesssed data id
    """
    preprocessed_data = PreprocessedData(preprocessed_data_id)
    study = Study(preprocessed_data.study)
    sample_template = SampleTemplate(study.sample_template)
    prep_template = PrepTemplate(preprocessed_data.prep_template)

    status = preprocessed_data.submitted_to_vamps_status()
    if status in ('submitting', 'success'):
        raise ValueError("Cannot resubmit! Current status is: %s" % status)

    preprocessed_data.update_vamps_status('submitting')

    # Generating a tgz
    targz_folder = mkdtemp(prefix=qiita_config.working_dir)
    targz_fp = join(targz_folder, '%d_%d_%d.tgz' % (study.id,
                                                    prep_template.id,
                                                    preprocessed_data.id))
    targz = taropen(targz_fp, mode='w:gz')

    # adding sample/prep
    samp_fp = join(targz_folder, 'sample_metadata.txt')
    sample_template.to_file(samp_fp)
    targz.add(samp_fp, arcname='sample_metadata.txt')
    prep_fp = join(targz_folder, 'prep_metadata.txt')
    prep_template.to_file(prep_fp)
    targz.add(prep_fp, arcname='prep_metadata.txt')

    # adding preprocessed data
    for _, fp, fp_type in preprocessed_data.get_filepaths():
        if fp_type == 'preprocessed_fasta':
            targz.add(fp, arcname='preprocessed_fasta.fna')

    targz.close()

    # submitting
    cmd = ("curl -F user=%s -F pass='******' -F uploadFile=@%s -F "
           "press=UploadFile %s" % (qiita_config.vamps_user,
                                    qiita_config.vamps_pass,
                                    targz_fp,
                                    qiita_config.vamps_url))
    obs, _, _ = system_call(cmd)

    exp = ("<html>\n<head>\n<title>Process Uploaded File</title>\n</head>\n"
           "<body>\n</body>\n</html>")

    if obs != exp:
        preprocessed_data.update_vamps_status('failure')
        return False
    else:
        preprocessed_data.update_vamps_status('success')
        return True
Example #15
def submit_VAMPS(preprocessed_data_id):
    """Submit preprocessed data to VAMPS

    Parameters
    ----------
    preprocessed_data_id : int
        The preprocesssed data id
    """
    preprocessed_data = PreprocessedData(preprocessed_data_id)
    study = Study(preprocessed_data.study)
    sample_template = SampleTemplate(study.sample_template)
    prep_template = PrepTemplate(preprocessed_data.prep_template)

    status = preprocessed_data.submitted_to_vamps_status()
    if status in ('submitting', 'success'):
        raise ValueError("Cannot resubmit! Current status is: %s" % status)

    preprocessed_data.update_vamps_status('submitting')

    # Generating a tgz
    targz_folder = mkdtemp(prefix=qiita_config.working_dir)
    targz_fp = join(
        targz_folder,
        '%d_%d_%d.tgz' % (study.id, prep_template.id, preprocessed_data.id))
    targz = taropen(targz_fp, mode='w:gz')

    # adding sample/prep
    samp_fp = join(targz_folder, 'sample_metadata.txt')
    sample_template.to_file(samp_fp)
    targz.add(samp_fp, arcname='sample_metadata.txt')
    prep_fp = join(targz_folder, 'prep_metadata.txt')
    prep_template.to_file(prep_fp)
    targz.add(prep_fp, arcname='prep_metadata.txt')

    # adding preprocessed data
    for _, fp, fp_type in preprocessed_data.get_filepaths():
        if fp_type == 'preprocessed_fasta':
            targz.add(fp, arcname='preprocessed_fasta.fna')

    targz.close()

    # submitting
    cmd = ("curl -F user=%s -F pass='******' -F uploadFile=@%s -F "
           "press=UploadFile %s" %
           (qiita_config.vamps_user, qiita_config.vamps_pass, targz_fp,
            qiita_config.vamps_url))
    obs, _, _ = system_call(cmd)

    exp = ("<html>\n<head>\n<title>Process Uploaded File</title>\n</head>\n"
           "<body>\n</body>\n</html>")

    if obs != exp:
        preprocessed_data.update_vamps_status('failure')
        return False
    else:
        preprocessed_data.update_vamps_status('success')
        return True
Example #16
def index_tar(tarfile):
    tarh = taropen(tarfile, ignore_zeros=True)
    tarsize = check_tar_index(tarfile)
    if tarsize:  # index not created yet
        print '   * TAR index file not found, creating it...'
        out = open(tarfile + 'i', 'w')
        out.write('# TAR size: %d\n' % tarsize)
        for member in tarh:
            out.write('%s\t%d\t%d\n' % (
                member.name.rsplit('_', 1)[0], member.offset_data, member.size))
        out.close()
Example #17
 def _build_from_tars(self, for_name: str) -> str:
     archives = [self._last_tar] + self._archives if self._last_tar else self._archives
     for tarname in archives:
         with taropen(tarname) as tarfile:  # type: TarFile
             for taritem in tarfile:  # type: TarInfo
                 filepath = taritem.name
                 if for_name not in filepath:
                     continue
                 tarfile.extract(taritem, EXTRACT_PATH)
                 self._last_tar = tarname
                 return opjoin(EXTRACT_PATH, filepath)
     return None
Example #18
    def _unpack_libpython(self):
        from tarfile import open as taropen

        for tag in libpython_tags:
            dest = path.join(libpython_dists, 'cpython-' + tag)
            if path.isdir(dest):
                continue

            print('Unpack', tag)
            src = path.join(libpython_dists, 'python-' + tag) + '.tar.gz'

            with taropen(src, mode='r:gz') as tar:
                tar.extractall(libpython_dists)
Example #19
 def _build_from_tars(self, for_name: str) -> str:
     archives = [self._last_tar
                 ] + self._archives if self._last_tar else self._archives
     for tarname in archives:
         with taropen(tarname) as tarfile:  # type: TarFile
             for taritem in tarfile:  # type: TarInfo
                 filepath = taritem.name
                 if for_name not in filepath:
                     continue
                 tarfile.extract(taritem, EXTRACT_PATH)
                 self._last_tar = tarname
                 return opjoin(EXTRACT_PATH, filepath)
     return None
Example #20
    def __init__(self, path_to_tar):
        tar = taropen(path_to_tar)
        self.pixbufs = {}
        no_passive_maximize = None
        for button in ('close', 'minimize', 'maximize', 'restore'):
            try:
                self.pixbufs['%s_normal_active'%button] = self.load_pixbuf(tar, 'active/%s_normal.png'%button)
            except:
                if button == 'restore':
                    self.pixbufs['%s_normal_active'%button] = self.pixbufs['maximize_normal_active']
                    self.pixbufs['%s_prelight_active'%button] = self.pixbufs['maximize_prelight_active']
                    self.pixbufs['%s_pressed_active'%button] = self.pixbufs['maximize_pressed_active']
                    self.pixbufs['%s_normal_passive'%button] = self.pixbufs['maximize_normal_passive']
                    self.pixbufs['%s_prelight_passive'%button] = self.pixbufs['maximize_prelight_passive']
                    self.pixbufs['%s_pressed_passive'%button] = self.pixbufs['maximize_pressed_passive']
                    continue
                else:
                    raise
            try:
                self.pixbufs['%s_prelight_active'%button] = self.load_pixbuf(tar, 'active/%s_prelight.png'%button)
            except:
                self.pixbufs['%s_prelight_active'%button] = self.pixbufs['%s_normal_active'%button]
            try:
                self.pixbufs['%s_pressed_active'%button] = self.load_pixbuf(tar, 'active/%s_pressed.png'%button)
            except:
                self.pixbufs['%s_pressed_active'%button] = self.pixbufs['%s_prelight_active'%button]

            try:
                self.pixbufs['%s_normal_passive'%button] = self.load_pixbuf(tar, 'passive/%s_normal.png'%button)
            except:
                if button == 'maximize':
                    no_passive_maximize = True
                if button == 'restore' and not no_passive_maximize:
                    self.pixbufs['%s_normal_passive'%button] = self.pixbufs['maximize_normal_passive']
                    self.pixbufs['%s_prelight_passive'%button] = self.pixbufs['maximize_prelight_passive']
                    self.pixbufs['%s_pressed_passive'%button] = self.pixbufs['maximize_pressed_passive']
                    continue
                self.pixbufs['%s_normal_passive'%button] = self.pixbufs['%s_normal_active'%button]
                self.pixbufs['%s_prelight_passive'%button] = self.pixbufs['%s_prelight_active'%button]
                self.pixbufs['%s_pressed_passive'%button] = self.pixbufs['%s_pressed_active'%button]
                continue
            try:
                self.pixbufs['%s_prelight_passive'%button] = self.load_pixbuf(tar, 'passive/%s_prelight.png'%button)
            except:
                self.pixbufs['%s_prelight_passive'%button] = self.pixbufs['%s_normal_passive'%button]
            try:
                self.pixbufs['%s_pressed_passive'%button] = self.load_pixbuf(tar, 'passive/%s_pressed.png'%button)
            except:
                self.pixbufs['%s_pressed_passive'%button] = self.pixbufs['%s_prelight_passive'%button]

        tar.close()
Example #21
    def __init__(self, path_to_tar):
        tar = taropen(path_to_tar)
        self.pixbufs = {}
        no_passive_maximize = None
        for button in ('close', 'minimize', 'maximize', 'restore'):
            try:
                self.pixbufs['%s_normal_active'%button] = self.load_pixbuf(tar, 'active/%s_normal.png'%button)
            except:
                if button == 'restore':
                    self.pixbufs['%s_normal_active'%button] = self.pixbufs['maximize_normal_active']
                    self.pixbufs['%s_prelight_active'%button] = self.pixbufs['maximize_prelight_active']
                    self.pixbufs['%s_pressed_active'%button] = self.pixbufs['maximize_pressed_active']
                    self.pixbufs['%s_normal_passive'%button] = self.pixbufs['maximize_normal_passive']
                    self.pixbufs['%s_prelight_passive'%button] = self.pixbufs['maximize_prelight_passive']
                    self.pixbufs['%s_pressed_passive'%button] = self.pixbufs['maximize_pressed_passive']
                    continue
                else:
                    raise
            try:
                self.pixbufs['%s_prelight_active'%button] = self.load_pixbuf(tar, 'active/%s_prelight.png'%button)
            except:
                self.pixbufs['%s_prelight_active'%button] = self.pixbufs['%s_normal_active'%button]
            try:
                self.pixbufs['%s_pressed_active'%button] = self.load_pixbuf(tar, 'active/%s_pressed.png'%button)
            except:
                self.pixbufs['%s_pressed_active'%button] = self.pixbufs['%s_prelight_active'%button]

            try:
                self.pixbufs['%s_normal_passive'%button] = self.load_pixbuf(tar, 'passive/%s_normal.png'%button)
            except:
                if button == 'maximize':
                    no_passive_maximize = True
                if button == 'restore' and not no_passive_maximize:
                    self.pixbufs['%s_normal_passive'%button] = self.pixbufs['maximize_normal_passive']
                    self.pixbufs['%s_prelight_passive'%button] = self.pixbufs['maximize_prelight_passive']
                    self.pixbufs['%s_pressed_passive'%button] = self.pixbufs['maximize_pressed_passive']
                    continue
                self.pixbufs['%s_normal_passive'%button] = self.pixbufs['%s_normal_active'%button]
                self.pixbufs['%s_prelight_passive'%button] = self.pixbufs['%s_prelight_active'%button]
                self.pixbufs['%s_pressed_passive'%button] = self.pixbufs['%s_pressed_active'%button]
                continue
            try:
                self.pixbufs['%s_prelight_passive'%button] = self.load_pixbuf(tar, 'passive/%s_prelight.png'%button)
            except:
                self.pixbufs['%s_prelight_passive'%button] = self.pixbufs['%s_normal_passive'%button]
            try:
                self.pixbufs['%s_pressed_passive'%button] = self.load_pixbuf(tar, 'passive/%s_pressed.png'%button)
            except:
                self.pixbufs['%s_pressed_passive'%button] = self.pixbufs['%s_prelight_passive'%button]

        tar.close()
Example #22
def create_archive(outdir: str, archive_type: str):

    screenshots_files = glob(f"{outdir}{sep}*.png")
    if archive_type == "zip":
        with zfopen(f"{outdir}.zip", 'w') as zip_archive:
            for sf in screenshots_files:
                zip_archive.write(sf)
        print(f"[+] Created zip archive {outdir}.zip")

    elif archive_type == "tar":
        with taropen(f"{outdir}.tar", 'w') as tar_archive:
            for sf in screenshots_files:
                tar_archive.add(sf)
        print(f"[+] Created tar archive {outdir}.zip")
Example #23
def unfoldArchives(workingPath, archives):
    submFolders = []
    opener = {
        'tar.gz': lambda tgzFile: taropen(tgzFile, 'r:gz'),
        'tar': lambda tFile: taropen(tFile, 'r:'),
        'zip': lambda zFile: ZipFile(zFile, 'r'),
        'rar': lambda rFile: RarFile(rFile, 'r')
    }

    print('Start unfolding...', end='')
    for archive in archives:
        for k, v in opener.items():
            if archive.endswith(k):
                with v(archive) as locFile:
                    path = os.path.join(
                        workingPath,
                        os.path.basename(archive)[0:-len('.' + k)])
                    locFile.extractall(path)
                    submFolders.append(path)

    print('[OK]')

    return submFolders
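A hedged usage example for unfoldArchives(); the paths are invented, and handling .rar archives requires the third-party rarfile package that provides RarFile.

folders = unfoldArchives(
    '/tmp/work',
    ['/tmp/incoming/a1.tar.gz', '/tmp/incoming/a2.zip'])
print(folders)  # e.g. ['/tmp/work/a1', '/tmp/work/a2']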
Example #24
    def download(self):
        tar_dbfile = abspath(join(self.data_folder, 'GeoLite2-City.tar.gz'))
        url = 'http://geolite.maxmind.com/download/geoip/database/GeoLite2-City.tar.gz'
        downloaded = False

        retry_counter = 0

        while not downloaded:
            self.logger.info('Downloading GeoLite2 from %s', url)
            try:
                urlretrieve(url, tar_dbfile)
                downloaded = True
            except HTTPError as e:
                self.logger.error(
                    "Problem downloading new GeoLite2 DB... Trying again. Error: %s",
                    e)
                sleep(2)
                retry_counter = (retry_counter + 1)

                if retry_counter >= 3:
                    self.logger.error(
                        "Retried downloading the new GeoLite2 DB 3 times and failed... Aborting!"
                    )
                    result_status = 1
                    return result_status
        try:
            remove(self.dbfile)
        except FileNotFoundError:
            self.logger.warning(
                "Cannot remove GeoLite2 DB as it does not exist!")

        self.logger.debug("Opening GeoLite2 tar file : %s", tar_dbfile)

        tar = taropen(tar_dbfile, 'r:gz')

        for files in tar.getmembers():
            if 'GeoLite2-City.mmdb' in files.name:
                self.logger.debug('"GeoLite2-City.mmdb" FOUND in tar file')
                files.name = basename(files.name)
                tar.extract(files, self.data_folder)
                self.logger.debug('%s has been extracted to %s', files,
                                  self.data_folder)
        tar.close()
        try:
            remove(tar_dbfile)
            self.logger.debug('Removed the GeoLite2 DB TAR file.')
        except FileNotFoundError:
            self.logger.warning(
                "Cannot remove GeoLite2 DB TAR file as it does not exist!")
Example #25
def download_and_extract_aosp(version, force=False):
    # get package name
    package = None
    for v, p in aosps:
        if v == version:
            package = p
            break
    if package is None:
        print 'version "%s" does not exist' % version
        exit(1)
    # check directory content
    if force:
        for d in (d for d in listdir('.') if isdir(d) and not (d.startswith('.') or d == 'tests')):
            rmtree(d)
    else:
        for _ in (d for d in listdir('.') if isdir(d) and not (d.startswith('.') or d == 'tests')):
            print 'directory not empty'
            exit(2)
    print 'downloading Android package:'
    chunk_read(package, urlopen(url + package), report_hook=chunk_report)
    print 'extracting content'
    taropen(package).extractall()
    # clean
    remove(package)
Example #26
 def check(self, path_to_tar):
     #TODO: Optimize this
     tar = taropen(path_to_tar)
     config = tar.extractfile("config")
     parser = make_parser()
     theme_handler = ThemeHandler()
     try:
         parser.setContentHandler(theme_handler)
         parser.parse(config)
     except:
         config.close()
         tar.close()
         raise
     config.close()
     tar.close()
     return theme_handler.get_name()
Example #27
 def geo(self):
     for g in self.geo_urls:
         n = self.geo_urls[g].split('/')[-1]
         print(f'Downloading {n}')
         data = get(self.geo_urls[g])
         with open(join(self.path, n), 'wb') as f:
             f.write(data.content)
         t = taropen(join(self.path, n), 'r')
         for m in t.getmembers():
             if '.mmdb' in m.name:
                 t.extract(m, self.path)
                 move(join(self.path, m.name),
                      join(self.path,
                           m.name.split('/')[-1]))
                 rmtree(join(self.path, m.name.split('/')[0]))
         t.close()
         unlink(join(self.path, n))
Example #28
def generate_sortmerna_tgz(out_dir):
    """Generates the sortmerna failures tgz command

    Parameters
    ----------
    out_dir : str
        The job output directory

    Returns
    -------
    str
        The sortmerna failures tgz command
    """
    to_tgz = join(out_dir, 'sortmerna_picked_otus')
    tgz = to_tgz + '.tgz'
    with taropen(tgz, "w:gz") as tar:
        tar.add(to_tgz, arcname=basename(to_tgz))
Example #29
def targz(tgzfile, srcdir):
    """
	Do a "tar zcf"-like for a directory
	@params:
		`tgzfile`: the final .tgz file
		`srcdir`:  the source directory
	"""
    from tarfile import open as taropen
    from glob import glob
    from os import chdir, getcwd
    cwd = getcwd()
    tar = taropen(tgzfile, 'w:gz')
    chdir(srcdir)
    for name in glob('./*'):
        tar.add(name)
    tar.close()
    chdir(cwd)
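A hypothetical call of targz(). Because the function chdir()s into srcdir before adding files, the archive stores paths relative to that directory; the tgz path should therefore be absolute or relative to the original working directory.

targz('/tmp/build.tgz', './build')  # packs everything under ./build into /tmp/build.tgz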
Example #30
def generate_sortmerna_tgz(out_dir):
    """Generates the sortmerna failures tgz command

    Parameters
    ----------
    out_dir : str
        The job output directory

    Returns
    -------
    str
        The sortmerna failures tgz command
    """
    to_tgz = join(out_dir, 'sortmerna_picked_otus')
    tgz = to_tgz + '.tgz'
    with taropen(tgz, "w:gz") as tar:
        tar.add(to_tgz, arcname=basename(to_tgz))
Example #31
def get_root_directory(filename):
    """Detects if the archive has a root directory or not"""
    if is_zipfile(filename):
        members = [member.filename for member in ZipFile(filename).infolist()]
    elif is_tarfile(filename):
        members = [member.name for member in taropen(filename).getmembers()]
    else:
        return None

    root_directory = members[0]
    if root_directory[-1] != '/':
        root_directory = root_directory + '/'

    for member in members[1:]:
        if not member.startswith(root_directory):
            return None

    return root_directory
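A short, hedged example of get_root_directory(); the archive name is illustrative.

root = get_root_directory('project-1.0.tar.gz')
if root is not None:
    print('single root directory: %s' % root)  # e.g. 'project-1.0/'
else:
    print('members sit directly at the archive root')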
Example #32
 def check(path_to_tar):
     tar = taropen(path_to_tar)
     try:
         for button in ('close', 'minimize', 'maximize'):
             # Try loading the pixbuf to if everything is OK
             f = tar.extractfile('active/%s_normal.png'%button)
             buffer=f.read()
             pixbuf_loader=gtk.gdk.PixbufLoader()
             pixbuf_loader.write(buffer)
             pixbuf_loader.close()
             f.close()
             pixbuf_loader.get_pixbuf()
     except KeyError:
         tar.close()
         print "Nambar couldn't read the image %s from theme file %s"%('active/%s_normal.png'%button, path_to_tar)
         print "This theme will be ignored."
         return False
     tar.close()
     return True
Example #33
 def check(path_to_tar):
     tar = taropen(path_to_tar)
     try:
         for button in ('close', 'minimize', 'maximize'):
             # Try loading the pixbuf to if everything is OK
             f = tar.extractfile('active/%s_normal.png'%button)
             buffer=f.read()
             pixbuf_loader=gtk.gdk.PixbufLoader()
             pixbuf_loader.write(buffer)
             pixbuf_loader.close()
             f.close()
             pixbuf_loader.get_pixbuf()
     except KeyError:
         tar.close()
         print "Nambar couldn't read the image %s from theme file %s"%('active/%s_normal.png'%button, path_to_tar)
         print "This theme will be ignored."
         return False
     tar.close()
     return True
Example #34
def get_module_meta_path(module_description):
    """Returns the finder to be appended to sys.meta_path
    module_description is a tuple of 2 elements:
        format: either 'zip', 'tar', 'tar:gz', 'tar:bz' or a string to be used as module name
        content: a base64 encoded string of a zip archive, a tar(gz/bz2) archive or a plain python module
    """
    raw_format = module_description[0].split(':')
    if raw_format[0] in ('zip', 'tar'):
        f = BytesIO()
        f.write(decodestring(module_description[1]))
        f.seek(0)
        if raw_format[0] == 'zip':
            zipf = PyZipFile(f)
            module_dict = dict((splitext(z.filename)[0].replace('/', '.'), zipf.open(z.filename).read()) for z in zipf.infolist() if splitext(z.filename)[1] == ".py")
        elif raw_format[0] == 'tar':
            compression = raw_format[1] if len(raw_format) > 1 else ''
            tarf = taropen(fileobj=f, mode="r:" + compression)
            module_dict = dict((splitext(t.name)[0].replace('/', '.'), tarf.extractfile(t.name).read()) for t in tarf.getmembers() if splitext(t.name)[1] == ".py")
    else:
        module_dict = {module_description[0]: decodestring(module_description[1])}
    return Finder(module_dict)
Example #35
    def download_sbo(self, pkg: str, pkgdata: list) -> None:
        """
        download SlackBuild script and sources from 'sbo' repository
        """
        if self.os_ver == 'current':
            self.os_ver = self.spman_conf['OS_LAST_RELEASE']

        fname = '{0}.tar.gz'.format(pkg)
        url = '{0}{1}/{2}/{3}'.format(self.repo_url, self.os_ver, pkgdata[1],
                                      fname)
        # downloading SlackBuild script
        Download(url, self.dest).start()
        # unpack SlackBuild archive
        archive = '{0}{1}'.format(self.dest, fname)
        tar = taropen(archive)
        tar.extractall(self.dest)
        # remove archive
        remove(archive)
        # download sources
        for url in pkgdata[7]:
            Download(url, '{0}{1}/'.format(self.dest, pkg)).start()
Example #36
def getPythonDocs(url):
    with urlopen("".join(url)) as resp:
        print("Sending HTTP request...")
        if resp.code == 200:
            arguments = vars(args().parse_args())

            for _, v in arguments.items():
                tarbz2 = "".join(url).split("/")[5]
                with open(tarbz2, "wb") as tar:
                    print(f"Downloading {tarbz2} to {v}")
                    tar.write(resp.read())

                    rename(tarbz2, f"{v}/{tarbz2}")

                    # After moving bz2, extract it! This function returns a TarFile obj :D)
                    with taropen(name=f"{v}/{tarbz2}", mode="r:bz2") as bz2:
                        print("Extracting bz2..")
                        bz2.extractall(path=v)
                        print("Cleaning up..")
                        remove(f"{v}/{tarbz2}")
                        print("Done!")

                        # Make a log of this in database
                        cur.execute("INSERT INTO pdfdocs VALUES (?, ?, ?)",
                                    (uname, v, date))
                        print(
                            "Creating log... This is only local, thought it would be cool to implement :P"
                        )
                        queryPdfDocsTable()

                        # Save changes!
                        database.commit()

                        # Close connection!
                        database.close()
        else:
            print(
                "This function will not work as it is supposed to because the python docs cannot be downloaded"
            )
Example #37
    def reload(self):
        tar = taropen(self.theme_path)
        config = tar.extractfile("config")

        # Parse
        parser = make_parser()
        theme_handler = ThemeHandler()
        parser.setContentHandler(theme_handler)
        parser.parse(config)
        self.theme = theme_handler.get_dict()

        # Name
        self.name = theme_handler.get_name()

        self.types = theme_handler.get_types()

        # Pixmaps
        self.surfaces = {}
        pixmaps = {}
        if self.theme.has_key("pixmaps"):
            pixmaps = self.theme["pixmaps"]["content"]
        for (type_, d) in pixmaps.items():
            if type_ == "pixmap_from_file":
                self.surfaces[d["name"]] = self.load_surface(tar, d["file"])

        # Popup style
        ps = self.theme.get("popup_style", {})
        self.default_popup_style = ps.get("file_name", "dbx.tar.gz")

        # Colors
        self.color_names = {}
        self.default_colors = {}
        self.default_alphas = {}
        colors = {}
        if self.theme.has_key("colors"):
            colors = self.theme["colors"]["content"]
        for i in range(1, 9):
            c = "color%s"%i
            if colors.has_key(c):
                d = colors[c]
                if d.has_key("name"):
                    self.color_names[c] = d["name"]
                if d.has_key("default"):
                    if self.test_color(d["default"]):
                        self.default_colors[c] = d["default"]
                    else:
                        logger.warning("Theme error: %s\'s default for" % c + \
                                       " theme %s cannot be read." % self.name)
                        logger.info("A default color should start with an " + \
                                    "\"#\" and be followed by six " + \
                                    "hex-digits, for example \"#FF13A2\".")
                if d.has_key("opacity"):
                    alpha = d["opacity"]
                    if self.test_alpha(alpha):
                        self.default_alphas[c] = alpha
                    else:
                        logger.warning("Theme error: %s\'s opacity" % c + \
                                       " for theme %s" % self.name + \
                                       " cannot be read.")
                        logger.info("The opacity should be a number " + \
                                    "(\"0\"-\"100\") or the words " + \
                                    "\"not used\".")


        # Sets
        self.sets = {}
        sets = {}
        if self.theme.has_key("sets"):
            sets = self.theme["sets"]["content"]
        for (type_, d) in sets.items():
            if type_ == "set":
                self.sets[d["name"]] = d
        config.close()
        tar.close()

        # Inform rest of dockbar about the reload.
        self.globals.theme_name = self.name
        self.globals.update_colors(self.name,
                                   self.default_colors, self.default_alphas)
        self.globals.update_popup_style(self.name, self.default_popup_style)
        self.emit("theme_reloaded")
Example #38
def archive(files):
    with closing(StringIO()) as buffer:
        with taropen(mode='w', fileobj=buffer) as tar:
            for f in files:
                tar.add(f, arcname=basename(f))
        return b64encode(buffer.getvalue())
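A hypothetical Python 2 usage of archive() (the StringIO import it implies only works there; under Python 3 a BytesIO buffer would be needed because tarfile writes bytes). The file list is invented.

payload = archive(['/etc/hostname', '/etc/hosts'])
# payload is a base64 string holding an uncompressed tar of the two files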
Example #39
#coding=utf-8
'''
Created on 2012-6-6

@author: zhaojp
'''

from tarfile import open as taropen

# open a tarfile output stream, gzip-compressed
tfile = taropen('D:/test.tar.gz', 'w:gz')

tfile.debug = 3  # debug message level: 0 emits nothing, 3 emits all debug output
tfile.errorlevel = 3  # error level: 0 ignores all errors; 1 raises IOError/OSError on errors

tfile.add('F:/LIBS/checkstyle-5.5/README', 'README')
# add a file under an alternate name/path inside the archive
tfile.add('F:/LIBS/checkstyle-5.5/commons-logging-1.1.1.jar', 'jar/commons-logging-1.1.1.jar')
# recursively add the whole contents of the 'checks' directory
tfile.add('F:/LIBS/checkstyle-5.5/checks', 'checks', True)

tfile.close()  # close the tar archive

# open a tarfile input stream, gzip-compressed
tfile = taropen('D:/test.tar.gz', 'r:gz')

# extract a single member into the given directory
tfile.extract('jar/commons-logging-1.1.1.jar', 'D:/')

tme = tfile.getmember('README')  # look up a member by name; returns its TarInfo
# extract a member as a read-only file-like object whose content can be read
# via read(), readline(), readlines(), seek() and tell(); accepts a member
# name or a TarInfo instance
fin = tfile.extractfile('README')
names = tfile.getnames()  # list of the archive's member names
members = tfile.getmembers()  # list of TarInfo objects for all members

for name in names: print(name, end=' ')
print('\n', '-' * 30)
Example #40
def submit_VAMPS(artifact_id):
    """Submit artifact to VAMPS

    Parameters
    ----------
    artifact_id : int
        The artifact id

    Raises
    ------
    ComputeError
        - If the artifact cannot be submitted to VAMPS
        - If the artifact is associated with more than one prep template
    """
    artifact = Artifact(artifact_id)
    if not artifact.can_be_submitted_to_vamps:
        raise ComputeError("Artifact %d cannot be submitted to VAMPS"
                           % artifact_id)
    study = artifact.study
    sample_template = study.sample_template
    prep_templates = artifact.prep_templates
    if len(prep_templates) > 1:
        raise ComputeError(
            "Multiple prep templates associated with the artifact: %s"
            % artifact_id)
    prep_template = prep_templates[0]

    # Also need to check that is not submitting (see item in #1523)
    if artifact.is_submitted_to_vamps:
        raise ValueError("Cannot resubmit artifact %s to VAMPS!" % artifact_id)

    # Generating a tgz
    targz_folder = mkdtemp(prefix=qiita_config.working_dir)
    targz_fp = join(targz_folder, '%d_%d_%d.tgz' % (study.id,
                                                    prep_template.id,
                                                    artifact_id))
    targz = taropen(targz_fp, mode='w:gz')

    # adding sample/prep
    samp_fp = join(targz_folder, 'sample_metadata.txt')
    sample_template.to_file(samp_fp)
    targz.add(samp_fp, arcname='sample_metadata.txt')
    prep_fp = join(targz_folder, 'prep_metadata.txt')
    prep_template.to_file(prep_fp)
    targz.add(prep_fp, arcname='prep_metadata.txt')

    # adding preprocessed data
    for _, fp, fp_type in artifact.filepaths:
        if fp_type == 'preprocessed_fasta':
            targz.add(fp, arcname='preprocessed_fasta.fna')

    targz.close()

    # submitting
    cmd = ("curl -F user=%s -F pass='******' -F uploadFile=@%s -F "
           "press=UploadFile %s" % (qiita_config.vamps_user,
                                    qiita_config.vamps_pass,
                                    targz_fp,
                                    qiita_config.vamps_url))
    obs, stderr, rv = system_call(cmd)
    if rv != 0:
        error_msg = ("Error:\nStd output:%s\nStd error:%s" % (obs, stderr))
        raise ComputeError(error_msg)

    exp = ("<html>\n<head>\n<title>Process Uploaded File</title>\n</head>\n"
           "<body>\n</body>\n</html>")

    if obs != exp:
        return False
    else:
        artifact.is_submitted_to_vamps = True
        return True
Example #41
    def reload(self):
        tar = taropen(self.theme_path)
        config = tar.extractfile('config')

        # Parse
        parser = make_parser()
        theme_handler = ThemeHandler()
        parser.setContentHandler(theme_handler)
        parser.parse(config)
        self.theme = theme_handler.get_dict()

        # Name
        self.name = theme_handler.get_name()

        self.types = theme_handler.get_types()

        # Pixmaps
        self.surfaces = {}
        pixmaps = {}
        if self.theme.has_key('pixmaps'):
            pixmaps = self.theme['pixmaps']['content']
        for (type, d) in pixmaps.items():
            if type == 'pixmap_from_file':
                self.surfaces[d['name']] = self.load_surface(tar, d['file'])

        # Colors
        self.color_names = {}
        self.default_colors = {}
        self.default_alphas = {}
        colors = {}
        if self.theme.has_key('colors'):
            colors = self.theme['colors']['content']
        for i in range(1, 9):
            c = 'color%s'%i
            if colors.has_key(c):
                d = colors[c]
                if d.has_key('name'):
                    self.color_names[c] = d['name']
                if d.has_key('default'):
                    if self.test_color(d['default']):
                        self.default_colors[c] = d['default']
                    else:
                        print 'Theme error: %s\'s default for theme %s cannot be read.'%(c, self.name)
                        print 'A default color should start with an "#" ' + \
                              'and be followed by six hex-digits,' + \
                              'for example "#FF13A2".'
                if d.has_key('opacity'):
                    alpha = d['opacity']
                    if self.test_alpha(alpha):
                        self.default_alphas[c] = alpha
                    else:
                        print 'Theme error: ' + \
                              '%s\'s opacity for theme %s'%(c, self.name) + \
                              ' cannot be read.'
                        print 'The opacity should be a number ("0"-"100") ' + \
                              'or the words "not used".'

        config.close()
        tar.close()

        # Inform rest of dockbar about the reload.
        self.globals.theme_name = self.name
        self.globals.update_colors(self.name,
                                   self.default_colors, self.default_alphas)
        self.emit('theme_reloaded')
Example #42
def write_matrix(inbam,
                 resolution,
                 biases,
                 outdir,
                 filter_exclude=(1, 2, 3, 4, 6, 7, 8, 9, 10),
                 normalizations=('decay', ),
                 region1=None,
                 start1=None,
                 end1=None,
                 clean=True,
                 region2=None,
                 start2=None,
                 end2=None,
                 extra='',
                 half_matrix=True,
                 nchunks=100,
                 tmpdir='.',
                 append_to_tar=None,
                 ncpus=8,
                 cooler=False,
                 verbose=True):
    """
    Writes a matrix file from a BAM file containing interacting reads. The
    matrix will be extracted from the genomic BAM; the genomic coordinates of
    this matrix will be at the intersection of two regions defined by the
    parameters region1, start1, end1 and region2, start2, end2. If only one
    region is wanted, the second coordinate can be skipped.

    :param inbam: path to BAM file (generated by TADbit)
    :param resolution: resolution at which we want to write the matrix
    :param biases: path to a file with biases
    :param outdir: path to a folder where to write output files
    :param (1, 2, 3, 4, 6, 7, 8, 9, 10) filter_exclude: filters to define the
       set of valid pair of reads.
    :param ('decay',) normalizations: tuple with normalizations to use, can be
       'decay', 'norm' or/and 'raw'. One file per normalization will be
       created.
    :param None region1: chromosome name of the first region from which to
       extract the matrix
    :param None start1: start coordinate of the first region from which to
       extract the matrix
    :param None end1: end coordinate of the first region from which to
       extract the matrix
    :param None region2: chromosome name of the second region from which to
       extract the matrix
    :param None start2: start coordinate of the second region from which to
       extract the matrix
    :param None end2: end coordinate of the second region from which to
       extract the matrix
    :param True half_matrix: writes only half of the matrix (and the diagonal)
    :param '.' tmpdir: where to write temporary files
    :param None append_to_tar: path to a TAR file were generated matrices will
       be written directly
    :param 8 ncpus: number of cpus to use to read the BAM file
    :param True verbose: speak
    :param 100 nchunks: maximum number of chunks into which to cut the BAM

    :returns: path to output files
    """
    if start1 is not None and end1:
        if end1 - start1 < resolution:
            raise Exception(
                'ERROR: region1 should be at least as big as resolution')
    if start2 is not None and end2:
        if end2 - start2 < resolution:
            raise Exception(
                'ERROR: region2 should be at least as big as resolution')

    if isinstance(normalizations, list):
        normalizations = tuple(normalizations)
    elif isinstance(normalizations, str):
        normalizations = tuple([normalizations])

    if not isinstance(filter_exclude, int):
        filter_exclude = filters_to_bin(filter_exclude)

    regions, rand_hash, bin_coords, chunks = read_bam(inbam,
                                                      filter_exclude,
                                                      resolution,
                                                      ncpus=ncpus,
                                                      region1=region1,
                                                      start1=start1,
                                                      end1=end1,
                                                      region2=region2,
                                                      start2=start2,
                                                      end2=end2,
                                                      tmpdir=tmpdir,
                                                      nchunks=nchunks,
                                                      verbose=verbose)

    if region1:
        regions = [region1]
        if region2:
            regions.append(region2)

    bamfile = AlignmentFile(inbam, 'rb')
    sections = OrderedDict(
        zip(bamfile.references, [x for x in bamfile.lengths]))

    if biases:
        bias1, bias2, decay, bads1, bads2 = get_biases_region(
            biases, bin_coords)
    elif normalizations != ('raw', ):
        raise Exception(
            'ERROR: should provide path to file with biases (pickle).')
    else:
        bads1 = bads2 = {}

    start_bin1, start_bin2 = bin_coords[::2]
    if verbose:
        printime('  - Writing matrices')
    # define output file name
    name = _generate_name(regions, (start1, start2), (end1, end2), resolution)

    # prepare file header
    outfiles = []
    if cooler:
        if 'h5py' not in modules:
            raise Exception(
                'ERROR: cooler output is not available. Probably ' +
                'you need to install h5py\n')
        if 'decay' in normalizations or 'raw&decay' in normalizations:
            raise Exception(
                'ERROR: decay and raw&decay matrices cannot be exported '
                'to cooler format. Cooler only accepts weights per column/row')
        fnam = 'raw_%s_%s%s.mcool' % (name, nicer(resolution).replace(' ', ''),
                                      ('_' + extra) if extra else '')
        if os.path.exists(os.path.join(outdir, fnam)):
            os.remove(os.path.join(outdir, fnam))
        out_raw = cooler_file(os.path.join(outdir, fnam), resolution, sections,
                              regions)
        out_raw.create_bins()
        out_raw.prepare_matrix(start_bin1, start_bin2)
        outfiles.append((os.path.join(outdir, fnam), fnam))
    else:
        if 'raw' in normalizations:
            fnam = 'raw_%s_%s%s.abc' % (name, nicer(resolution).replace(
                ' ', ''), ('_' + extra) if extra else '')
            if append_to_tar:
                out_raw = StringIO()
                outfiles.append((out_raw, fnam))
            else:
                out_raw = open(os.path.join(outdir, fnam), 'w')
                outfiles.append((os.path.join(outdir, fnam), fnam))
            for reg in regions:
                out_raw.write('# CRM %s\t%d\n' % (reg, sections[reg]))

            out_raw.write('# %s resolution:%d\n' % (name, resolution))
            if region2:
                out_raw.write('# BADROWS %s\n' %
                              (','.join([str(b) for b in bads1])))
                out_raw.write('# BADCOLS %s\n' %
                              (','.join([str(b) for b in bads2])))
            else:
                out_raw.write('# MASKED %s\n' %
                              (','.join([str(b) for b in bads1])))

        # write file header
        if 'norm' in normalizations:
            fnam = 'nrm_%s_%s%s.abc' % (name, nicer(resolution).replace(
                ' ', ''), ('_' + extra) if extra else '')
            if append_to_tar:
                out_nrm = StringIO()
                outfiles.append((out_nrm, fnam))
            else:
                out_nrm = open(os.path.join(outdir, fnam), 'w')
                outfiles.append((os.path.join(outdir, fnam), fnam))
            for reg in regions:
                out_nrm.write('# CRM %s\t%d\n' % (reg, sections[reg]))

            out_nrm.write('# %s resolution:%d\n' % (name, resolution))
            if region2:
                out_nrm.write('# BADROWS %s\n' %
                              (','.join([str(b) for b in bads1])))
                out_nrm.write('# BADCOLS %s\n' %
                              (','.join([str(b) for b in bads2])))
            else:
                out_nrm.write('# MASKED %s\n' %
                              (','.join([str(b) for b in bads1])))
        if 'decay' in normalizations or 'raw&decay' in normalizations:
            fnam = 'dec_%s_%s%s.abc' % (name, nicer(resolution).replace(
                ' ', ''), ('_' + extra) if extra else '')
            if append_to_tar:
                out_dec = StringIO()
                outfiles.append((out_dec, fnam))
            else:
                out_dec = open(os.path.join(outdir, fnam), 'w')
                outfiles.append((os.path.join(outdir, fnam), fnam))
            for reg in regions:
                out_dec.write('# CRM %s\t%d\n' % (reg, sections[reg]))

            out_dec.write('# %s resolution:%d\n' % (name, resolution))
            if region2:
                out_dec.write('# BADROWS %s\n' %
                              (','.join([str(b) for b in bads1])))
                out_dec.write('# BADCOLS %s\n' %
                              (','.join([str(b) for b in bads2])))
            else:
                out_dec.write('# MASKED %s\n' %
                              (','.join([str(b) for b in bads1])))

    # functions to write lines of pairwise interactions
    def write_raw(func=None):
        def writer2(c, a, b, v):
            func(c, a, b, v)
            out_raw.write('{}\t{}\t{}\n'.format(a, b, v))

        def writer(_, a, b, v):
            out_raw.write('{}\t{}\t{}\n'.format(a, b, v))

        return writer2 if func else writer

    def write_bias(func=None):
        def writer2(c, a, b, v):
            func(c, a, b, v)
            out_nrm.write('{}\t{}\t{}\n'.format(a, b, v / bias1[a] / bias2[b]))

        def writer(_, a, b, v):
            out_nrm.write('{}\t{}\t{}\n'.format(a, b, v / bias1[a] / bias2[b]))

        return writer2 if func else writer

    def write_expc(func=None):
        def writer2(c, a, b, v):
            func(c, a, b, v)
            out_dec.write('{}\t{}\t{}\n'.format(
                a, b, v / bias1[a] / bias2[b] / decay[c][abs(a - b)]))

        def writer(c, a, b, v):
            out_dec.write('{}\t{}\t{}\n'.format(
                a, b, v / bias1[a] / bias2[b] / decay[c][abs(a - b)]))

        return writer2 if func else writer

    def write_expc_2reg(func=None):
        def writer2(c, a, b, v):
            func(c, a, b, v)
            out_dec.write('{}\t{}\t{}\n'.format(
                a, b, v / bias1[a] / bias2[b] /
                decay[c][abs((a + start_bin1) - (b + start_bin2))]))

        def writer(c, a, b, v):
            out_dec.write('{}\t{}\t{}\n'.format(
                a, b, v / bias1[a] / bias2[b] /
                decay[c][abs((a + start_bin1) - (b + start_bin2))]))

        return writer2 if func else writer

    def write_expc_err(func=None):
        def writer2(c, a, b, v):
            func(c, a, b, v)
            try:
                out_dec.write('{}\t{}\t{}\n'.format(
                    a, b, v / bias1[a] / bias2[b] / decay[c][abs(a - b)]))
            except KeyError:  # different chromosomes
                out_dec.write('{}\t{}\t{}\n'.format(a, b, 'nan'))

        def writer(c, a, b, v):
            try:
                out_dec.write('{}\t{}\t{}\n'.format(
                    a, b, v / bias1[a] / bias2[b] / decay[c][abs(a - b)]))
            except KeyError:  # different chromosomes
                out_dec.write('{}\t{}\t{}\n'.format(a, b, 'nan'))

        return writer2 if func else writer

    def write_raw_and_expc(func=None):
        def writer2(c, a, b, v):
            func(c, a, b, v)
            try:
                out_dec.write('{}\t{}\t{}\t{}\n'.format(
                    a, b, v, v / bias1[a] / bias2[b] / decay[c][abs(a - b)]))
            except KeyError:  # different chromosomes
                out_dec.write('{}\t{}\t{}\t{}\n'.format(
                    a, b, v, v / bias1[a] / bias2[b]))

        def writer(c, a, b, v):
            try:
                out_dec.write('{}\t{}\t{}\t{}\n'.format(
                    a, b, v, v / bias1[a] / bias2[b] / decay[c][abs(a - b)]))
            except KeyError:  # different chromosomes
                out_dec.write('{}\t{}\t{}\t{}\n'.format(
                    a, b, v, v / bias1[a] / bias2[b]))

        return writer2 if func else writer

    write = None
    if 'raw' in normalizations:
        write = write_raw(write)
    if 'norm' in normalizations and not cooler:
        write = write_bias(write)
    if 'decay' in normalizations and not cooler:
        if len(regions) == 1:
            if region2:
                write = write_expc_2reg(write)
            else:
                write = write_expc(write)
        else:
            write = write_expc_err(write)
    if 'raw&decay' in normalizations and not cooler:
        write = write_raw_and_expc(write)

    # pull all sub-matrices and write full matrix
    if region2 is not None:  # already half-matrix in this case
        half_matrix = False

    if cooler:
        for ichunk, c, j, k, v in _iter_matrix_frags(chunks,
                                                     tmpdir,
                                                     rand_hash,
                                                     verbose=verbose,
                                                     clean=clean,
                                                     include_chunk_count=True):
            if j > k:
                continue
            if j not in bads1 and k not in bads2:
                out_raw.write_iter(ichunk, j, k, v)
        out_raw.close()
    else:
        if half_matrix:
            for c, j, k, v in _iter_matrix_frags(chunks,
                                                 tmpdir,
                                                 rand_hash,
                                                 verbose=verbose,
                                                 clean=clean):
                if k > j:
                    continue
                if j not in bads1 and k not in bads2:
                    write(c, j, k, v)
        else:
            for c, j, k, v in _iter_matrix_frags(chunks,
                                                 tmpdir,
                                                 rand_hash,
                                                 verbose=verbose,
                                                 clean=clean):
                if j not in bads1 and k not in bads2:
                    write(c, j, k, v)

    fnames = {}
    if append_to_tar:
        lock = LockFile(append_to_tar)
        with lock:
            archive = taropen(append_to_tar, "a:")
            for fobj, fnam in outfiles:
                fobj.seek(0)
                info = archive.tarinfo(name=fnam)
                info.size = len(fobj.getvalue())  # getvalue() works for both io.StringIO and the legacy StringIO
                archive.addfile(tarinfo=info, fileobj=fobj)
            archive.close()
    else:
        if cooler:
            fnames['RAW'] = out_raw.name
            if 'norm' in normalizations:
                fnam = 'nrm_%s_%s%s.mcool' % (name, nicer(resolution).replace(
                    ' ', ''), ('_' + extra) if extra else '')
                copyfile(outfiles[0][0], os.path.join(outdir, fnam))
                out_nrm = cooler_file(os.path.join(outdir, fnam), resolution,
                                      sections, regions)
                bias_data_row = [1. / b if b > 0 else 0 for b in bias1]
                bias_data_col = [1. / b if b > 0 else 0 for b in bias2]
                out_nrm.write_weights(bias_data_row, bias_data_col,
                                      *bin_coords)
                outfiles.append((os.path.join(outdir, fnam), fnam))
                fnames['NRM'] = os.path.join(outdir, fnam)
        else:
            if 'raw' in normalizations:
                out_raw.close()
                fnames['RAW'] = out_raw.name
            if 'norm' in normalizations:
                out_nrm.close()
                fnames['NRM'] = out_nrm.name
            if 'decay' in normalizations:
                out_dec.close()
                fnames['DEC'] = out_dec.name
            if 'raw&decay' in normalizations:
                out_dec.close()
                fnames['RAW&DEC'] = out_dec.name

    # this is the last thing we do in case something goes wrong
    if clean:
        os.system('rm -rf %s' % (os.path.join(tmpdir, '_tmp_%s' %
                                              (rand_hash))))

    return fnames
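
The append_to_tar branch above collects each matrix in an in-memory buffer and only then pushes it into the archive. A minimal sketch of that pattern, assuming Python 3 (so BytesIO stands in for the Python 2 StringIO used above):

from io import BytesIO
from tarfile import TarInfo, open as taropen

def append_buffers(tar_path, buffers):
    # buffers: list of (BytesIO, archive_name) pairs
    with taropen(tar_path, 'a:') as archive:  # 'a:' appends; plain tar only
        for fobj, fnam in buffers:
            fobj.seek(0)
            info = TarInfo(name=fnam)
            info.size = len(fobj.getvalue())  # addfile needs the exact byte count
            archive.addfile(tarinfo=info, fileobj=fobj)

Note that tarfile supports append mode only on uncompressed archives, which matches the "a:" mode used above; the LockFile guards against concurrent writers.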
Beispiel #43
0
    def reload(self):
        if self.theme_path is None:
            return
        self.default_colors = {"bg_color": "#111111", "bg_alpha": 127,
                               "bar2_bg_color":"#111111", "bar2_bg_alpha": 127}
        try:
            tar = taropen(self.theme_path)
        except Exception:
            logger.debug("Error opening dock theme %s" % self.theme_path)
            self.settings = {}
            self.name = "DBX"
            self.bg = {1: None, 2:None}
            self.bg_sizes = {1: -1, 2:-1}
            self.globals.set_dock_theme("dbx.tar.gz", self.default_colors)
            self.emit("dock-theme-reloaded")
            return
        # Load settings
        try:
            config = tar.extractfile("theme")
        except Exception:
            logger.exception("Error extracting theme from %s" % \
                             self.theme_path)
            tar.close()
            self.settings = {}
            self.name = "DBX"
            self.bg = None
            self.globals.set_dock_theme("dbx.tar.gz", self.default_colors)
            self.emit("dock-theme-reloaded")
            return
        old_settings = self.settings
        self.settings = {}
        name = None
        for line in config.readlines():
            # Split at "=" and clean up the key and value
            if "=" not in line:
                continue
            key, value = line.split("=", 1)
            key = key.strip().lower()
            value = value.strip()
            # Remove comments
            if "#" in key:
                continue
            # If there is a trailing comment, remove it,
            # but avoid removing a "#" that sits inside quotes
            sharp = value.find("#")
            if sharp != -1 and value.count("\"", 0, sharp) % 2 == 0 and \
               value.count("'", 0, sharp) % 2 == 0:
                value = value.split("#", 1)[0].strip()
            # Remove surrounding quote signs
            if len(value) > 1 and value[0] in ("\"", "'") and \
               value[-1] in ("\"", "'"):
                value = value[1:-1]

            if key == "name":
                name = value
                continue
            value = value.lower()
            self.settings[key] = value
        config.close()
        if name:
            self.name = name
        else:
            # Todo: Error handling here!
            self.settings = old_settings
            tar.close()
            self.globals.set_dock_theme("dbx.tar.gz", self.default_colors)
            self.emit("dock-theme-reloaded")
            return
        # Load background
        self.bg = {1:None, 2:None}
        self.bg_sizes = {1: -1, 2: -1}
        self.resized_bg = {}
        if "background.png" in tar.getnames():
            bgf = tar.extractfile("background.png")
            self.bg[1] = cairo.ImageSurface.create_from_png(bgf)
            bgf.close()
        if "bar2_background.png" in tar.getnames():
            bgf = tar.extractfile("bar2_background.png")
            self.bg[2] = cairo.ImageSurface.create_from_png(bgf)
            bgf.close()
        tar.close()

        for key in self.default_colors.keys():
            if key in self.settings:
                value = self.settings.pop(key)
                if "alpha" in key:
                    value = int(round(int(value))*2.55)
                elif value[0] != "#":
                        value = "#%s" % value
                self.default_colors[key] = value

        # Inform rest of dockbar about the reload.
        self.globals.set_dock_theme(self.theme_path.rsplit("/", 1)[-1],
                                    self.default_colors)
        self.emit("dock-theme-reloaded")
Beispiel #44
0
    def reload(self):
        if self.style_path is None:
            return
        # Default settings
        self.bg = None
        self.cb_pressed_pic = None
        self.cb_hover_pic = None
        self.cb_normal_pic = None
        self.settings = {"border_color2": "#000000",
                         "menu_item_lr_padding": 3}
        self.name = "DBX"
        try:
            tar = taropen(self.style_path)
        except Exception:
            logger.debug("Error opening style %s" % self.style_path)
            self.globals.set_popup_style("dbx.tar.gz")
            self.emit("popup-style-reloaded")
            return
        # Load settings
        try:
            config = tar.extractfile("style")
        except Exception:
            logger.exception("Error extracting style from %s" % \
                             self.style_path)
            tar.close()
            self.globals.set_popup_style("dbx.tar.gz")
            self.emit("popup-style-reloaded")
            return
        self.settings = {}
        name = None  # avoid a NameError below when the style file has no "name" key
        for line in config.readlines():
            # Split at "=" and clean up the key and value
            if "=" not in line:
                continue
            key, value = line.split("=", 1)
            key = key.strip().lower()
            value = value.strip()
            # Remove comments
            if "#" in key:
                continue
            # If there is a trailing comment, remove it,
            # but avoid removing a "#" that sits inside quotes
            sharp = value.find("#")
            if sharp != -1 and value.count("\"", 0, sharp) % 2 == 0 and \
               value.count("'", 0, sharp) % 2 == 0:
                value = value.split("#", 1)[0].strip()
            # Remove surrounding quote signs
            if len(value) > 1 and value[0] in ("\"", "'") and \
               value[-1] in ("\"", "'"):
                value = value[1:-1]

            if key == "name":
                name = value
                continue
            value = value.lower()
            self.settings[key] = value
        config.close()
        if name:
            self.name = name
        else:
            self.settings = {"border_color2": "#000000",
                             "menu_item_lr_padding": 3}
            self.globals.set_popup_style("dbx.tar.gz")
            self.emit("popup-style-reloaded")
            tar.close()
            return
        # Load background
        if "background.png" in tar.getnames():
            bgf = tar.extractfile("background.png")
            self.bg = cairo.ImageSurface.create_from_png(bgf)
            bgf.close()
        if "closebutton/normal.png" in tar.getnames():
            cbf = tar.extractfile("closebutton/normal.png")
            self.cb_normal_pic = cairo.ImageSurface.create_from_png(cbf)
            cbf.close()
        if "closebutton/pressed.png" in tar.getnames():
            cbf = tar.extractfile("closebutton/pressed.png")
            self.cb_pressed_pic = cairo.ImageSurface.create_from_png(cbf)
            cbf.close()
        if "closebutton/hover.png" in tar.getnames():
            cbf = tar.extractfile("closebutton/hover.png")
            self.cb_hover_pic = cairo.ImageSurface.create_from_png(cbf)
            cbf.close()
        tar.close()

        # Inform rest of dockbar about the reload.
        self.globals.set_popup_style(self.style_path.rsplit("/", 1)[-1])
        self.emit("popup-style-reloaded")
Beispiel #45
0
    def source(self):
        url = "https://github.com/martinmoene/lest/archive/v{0}.tar.gz".format(self.version)
        with closing(urlopen(url)) as dl:
            with taropen(mode='r|gz', fileobj=dl) as archive:
                archive.extractall()
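
The snippet above streams the release tarball straight from the network; mode 'r|gz' reads the stream strictly sequentially, so no seekable temp file is needed. A sketch of the same pattern with an explicit destination (the url is a placeholder):

from contextlib import closing
from tarfile import open as taropen
from urllib.request import urlopen  # Python 3 home of urlopen

def fetch_and_extract(url, dest="."):
    with closing(urlopen(url)) as dl:
        # 'r|gz' decompresses on the fly without seeking the source
        with taropen(mode="r|gz", fileobj=dl) as archive:
            archive.extractall(path=dest)  # trusts the archive's member paths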
Beispiel #46
0
def unpack(name, tar_data):
    directory = mkdtemp(prefix=name)
    with closing(StringIO(b64decode(tar_data))) as buffer:
        with taropen(mode='r', fileobj=buffer) as tar:
            tar.extractall(path=directory)
    return directory
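
A complementary helper (an assumption, not part of the example) shows how the base64 payload that unpack() expects could be produced; this is a Python 3 sketch, so BytesIO replaces the StringIO used above:

from base64 import b64encode
from io import BytesIO
from tarfile import open as taropen

def pack(path):
    buffer = BytesIO()
    with taropen(mode="w", fileobj=buffer) as tar:
        tar.add(path)  # adds directories recursively
    return b64encode(buffer.getvalue())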
Beispiel #47
0
    def download(self):
        tar_dbfile = abspath(join(self.data_folder, 'GeoLite2-City.tar.gz'))
        maxmind_url = (
            'https://download.maxmind.com/app/geoip_download?edition_id=GeoLite2-City'
            f'&suffix=tar.gz&license_key={self.maxmind_license_key}')
        downloaded = False

        retry_counter = 0

        while not downloaded:
            self.logger.info('Downloading GeoLite2 DB from MaxMind...')
            try:
                urlretrieve(maxmind_url, tar_dbfile)
                downloaded = True
            # HTTPError is a subclass of URLError, so it must be caught
            # first or its branch would never run.
            except HTTPError as e:
                if e.code == 401:
                    self.logger.error(
                        "Your MaxMind license key is incorrect! Check your config: %s",
                        e)
                    result_status = 1
                    return result_status
                else:
                    self.logger.error(
                        "Problem downloading new MaxMind DB... Trying again: %s",
                        e)
                    sleep(2)
                    retry_counter += 1

                if retry_counter >= 3:
                    self.logger.error(
                        "Retried downloading the new MaxMind DB 3 times and failed... Aborting!"
                    )
                    result_status = 1
                    return result_status
            except URLError as e:
                self.logger.error("Problem downloading new MaxMind DB: %s", e)
                result_status = 1
                return result_status
        try:
            remove(self.dbfile)
        except FileNotFoundError:
            self.logger.warning(
                "Cannot remove MaxMind DB as it does not exist!")

        self.logger.debug("Opening MaxMind tar file : %s", tar_dbfile)

        tar = taropen(tar_dbfile, 'r:gz')

        for files in tar.getmembers():
            if 'GeoLite2-City.mmdb' in files.name:
                self.logger.debug('"GeoLite2-City.mmdb" FOUND in tar file')
                files.name = basename(files.name)
                tar.extract(files, self.data_folder)
                self.logger.debug('%s has been extracted to %s', files,
                                  self.data_folder)
        tar.close()
        try:
            remove(tar_dbfile)
            self.logger.debug('Removed the MaxMind DB tar file.')
        except FileNotFoundError:
            self.logger.warning(
                "Cannot remove MaxMind DB TAR file as it does not exist!")
Beispiel #48
0
    def source(self):
        release_url = "https://github.com/philsquared/Catch/archive/v{0}.tar.gz".format(self.version)
        with closing(urlopen(release_url)) as dl:
            with taropen(mode='r|gz', fileobj=dl) as archive:
                archive.extractall()
Beispiel #49
0
    for r in TRN.execute_fetchindex():
        to_tgz = None
        a = Artifact(r[0])
        for _, fp, fp_type in a.filepaths:
            if fp_type == 'directory':
                # removing / from the path if it exists
                to_tgz = fp[:-1] if fp[-1] == '/' else fp
                break

        if to_tgz is None:
            continue

        tgz = to_tgz + '.tgz'
        if not exists(tgz):
            with taropen(tgz, "w:gz") as tar:
                tar.add(to_tgz, arcname=basename(to_tgz))

        a_id = a.id
        # Add the new tgz file to the artifact.
        fp_ids = insert_filepaths([(tgz, tgz_id)], a_id, a.artifact_type,
                                  move_files=False)
        sql = """INSERT INTO qiita.artifact_filepath
                    (artifact_id, filepath_id)
                 VALUES (%s, %s)"""
        sql_args = [[a_id, fp_id] for fp_id in fp_ids]
        TRN.add(sql, sql_args, many=True)
        TRN.execute()

    #
    # Generating compressed files for analysis
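
The directory-to-.tgz step above, reduced to its essentials (a sketch; arcname keeps the archive rooted at the directory itself rather than its full path):

from os.path import basename, exists
from tarfile import open as taropen

def compress_dir(path):
    tgz = path.rstrip("/") + ".tgz"
    if not exists(tgz):
        with taropen(tgz, "w:gz") as tar:
            tar.add(path, arcname=basename(path))
    return tgz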
Beispiel #50
0
    def setUpClass(self):
        tf = taropen(join(self._tests_dir, 'linux_network.tgz'))
        tf.extractall(path=self._temp_dir)
        InterfaceStatistics._scn = self._scn_mock
Beispiel #51
0
    def setUpClass(self):
        tf = taropen(join(self._tests_dir, 'linux_network.tgz'))
        tf.extractall(path=self._temp_dir)
        InterfaceStatistics._scn = self._scn_mock
Beispiel #52
0
        'metavar': 'params',
        'help': "parameters for the bot, one or more key or bot_key"
    }),
    (('-v', '--verbosity'), {
        'type': int,
        'default': 1,
        'help': "0 = condensed, 1 = expanded, 4+ = debugging info"
    })
)

if __name__ == '__main__':
    args = parse_args(description, arguments)

    for params in args.params_sets:
        params_key, params_filename = resolve_path(args.bot, params, 'params')
        pack = 'packs/{}.tar.gz'.format(params_key)
        with taropen(pack, 'w:gz') as archive:
            archive.add('packs/template/bot.py', 'bot.py')
            archive.add('packs/template/bot_wrapper.pyx', 'bot_wrapper.pyx')
            archive.add('packs/template/bot_base.pxd', 'bot_base.pxd')
            archive.add('packs/template/bot_base.pyx', 'bot_base.pyx')
            archive.add('bot_{}.pxd'.format(args.bot), 'bot_core.pxd')
            archive.add('bot_{}.pyx'.format(args.bot), 'bot_core.pyx')
            archive.add(params_filename, 'params.npz')

        if args.verbosity == 0:
            print(params_key)
        else:
            print("Packed {} with params {} as {} ({:.1f} KB).".format(
                        args.bot, params_key, pack, getsize(pack) / 1000))
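
To sanity-check a pack written this way, listing its members is enough; a quick sketch (the pack path is hypothetical):

from tarfile import open as taropen

with taropen("packs/example.tar.gz", "r:gz") as archive:  # hypothetical pack
    for member in archive.getmembers():
        print(member.name, member.size)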
Beispiel #53
0
def write_matrix(inbam, resolution, biases, outdir,
                 filter_exclude=(1, 2, 3, 4, 6, 7, 8, 9, 10),
                 normalizations=('decay',),
                 region1=None, start1=None, end1=None, clean=True,
                 region2=None, start2=None, end2=None, extra='',
                 half_matrix=True, nchunks=None, tmpdir='.', append_to_tar=None,
                 ncpus=8, verbose=True):
    """
    Writes a matrix file from a BAM file containing interacting reads. The
    matrix will be extracted from the genomic BAM; the genomic coordinates of
    this matrix will be at the intersection of two regions defined by the
    parameters region1, start1, end1 and region2, start2, end2. If the wanted
    matrix is symmetric, the second set of coordinates can be skipped.

    :param inbam: path to BAM file (generated by TADbit)
    :param resolution: resolution at which we want to write the matrix
    :param biases: path to a file with biases
    :param outdir: path to a folder where to write output files
    :param (1, 2, 3, 4, 6, 7, 8, 9, 10) filter_exclude: filters to define the
       set of valid pairs of reads.
    :param ('decay',) normalizations: tuple with normalizations to use, can be
       'decay', 'norm' or/and 'raw'. One file per normalization will be
       created.
    :param None region1: chromosome name of the first region from which to
       extract the matrix
    :param None start1: start coordinate of the first region from which to
       extract the matrix
    :param None end1: end coordinate of the first region from which to
       extract the matrix
    :param None region2: chromosome name of the second region from which to
       extract the matrix
    :param None start2: start coordinate of the second region from which to
       extract the matrix
    :param None end2: end coordinate of the second region from which to
       extract the matrix
    :param True half_matrix: writes only half of the matrix (and the diagonal)
    :param '.' tmpdir: where to write temporary files
    :param None append_to_tar: path to a TAR file where generated matrices will
       be written directly
    :param 8 ncpus: number of cpus to use to read the BAM file
    :param True verbose: speak
    :param None nchunks: maximum number of chunks into which to cut the BAM

    :returns: path to output files
    """
    if start1 is not None and end1:
        if end1 - start1 < resolution:
            raise Exception('ERROR: region1 should be at least as big as resolution')
    if start2 is not None and end2:
        if end2 - start2 < resolution:
            raise Exception('ERROR: region2 should be at least as big as resolution')

    if isinstance(normalizations, list):
        normalizations = tuple(normalizations)
    elif isinstance(normalizations, str):
        normalizations = tuple([normalizations])

    if not isinstance(filter_exclude, int):
        filter_exclude = filters_to_bin(filter_exclude)

    regions, rand_hash, bin_coords, chunks = read_bam(
        inbam, filter_exclude, resolution, ncpus=ncpus,
        region1=region1, start1=start1, end1=end1,
        region2=region2, start2=start2, end2=end2,
        tmpdir=tmpdir, nchunks=nchunks, verbose=verbose)

    if region1:
        regions = [region1]
        if region2:
            regions.append(region2)

    bamfile = AlignmentFile(inbam, 'rb')
    sections = OrderedDict(zip(bamfile.references,
                               [x for x in bamfile.lengths]))

    if biases:
        bias1, bias2, decay, bads1, bads2 = get_biases_region(biases, bin_coords)
    elif normalizations != ('raw', ):
        raise Exception('ERROR: should provide path to file with biases (pickle).')
    else:
        bads1 = bads2 = {}

    start_bin1, start_bin2 = bin_coords[::2]
    if verbose:
        printime('  - Writing matrices')
    # define output file name
    name = _generate_name(regions, (start1, start2), (end1, end2), resolution)

    # prepare file header
    outfiles = []
    if 'raw' in normalizations:
        fnam = 'raw_%s_%s%s.abc' % (name,
                                    nicer(resolution).replace(' ', ''),
                                    ('_' + extra) if extra else '')
        if append_to_tar:
            out_raw = StringIO()
            outfiles.append((out_raw, fnam))
        else:
            out_raw = open(os.path.join(outdir, fnam), 'w')
            outfiles.append((os.path.join(outdir, fnam), fnam))
        for reg in regions:
            out_raw.write('# CRM %s\t%d\n' % (reg, sections[reg]))

        out_raw.write('# %s resolution:%d\n' % (name, resolution))
        if region2:
            out_raw.write('# BADROWS %s\n' % (','.join([str(b) for b in bads1])))
            out_raw.write('# BADCOLS %s\n' % (','.join([str(b) for b in bads2])))
        else:
            out_raw.write('# MASKED %s\n' % (','.join([str(b) for b in bads1])))

    # write file header
    if 'norm' in normalizations:
        fnam = 'nrm_%s_%s%s.abc' % (name,
                                    nicer(resolution).replace(' ', ''),
                                    ('_' + extra) if extra else '')
        if append_to_tar:
            out_nrm = StringIO()
            outfiles.append((out_nrm, fnam))
        else:
            out_nrm = open(os.path.join(outdir, fnam), 'w')
            outfiles.append((os.path.join(outdir, fnam), fnam))
        for reg in regions:
            out_nrm.write('# CRM %s\t%d\n' % (reg, sections[reg]))

        out_nrm.write('# %s resolution:%d\n' % (name, resolution))
        if region2:
            out_nrm.write('# BADROWS %s\n' % (','.join([str(b) for b in bads1])))
            out_nrm.write('# BADCOLS %s\n' % (','.join([str(b) for b in bads2])))
        else:
            out_nrm.write('# MASKED %s\n' % (','.join([str(b) for b in bads1])))
    if 'decay' in normalizations or 'raw&decay' in normalizations:
        fnam = 'dec_%s_%s%s.abc' % (name,
                                    nicer(resolution).replace(' ', ''),
                                    ('_' + extra) if extra else '')
        if append_to_tar:
            out_dec = StringIO()
            outfiles.append((out_dec, fnam))
        else:
            out_dec = open(os.path.join(outdir, fnam), 'w')
            outfiles.append((os.path.join(outdir, fnam), fnam))
        for reg in regions:
            out_dec.write('# CRM %s\t%d\n' % (reg, sections[reg]))

        out_dec.write('# %s resolution:%d\n' % (
            name, resolution))
        if region2:
            out_dec.write('# BADROWS %s\n' % (','.join([str(b) for b in bads1])))
            out_dec.write('# BADCOLS %s\n' % (','.join([str(b) for b in bads2])))
        else:
            out_dec.write('# MASKED %s\n' % (','.join([str(b) for b in bads1])))

    # functions to write lines of pairwise interactions
    def write_raw(func=None):
        def writer2(c, a, b, v):
            func(c, a, b, v)
            out_raw.write('{}\t{}\t{}\n'.format(a, b, v))
        def writer(_, a, b, v):
            out_raw.write('{}\t{}\t{}\n'.format(a, b, v))
        return writer2 if func else writer

    def write_bias(func=None):
        def writer2(c, a, b, v):
            func(c, a, b, v)
            out_nrm.write('{}\t{}\t{}\n'.format(a, b, v / bias1[a] / bias2[b]))
        def writer(_, a, b, v):
            out_nrm.write('{}\t{}\t{}\n'.format(a, b, v / bias1[a] / bias2[b]))
        return writer2 if func else writer

    def write_expc(func=None):
        def writer2(c, a, b, v):
            func(c, a, b, v)
            out_dec.write('{}\t{}\t{}\n'.format(
                a, b, v / bias1[a] / bias2[b] / decay[c][abs(a-b)]))
        def writer(c, a, b, v):
            out_dec.write('{}\t{}\t{}\n'.format(
                a, b, v / bias1[a] / bias2[b] / decay[c][abs(a-b)]))
        return writer2 if func else writer

    def write_expc_2reg(func=None):
        def writer2(c, a, b, v):
            func(c, a, b, v)
            out_dec.write('{}\t{}\t{}\n'.format(
                a, b, v / bias1[a] / bias2[b] / decay[c][abs((a + start_bin1) - (b + start_bin2))]))
        def writer(c, a, b, v):
            out_dec.write('{}\t{}\t{}\n'.format(
                a, b, v / bias1[a] / bias2[b] / decay[c][abs((a + start_bin1) - (b + start_bin2))]))
        return writer2 if func else writer

    def write_expc_err(func=None):
        def writer2(c, a, b, v):
            func(c, a, b, v)
            try:
                out_dec.write('{}\t{}\t{}\n'.format(
                    a, b, v / bias1[a] / bias2[b] / decay[c][abs(a-b)]))
            except KeyError:  # different chromosomes
                out_dec.write('{}\t{}\t{}\n'.format(a, b, 'nan'))
        def writer(c, a, b, v):
            try:
                out_dec.write('{}\t{}\t{}\n'.format(
                    a, b, v / bias1[a] / bias2[b] / decay[c][abs(a-b)]))
            except KeyError:  # different chromosomes
                out_dec.write('{}\t{}\t{}\n'.format(a, b, 'nan'))
        return writer2 if func else writer

    def write_raw_and_expc(func=None):
        def writer2(c, a, b, v):
            func(c, a, b, v)
            try:
                out_dec.write('{}\t{}\t{}\t{}\n'.format(
                    a, b, v, v / bias1[a] / bias2[b] / decay[c][abs(a-b)]))
            except KeyError:  # different chromosomes
                out_dec.write('{}\t{}\t{}\t{}\n'.format(
                    a, b, v, v / bias1[a] / bias2[b]))
        def writer(c, a, b, v):
            try:
                out_dec.write('{}\t{}\t{}\t{}\n'.format(
                    a, b, v, v / bias1[a] / bias2[b] / decay[c][abs(a-b)]))
            except KeyError:  # different chromosomes
                out_dec.write('{}\t{}\t{}\t{}\n'.format(
                    a, b, v, v / bias1[a] / bias2[b]))
        return writer2 if func else writer

    write = None
    if 'raw'   in normalizations:
        write = write_raw(write)
    if 'norm'  in normalizations:
        write = write_bias(write)
    if 'decay' in normalizations:
        if len(regions) == 1:
            if region2:
                write = write_expc_2reg(write)
            else:
                write = write_expc(write)
        else:
            write = write_expc_err(write)
    if 'raw&decay' in normalizations:
        write = write_raw_and_expc(write)

    # pull all sub-matrices and write full matrix
    if region2 is not None:  # already half-matrix in this case
        half_matrix = False

    if half_matrix:
        for c, j, k, v in _iter_matrix_frags(chunks, tmpdir, rand_hash,
                                             verbose=verbose, clean=clean):
            if k > j:
                continue
            if j not in bads1 and k not in bads2:
                write(c, j, k, v)
    else:
        for c, j, k, v in _iter_matrix_frags(chunks, tmpdir, rand_hash,
                                             verbose=verbose, clean=clean):
            if j not in bads1 and k not in bads2:
                write(c, j, k, v)

    fnames = {}
    if append_to_tar:
        lock = LockFile(append_to_tar)
        with lock:
            archive = taropen(append_to_tar, "a:")
            for fobj, fnam in outfiles:
                fobj.seek(0)
                info = archive.tarinfo(name=fnam)
                info.size = len(fobj.getvalue())  # getvalue() works for both io.StringIO and the legacy StringIO
                archive.addfile(tarinfo=info, fileobj=fobj)
            archive.close()
    else:
        if 'raw' in normalizations:
            out_raw.close()
            fnames['RAW'] = out_raw.name
        if 'norm' in normalizations:
            out_nrm.close()
            fnames['NRM'] = out_nrm.name
        if 'decay' in normalizations:
            out_dec.close()
            fnames['DEC'] = out_dec.name
        if 'raw&decay' in normalizations:
            out_dec.close()
            fnames['RAW&DEC'] = out_dec.name

    # this is the last thing we do in case something goes wrong
    if clean:
        os.system('rm -rf %s' % (os.path.join(tmpdir, '_tmp_%s' % (rand_hash))))

    return fnames
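
A hedged usage sketch based solely on the signature and docstring above; the BAM, biases and output paths as well as the region are placeholders:

fnames = write_matrix(
    "sample.bam", 100000, "biases.pickle", "out/",
    normalizations=("raw", "decay"),
    region1="chr1", start1=1000000, end1=2000000,
    ncpus=4)
print(fnames)  # e.g. {'RAW': 'out/raw_...abc', 'DEC': 'out/dec_...abc'}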