Example #1
import logging
import tarfile
import zipfile

logger = logging.getLogger(__name__)


def extract_all(fp, fmt=None, root='.'):
    """Extract the archive at `fp` into `root`, inferring the format from
    the file extension when `fmt` is not given."""
    def find_decompressor(fmt_):
        dc_map = {
            'gzip': (tarfile.open, 'r:gz'),
            'zip': (zipfile.ZipFile, 'r'),
            'bz2': (tarfile.open, 'r:bz2')
        }
        if fmt_ not in dc_map:
            raise ValueError('the `%s` format is not supported.' % (fmt_,))
        return dc_map[fmt_]

    if not fmt:
        if fp.endswith('.zip'):
            fmt = 'zip'
        elif fp.endswith('.tar.gz') or fp.endswith('.tgz'):
            fmt = 'gzip'
        elif fp.endswith('.tar.bz2') or fp.endswith('.tbz'):
            fmt = 'bz2'
        # An unrecognised extension leaves fmt as None, which
        # find_decompressor rejects with a ValueError below.

    opener, mode = find_decompressor(fmt)

    try:
        with opener(fp, mode) as f:
            f.extractall(path=root)
    except Exception as e:  # StandardError no longer exists in Python 3
        logger.fatal('error occurred while extracting file %s: %s', fp, str(e))
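
A minimal usage sketch for the function above; the archive paths and destination directory are hypothetical:

# Hypothetical paths, for illustration only.
extract_all('data/images.tar.gz', root='data/extracted')  # format inferred as 'gzip'
extract_all('backup.dat', fmt='zip')                      # format forced explicitly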
Example #2
def prompt_for_eula(self):
    # Display the brand-specific EULA and abort setup unless the user accepts.
    eula = self._eula_by_brand()
    print(eula)  # Python 3 print function (the original used the Python 2 statement)
    ret = self.io.require_confirmation("Do you accept the terms above?")
    if not ret:
        logger.fatal("Setup aborted: cancelled by user")
        sys.exit(1)  # sys.exit is preferable to quit() outside the interactive shell
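
The `io.require_confirmation` helper is project-specific and not shown in the snippet; a minimal sketch of what such a prompt could look like, with the name, signature, and yes/no semantics all assumed:

def require_confirmation(prompt):
    # Assumed behavior: return True only on an explicit yes answer.
    answer = input('%s [y/N] ' % prompt)
    return answer.strip().lower() in ('y', 'yes')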
Example #3
import sys

from torch.utils.data import DataLoader, distributed
import torchvision.transforms as trans

# find_classes, make_dataset, ImageDataset, DistRandomIdentitySampler and the
# module-level logger are assumed to come from the surrounding project.


def get_train_loader(conf, data_mode, sample_identity=False):
    if data_mode == 'emore':
        root = conf.emore_folder/'imgs'
    elif data_mode == 'glint':
        root = conf.glint_folder/'imgs'
    else:
        logger.fatal('invalid data_mode {}'.format(data_mode))
        sys.exit(1)

    class_num, class_to_idx = find_classes(root)
    train_transform = trans.Compose([
        trans.RandomHorizontalFlip(),
        trans.ColorJitter(brightness=0.2, contrast=0.15, saturation=0, hue=0),
        trans.ToTensor(),
        trans.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
    ])

    extensions = ['.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm', '.tif']
    path_ds = make_dataset(root, class_to_idx, extensions)
    dataset = ImageDataset(path_ds, train_transform)

    # Shuffling is delegated to the sampler, so the loader keeps shuffle=False.
    if sample_identity:
        train_sampler = DistRandomIdentitySampler(dataset.dataset, conf.batch_size, conf.num_instances)
    else:
        train_sampler = distributed.DistributedSampler(dataset)
    loader = DataLoader(dataset, batch_size=conf.batch_size, shuffle=False,
                        pin_memory=conf.pin_memory, num_workers=conf.num_workers,
                        sampler=train_sampler)
    return loader, class_num
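
A hedged usage sketch for the loader above; the `conf` fields mirror exactly the attributes the function reads, but the paths and values are hypothetical, and the `torch.distributed` process group is assumed to be initialised before `DistributedSampler` is built:

from pathlib import Path
from types import SimpleNamespace

conf = SimpleNamespace(
    emore_folder=Path('data/emore'),  # hypothetical dataset root
    glint_folder=Path('data/glint'),
    batch_size=128,
    num_instances=4,   # samples per identity when sample_identity=True
    pin_memory=True,
    num_workers=4,
)
loader, class_num = get_train_loader(conf, 'emore')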
Example #4
def main():
    arguments = docopt.docopt(__doc__, version=VERSION_NUMBER)

    input_map = arguments["<contact_map>"]
    binning = int(arguments["--binning"])
    normalized = arguments["--normalize"]
    vmax = float(arguments["--max"])
    output_file = arguments["--output"]

    # Plot to screen when no real output file was requested; otherwise save it.
    process_matrix = save_matrix
    if not output_file or output_file == "output.png":
        process_matrix = plot_matrix

    raw_map = load_raw_matrix(input_map)
    sparse_map = raw_cols_to_sparse(raw_map)

    if normalized:
        sparse_map = hcs.normalize_sparse(sparse_map, norm="SCN")

    # Optionally bin the sparse map to reduce its resolution (and size).
    if binning > 1:
        binned_map = hcs.bin_sparse(M=sparse_map, subsampling_factor=binning)
    else:
        binned_map = sparse_map

    try:
        # Densifying a large contact map can exhaust memory.
        dense_map = sparse_to_dense(binned_map)
        process_matrix(dense_map, filename=output_file, vmax=vmax)
    except MemoryError:
        logger.fatal("Contact map is too large to load, try binning more")
Example #5
def get_train_loader_from_txt(conf, data_mode, sample_identity=False):
    # Same as get_train_loader, but the sample list is read from train_list.txt.
    if data_mode == 'emore':
        txt_path = conf.emore_folder/'imgs'/'train_list.txt'
    elif data_mode == 'glint':
        txt_path = conf.glint_folder/'imgs'/'train_list.txt'
    else:
        logger.fatal('invalid data_mode {}'.format(data_mode))
        sys.exit(1)

    train_transform = trans.Compose([
        trans.RandomHorizontalFlip(),
        trans.ColorJitter(brightness=0.2, contrast=0.15, saturation=0, hue=0),
        trans.ToTensor(),
        trans.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
    ])
    dataset = ImageLandmarkDataset(txt_path, train_transform)
    if sample_identity:
        train_sampler = DistRandomIdentitySampler(dataset.dataset, conf.batch_size, conf.num_instances)
    else:
        train_sampler = distributed.DistributedSampler(dataset)
    loader = DataLoader(dataset, batch_size=conf.batch_size, shuffle=False,
                        pin_memory=conf.pin_memory, num_workers=conf.num_workers,
                        sampler=train_sampler)
    return loader, dataset.class_num
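
One follow-up worth noting for both loader examples: `DistributedSampler` shuffles with a seed derived from the epoch, so a training loop should call `set_epoch` before each pass or every epoch sees the same order. A sketch of that standard PyTorch pattern, not shown in the original (`num_epochs` is hypothetical, and this applies on the `DistributedSampler` branch; whether `DistRandomIdentitySampler` supports `set_epoch` is not shown):

loader, class_num = get_train_loader_from_txt(conf, 'emore')
for epoch in range(num_epochs):
    loader.sampler.set_epoch(epoch)  # reshuffle consistently across ranks
    for batch in loader:
        pass  # training step goes here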