def __init__(self, discriminator, dataset, cache_dir, temp_data_dir,
                 device, args):
        self.discriminator = discriminator
        self.sdlf = SubsetDataloaderFactory(dataset)
        self.cache_dir = cache_dir
        self.device = device
        self.args = args

        self.prepare_vars = partial(hlp.prepare_vars, self.args.cuda,
                                    self.device)

        if not op.exists(self.cache_dir):
            os.makedirs(self.cache_dir)

        if not op.exists(temp_data_dir):
            os.makedirs(temp_data_dir)
        self.real_data_path = op.join(temp_data_dir, 'real_data.pkl')
        self.gen_data_path = op.join(temp_data_dir, 'generated_data.pkl')

        self._create_real_data_file = partial(hlp.create_real_data_file,
                                              outpath=self.real_data_path)
        self._create_generated_data_file = partial(
            hlp.create_generated_data_file,
            outpath=self.gen_data_path,
            cuda=self.args.cuda,
            device=self.device)

        self.criterion = nn.NLLLoss(size_average=False)
        self.optimizer = optim.Adam(self.discriminator.parameters(),
                                    lr=self.args.dscr_learning_rate)
        if self.args.cuda and torch.cuda.is_available():
            self.criterion = self.criterion.cuda()
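Many of the snippets on this page guard directory creation by hand. Note that `os.path` (imported as `op`, `osp`, or `path` in these examples) has no `makedirs`; the function lives on `os` itself. A minimal sketch of the equivalent standard-library idiom, with illustrative names:

import os

def ensure_dir(path):
    # Create `path` and any missing parent directories; with exist_ok=True
    # (Python 3.2+) the call is a no-op if the directory already exists.
    os.makedirs(path, exist_ok=True)
    return path

ensure_dir("cache/run_01")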
Code Example #2
File: initial_network.py Project: zfxu/PixelNet
def initial_network(net):
    weightfolder = '/home/xinleic/ConT/fast-rcnn/data/imagenet_models/'
    modelfolder = '/home/xinleic/ConT/fast-rcnn/models/'

    if net.find('caffenet') >= 0:
        weightfile = osp.join(weightfolder, 'CaffeNet.v2.caffemodel')
        modelfile = osp.join(modelfolder, 'CaffeNet/test-o.prototxt')
    elif net.find('middle') >= 0:
        weightfile = osp.join(weightfolder, 'VGG_CNN_M_1024.v2.caffemodel')
        modelfile = osp.join(modelfolder, 'VGG_CNN_M_1024/test-o.prototxt')
    elif net.find('vgg16') >= 0:
        weightfile = osp.join(weightfolder, 'VGG16.v2.caffemodel')
        modelfile = osp.join(modelfolder, 'VGG16/test-o.prototxt')
    else:
        raise NotImplementedError('Sorry, network not recognized!')

    savefolder = osp.join(options.cnetpath, net)
    if not osp.isdir(savefolder):
        os.makedirs(savefolder)

    initmodel = osp.join(savefolder, net + '_init.caffemodel')
    if not osp.exists(initmodel):
        os.symlink(weightfile, initmodel)

    return weightfile, modelfile
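A minimal usage sketch, assuming the module-level `options.cnetpath` is configured and the hard-coded weight/prototxt folders above exist; the 'vgg16' key simply selects the matching branch:

# Illustrative call: returns the Caffe weights and test prototxt chosen by
# the substring match in initial_network(), and symlinks the init model.
weightfile, modelfile = initial_network('vgg16')
print(weightfile, modelfile)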
Code Example #3
def _get_path(config, section, option, is_dir=False, dpath=NoneType):
    """Get and validate the path specified in configuration file.

    Parameters
    ----------
    config : configparser.ConfigParser
        The initialized config parser.
    section : string
        The section of configuration file.
    option : string
        The option of the section.
    is_dir : bool
        Whether the path is a directory.
    dpath : string
        The default path.

    Returns
    -------
    path : string
        The verified absolute path specified in the option.
    """
    pth_ = _get_val(config, section, option, dval=dpath)
    pth = path.abspath(path.expanduser(pth_))
    if is_dir:
        _check(path.isdir(pth), section, option,
               message='Not a valid dir `%s`' % pth_)
        if not path.exists(pth):
            os.makedirs(pth)
    else:
        _check(path.exists(pth), section, option,
               message='File `%s` not found' % pth_)
    return pth
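A minimal usage sketch, assuming the module's own `_get_val` and `_check` helpers are importable; the section and option names below are made up for illustration:

from configparser import ConfigParser

config = ConfigParser()
config.read_string("[paths]\nwork_dir = ~/experiments\n")
# Expands '~', converts to an absolute path, and validates it as a directory.
work_dir = _get_path(config, 'paths', 'work_dir', is_dir=True)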
Code Example #4
File: timeseries.py Project: cblanks/market-data
    def __init__(self, input_data,
                 analysis_period=[__long_ago__, datetime.date.today()],
                 lookback_days=[10]):
        """
        The initialisation method.
        """
        if not os.path.exists(self.analysis_dir):
            os.makedirs(self.analysis_dir)
        
        self.input_data = input_data
        self.lookback_days = lookback_days
        for i in lookback_days:
            if i<1: 
                print "Number of lookback days must be greater than zero."
                break

        self.name = self.__setName__()
        
        csvDataset.__init__(self, self.__csvPath__())
        
        self.headers = self.__setHeaders__()
        self.analysis_period = self.__setAnalysisPeriod__(analysis_period)
        self.preliminary_analyses = self.__setPreliminaryAnalyses__()
        
        print self.name, "(%s to %s)" % ( self.analysis_period[0].isoformat(),
                                          self.analysis_period[1].isoformat() )
Code Example #5
File: pipe.py Project: muppetjones/rempipe
def run_pipe(summary, genome, project_dir, force=False):

    # initialize all pipes
    pipes = []
    for row in summary:

        job_name = row[0]
        files = row[1:]
        log.info('Processing "{}"'.format(job_name))

        sample_dir = os.path.join(project_dir, job_name)
        path.makedirs(sample_dir)

        pipe = NestedGenomicsPipe(
            job_name=job_name,
            odir=sample_dir,
            input_list=files,
            force=force,
            genome=genome,
        )
        pipe.write_script(directory=sample_dir)
        pipes.append(pipe)

    # execute each pipe in turn
    for pipe in pipes:
        relpath = os.path.relpath(pipe.pbs_file, ROOT_DIR)
        log.info('Running pbs script: "{}"'.format(relpath))
        pipe.run()
Code Example #6
File: Excel.py Project: wdatasci/WDS-ModelSpec
def Excel2CSV(fn,
              targetdir=".",
              targetbasename=None,
              SheetPrefix="Sheet",
              NameTransform=None,
              nrows=None,
              ncols=None,
              isPreviewNameOnly=False):
    if not osp.isdir(targetdir):
        os.makedirs(targetdir, mode=0o777, exist_ok=True)
    fnh, fnt = osp.split(fn)
    fntr, fnte = osp.splitext(fnt)
    w = xlrd.open_workbook(fn)
    for s in w.sheets():
        newfn = s.name
        if targetbasename:
            newfn = targetbasename + "." + SheetPrefix + "." + newfn + ".csv"
        else:
            newfn = fntr + "." + SheetPrefix + "." + newfn + ".csv"
        if NameTransform:
            newfn = NameTransform(newfn, fn)
        l_nrows = nrows if nrows is not None else s.nrows
        l_ncols = ncols if ncols is not None else s.ncols
        if isPreviewNameOnly:
            print("Original:", fn, " Sheet:", s.name)
            print("  Extract To:", newfn)
        else:
            fid = open(osp.join(targetdir, newfn), 'w')
            dw = csv.DictWriter(fid,
                                dialect=csv.excel,
                                fieldnames=list(range(l_ncols)))
            for i in range(l_nrows):
                lrow = collections.OrderedDict()
                for j in range(l_ncols):
                    if (s.cell(i, j).ctype == 1):
                        lrow[j] = s.cell(i, j).value.strip()
                    elif (s.cell(i, j).ctype == 2):
                        if (math.fabs(
                                math.trunc(s.cell(i, j).value) -
                                s.cell(i, j).value) < 1e-8):
                            lrow[j] = int(s.cell(i, j).value)
                        else:
                            lrow[j] = s.cell(i, j).value
                    elif (s.cell(i, j).ctype == 3):
                        if (math.fabs(
                                math.trunc(s.cell(i, j).value) -
                                s.cell(i, j).value) < 1e-8):
                            y, m, d, hh, mm, ss = xlrd.xldate.xldate_as_tuple(
                                s.cell(i, j).value, w.datemode)
                            lrow[j] = str(date(y, m, d)).strip()
                        else:
                            lrow[j] = str(
                                xlrd.xldate.xldate_as_datetime(
                                    s.cell(i, j).value, w.datemode)).strip()
                dw.writerow(lrow)
            fid.close()
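A minimal usage sketch, assuming `xlrd` is installed and a workbook named `report.xls` exists (both the filename and the target directory are illustrative):

# Dry run: print the per-sheet CSV names without writing any CSV files.
Excel2CSV("report.xls", targetdir="out_csv", isPreviewNameOnly=True)

# Full extraction: writes one CSV per sheet under out_csv/.
Excel2CSV("report.xls", targetdir="out_csv")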
Code Example #7
def download_and_unzip_ikea_meshes(dst_dir):
    zip_file = dst_dir + ".zip"
    os.makedirs(osp.dirname(zip_file))
    download_ikea_meshes(zip_file)
    unzipped_dir = dst_dir + "_unzipped"
    unzip_file(zip_file, unzipped_dir)
    os.rename(osp.join(unzipped_dir, "IKEA"), dst_dir)
    os.remove(zip_file)
    os.removedirs(unzipped_dir)
    return True
Code Example #8
File: plugin.py Project: OpenAZBox/RTi-Old
    def doCopyDir(self, result):
        if result is not None:
            if result[1]:
                self.session.openWithCallback(self.callback, MessageBox, _('Copying ...'), type = 1, timeout = 1)
                symlinks = False
                aaa = self.SOURCELIST.getCurrentDirectory()
                src = self.SOURCELIST.getFilename()
                bbb = src[len(aaa):]
                dst = self.TARGETLIST.getCurrentDirectory() + bbb
                names = os.listdir(src)
                
                try:
                    os.makedirs(dst)
                except:
                    pass

                errors = []
                for name in names:
                    srcname = os.path.join(src, name)
                    dstname = os.path.join(dst, name)
                    
                    try:
                        if symlinks and os.path.islink(srcname):
                            linkto = os.readlink(srcname)
                            os.symlink(linkto, dstname)
                        elif os.path.isdir(srcname):
                            shutil.copytree(srcname, dstname, symlinks)
                        else:
                            shutil.copy2(srcname, dstname)
                    except (IOError, os.error) as why:
                        errors.append((srcname, dstname, str(why)))
                        continue

                try:
                    copystat(src, dst)
                except:
                    pass

                self.doRefresh()
Code Example #9
    def __init__(self, data_batch, net, common_conf, solver_conf, data_conf):
        # process params
        self._moment = float(solver_conf['moment'])
        self._batch_size = int(common_conf['batch_size'])


        self._data_path = str(data_conf['data_path'])
        self._output_dir = osp.join("..", self._data_path, 'output')
        if not osp.exists(self._output_dir):
            os.makedirs(self._output_dir)

        self._pretrain_path = str(solver_conf['pretrain_model_path'])
        self._pretrain_path = osp.join("..", self._data_path
            , self._pretrain_path)
        #assert osp.exists(self._pretrain_path), self._pretrain_path

        self._train_dir = str(solver_conf['train_dir'])
        self._train_dir = osp.join("..", self._data_path
            , self._train_dir)
        if not osp.exists(self._train_dir):
            os.makedirs(self._train_dir)

        self._max_iters = int(solver_conf['max_iterators'])
        self._save_iter = int(solver_conf['save_iter'])
        self._summary_iter = int(solver_conf['summary_iter'])

        self._learning_rate = float(solver_conf['learning_rate'])
        print("learning_rate:{}".format(self._learning_rate))
        self._decay_steps = int(solver_conf['decay_steps'])
        self._decay_rate = float(solver_conf['decay_rate'])
        self._stair_case = bool(solver_conf['stair_case'])

        self._num_classes = int(common_conf['num_classes'])

        self._ckpt_file = os.path.join(self._output_dir, 'save.ckpt')

        #
        self._data_batch = data_batch
        self._net = net

        # construct graph
        self.construct_graph()
Code Example #10
    def __init__(self, discriminator, dataset, cache_dir, temp_data_dir,
                 device, args):
        self.discriminator = discriminator
        self.sdlf = SubsetDataloaderFactory(dataset)
        self.cache_dir = cache_dir
        self.device = device
        self.args = args

        if not op.exists(self.cache_dir):
            os.makedirs(self.cache_dir)

        if not op.exists(temp_data_dir):
            os.makedirs(temp_data_dir)
        self.real_data_path = op.join(temp_data_dir, 'real.data')
        self.generated_data_path = op.join(temp_data_dir, 'generated.data')

        self.criterion = nn.NLLLoss(size_average=False)
        self.optimizer = optim.Adam(self.discriminator.parameters(),
                                    lr=self.args.dscr_learning_rate)
        if self.args.cuda and torch.cuda.is_available():
            self.criterion = self.criterion.cuda()
Code Example #11
    def __init__(self, generator, discriminator, dataset, temp_data_dir,
                 valid_iter, device, args):
        self.generator = generator
        self.discriminator = discriminator
        self.valid_iter = valid_iter
        self.device = device
        self.args = args

        self.sdlf = SubsetDataloaderFactory(dataset)
        self.rollout = Rollout(self.generator,
                               update_rate=0.8,
                               device=self.device)

        self.gan_loss = GANLoss(use_cuda=self.args.cuda)
        self.gan_optimizer = optim.Adam(self.generator.parameters(),
                                        lr=args.adv_gen_learning_rate)

        self.dscr_loss = nn.NLLLoss(size_average=False)
        self.dscr_optimizer = optim.Adam(self.discriminator.parameters(),
                                         lr=args.adv_dscr_learning_rate)

        self.eval_loss = nn.NLLLoss(size_average=False)

        if self.args.cuda and torch.cuda.is_available():
            self.gan_loss = self.gan_loss.cuda()
            self.dscr_loss = self.dscr_loss.cuda()
            self.eval_loss = self.eval_loss.cuda()

        if not op.exists(temp_data_dir):
            os.makedirs(temp_data_dir)
        self.real_data_path = op.join(temp_data_dir, 'real.data')
        self.generated_data_path = op.join(temp_data_dir, 'generated.data')

        self.run_dir = op.join("runs", self.args.dataset,
                               datetime.now().strftime('%b%d-%y_%H:%M:%S'))
        if not op.exists(self.run_dir):
            os.makedirs(self.run_dir)

        self.gen_losses = None
        self.dscr_losses = None
Code Example #12
def archive_all_but_newest(F, D):
    """
    copied over from the original gen_html
    """
    if not exists(F):
        return
    if not exists(packages['archive']):
        makedirs(packages['archive'])
        print('mkdir: %s' % packages['archive'])
    name = package_name(F)
    versions = sorted([(os.path.getmtime(m), m) for m in D if os.path.exists(m) and package_name(m) == name])
    for G in versions[:-1]:
        print("Archiving %s..." % G[1])
        cmd = "mv %s.* %s" % (G[1][:-5], packages['archive'])
        print(cmd)
Code Example #13
def main():
    args = parse_args()

    assert args.out or args.show or args.show_dir, \
        ('Please specify at least one operation (save or show the results) '
         'with the argument "--out", "--show" or "--show-dir"')

    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    cfg = mmcv.Config.fromfile(args.config)
    if args.cfg_options is not None:
        cfg.merge_from_dict(args.cfg_options)
    # import modules from string list.
    if cfg.get('custom_imports', None):
        from mmcv.utils import import_modules_from_strings
        import_modules_from_strings(**cfg['custom_imports'])
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True
    if args.workers == 0:
        args.workers = cfg.data.workers_per_gpu

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # set random seeds
    if args.seed is not None:
        set_random_seed(args.seed)

    if 'all' in args.corruptions:
        corruptions = [
            'gaussian_noise', 'shot_noise', 'impulse_noise', 'defocus_blur',
            'glass_blur', 'motion_blur', 'zoom_blur', 'snow', 'frost', 'fog',
            'brightness', 'contrast', 'elastic_transform', 'pixelate',
            'jpeg_compression', 'speckle_noise', 'gaussian_blur', 'spatter',
            'saturate'
        ]
    elif 'benchmark' in args.corruptions:
        corruptions = [
            'gaussian_noise', 'shot_noise', 'impulse_noise', 'defocus_blur',
            'glass_blur', 'motion_blur', 'zoom_blur', 'snow', 'frost', 'fog',
            'brightness', 'contrast', 'elastic_transform', 'pixelate',
            'jpeg_compression'
        ]
    elif 'noise' in args.corruptions:
        corruptions = ['gaussian_noise', 'shot_noise', 'impulse_noise']
    elif 'blur' in args.corruptions:
        corruptions = [
            'defocus_blur', 'glass_blur', 'motion_blur', 'zoom_blur'
        ]
    elif 'weather' in args.corruptions:
        corruptions = ['snow', 'frost', 'fog', 'brightness']
    elif 'digital' in args.corruptions:
        corruptions = [
            'contrast', 'elastic_transform', 'pixelate', 'jpeg_compression'
        ]
    elif 'holdout' in args.corruptions:
        corruptions = ['speckle_noise', 'gaussian_blur', 'spatter', 'saturate']
    elif 'None' in args.corruptions:
        corruptions = ['None']
        args.severities = [0]
    else:
        corruptions = args.corruptions

    rank, _ = get_dist_info()
    aggregated_results = {}
    for corr_i, corruption in enumerate(corruptions):
        aggregated_results[corruption] = {}
        for sev_i, corruption_severity in enumerate(args.severities):
            # evaluate severity 0 (= no corruption) only once
            if corr_i > 0 and corruption_severity == 0:
                aggregated_results[corruption][0] = \
                    aggregated_results[corruptions[0]][0]
                continue

            test_data_cfg = copy.deepcopy(cfg.data.test)
            # assign corruption and severity
            if corruption_severity > 0:
                corruption_trans = dict(type='Corrupt',
                                        corruption=corruption,
                                        severity=corruption_severity)
                # TODO: hard coded "1", we assume that the first step is
                # loading images, which needs to be fixed in the future
                test_data_cfg['pipeline'].insert(1, corruption_trans)

            # print info
            print(f'\nTesting {corruption} at severity {corruption_severity}')

            # build the dataloader
            # TODO: support multiple images per gpu
            #       (only minor changes are needed)
            dataset = build_dataset(test_data_cfg)
            data_loader = build_dataloader(dataset,
                                           samples_per_gpu=1,
                                           workers_per_gpu=args.workers,
                                           dist=distributed,
                                           shuffle=False)

            # build the model and load checkpoint
            cfg.model.train_cfg = None
            model = build_detector(cfg.model, test_cfg=cfg.get('test_cfg'))
            fp16_cfg = cfg.get('fp16', None)
            if fp16_cfg is not None:
                wrap_fp16_model(model)
            checkpoint = load_checkpoint(model,
                                         args.checkpoint,
                                         map_location='cpu')
            # old versions did not save class info in checkpoints,
            # this workaround is for backward compatibility
            if 'CLASSES' in checkpoint['meta']:
                model.CLASSES = checkpoint['meta']['CLASSES']
            else:
                model.CLASSES = dataset.CLASSES

            if not distributed:
                model = MMDataParallel(model, device_ids=[0])
                show_dir = args.show_dir
                if show_dir is not None:
                    show_dir = osp.join(show_dir, corruption)
                    show_dir = osp.join(show_dir, str(corruption_severity))
                    if not osp.exists(show_dir):
                        os.makedirs(show_dir)
                outputs = single_gpu_test(model, data_loader, args.show,
                                          show_dir, args.show_score_thr)
            else:
                model = MMDistributedDataParallel(
                    model.cuda(),
                    device_ids=[torch.cuda.current_device()],
                    broadcast_buffers=False)
                outputs = multi_gpu_test(model, data_loader, args.tmpdir)

            if args.out and rank == 0:
                eval_results_filename = (osp.splitext(args.out)[0] +
                                         '_results' +
                                         osp.splitext(args.out)[1])
                mmcv.dump(outputs, args.out)
                eval_types = args.eval
                if cfg.dataset_type == 'VOCDataset':
                    if eval_types:
                        for eval_type in eval_types:
                            if eval_type == 'bbox':
                                test_dataset = mmcv.runner.obj_from_dict(
                                    cfg.data.test, datasets)
                                logger = 'print' if args.summaries else None
                                mean_ap, eval_results = \
                                    voc_eval_with_return(
                                        args.out, test_dataset,
                                        args.iou_thr, logger)
                                aggregated_results[corruption][
                                    corruption_severity] = eval_results
                            else:
                                print('\nOnly "bbox" evaluation '
                                      'is supported for pascal voc')
                else:
                    if eval_types:
                        print(f'Starting evaluate {" and ".join(eval_types)}')
                        if eval_types == ['proposal_fast']:
                            result_file = args.out
                        else:
                            if not isinstance(outputs[0], dict):
                                result_files = dataset.results2json(
                                    outputs, args.out)
                            else:
                                for name in outputs[0]:
                                    print(f'\nEvaluating {name}')
                                    outputs_ = [out[name] for out in outputs]
                                    result_file = args.out + f'.{name}'
                                    result_files = dataset.results2json(
                                        outputs_, result_file)
                        eval_results = coco_eval_with_return(
                            result_files, eval_types, dataset.coco)
                        aggregated_results[corruption][
                            corruption_severity] = eval_results
                    else:
                        print('\nNo task was selected for evaluation;'
                              '\nUse --eval to select a task')

                # save results after each evaluation
                mmcv.dump(aggregated_results, eval_results_filename)

    if rank == 0:
        # print final results
        print('\nAggregated results:')
        prints = args.final_prints
        aggregate = args.final_prints_aggregate

        if cfg.dataset_type == 'VOCDataset':
            get_results(eval_results_filename,
                        dataset='voc',
                        prints=prints,
                        aggregate=aggregate)
        else:
            get_results(eval_results_filename,
                        dataset='coco',
                        prints=prints,
                        aggregate=aggregate)
Code Example #14
def main():
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('labels_file',
                        help='INPUT: text file with class label names, one per line')
    parser.add_argument('jsons_dir', help='INPUT: dir with annotated files')
    parser.add_argument('voc_dir', help='OUTPUT: voc dataset directory')
    args = parser.parse_args()

    if not osp.exists(args.jsons_dir):
        print("directory not exists, ", args.jsons_dir)
        sys.exit(1)

    if not osp.exists(args.voc_dir):
        os.makedirs(args.voc_dir)
    if not osp.exists(osp.join(args.voc_dir, 'JPEGImages')):
        os.makedirs(osp.join(args.voc_dir, 'JPEGImages'))
    if not osp.exists(osp.join(args.voc_dir, 'SegmentationClass')):
        os.makedirs(osp.join(args.voc_dir, 'SegmentationClass'))
    if not osp.exists(osp.join(args.voc_dir, 'SegmentationClassPNG')):
        os.makedirs(osp.join(args.voc_dir, 'SegmentationClassPNG'))
    if not osp.exists(osp.join(args.voc_dir,
                               'SegmentationClassVisualization')):
        os.makedirs(osp.join(args.voc_dir, 'SegmentationClassVisualization'))
    print('Creating dataset:', args.voc_dir)

    class_names = []
    class_name_to_id = {}
    for i, line in enumerate(
            open(args.labels_file, "r", encoding='UTF-8').readlines()):
        class_id = i - 1  # starts with -1
        class_name = line.strip()
        class_name_to_id[class_name] = class_id
        if class_id == -1:
            assert class_name == '__ignore__'
            continue
        elif class_id == 0:
            assert class_name == '_background_'
        class_names.append(class_name)
    class_names = tuple(class_names)
    print('class_names:', class_names)
    out_class_names_file = osp.join(args.voc_dir, 'seg_class_names.txt')
    with open(out_class_names_file, 'w') as f:
        f.writelines('\n'.join(class_names))
    print('Saved class_names:', out_class_names_file)

    colormap = labelme.utils.label_colormap(255)

    # 3. Process Every Json File
    label_file_list = glob.glob(osp.join(args.jsons_dir, '*.json'))
    for i in progressbar.progressbar(range(len(label_file_list))):
        label_file = label_file_list[i]
        # print('Generating dataset from:', label_file)
        with open(label_file, "r", encoding='UTF-8') as f:
            base = osp.splitext(osp.basename(label_file))[0]
            out_img_file = osp.join(args.voc_dir, 'JPEGImages', base + '.jpg')
            out_lbl_file = osp.join(args.voc_dir, 'SegmentationClass',
                                    base + '.npy')
            out_png_file = osp.join(args.voc_dir, 'SegmentationClassPNG',
                                    base + '.png')
            out_viz_file = osp.join(args.voc_dir,
                                    'SegmentationClassVisualization',
                                    base + '.jpg')

            data = json.load(f)

            imageData = data.get(
                'imageData'
            )  # labelme annotated file contains source image data(serialized)
            if imageData:
                img = utils.img_b64_to_arr(imageData)
            else:
                img_file = osp.join(osp.dirname(label_file), data['imagePath'])
                img = np.asarray(PIL.Image.open(img_file))
            PIL.Image.fromarray(img).save(out_img_file)

            lbl = labelme.utils.shapes_to_label(
                img_shape=img.shape,
                shapes=data['shapes'],
                label_name_to_value=class_name_to_id,
            )
            labelme.utils.lblsave(out_png_file, lbl)

            np.save(out_lbl_file, lbl)

            viz = labelme.utils.draw_label(lbl,
                                           img,
                                           class_names,
                                           colormap=colormap)
            PIL.Image.fromarray(viz).save(out_viz_file)
Code Example #15
File: upload_actions.py Project: antroy/Home
def addFolderAction(user, fields, root, admin_context, gallery_context):
    localroot = path.join(root, fields.getfirst('parent', user))
    if not path.exists(localroot):
        os.makedirs(localroot)
    
    return showEditFolderPageAction(user, fields, root)
Code Example #16
File: test.py Project: gwk/ploy-cpp
    errFL('ERROR: could not run test case: {};\nexception: {}', path, e)
    if dbg:
      raise
    else:
      ok = False
  if not ok:
    fail_count += 1
    outL('=' * bar_width + '\n');
    if fail_fast:
      errFL('exiting fast.')
      sys.exit(1)


# setup
if not _path.exists(results_dir):
  os.makedirs(results_dir)

# parse and run tests
for path in walk_all_paths(*args.paths, exts=('.test',)):
  if path == '<stdin>':
    errSL('reading test case from', path)
  try_case(path)
  if dbg: errL()

out('\n' + '#' * bar_width + '\nRESULTS: ')
if not any([ignore_count, skip_count, fail_count]):
  outFL('PASSED {} test{}', test_count, ('' if test_count == 1 else 's'))
  code = 0
else:
  outFL('{} tests; IGNORED {}; SKIPPED {}; FAILED {}', test_count, ignore_count, skip_count, fail_count)
  code = 1
Code Example #17
File: misc.py Project: wecacuee/Bayesian_CBF
 def add_scalars(self, tag, var_dict, t):
     if not osp.exists(self.exp_dir): os.makedirs(self.exp_dir)
     for k, v in var_dict.items():
         self.summary_writer.add_scalar("/".join((tag, k)), v, t)
Code Example #18
File: misc.py Project: wecacuee/Bayesian_CBF
 def add_tensors(self, tag, var_dict, t):
     if not osp.exists(self.exp_dir): os.makedirs(self.exp_dir)
     add_tensors(self.summary_writer, tag, var_dict, t)
Code Example #19
File: pipe_bkup.py Project: muppetjones/rempipe
def pipe(file_list, genome, project_dir, force=False):

    timestamp = time.strftime("%y%m%d-%H%M%S")

    for f in file_list:
        name = f[0]
        files = f[1:]
        out_dir = os.path.join(project_dir, name)
        path.makedirs(out_dir)

        # # 1st fast qc
        # fastqc_1 = FastqcCmd(*files, o=out_dir)
        #
        # # trimming
        # out_prefix = os.path.join(out_dir, name)
        # trim = SkewerCmd(*files, o=out_prefix)
        # trimmed_fastq = trim.output()
        #
        # # 2nd fastqc
        # fastqc_2 = FastqcCmd(*trimmed_fastq, o=out_dir)
        #
        # # setup alignment
        # # NOTE: need to check for encoding
        # align_kwargs = {
        #     '-x': genome,
        #     '-S': '{}_{}.sam'.format(
        #         out_prefix,
        #         os.path.basename(genome),
        #     ),
        #     '-p': 3,  # set for local (should use pbs paramters on qsub)
        # }
        # if len(trimmed_fastq) == 1:
        #     align_kwargs['U'] = trimmed_fastq[0]
        # else:
        #     align_kwargs['1'], align_kwargs['2'] = trimmed_fastq
        # align = HisatCmd(timestamp=timestamp, **align_kwargs)
        # # human_kw = [m for m in ['human', 'sapien', 'G37RCh'] if m in genome]
        # # if human_kw:
        # #     align = HisatCmd(timestamp=timestamp, **align_kwargs)
        # # else:
        # #     align = Bowtie2Cmd(timestamp=timestamp, **align_kwargs)
        #
        # # samtools
        # sam_sort = SamtoolsSortCmd(*(align.output()))
        # sam_index = SamtoolsIndexCmd(*(sam_sort.output()))

        # UPDATED
        fastqc_1 = FastqcCmd(*files, o=out_dir)

        # trimming
        out_prefix = os.path.join(out_dir, name)
        trim = SkewerCmd(*files, o=out_prefix)

        # 2nd fastqc
        fastqc_2 = FastqcCmd(o=out_dir)

        # setup alignment
        # NOTE: need to check for encoding
        align_kwargs = {
            '-x': genome,
            '-S': '{}_{}.sam'.format(
                out_prefix,
                os.path.basename(genome),
            ),
            '-p': 3,  # set for local (should use pbs parameters on qsub)
        }
        align = HisatCmd(timestamp=timestamp, **align_kwargs)

        # samtools
        sam_sort = SamtoolsSortCmd()
        sam_index = SamtoolsIndexCmd()

        # count
        kwargs = {'-bed': genome}
        bedtools_multicov = BedtoolsMulticovCmd(**kwargs)

        # Setup pipe
        # NOTE: This is the alpha test of the pipe class.
        job_name = name + '_' + os.path.basename(genome)
        pipe = BasePipe(job_name=job_name, force=force)
        pipe.add(
            fastqc_1, trim, fastqc_2, align, sam_sort, sam_index,
            bedtools_multicov,
        )

        # write pbs file & run
        # pbs_file = '{}  , timestamp, os.path.basename(genome))
        pipe.write_script()
        pipe.run()