コード例 #1
0
def main(argv):
    """Dispatch the CLI: print help/usage text or run the selected subcommand.

    Returns 1 when help or usage text was shown, 0 after a successful run.
    """
    help_flags = {'-h', '--help'}
    command = listget(argv, 0, '').lower()

    if command not in MODULE_DOCSTRINGS:
        # Unknown (or missing) command: fall back to the top-level help text.
        print(DOCSTRING)
        if command == '':
            print('You are seeing the default help text because you did not choose a command.')
        elif command not in help_flags:
            print('You are seeing the default help text because "%s" was not recognized' % command)
        return 1

    # A recognized command followed by -h/--help: show that command's docstring.
    if listget(argv, 1, '').lower() in help_flags:
        print(MODULE_DOCSTRINGS[command])
        return 1

    # Make sure the results directory exists before the command runs.
    mkdir(os.path.join(PROJ_ROOT, 'res'))

    parsed = parser.parse_args(argv)
    parsed.func(parsed)

    return 0
def main():
    """Launch (possibly distributed) EfficientDet training.

    Detects torch.distributed launches via WORLD_SIZE, builds the config from
    defaults + optional YAML file + CLI overrides, prepares the output
    directory and logger, snapshots the resolved config, and runs training.
    """
    args = parse_args()
    # WORLD_SIZE is set by torch.distributed launchers; absent => single process.
    num_gpus = int(
        os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1
    args.distributed = num_gpus > 1
    if args.distributed:
        torch.cuda.set_device(args.local_rank)
        torch.distributed.init_process_group(
            backend="nccl",
            init_method="env://",
        )
        # Wait for every rank before touching the filesystem/config.
        comm.synchronize()

    # Config precedence: built-in defaults < YAML file < command-line opts.
    cfg = get_default_cfg()
    if args.config_file:
        cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    output_dir = cfg.output_dir
    if output_dir:
        misc.mkdir(output_dir)

    logger = setup_logger("EfficientDet", output_dir, comm.get_rank())
    logger.info("Using {} GPUs".format(num_gpus))
    logger.info(args)

    logger.info("Loaded configuration file {}".format(args.config_file))
    logger.info("Running with config:\n{}".format(cfg))

    # Snapshot the fully-resolved config next to the run outputs.
    output_config_path = os.path.join(output_dir, 'config.yml')
    logger.info("Saving config into: {}".format(output_config_path))
    misc.save_config(cfg, output_config_path)

    model = train(cfg, args.local_rank, args.distributed)
コード例 #3
0
def main():
    """Entry point: seed RNGs, configure CUDA and logging, then run the trainer."""
    args = get_arguments()
    torch.manual_seed(args.seed)

    use_cuda = 'cuda' in args.device and torch.cuda.is_available()
    if use_cuda:
        torch.cuda.manual_seed_all(args.seed)
        torch.cuda.set_device(args.device_ids[0])
        cudnn.benchmark = True
    else:
        # No usable GPU: clear the device list so downstream code runs on CPU.
        args.device_ids = None

    # Log both to the console and to <save_path>/log.txt.
    misc.mkdir(args.save_path)
    misc.setup_logging(path.join(args.save_path, 'log.txt'))
    logging.info(args)

    # Pick the run mode: cross-validation, test-only, or plain training.
    trainer = Trainer(args)
    if args.train_cross_validation:
        trainer.train_cross_validation()
    elif args.test:
        trainer.test()
    else:
        # trainer.train()
        trainer.train_no_eval()
コード例 #4
0
ファイル: main.py プロジェクト: kligvasser/xUnit
def main():
    """Entry point: seed RNGs, set up CUDA and logging, then train or evaluate."""
    args = get_arguments()
    torch.manual_seed(args.seed)

    gpu_available = 'cuda' in args.device and torch.cuda.is_available()
    if gpu_available:
        torch.cuda.manual_seed_all(args.seed)
        torch.cuda.set_device(args.device_ids[0])
        cudnn.benchmark = True
    else:
        # Fall back to CPU when no GPU is requested/available.
        args.device_ids = None

    # Output folders: checkpoints/logs plus a sub-folder for rendered images.
    misc.mkdir(args.save_path)
    misc.mkdir(path.join(args.save_path, 'images'))
    misc.setup_logging(path.join(args.save_path, 'log.txt'))
    logging.info(args)

    trainer = Trainer(args)
    if args.evaluation:
        trainer.eval()
    else:
        trainer.train()
コード例 #5
0
ファイル: test.py プロジェクト: zlinzju/Btrfly-Net-Pytorch
def main():
    """Run Btrfly-Net prediction: parse CLI args, load the YACS config,
    set up loggers, print the effective configuration, and call pred(cfg).
    """
    torch.cuda.empty_cache()
    # some configs, including yaml file
    parser = argparse.ArgumentParser(description='Btrfly Net Training with Pytorch')
    parser.add_argument(
        "--config_file",
        default="configs/btrfly.yaml",
        metavar="FILE",
        help="path to config file",
        type=str,
    )
    parser.add_argument("--log_step", default=1, type=int, help="print logs every log_step")
    parser.add_argument("--save_step", default=50, type=int, help="save checkpoint every save_step")
    parser.add_argument("--eval_step", default=10, type=int, help="evaluate dataset every eval_step, disabled if eval_step <= 0")
    parser.add_argument("--use_tensorboard", default=1, type=int, help="use visdom to illustrate training process, unless use_visdom == 0")
    args = parser.parse_args()

    # enable inbuilt cudnn auto-tuner to find the best algorithm to use for your hardware
    # so it helps increase training speed
    if torch.cuda.is_available():
        torch.backends.cudnn.benchmark = True

    # use YACS as the config manager, see https://github.com/rbgirshick/yacs for more info
    # cfg contains all the configs set by configs/defaults and overrided by config_file (see line 13)
    # NOTE(review): `cfg` is not defined in this function — presumably imported
    # at module level from the configs package; confirm.
    cfg.merge_from_file(args.config_file)
    cfg.freeze()
    # make output directory designated by OUTPUT_DIR if necessary
    if cfg.OUTPUT_DIR:
        mkdir(cfg.OUTPUT_DIR)

    # set up 2 loggers
    # logger_all can print time and logger's name
    # logger_message only print message
    # it will print info to stdout and to OUTPUT_DIR/log.txt (way: append)
    logger_all = setup_colorful_logger(
        "main",
        save_dir=os.path.join(cfg.OUTPUT_DIR, 'log.txt'),
        format="include_other_info")
    logger_message = setup_colorful_logger(
        "main_message",
        save_dir=os.path.join(cfg.OUTPUT_DIR, 'log.txt'),
        format="only_message")

    # print config info (cfg and args)
    # args are obtained by command line
    # cfg is obtained by yaml file and defaults.py in configs/
    separator(logger_message)
    logger_message.warning(" ---------------------------------------")
    logger_message.warning("|              Your config:             |")
    logger_message.warning(" ---------------------------------------")
    logger_message.info(args)
    logger_message.warning(" ---------------------------------------")
    logger_message.warning("|      Running with entire config:      |")
    logger_message.warning(" ---------------------------------------")
    logger_message.info(cfg)
    separator(logger_message)

    # Run prediction with the frozen config.
    pred(cfg)
コード例 #6
0
ファイル: uq.py プロジェクト: xiaoguangqiang/pde-surrogate
    def plot_reliability_diagram(self,
                                 label='Conditional Glow',
                                 save_time=True):
        """Plot per-channel reliability diagrams for the surrogate.

        For each probability p, measures how often the MC target falls inside
        the surrogate's central p-interval (Gaussian interval built from the
        predicted mean and variance), then plots observed frequency vs p.
        A perfectly calibrated model lies on the diagonal.

        Args:
            label (str): legend label for the surrogate curve.
            save_time (bool): if True, evaluate only the first 5 MC batches.
        """
        print("Plotting reliability diagram..................................")
        # percentage: p
        # p_list = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.95]
        p_list = np.linspace(0.01, 0.99, 10)
        freq = []
        n_channels = self.mc_loader.dataset[0][1].shape[0]

        for p in p_list:
            count = 0
            numels = 0
            for batch_idx, (input, target) in enumerate(self.mc_loader):
                # only evaluate 2000 of the MC data to save time
                if save_time and batch_idx > 4:
                    continue
                pred_mean, pred_var = self.model.predict(
                    input.to(self.device),
                    n_samples=self.n_samples,
                    temperature=self.temperature)

                # Central p-interval of N(pred_mean, pred_var), element-wise.
                interval = scipy_norm.interval(
                    p,
                    loc=pred_mean.cpu().numpy(),
                    scale=pred_var.sqrt().cpu().numpy())

                # Count hits inside the interval, summed over batch and spatial
                # axes so the channel axis is kept (one curve per channel).
                count += ((target.numpy() >= interval[0])
                          & (target.numpy() <= interval[1])).sum(axis=(0, 2,
                                                                       3))
                numels += target.numel() / n_channels
                print('p: {}, {} / {} = {}'.format(
                    p, count, numels, np.true_divide(count, numels)))
            freq.append(np.true_divide(count, numels))
        reliability_dir = self.post_dir + '/uncertainty_quality'
        mkdir(reliability_dir)

        # One diagram per output channel; the dashed diagonal is the ideal.
        freq = np.stack(freq, 0)
        for i in range(freq.shape[-1]):
            plt.figure()
            plt.plot(p_list, freq[:, i], 'r', label=label)
            plt.xlabel('Probability')
            plt.ylabel('Frequency')
            x = np.linspace(0, 1, 100)
            plt.plot(x, x, 'k--', label='Ideal')
            plt.legend(loc='upper left')
            plt.savefig(reliability_dir + f"/reliability_diagram_{i}.pdf",
                        dpi=300)
            plt.close()

        # Also dump the raw table: column 0 is p, columns 1.. are per-channel
        # observed frequencies.
        reliability = np.zeros((p_list.shape[0], 1 + n_channels))
        reliability[:, 0] = p_list
        reliability[:, 1:] = freq
        np.savetxt(reliability_dir + "/reliability_diagram.txt", reliability)
        plt.close()
コード例 #7
0
def prec_recall(data, gt):
    """Evaluate the search engine against ground truth and write the
    precision and averaged precision/recall tables as CSV files."""
    engine = SearchEngine(data)

    print('\n> Running Evaluation...\n', end='')
    prec, avg_prec_recall = Evaluator(engine, gt).evaluate()

    # Persist both result tables under the evaluation directory.
    mkdir(EVALUATION_PATH)
    save_to_csv(prec, os.path.join(EVALUATION_PATH, 'precision.csv'))
    save_to_csv(avg_prec_recall, os.path.join(EVALUATION_PATH, 'avg_prec_recall.csv'), index=True)
    print('\n Results of evaluation saved to directory "%s"' % os.path.relpath(EVALUATION_PATH, PROJ_ROOT))
コード例 #8
0
ファイル: uq.py プロジェクト: xiaoguangqiang/pde-surrogate
    def plot_dist(self, num_loc):
        """Plot distribution estimate in `num_loc` locations in the domain, 
        which are chosen by Latin Hypercube Sampling.
        Args:
            num_loc (int): number of locations where distribution is estimated
        """
        print(
            'Plotting distribution estimate.................................')

        assert num_loc > 0, 'num_loc must be greater than zero'
        locations = lhs(2, num_loc, criterion='c')
        print('Locations selected by LHS: \n{}'.format(locations))
        # location (ndarray): [0, 1] x [0, 1]: N x 2
        # Map unit-square coordinates to integer pixel indices.
        # NOTE(review): assumes self.imsize is the spatial side length of the
        # model output — confirm.
        idx = (locations * self.imsize).astype(int)

        print('Propagating...')
        pred, target = [], []
        for _, (x_mc, t_mc) in enumerate(self.mc_loader):
            x_mc = x_mc.to(self.device)
            # S x B x C x H x W
            y_mc = self.model.sample(x_mc,
                                     n_samples=self.n_samples,
                                     temperature=self.temperature)
            # S x B x C x n_points
            pred.append(y_mc[:, :, :, idx[:, 0], idx[:, 1]])
            # B x C x n_points
            target.append(t_mc[:, :, idx[:, 0], idx[:, 1]])
        # S x M x C x n_points --> M x C x n_points
        # Average over the S sample axis after concatenating batches.
        pred = torch.cat(pred, dim=1).mean(0).cpu().numpy()

        print('pred size: {}'.format(pred.shape))
        # M x C x n_points
        target = torch.cat(target, dim=0).cpu().numpy()
        print('target shape: {}'.format(target.shape))
        dist_dir = self.post_dir + '/dist_estimate'
        mkdir(dist_dir)
        # One figure per location: KDE of MC targets vs surrogate predictions,
        # one subplot per output channel (3 channels assumed by the layout).
        for loc in range(locations.shape[0]):
            print(loc)
            fig, _ = plt.subplots(1, 3, figsize=(12, 4))
            for c, ax in enumerate(fig.axes):
                sns.kdeplot(target[:, c, loc],
                            color='b',
                            ls='--',
                            label='Monte Carlo',
                            ax=ax)
                sns.kdeplot(pred[:, c, loc],
                            color='r',
                            label='Surrogate',
                            ax=ax)
                ax.legend()
            plt.savefig(dist_dir + '/loc_({:.5f}, {:.5f}).pdf'.format(
                locations[loc][0], locations[loc][1]),
                        dpi=300)
            plt.close(fig)
コード例 #9
0
ファイル: test.py プロジェクト: tkianai/StylizePoseGAN
def main(opt):
    """Build the test dataloader and model, then run inference, saving
    results under <results_dir>/<name>/test_<size>_<iter>."""
    loader = build_dataloader(opt.dataroot,
                              opt.label_size,
                              training=opt.isTrain,
                              resolution=opt.test_size,
                              batch_size=1)
    net = build_model(opt)

    # One output folder per (test size, checkpoint iteration) pair.
    out_dir = os.path.join(opt.results_dir, opt.name,
                           'test_{}_{}'.format(opt.test_size, opt.which_iter))
    misc.mkdir(out_dir)

    test(net, loader, out_dir)
コード例 #10
0
def run_wilcoxon_test_all(metrics, folder):
    """Run a Wilcoxon test for every metric and write the results
    under <folder>/wilcoxon."""
    out_dir = mkdir(folder + '/wilcoxon')

    for name, values in metrics.items():
        run_wilcoxon_test(values, name, out_dir)

    print(indent('\n- Wilcoxon tests results written to folder "%s"' % out_dir))
コード例 #11
0
def make_boxplot_all(metrics, folder):
    """Create a box plot for every metric and write them under
    <folder>/boxplots."""
    out_dir = mkdir(folder + '/boxplots')

    for name, values in metrics.items():
        make_boxplot(values, name, out_dir)

    print(indent('\n- Boxplots written to folder "%s"' % out_dir))
コード例 #12
0
ファイル: uq.py プロジェクト: xiaoguangqiang/pde-surrogate
    def plot_prediction_at_x(self, n_pred, plot_samples=False):
        r"""Plot `n_pred` predictions for randomly selected input from test dataset.
        - target
        - predictive mean
        - standard deviation of predictive output distribution
        - error of the above two

        Args:
            n_pred: number of candidate predictions
            plot_samples (bool): plot 15 output samples from p(y|x) for given x
        """
        save_dir = self.post_dir + '/predict_at_x'
        mkdir(save_dir)
        print('Plotting predictions at x from test dataset..................')
        # Fixed seed so the same test inputs are selected on every run.
        np.random.seed(1)
        idx = np.random.permutation(len(self.test_loader.dataset))[:n_pred]
        for i in idx:
            print('input index: {}'.format(i))
            input, target = self.test_loader.dataset[i]
            # Predictive mean/variance for a single input (batch of one).
            pred_mean, pred_var = self.model.predict(
                input.unsqueeze(0).to(self.device),
                n_samples=self.n_samples,
                temperature=self.temperature)

            plot_prediction_bayes2(save_dir,
                                   target,
                                   pred_mean.squeeze(0),
                                   pred_var.squeeze(0),
                                   self.epochs,
                                   i,
                                   plot_fn=self.plot_fn)
            if plot_samples:
                # Draw 15 samples from p(y|x) and save them alongside the
                # target (prepended as the first tile in the grid).
                samples_pred = self.model.sample(input.unsqueeze(0).to(
                    self.device),
                                                 n_samples=15)[:, 0]
                samples = torch.cat(
                    (target.unsqueeze(0), samples_pred.detach().cpu()), 0)
                save_samples(save_dir,
                             samples,
                             self.epochs,
                             i,
                             'samples',
                             nrow=4,
                             heatmap=True,
                             cmap='jet')
コード例 #13
0
def extract_data(file_path):
    """Extract class/function/method names from .py and .cc sources under
    `file_path` and save the combined table to EXTRACTED_DATA_PATH as CSV."""
    print(
        '\n> Extracting names of classes, function and methods (for .cc and .py files) for source "%s"... '
        % os.path.basename(file_path))

    # Walk the tree; collect_files presumably appends matching paths into the
    # module-level py_files / clang_files lists used below — confirm.
    traverse(file_path, r'\.py$|\.cc$', collect_files)

    py_extractor = PyExtractor(py_files)
    print(py_extractor)

    clang_extractor = ClangExtractor(clang_files)
    print(clang_extractor)

    mkdir(RES_PATH)
    # NOTE(review): if get_df() returns a pandas DataFrame, DataFrame.append
    # was removed in pandas 2.0 and this needs pd.concat — confirm.
    save_to_csv(py_extractor.get_df().append(clang_extractor.get_df()),
                EXTRACTED_DATA_PATH,
                columns=["Name", "File", "Path", "Type"])

    print('\n> Extracted data saved to file "%s"' %
          os.path.relpath(EXTRACTED_DATA_PATH, PROJ_ROOT))
コード例 #14
0
    def _run_tests(self):
        """Run every ground-truth query against the search engine.

        Side effect: builds visualization inputs for the LSI and Doc2Vec
        vector spaces and saves the plots under VISUALIZATION_PATH.

        Returns:
            dict: final_result[query_idx][algo]['precision'] holding
            1/(rank+1) of the first correct hit in the top 5, else 0.
        """
        final_result = {}

        # Accumulators for the LSI / Doc2Vec scatter-plot visualizations.
        lsi_vs = []
        lsi_hues = []
        lsi_sizes = []

        d2v_vs = []
        d2v_hues = []
        d2v_sizes = []

        for q in range(len(self._ground_truth)):
            # query layout: [0] query text, [1] expected label, [2] expected path
            # (inferred from the accesses below — confirm against the dataset).
            query = self._ground_truth[q]
            search_results = self._search_engine.query(query[0])

            # ----- Create Visualization (START)----- #
            words = query[0].lower().split()
            lsi_viz = self._search_engine.corpus.lsi_viz(words)
            d2v_viz = self._search_engine.corpus.doc2v_viz(words)

            # LSI vectors come as (index, value) pairs; keep only the values.
            for v in lsi_viz:
                vv = []
                for tup in v:
                    vv.append(tup[1])
                lsi_vs.append(vv)

            for v in d2v_viz:
                d2v_vs.append(v)

            # One 'query' point followed by its five 'hit' points.
            sizes = ['query']
            sizes.extend(['hit'] * 5)

            lsi_hues.extend([query[1]] * 6)
            lsi_sizes.extend(sizes)

            d2v_hues.extend([query[1]] * 6)
            d2v_sizes.extend(sizes)
            # ----- Create Visualization (END)----- #

            final_result[q] = {}

            for algo, top5 in search_results.items():

                final_result[q][algo] = {}
                final_result[q][algo]["precision"] = 0

                # Reciprocal rank of the first correct (label, path) hit.
                for f in range(len(top5)):
                    file = top5[f]

                    path = os.path.join(LIB_PATH, query[2])

                    if file[0] == query[1] and file[1] == path:
                        final_result[q][algo]['precision'] = 1 / (f+1)
                        break

        mkdir(VISUALIZATION_PATH)
        save_plot(os.path.join(VISUALIZATION_PATH, 'LSI'), lsi_vs, lsi_hues, lsi_sizes)
        save_plot(os.path.join(VISUALIZATION_PATH, 'Doc2v'), d2v_vs, d2v_hues, d2v_sizes)
        print('\n Visualization images for FREQ and Doc2v are saved at "%s"' %
              os.path.relpath(VISUALIZATION_PATH, PROJ_ROOT))

        return final_result
コード例 #15
0
 def _save_image(self, image, path, epoch):
     """Save `image` to <save_path>/images/epoch_<epoch>/<basename(path)>."""
     directory = os.path.join(self.args.save_path, 'images',
                              'epoch_{}'.format(epoch))
     save_path = os.path.join(directory, os.path.basename(path))
     # Ensure the per-epoch folder exists before writing.
     mkdir(directory)
     save_image(image.data.cpu(), save_path)
コード例 #16
0
def test(cfg, model=None):
    """Evaluate `model` (or a checkpoint loaded from cfg.OUTPUT_DIR) on the
    configured test datasets and return the averaged meter values.

    Special modes driven by the config:
      - cfg.VIS.FLOPS: profile FLOPs/params on one batch and exit().
      - cfg.TEST.RECOMPUTE_BN: re-estimate BatchNorm stats on training data.
      - cfg.VIS.VIDEO / cfg.VIS.VIDEO_GT: dump per-frame visualizations.
      - cfg.TEST.PCK: accumulate per-threshold joint error curves and save them.

    Args:
        cfg: frozen experiment config node.
        model: optional already-built model; when None a Modelbuilder model is
            constructed and cfg.WEIGHTS is loaded from cfg.OUTPUT_DIR.

    Returns:
        dict of averaged metrics from the MetricLogger.
    """
    torch.cuda.empty_cache()  # TODO check if it helps
    cpu_device = torch.device("cpu")
    if cfg.VIS.FLOPS:
        # device = cpu_device
        device = torch.device("cuda:0")
    else:
        device = torch.device(cfg.DEVICE)
    if model is None:
        # load model from outputs
        model = Modelbuilder(cfg)
        model.to(device)
        checkpointer = Checkpointer(model, save_dir=cfg.OUTPUT_DIR)
        _ = checkpointer.load(cfg.WEIGHTS)
    data_loaders = make_data_loader(cfg, is_train=False)
    if cfg.VIS.FLOPS:
        # Profile a single batch and terminate the process.
        model.eval()
        from thop import profile
        for idx, batchdata in enumerate(data_loaders[0]):
            with torch.no_grad():
                flops, params = profile(
                    model,
                    inputs=({
                        k: v.to(device) if isinstance(v, torch.Tensor) else v
                        for k, v in batchdata.items()
                    }, False))
                print('flops', flops, 'params', params)
                exit()
    if cfg.TEST.RECOMPUTE_BN:
        # One pass in train mode over the test datasets (loaded with training
        # transforms) so BatchNorm running stats get refreshed.
        tmp_data_loader = make_data_loader(cfg,
                                           is_train=True,
                                           dataset_list=cfg.DATASETS.TEST)
        model.train()
        for idx, batchdata in enumerate(tqdm(tmp_data_loader)):
            with torch.no_grad():
                model(
                    {
                        k: v.to(device) if isinstance(v, torch.Tensor) else v
                        for k, v in batchdata.items()
                    },
                    is_train=True)
        #cnt = 0
        #while cnt < 1000:
        #    for idx, batchdata in enumerate(tqdm(tmp_data_loader)):
        #        with torch.no_grad():
        #            model({k: v.to(device) if isinstance(v, torch.Tensor) else v for k, v in batchdata.items()}, is_train=True)
        #        cnt += 1
        # NOTE(review): `checkpointer` only exists when model was None above;
        # passing a model together with TEST.RECOMPUTE_BN raises NameError here
        # — confirm intended usage.
        checkpointer.save("model_bn")
        model.eval()
    elif cfg.TEST.TRAIN_BN:
        model.train()
    else:
        model.eval()
    dataset_names = cfg.DATASETS.TEST
    meters = MetricLogger()

    #if cfg.TEST.PCK and cfg.DOTEST and 'h36m' in cfg.OUTPUT_DIR:
    #    all_preds = np.zeros((len(data_loaders), cfg.KEYPOINT.NUM_PTS, 3), dtype=np.float32)
    # Helper: move tensors back to CPU numpy, pass everything else through.
    cpu = lambda x: x.to(cpu_device).numpy() if isinstance(x, torch.Tensor
                                                           ) else x

    logger = setup_logger("tester", cfg.OUTPUT_DIR)
    for data_loader, dataset_name in zip(data_loaders, dataset_names):
        print('Loading ', dataset_name)
        dataset = data_loader.dataset

        logger.info("Start evaluation on {} dataset({} images).".format(
            dataset_name, len(dataset)))
        total_timer = Timer()
        total_timer.tic()

        predictions = []
        #if 'h36m' in cfg.OUTPUT_DIR:
        #    err_joints = 0
        #else:
        # Per-sample, per-threshold joint error counters for the PCK curve.
        err_joints = np.zeros((cfg.TEST.IMS_PER_BATCH, int(cfg.TEST.MAX_TH)))
        total_joints = 0

        for idx, batchdata in enumerate(tqdm(data_loader)):
            if cfg.VIS.VIDEO and not 'h36m' in cfg.OUTPUT_DIR:
                # Keep only one hand-picked frame (vis_idx) from each sequence.
                for k, v in batchdata.items():
                    try:
                        #good 1 2 3 4 5 6 7 8 12 16 30
                        # 4 17.4 vs 16.5
                        # 30 41.83200 vs 40.17562
                        #bad 0 22
                        #0 43.78544 vs 45.24059
                        #22 43.01385 vs 43.88636
                        vis_idx = 16
                        batchdata[k] = v[:, vis_idx, None]
                    except:
                        pass
            if cfg.VIS.VIDEO_GT:
                # Render the ground-truth poses to image files and skip the
                # actual model evaluation for this batch.
                for k, v in batchdata.items():
                    try:
                        vis_idx = 30
                        batchdata[k] = v[:, vis_idx:vis_idx + 2]
                    except:
                        pass
                joints = cpu(batchdata['points-2d'].squeeze())[0]
                orig_img = de_transform(
                    cpu(batchdata['img'].squeeze()[None, ...])[0][0])
                # fig = plt.figure()
                # ax = fig.add_subplot(111)
                ax = display_image_in_actual_size(orig_img.shape[1],
                                                  orig_img.shape[2])
                if 'h36m' in cfg.OUTPUT_DIR:
                    draw_2d_pose(joints, ax)
                    orig_img = orig_img[::-1]
                else:
                    visibility = cpu(batchdata['visibility'].squeeze())[0]
                    plot_two_hand_2d(joints, ax, visibility)
                    # plot_two_hand_2d(joints, ax)
                ax.imshow(orig_img.transpose((1, 2, 0)))
                ax.axis('off')
                output_folder = os.path.join("outs", "video_gt", dataset_name)
                mkdir(output_folder)
                plt.savefig(os.path.join(output_folder, "%08d" % idx),
                            bbox_inches="tight",
                            pad_inches=0)
                plt.cla()
                plt.clf()
                plt.close()
                continue
            #print('batchdatapoints-3d', batchdata['points-3d'])
            batch_size = cfg.TEST.IMS_PER_BATCH
            # Forward pass in eval mode; tensors are moved to the device first.
            with torch.no_grad():
                loss_dict, metric_dict, output = model(
                    {
                        k: v.to(device) if isinstance(v, torch.Tensor) else v
                        for k, v in batchdata.items()
                    },
                    is_train=False)
            meters.update(**prefix_dict(loss_dict, dataset_name))
            meters.update(**prefix_dict(metric_dict, dataset_name))
            # udpate err_joints
            if cfg.VIS.VIDEO:
                # Render predicted poses on top of the input frame.
                joints = cpu(output['batch_locs'].squeeze())
                if joints.shape[0] == 1:
                    joints = joints[0]
                try:
                    orig_img = de_transform(
                        cpu(batchdata['img'].squeeze()[None, ...])[0][0])
                except:
                    orig_img = de_transform(
                        cpu(batchdata['img'].squeeze()[None, ...])
                        [0])  # fig = plt.figure()
                # ax = fig.add_subplot(111)
                ax = display_image_in_actual_size(orig_img.shape[1],
                                                  orig_img.shape[2])
                if 'h36m' in cfg.OUTPUT_DIR:
                    draw_2d_pose(joints, ax)
                    orig_img = orig_img[::-1]
                else:
                    visibility = cpu(batchdata['visibility'].squeeze())
                    if visibility.shape[0] == 1:
                        visibility = visibility[0]
                    plot_two_hand_2d(joints, ax, visibility)
                ax.imshow(orig_img.transpose((1, 2, 0)))
                ax.axis('off')
                output_folder = os.path.join(cfg.OUTPUT_DIR, "video",
                                             dataset_name)
                mkdir(output_folder)
                plt.savefig(os.path.join(output_folder, "%08d" % idx),
                            bbox_inches="tight",
                            pad_inches=0)
                plt.cla()
                plt.clf()
                plt.close()
                # plt.show()

            if cfg.TEST.PCK and cfg.DOTEST:
                #if 'h36m' in cfg.OUTPUT_DIR:
                #    err_joints += metric_dict['accuracy'] * output['total_joints']
                #    total_joints += output['total_joints']
                #    # all_preds
                #else:
                for i in range(batch_size):
                    err_joints = np.add(err_joints, output['err_joints'])
                    total_joints += sum(output['total_joints'])

            # Periodically keep (input, output) pairs for later inspection,
            # bounded by SAVE_PRED_LIMIT unless the limit is -1 (unbounded).
            if idx % cfg.VIS.SAVE_PRED_FREQ == 0 and (
                    cfg.VIS.SAVE_PRED_LIMIT == -1
                    or idx < cfg.VIS.SAVE_PRED_LIMIT * cfg.VIS.SAVE_PRED_FREQ):
                # print(meters)
                for i in range(batch_size):
                    predictions.append((
                        {
                            k: (cpu(v[i]) if not isinstance(v, int) else v)
                            for k, v in batchdata.items()
                        },
                        {
                            k: (cpu(v[i]) if not isinstance(v, int) else v)
                            for k, v in output.items()
                        },
                    ))
            if cfg.VIS.SAVE_PRED_LIMIT != -1 and idx > cfg.VIS.SAVE_PRED_LIMIT * cfg.VIS.SAVE_PRED_FREQ:
                break

            # if not cfg.DOTRAIN and cfg.SAVE_PRED:
            #     if cfg.VIS.SAVE_PRED_LIMIT != -1 and idx < cfg.VIS.SAVE_PRED_LIMIT:
            #         for i in range(batch_size):
            #             predictions.append(
            #                     (
            #                         {k: (cpu(v[i]) if not isinstance(v, int) else v) for k, v in batchdata.items()},
            #                         {k: (cpu(v[i]) if not isinstance(v, int) else v) for k, v in output.items()},
            #                     )
            #             )
            #     if idx == cfg.VIS.SAVE_PRED_LIMIT:
            #         break
        #if cfg.TEST.PCK and cfg.DOTEST and 'h36m' in cfg.OUTPUT_DIR:
        #    logger.info('accuracy0.5: {}'.format(err_joints/total_joints))
        # dataset.evaluate(all_preds)
        # name_value, perf_indicator = dataset.evaluate(all_preds)
        # names = name_value.keys()
        # values = name_value.values()
        # num_values = len(name_value)
        # logger.info(' '.join(['| {}'.format(name) for name in names]) + ' |')
        # logger.info('|---' * (num_values) + '|')
        # logger.info(' '.join(['| {:.3f}'.format(value) for value in values]) + ' |')

        total_time = total_timer.toc()
        total_time_str = get_time_str(total_time)
        logger.info("Total run time: {} ".format(total_time_str))

        # Persist saved predictions (and the PCK curve, if computed).
        if cfg.OUTPUT_DIR:  #and cfg.VIS.SAVE_PRED:
            output_folder = os.path.join(cfg.OUTPUT_DIR, "inference",
                                         dataset_name)
            mkdir(output_folder)
            torch.save(predictions,
                       os.path.join(output_folder, cfg.VIS.SAVE_PRED_NAME))
            if cfg.DOTEST and cfg.TEST.PCK:
                print(err_joints.shape)
                torch.save(err_joints * 1.0 / total_joints,
                           os.path.join(output_folder, "pck.pth"))

    logger.info("{}".format(str(meters)))

    model.train()
    return meters.get_all_avg()
コード例 #17
0
ファイル: uq.py プロジェクト: xiaoguangqiang/pde-surrogate
    def propagate_uncertainty(self, manual_scale=False, var_samples=10):
        """Propagate input uncertainty through the trained surrogate.

        Computes Monte Carlo sample mean/variance of the input and output
        fields in mini-batches, plots the input MC statistics, then plots and
        saves the four predictive output statistics (EE, VE, EV, VV).

        Args:
            manual_scale (bool): forwarded to plot_MC2 for the mean plots.
            var_samples (int): number of samples used to estimate variances.
        """
        print("Propagate Uncertainty using pre-trained surrogate ...........")
        # Two passes over the MC loader: first accumulate batch means, then
        # accumulate squared deviations around those means.
        sample_mean_x = torch.zeros_like(self.mc_loader.dataset[0][0])
        sample_var_x = torch.zeros_like(sample_mean_x)
        sample_mean_y = torch.zeros_like(self.mc_loader.dataset[0][1])
        sample_var_y = torch.zeros_like(sample_mean_y)

        for _, (x_test_mc, y_test_mc) in enumerate(self.mc_loader):
            sample_mean_x += x_test_mc.mean(0)
            sample_mean_y += y_test_mc.mean(0)
        sample_mean_x /= len(self.mc_loader)
        sample_mean_y /= len(self.mc_loader)

        for _, (x_test_mc, y_test_mc) in enumerate(self.mc_loader):
            sample_var_x += ((x_test_mc - sample_mean_x)**2).mean(0)
            sample_var_y += ((y_test_mc - sample_mean_y)**2).mean(0)
        sample_var_x /= len(self.mc_loader)
        sample_var_y /= len(self.mc_loader)

        # Plot mean and variance of the MC input field side by side.
        stats_x = torch.stack((sample_mean_x, sample_var_x)).cpu().numpy()
        fig, _ = plt.subplots(1, 2)
        for i, ax in enumerate(fig.axes):
            # ax.set_title(titles[i])
            ax.set_aspect('equal')
            ax.set_axis_off()
            # im = ax.imshow(stats_x[i].squeeze(0),
            #                interpolation='bilinear', cmap=self.args.cmap)
            im = ax.contourf(stats_x[i].squeeze(0), 50, cmap='jet')
            # Avoid white seams between filled contour levels in vector output.
            for c in im.collections:
                c.set_edgecolor("face")
                c.set_linewidth(0.000000000001)
            cbar = plt.colorbar(
                im,
                ax=ax,
                fraction=0.046,
                pad=0.04,
                format=ticker.ScalarFormatter(useMathText=True))
            cbar.formatter.set_powerlimits((0, 0))
            cbar.ax.yaxis.set_offset_position('left')
            cbar.update_ticks()
        plt.tight_layout(pad=0.5, w_pad=0.5, h_pad=0.5)
        out_stats_dir = self.post_dir + '/out_stats'
        mkdir(out_stats_dir)
        # BUG FIX: `di=300` was a typo for `dpi=300` (savefig keyword), as
        # used by every other savefig call in this module.
        plt.savefig(out_stats_dir + '/input_MC.pdf',
                    dpi=300,
                    bbox_inches='tight')
        plt.close(fig)
        print("Done plotting input MC, num of training: {}".format(
            self.ntrain))

        # MC surrogate predictions: mean/variance over samples of the
        # predictive mean (E*) and predictive variance (V*).
        y_pred_EE, y_pred_VE, y_pred_EV, y_pred_VV = self.model.propagate(
            self.mc_loader,
            n_samples=self.n_samples,
            temperature=self.temperature,
            var_samples=var_samples)
        print('Done MC predictions')

        # Plot the predictive mean stats against the MC mean...
        plot_MC2(out_stats_dir,
                 sample_mean_y,
                 y_pred_EE,
                 y_pred_VE,
                 True,
                 self.ntrain,
                 manual_scale=manual_scale)
        # ...and the predictive variance stats against the MC variance.
        plot_MC2(out_stats_dir, sample_var_y, y_pred_EV, y_pred_VV, False,
                 self.ntrain)

        # Save raw stats for MATLAB plotting.
        scipy.io.savemat(
            out_stats_dir + '/out_stats.mat', {
                'sample_mean': sample_mean_y.cpu().numpy(),
                'sample_var': sample_var_y.cpu().numpy(),
                'y_pred_EE': y_pred_EE.cpu().numpy(),
                'y_pred_VE': y_pred_VE.cpu().numpy(),
                'y_pred_EV': y_pred_EV.cpu().numpy(),
                'y_pred_VV': y_pred_VV.cpu().numpy()
            })
        print('saved output stats to .mat file')
コード例 #18
0
ファイル: test.py プロジェクト: zlinzju/Btrfly-Net-Pytorch
def pred(cfg):
    """Run inference on the validation (or test) split and save predictions.

    Restores the checkpoint via ``CheckPointer``, runs the butterfly net on
    paired coronal/sagittal projection images, masks out the padded borders
    of the output heatmaps, decodes 3-D vertebra centroid positions with
    ``pred_pos_3``, and saves the predictions (plus identification rates
    when ground truth is available) to a ``.pth`` file under ``pred_list/``.

    Args:
        cfg: project config node; reads TEST.DEVICE, TEST.BATCH_SIZE,
            SOLVER.LR, OUTPUT_DIR, MAT_DIR_VAL/MAT_DIR_TEST,
            INPUT_IMG_DIR_VAL/INPUT_IMG_DIR_TEST and ORIGINAL_PATH.
    """
    device = torch.device(cfg.TEST.DEVICE)
    model = build_model(cfg).to(device)
    lr = cfg.SOLVER.LR
    # The optimizer is created only so the checkpointer can restore its state.
    optimizer = torch.optim.Adam(model.parameters(), lr=lr)
    arguments = {"iteration": 0, "epoch": 0}
    checkpointer = CheckPointer(model, optimizer, cfg.OUTPUT_DIR)
    extra_checkpoint_data = checkpointer.load(is_val=True)
    arguments.update(extra_checkpoint_data)

    model.eval()
    is_test = 0  # 0 -> validation split (ground truth available), 1 -> test split
    dataset = ProjectionDataset(
        cfg=cfg,
        mat_dir=cfg.MAT_DIR_TEST if is_test else cfg.MAT_DIR_VAL,
        input_img_dir=cfg.INPUT_IMG_DIR_TEST if is_test else cfg.INPUT_IMG_DIR_VAL,
        transform=transforms.Compose([ToTensor()]))
    test_loader = DataLoader(dataset, batch_size=cfg.TEST.BATCH_SIZE,
                             shuffle=False, num_workers=4)
    # BUGFIX: build the jpg directory path once with os.path.join and reuse
    # it for the image writes below. The original code concatenated
    # cfg.OUTPUT_DIR + "jpg_val/" for the imwrite paths, which writes
    # outside the directory created here whenever OUTPUT_DIR has no
    # trailing slash.
    jpg_dir = os.path.join(cfg.OUTPUT_DIR, "jpg_val")
    mkdir(jpg_dir)
    name_list = []
    whole_step_list = []
    score_list = []
    position_cor_list = []
    position_sag_list = []

    if is_test == 0:
        # Load the ground-truth centroid annotations matching each .mat file.
        val_file_list = glob.glob(cfg.MAT_DIR_VAL + '*.mat')
        val_file_list.sort()

        gt_label_list = []
        for idx in range(len(val_file_list)):
            gt_label_list.append(json.load(
                open(cfg.ORIGINAL_PATH + 'pos/' + val_file_list[idx][len(cfg.MAT_DIR_VAL):-4] + '_ctd.json', "rb")))

    for idx, sample in enumerate(test_loader):
        print(idx)
        input_cor = sample["input_cor"].float().to(device)
        input_sag = sample["input_sag"].float().to(device)
        sag_pad = sample["sag_pad"]
        cor_pad = sample["cor_pad"]
        if is_test == 0:
            gt_cor = sample["gt_cor"].float().to(device)
            gt_sag = sample["gt_sag"].float().to(device)
        output_sag, output_cor = model(input_sag, input_cor)

        # Zero out heatmap responses inside the padded borders so padding
        # can never produce spurious centroid detections.
        # pad layout assumed to be [left, right, top, bottom] per sample --
        # TODO(review): confirm against ProjectionDataset.
        for batch_num in range(input_cor.shape[0]):
            output_sag[batch_num, :, :sag_pad[2][batch_num], :] = 0
            output_sag[batch_num, :, :, output_sag.shape[3] - sag_pad[1][batch_num]:] = 0
            output_sag[batch_num, :, output_sag.shape[2] - sag_pad[3][batch_num]:, :] = 0
            output_sag[batch_num, :, :, :sag_pad[0][batch_num]] = 0

            output_cor[batch_num, :, :cor_pad[2][batch_num], :] = 0
            output_cor[batch_num, :, :, output_cor.shape[3] - cor_pad[1][batch_num]:] = 0
            output_cor[batch_num, :, output_cor.shape[2] - cor_pad[3][batch_num]:, :] = 0
            output_cor[batch_num, :, :, :cor_pad[0][batch_num]] = 0

        if is_test:
            # Dump inputs and max-projected heatmaps (channels 1..24, i.e.
            # the 24 vertebra channels; channel 0 is background) for visual
            # inspection. The 30x gain only brightens the jpgs.
            for j in range(input_cor.shape[0]):
                imageio.imwrite(os.path.join(jpg_dir, sample['name'][j] + "_input_cor.jpg"), torch.squeeze(input_cor[j, :, :]).cpu().detach().numpy())
                imageio.imwrite(os.path.join(jpg_dir, sample['name'][j] + "_input_sag.jpg"), torch.squeeze(input_sag[j, :, :]).cpu().detach().numpy())
                imageio.imwrite(os.path.join(jpg_dir, sample['name'][j] + "_output_cor.jpg"), 30 * np.max(torch.squeeze(output_cor[j, 1:25, :, :]).cpu().detach().numpy(), axis=0))
                imageio.imwrite(os.path.join(jpg_dir, sample['name'][j] + "_output_sag.jpg"), 30 * np.max(torch.squeeze(output_sag[j, 1:25, :, :]).cpu().detach().numpy(), axis=0))

        # Decode centroid positions from the per-vertebra heatmap channels.
        position, position_batch_cor, position_batch_sag = pred_pos_3(
            device, output_sag[:, 1:25, :, :], output_cor[:, 1:25, :, :],
            sample['direction'], sample['crop_info'], sample['spacing'],
            sample['cor_pad'], sample['sag_pad'])

        if idx == 0:
            # One accumulator per step along position's first axis.
            for step in range(position.shape[0]):
                whole_step_list.append([])
                score_list.append([])

        for j in range(input_sag.shape[0]):
            position_cor_list.append(position_batch_cor[j, :, :])
            position_sag_list.append(position_batch_sag[j, :, :])

        for j in range(input_sag.shape[0]):
            for step in range(position.shape[0]):
                # Map predicted voxel coordinates back to world/physical
                # coordinates of the raw volume.
                whole_step_list[step].append(
                    create_centroid_pos([sample['direction_sitk'][0][j], sample['direction_sitk'][1][j], sample['direction_sitk'][2][j],
                                         sample['direction_sitk'][3][j], sample['direction_sitk'][4][j], sample['direction_sitk'][5][j],
                                         sample['direction_sitk'][6][j], sample['direction_sitk'][7][j], sample['direction_sitk'][8][j]],
                                        [sample['spacing'][0][j], sample['spacing'][1][j], sample['spacing'][2][j]],
                                        [sample['size_raw'][0][j], sample['size_raw'][1][j], sample['size_raw'][2][j]],
                                        position[step, j, :, 0:3])
                )
                score_list[step].append(position[step, j, :, 3])
            name_list.append(sample["name"][j])

    # NOTE(review): `position` is unbound here if the loader yields no
    # batches; the dataset is assumed non-empty.
    id_rate = list(range(position.shape[0]))
    id_rate_gt = list(range(position.shape[0]))
    if is_test == 0:
        for step in range(position.shape[0]):
            id_rate[step], id_rate_gt[step] = Get_Identification_Rate(gt_label_list, whole_step_list[step])

    if is_test:
        torch.save({"pred_list": whole_step_list[0], 'score': score_list[0],
                    'pred_cor_list': position_cor_list, 'pred_sag_list': position_sag_list,
                    'name': name_list}, "pred_list/pred_test.pth")
    else:
        print("id_rate: ", id_rate)
        print("id_rate_gt: ", id_rate_gt)
        torch.save({"pred_list": whole_step_list[0], 'score': score_list[0], 'name': name_list, 'gt_list': gt_label_list,
                    'pred_cor_list': position_cor_list, 'pred_sag_list': position_sag_list},
                   "pred_list/pred.pth")
コード例 #19
0
    # load model
    if arch == 'res18':
        model = BiSeNet(37, context_path='resnet18', in_planes=32)
        load_state_dict(
            model,
            ckpt_path=
            'runs/SUNRGBD/kd_pi_lr1e-3_Jul28_002404/checkpoint.pth.tar')
    elif arch == 'res101':
        model = BiSeNet(37, context_path='resnet101', in_planes=64)
        load_state_dict(
            model,
            ckpt_path=
            'runs/SUNRGBD/res101_inp64_deconv_Jul26_205859/checkpoint.pth.tar')
    else:
        raise NotImplementedError
    model.eval().cuda()

    # infer dir
    exp = 'sun'
    img_dir = f'img/{exp}/rgb'
    save_dir = f'img/{exp}/seg_{arch}'
    mkdir(save_dir)

    for img in os.listdir(img_dir):
        print(img)
        img_name = img.split('.')[0]
        img = Image.open(f'{img_dir}/{img}').convert('RGB')
        predict = infer_img(img, vis=True)
        cv2.imwrite(f'{save_dir}/{img_name}.png', predict[:, :, ::-1])