Example #1
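# Evaluate a trained OrientationModel on a validation split: run inference on the
# validation images, compare predicted orientations against the ground-truth Euler
# angles, and save prediction annotations and result visualizations.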
def main_coco():
    #val_path = "data/Definitive/Hexagon/val"
    #val_path = "data/Definitive/Cube/val"
    #val_path = "data/Definitive/Octahedron/val"
    #val_path = "data/Definitive/Needle/val"
    val_path = "data/Random/Cube/val"
    #weights = "logs/Final/Hexagon/orientations_2900.h5"
    #weights = "logs/Final/Cube/orientations_2900.h5"
    #weights = "logs/Final/Needle/orientations_3000.h5"
    weights = "logs/Random/Cube/orientations_1200.h5"
    #weights = "logs/Final/Octahedron/orientations_2900.h5"
    evaluation_dir = "evaluation"
    config = Config()

    dataset_val = FiguresDataset()
    dataset_val.load_figures(val_path, "val_annotations.json")
    dataset_val.prepare()

    val_images, val_orientations, _ = load_figures_data(dataset_val, config, mask=False)

    # Loading model and weights
    or_model = model.OrientationModel("logs", config)

    or_model.compile(weights)

    # Inference
    predictions = detect.detect(or_model, val_images)

    gt_orientations = R.from_euler('ZYX', val_orientations, degrees=True).as_matrix()
    utils.evaluate(gt_orientations, predictions, dataset_val, evaluation_dir)

    coco_data.save_pred_annotations(predictions, dataset_val, val_path, evaluation_dir)

    visualize.show_results(val_images, predictions, evaluation_dir)
Example #2
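    # Run one validation pass, accumulating the loss and the accuracy,
    # per-class accuracy and mean-IoU values returned by evaluate().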
    def validate(self, epoch):
        self.model.eval()

        val_loss = AverageMeter()
        val_acc = AverageMeter()
        val_acc_cls = AverageMeter()
        val_mean_iu = AverageMeter()
        # inputs_all, gts_all, predictions_all = [], [], []

        for i, (inputs, gts) in enumerate(self.val_loader):
            N = inputs.size(0)
            inputs = inputs.to(self.device)
            gts = gts.to(self.device)
            # gts = gts.to(self.device, dtype=torch.float32)

            outputs = self.model(inputs)
            preds = torch.argmax(outputs, dim=1)
            # gts = F.upsample(torch.unsqueeze(gts, 0), outputs.size()[2:], mode='nearest')
            # gts = torch.squeeze(gts, 0).to(torch.int64)
            val_loss.update(self.criterion(outputs, gts).item(), N)
            val_metric = evaluate(preds.detach(), gts.detach(),
                                  self.num_classes)
            val_acc.update(val_metric[0])
            val_acc_cls.update(val_metric[1])
            val_mean_iu.update(val_metric[2])

        return val_loss, val_acc, val_acc_cls, val_mean_iu
Example #3
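    # Train for one epoch and track the same metrics as the validation pass:
    # loss, accuracy, per-class accuracy and mean IoU.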
    def train(self, epoch):
        self.model.train()

        train_loss = AverageMeter()
        train_acc = AverageMeter()
        train_acc_cls = AverageMeter()
        train_mean_iu = AverageMeter()

        for i, (inputs, targets) in enumerate(self.train_loader):
            inputs = inputs.to(self.device)
            targets = targets.to(self.device)
            # targets = targets.to(self.device, dtype=torch.float32)
            self.optim.zero_grad()
            outputs = self.model(inputs)
            preds = torch.argmax(outputs, dim=1)

            # targets = F.upsample(torch.unsqueeze(targets, 0), outputs.size()[2:], mode='nearest')
            # targets = torch.squeeze(targets, 0).to(torch.int64)
            loss = self.criterion(outputs, targets)
            loss.backward()
            self.optim.step()

            train_loss.update(loss.item(), inputs.size(0))
            train_metric = evaluate(preds.detach(), targets.detach(),
                                    self.num_classes)
            train_acc.update(train_metric[0])
            train_acc_cls.update(train_metric[1])
            train_mean_iu.update(train_metric[2])

            if epoch == 0 and i == 1:
                print('iteration started on {}'.format(self.device))

        return train_loss, train_acc, train_acc_cls, train_mean_iu
Example #4
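    # Train the VAE_EAD model for GRN inference: alternate between updating the
    # network weights and the adjacency matrix adj_A, periodically evaluate the
    # recovered edges against the ground-truth network, and write the inferred
    # edge list to a TSV file.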
    def train_model(self):
        dataloader, Evaluate_Mask, num_nodes, num_genes, data, truth_edges, TFmask2, gene_name = self.init_data(
        )
        adj_A_init = self.initalize_A(data)
        vae = VAE_EAD(adj_A_init, 1, self.opt.n_hidden,
                      self.opt.K).float().cuda()
        Tensor = torch.cuda.FloatTensor
        optimizer = optim.RMSprop(vae.parameters(), lr=self.opt.lr)
        optimizer2 = optim.RMSprop([vae.adj_A], lr=self.opt.lr * 0.2)
        scheduler = torch.optim.lr_scheduler.StepLR(
            optimizer, step_size=self.opt.lr_step_size, gamma=self.opt.gamma)
        best_Epr = 0
        vae.train()
        print(vae)
        for epoch in range(self.opt.n_epochs + 1):
            loss_all, mse_rec, loss_kl, data_ids, loss_tfs, loss_sparse = [], [], [], [], [], []
            if epoch % (self.opt.K1 + self.opt.K2) < self.opt.K1:
                vae.adj_A.requires_grad = False
            else:
                vae.adj_A.requires_grad = True
            for i, data_batch in enumerate(dataloader, 0):
                optimizer.zero_grad()
                inputs, data_id, dropout_mask = data_batch
                inputs = Variable(inputs.type(Tensor))
                data_ids.append(data_id.cpu().detach().numpy())
                temperature = max(0.95**epoch, 0.5)
                loss, loss_rec, loss_gauss, loss_cat, dec, y, hidden = vae(
                    inputs,
                    dropout_mask=dropout_mask.cuda(),
                    temperature=temperature,
                    opt=self.opt)
                sparse_loss = self.opt.alpha * torch.mean(torch.abs(vae.adj_A))
                loss = loss + sparse_loss
                loss.backward()
                mse_rec.append(loss_rec.item())
                loss_all.append(loss.item())
                loss_kl.append(loss_gauss.item() + loss_cat.item())
                loss_sparse.append(sparse_loss.item())
                if epoch % (self.opt.K1 + self.opt.K2) < self.opt.K1:
                    optimizer.step()
                else:
                    optimizer2.step()
            scheduler.step()
            if epoch % (self.opt.K1 + self.opt.K2) >= self.opt.K1:
                Ep, Epr = evaluate(vae.adj_A.cpu().detach().numpy(),
                                   truth_edges, Evaluate_Mask)
                best_Epr = max(Epr, best_Epr)
                print('epoch:', epoch, 'Ep:', Ep, 'Epr:', Epr, 'loss:',
                      np.mean(loss_all), 'mse_loss:', np.mean(mse_rec),
                      'kl_loss:', np.mean(loss_kl), 'sparse_loss:',
                      np.mean(loss_sparse))
        extractEdgesFromMatrix(vae.adj_A.cpu().detach().numpy(), gene_name,
                               TFmask2).to_csv(self.opt.save_name +
                                               '/GRN_inference_result.tsv',
                                               sep='\t',
                                               index=False)
Example #5
    def test(self, model, loader, adv_test=False, use_pseudo_label=False):
        # When adv_test is False, adv_acc is returned as -1.
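        # Compute clean accuracy over the loader; if adv_test is set, also
        # compute accuracy on adversarial examples generated by self.attack.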

        total_acc = 0.0
        num = 0
        total_adv_acc = 0.0

        with torch.no_grad():
            for data, label in loader:
                data, label = tensor2cuda(data), tensor2cuda(label)
                model.eval()
                output = model(data)
                # output = model(data, _eval=True)

                pred = torch.max(output, dim=1)[1]
                te_acc = evaluate(pred.cpu().numpy(),
                                  label.cpu().numpy(), 'sum')

                total_acc += te_acc
                num += output.shape[0]

                if adv_test:
                    # use predicted label as target label
                    with torch.enable_grad():
                        adv_data = self.attack.perturb(
                            data, pred if use_pseudo_label else label, 'mean',
                            False)
                    model.eval()
                    adv_output = model(adv_data)
                    # adv_output = model(adv_data, _eval=True)

                    adv_pred = torch.max(adv_output, dim=1)[1]
                    adv_acc = evaluate(adv_pred.cpu().numpy(),
                                       label.cpu().numpy(), 'sum')
                    total_adv_acc += adv_acc
                else:
                    total_adv_acc = -num

        return total_acc / num, total_adv_acc / num
Example #6
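# Collect intermediate representations from several models: hook the requested
# layers, forward every batch of patches through each model inside the
# utils.evaluate() context, then remove the hooks and return the logged
# representations.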
def get_model_representation(models, patches, layers, sample_rate=10):

    # patches come in batches, so use the sampler's size rather than the dataloader's length
    loggers = [
        hook_model(model, layers[i], len(patches.sampler), sample_rate)
        for i, model in enumerate(models)
    ]

    with utils.evaluate(*models):
        for patch, _ in patches:
            [model(patch) for model in models]

    [
        hook_handle.remove() for model in models
        for hook_handle in model.hook_handles
    ]
    return [logger.get_representation() for logger in loggers]
Example #7
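# Compare two pretrained MSD regression models (depth 80 and depth 30) on a
# held-out walnut scan and print mean/std MSE, SSIM and DSC for each.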
def test_model():

    model_params = {
        'c_in': 1,
        'c_out': 1,
        'depth': 80,
        'width': 1,
        'dilations': [1, 2, 4, 8, 16],
        'loss': 'L2'
    }
    model_80 = MSDRegressionModel(**model_params)
    state_dicts = sorted(glob.glob(
        'model_weights/MSD_d80_walnuts_finetuned_1114125135/best*.h5'),
                         key=_nat_sort)
    model_80.msd.load_state_dict(torch.load(state_dicts[-1]))

    model_params = {
        'c_in': 1,
        'c_out': 1,
        'depth': 30,
        'width': 1,
        'dilations': [1, 2, 4, 8, 16],
        'loss': 'L2'
    }
    model_30 = MSDRegressionModel(**model_params)
    state_dicts = sorted(
        glob.glob('model_weights/MSD_d30_walnuts1113135028/best*.h5'),
        key=_nat_sort)
    model_30.msd.load_state_dict(torch.load(state_dicts[-1]))

    agd_ims, fdk_ims = utils.load_walnut_ds()
    # agd_ims, fdk_ims = utils.load_phantom_ds()
    random.seed(0)
    test_id = random.randrange(len(agd_ims))
    input_te, target_te = [fdk_ims.pop(test_id)], [agd_ims.pop(test_id)]

    te_ds = MultiOrbitDataset(input_te, target_te, data_augmentation=False)
    te_dl = DataLoader(te_ds, batch_size=8, sampler=ValSampler(len(te_ds)))

    model_80.set_normalization(te_dl)
    model_30.set_normalization(te_dl)

    mean, std = test(model_80, te_ds)
    print(
        f"Model d80 \n\tMSE: {mean[0]:.4e} +-{std[0]:.4e}, \n\tSSIM: {mean[1]:.4f} +-{std[1]:.4e}, \n\tDSC: {mean[2]:.4f} +-{std[2]:.4e}"
    )

    mean, std = test(model_30, te_ds)
    print(
        f"Model d30 \n\tMSE: {mean[0]:.4e} +-{std[0]:.4e}, \n\tSSIM: {mean[1]:.4f} +-{std[1]:.4e}, \n\tDSC: {mean[2]:.4f} +-{std[2]:.4e}"
    )

    sys.exit()
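    # NOTE: the code below is unreachable because of the sys.exit() call above.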

    model.msd.load_state_dict(torch.load(state_dicts[-1]))
    with evaluate(model):
        for i, (input_, target) in enumerate(te_dl):
            pred = model(input_)
            print(
                f"MSE: {mse(pred, target):.4e}, SSIM: {ssim(pred, target):.4f}, DSC: {dsc(pred, target):.4f}"
            )

            imsave(
                f'outputs/test_pred_{i+1}.tif',
                np.clip(
                    np.concatenate([
                        input_[0, 0].cpu().numpy(), pred[0, 0].cpu().numpy(),
                        target[0, 0].cpu().numpy()
                    ],
                                   axis=-1), 0, None))

    plt.figure(figsize=(10, 10))
    plt.plot(epochs, losses)
    plt.savefig('outputs/training.png')
Example #8
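# Build a vectorized gym environment (optionally Atari-preprocessed and/or
# frame-stacked), train the selected agent, then optionally evaluate the
# trained policy and save the model.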
def run(args, callback=None):
    if args.atari:
        env = gym.vector.make(
            args.env_name,
            num_envs=args.num_envs,
            wrappers=[
                AtariPreprocessing,
                lambda x: FrameStack(x, args.framestack, lz4_compress=False),
            ])
    elif args.do_framestack:
        env = gym.vector.make(
            args.env_name,
            num_envs=args.num_envs,
            wrappers=[
                lambda x: FrameStack(x, args.framestack, lz4_compress=False),
            ])
    else:
        env = gym.vector.make(args.env_name, num_envs=args.num_envs)

    env = StatsWrapper(env)
    env = TorchWrapper(env)
    env = TorchStepRunnerWrapper(env, args.train_steps, args.continuous)

    logger = Logger(
        args.log.split(",") if len(args.log) > 0 else [], env, args)

    policy = getattr(sys.modules[__name__], args.policy_name)(
        args.framestack
        if args.atari or args.do_framestack else env.state_size,
        env.action_size,
        continuous=args.continuous,
        stochastic_value=args.sv,
        feature_extraction=args.feature_extraction)
    agent = getattr(sys.modules[__name__], args.agent_name)(env,
                                                            policy,
                                                            **vars(args),
                                                            logger=logger)

    agent.learn(args.total_steps, callback, args.callback_interval)

    results = None
    if not args.skip_eval:
        if args.atari:
            env = make_atari_subproc_vecenv(args.env_name, 1)
        elif args.do_framestack:
            env = gym.vector.make(
                args.env_name,
                num_envs=1,
                wrappers=[
                    lambda x: FrameStack(
                        x, args.framestack, lz4_compress=False),
                ])
        else:
            env = gym.vector.make(args.env_name, num_envs=1)
        env = TorchWrapper(env)
        results = evaluate(env, agent, args.eval_steps)
        logger.log_results(results)

    if args.save:
        logger.save_model(policy)

    return results
Example #9
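    # Adversarial (or standard) training loop using SGD: periodically log
    # standard and robust accuracy, save sample images and checkpoints, and
    # evaluate on the validation loader after every epoch.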
    def train(self, model, tr_loader, va_loader=None, adv_train=False):
        args = self.args
        logger = self.logger

        opt = torch.optim.SGD(model.parameters(),
                              args.learning_rate,
                              weight_decay=args.weight_decay,
                              momentum=args.momentum)
        scheduler = torch.optim.lr_scheduler.MultiStepLR(
            opt, milestones=[40000, 60000], gamma=0.1)
        _iter = 0

        begin_time = time()

        for epoch in range(1, args.max_epoch + 1):
            for data, label in tr_loader:
                data, label = tensor2cuda(data), tensor2cuda(label)

                if adv_train:
                    # When training, the adversarial example is created from a random
                    # close point to the original data point. If in evaluation mode,
                    # just start from the original data point.
                    adv_data = self.attack.perturb(data, label, 'mean', True)
                    output = model(adv_data, _eval=False)
                else:
                    output = model(data, _eval=False)

                loss = F.cross_entropy(output, label)

                opt.zero_grad()
                loss.backward()
                opt.step()

                if _iter % args.n_eval_step == 0:
                    t1 = time()

                    if adv_train:
                        with torch.no_grad():
                            stand_output = model(data, _eval=True)
                        pred = torch.max(stand_output, dim=1)[1]

                        # print(pred)
                        std_acc = evaluate(pred.cpu().numpy(),
                                           label.cpu().numpy()) * 100

                        pred = torch.max(output, dim=1)[1]
                        # print(pred)
                        adv_acc = evaluate(pred.cpu().numpy(),
                                           label.cpu().numpy()) * 100

                    else:

                        adv_data = self.attack.perturb(data, label, 'mean',
                                                       False)

                        with torch.no_grad():
                            adv_output = model(adv_data, _eval=True)
                        pred = torch.max(adv_output, dim=1)[1]
                        # print(label)
                        # print(pred)
                        adv_acc = evaluate(pred.cpu().numpy(),
                                           label.cpu().numpy()) * 100

                        pred = torch.max(output, dim=1)[1]
                        # print(pred)
                        std_acc = evaluate(pred.cpu().numpy(),
                                           label.cpu().numpy()) * 100

                    t2 = time()

                    logger.info(
                        f'epoch: {epoch}, iter: {_iter}, lr={opt.param_groups[0]["lr"]}, '
                        f'spent {time()-begin_time:.2f} s, tr_loss: {loss.item():.3f}'
                    )

                    logger.info(
                        f'standard acc: {std_acc:.3f}%, robustness acc: {adv_acc:.3f}%'
                    )

                    # begin_time = time()

                    # if va_loader is not None:
                    #     va_acc, va_adv_acc = self.test(model, va_loader, True)
                    #     va_acc, va_adv_acc = va_acc * 100.0, va_adv_acc * 100.0

                    #     logger.info('\n' + '='*30 + ' evaluation ' + '='*30)
                    #     logger.info('test acc: %.3f %%, test adv acc: %.3f %%, spent: %.3f' % (
                    #         va_acc, va_adv_acc, time() - begin_time))
                    #     logger.info('='*28 + ' end of evaluation ' + '='*28 + '\n')

                    begin_time = time()

                if _iter % args.n_store_image_step == 0:
                    tv.utils.save_image(
                        torch.cat([data.cpu(), adv_data.cpu()], dim=0),
                        os.path.join(args.log_folder, f'images_{_iter}.jpg'),
                        nrow=16)

                if _iter % args.n_checkpoint_step == 0:
                    file_name = os.path.join(args.model_folder,
                                             f'checkpoint_{_iter}.pth')
                    save_model(model, file_name)

                _iter += 1
                # the scheduler is stepped per training iteration
                scheduler.step()

            if va_loader is not None:
                t1 = time()
                va_acc, va_adv_acc = self.test(model, va_loader, True, False)
                va_acc, va_adv_acc = va_acc * 100.0, va_adv_acc * 100.0

                t2 = time()
                logger.info('\n'+'='*20 +f' evaluation at epoch: {epoch} iteration: {_iter} ' \
                    +'='*20)
                logger.info(
                    f'test acc: {va_acc:.3f}%, test adv acc: {va_adv_acc:.3f}%, spent: {t2-t1:.3f} s'
                )
                logger.info('=' * 28 + ' end of evaluation ' + '=' * 28 + '\n')
Example #10
    def train(self, model, tr_loader, va_loader=None, adv_train=False):
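        # Variant of the training loop above: Adam instead of SGD, and explicit
        # model.train() / model.eval() calls instead of an _eval flag.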
        args = self.args
        logger = self.logger

        opt = torch.optim.Adam(model.parameters(),
                               args.learning_rate,
                               weight_decay=args.weight_decay)
        scheduler = torch.optim.lr_scheduler.MultiStepLR(opt,
                                                         milestones=[100, 150],
                                                         gamma=0.1)
        _iter = 0

        begin_time = time()

        for epoch in range(1, args.max_epoch + 1):
            scheduler.step()
            for data, label in tr_loader:
                data, label = tensor2cuda(data), tensor2cuda(label)

                if adv_train:
                    # When training, the adversarial example is created from a random
                    # close point to the original data point. If in evaluation mode,
                    # just start from the original data point.
                    adv_data = self.attack.perturb(data, label, 'mean', True)
                    # output = model(adv_data, _eval=False)
                    # NOTE: it is unclear whether this reproduces the original _eval=False behavior
                    model.train()
                    output = model(adv_data)
                else:
                    # output = model(data, _eval=False)
                    model.train()
                    output = model(data)

                loss = F.cross_entropy(output, label)

                opt.zero_grad()
                loss.backward()
                opt.step()

                if _iter % args.n_eval_step == 0:
                    t1 = time()

                    if adv_train:
                        with torch.no_grad():
                            model.eval()
                            stand_output = model(data)
                            # stand_output = model(data, _eval=True)
                        pred = torch.max(stand_output, dim=1)[1]

                        # print(pred)
                        std_acc = evaluate(pred.cpu().numpy(),
                                           label.cpu().numpy()) * 100

                        pred = torch.max(output, dim=1)[1]
                        # print(pred)
                        adv_acc = evaluate(pred.cpu().numpy(),
                                           label.cpu().numpy()) * 100

                    else:

                        adv_data = self.attack.perturb(data, label, 'mean',
                                                       False)

                        with torch.no_grad():
                            model.eval()
                            adv_output = model(adv_data)
                            # adv_output = model(adv_data, _eval=True)
                        pred = torch.max(adv_output, dim=1)[1]
                        # print(label)
                        # print(pred)
                        adv_acc = evaluate(pred.cpu().numpy(),
                                           label.cpu().numpy()) * 100

                        pred = torch.max(output, dim=1)[1]
                        # print(pred)
                        std_acc = evaluate(pred.cpu().numpy(),
                                           label.cpu().numpy()) * 100

                    t2 = time()

                    print('%.3f' % (t2 - t1))

                    logger.info(
                        'epoch: %d, iter: %d, spent %.2f s, tr_loss: %.3f' %
                        (epoch, _iter, time() - begin_time, loss.item()))

                    logger.info(
                        'standard acc: %.3f %%, robustness acc: %.3f %%' %
                        (std_acc, adv_acc))

                    # begin_time = time()

                    # if va_loader is not None:
                    #     va_acc, va_adv_acc = self.test(model, va_loader, True)
                    #     va_acc, va_adv_acc = va_acc * 100.0, va_adv_acc * 100.0

                    #     logger.info('\n' + '='*30 + ' evaluation ' + '='*30)
                    #     logger.info('test acc: %.3f %%, test adv acc: %.3f %%, spent: %.3f' % (
                    #         va_acc, va_adv_acc, time() - begin_time))
                    #     logger.info('='*28 + ' end of evaluation ' + '='*28 + '\n')

                    begin_time = time()

                if _iter % args.n_store_image_step == 0:
                    tv.utils.save_image(
                        torch.cat([data.cpu(), adv_data.cpu()], dim=0),
                        os.path.join(args.log_folder, 'images_%d.jpg' % _iter),
                        nrow=16)

                if _iter % args.n_checkpoint_step == 0:
                    file_name = os.path.join(args.model_folder,
                                             'checkpoint_%d.pth' % _iter)
                    save_model(model, file_name)

                _iter += 1

            if va_loader is not None:
                t1 = time()
                va_acc, va_adv_acc = self.test(model, va_loader, True, False)
                va_acc, va_adv_acc = va_acc * 100.0, va_adv_acc * 100.0

                t2 = time()
                logger.info('\n'+'='*20 +' evaluation at epoch: %d iteration: %d '%(epoch, _iter) \
                    +'='*20)
                logger.info(
                    'test acc: %.3f %%, test adv acc: %.3f %%, spent: %.3f' %
                    (va_acc, va_adv_acc, t2 - t1))
                logger.info('=' * 28 + ' end of evaluation ' + '=' * 28 + '\n')