def __init__(self, opt: Namespace):
    super(ImageClassifier, self).__init__()
    self.opt = opt
    # Build the backbone network from the parsed options.
    self.net = define_net(opt)
    self.loss_fun = nn.CrossEntropyLoss()
    # Per-step / per-epoch logs (the name suggests they are pickled later).
    self.log_pkl = {
        'step_train_loss': [],
        'epoch_val_loss': [],
        'epoch_val_score': [],
        'epoch_val_label': []
    }
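A minimal, hypothetical way to exercise this constructor, assuming ImageClassifier subclasses nn.Module and that define_net only reads attributes present on the options object; the attribute names below are illustrative, not the module's actual interface:

import torch
from argparse import Namespace

# Illustrative options; the real attribute set depends on define_net.
opt = Namespace(model_name='resnet18', input_nc=3, output_nc=10)
clf = ImageClassifier(opt)

logits = clf.net(torch.randn(2, 3, 224, 224))  # forward through the wrapped net
loss = clf.loss_fun(logits, torch.tensor([0, 1]))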
Example #2
    def __init__(self, opt):
        #super(RMEP, self).__init__()
        self.opt = opt
        self.isTrain = not opt.isTest
        # Select the compute device: GPU if available, otherwise CPU.
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

        # save all the checkpoints to save_dir
        self.save_dir = os.path.join(opt.checkpoints_dir, opt.name)
        mkdirs(self.save_dir)

        self.dataroot = opt.dataroot

        # criterion and output nc
        if self.opt.model_type == 'reg':
            self.output_nc = len(opt.output_params)
            self.criterion = nn.SmoothL1Loss()
            self.output_params = opt.output_params
        elif self.opt.model_type in ('clf', 'clf_multi'):
            self.criterion = nn.CrossEntropyLoss()
            self.output_nc = opt.output_nc
            self.output_params = ['label']
        else:
            raise ValueError(f'Unknown model type {opt.model_type}.')

        # get the neural network
        self.netRMEP = networks.define_net(opt.model_name, opt.input_nc,
                                           self.output_nc, opt.nrf, opt.norm,
                                           opt.init_type, opt.init_gain,
                                           opt.num_blk)
        self.netRMEP.to(self.device)
        self.model_names = ['RMEP']

        # get the optimizer
        # Optimize only the trainable parameters.
        self.optimizer = torch.optim.Adam(
            filter(lambda p: p.requires_grad, self.netRMEP.parameters()),
            lr=opt.lr, betas=(opt.beta1, 0.999))
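The save_dir/model_names bookkeeping follows the common checkpointing convention in which every registered name 'X' has a matching self.netX attribute. A minimal sketch of the corresponding save step under that assumption (the epoch label is illustrative):

def save_networks(self, epoch):
    # Persist each registered network to <save_dir>/<epoch>_net_<name>.pth.
    for name in self.model_names:
        net = getattr(self, 'net' + name)
        save_path = os.path.join(self.save_dir, '%s_net_%s.pth' % (epoch, name))
        torch.save(net.state_dict(), save_path)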
Example #3
import numpy as np
import torch
import imageio

# `option`, `common`, and `define_net` are repo-local helpers
# (option parsing, image I/O, and the network factory).
def run(pretrained_path,
        output_path,
        model_name='SRFBN',
        scale=4,
        degrad='BI',
        opt_path='options/test/test_SRFBN_example.json'):
    opt = option.parse(opt_path)
    opt = option.dict_to_nonedict(opt)
    # model = create_model(opt)
    model = define_net({
        "scale": scale,
        "which_model": "SRFBN",
        "num_features": 64,
        "in_channels": 3,
        "out_channels": 3,
        "num_steps": 4,
        "num_groups": 6
    })

    img = common.read_img('./results/LR/MyImage/chip.png', 'img')

    np_transpose = np.ascontiguousarray(img.transpose((2, 0, 1)))
    tensor = torch.from_numpy(np_transpose).float()
    lr_tensor = torch.unsqueeze(tensor, 0)

    # Some checkpoints wrap the weights under a 'state_dict' key.
    checkpoint = torch.load(pretrained_path)
    if 'state_dict' in checkpoint:
        checkpoint = checkpoint['state_dict']
    model.load_state_dict(checkpoint)
    torch.save(model, './model.pt')  # persist the full module for later reuse

    model.eval()  # inference mode for deterministic layers
    with torch.no_grad():
        SR = model(lr_tensor)[0]
    visuals = np.transpose(SR.data[0].float().cpu().numpy(),
                           (1, 2, 0)).astype(np.uint8)
    imageio.imwrite(output_path, visuals)
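A hypothetical invocation; the checkpoint and output paths below are illustrative:

run('./models/SRFBN_x4_BI.pth', './results/SR/chip_x4.png', scale=4)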
Example #4
def train(opt, data, device, k):
    cudnn.deterministic = True
    torch.cuda.manual_seed_all(2019)
    torch.manual_seed(2019)
    random.seed(2019)

    model = define_net(opt, k)
    optimizer = define_optimizer(opt, model)
    scheduler = define_scheduler(opt, optimizer)
    print(model)
    print("Number of Trainable Parameters: %d" % count_parameters(model))
    print("Activation Type:", opt.act_type)
    print("Optimizer Type:", opt.optimizer_type)
    print("Regularization Type:", opt.reg_type)

    if opt.use_vgg_features:
        use_patch, roi_dir = '_patch_', 'all_st_patches_512'
    else:
        use_patch, roi_dir = '_', 'all_st'

    loader_cls = (PathgraphomicFastDatasetLoader
                  if opt.use_vgg_features else PathgraphomicDatasetLoader)
    custom_data_loader = loader_cls(opt, data, split='train', mode=opt.mode)
    train_loader = torch.utils.data.DataLoader(dataset=custom_data_loader,
                                               batch_size=opt.batch_size,
                                               shuffle=True,
                                               collate_fn=mixed_collate)
    metric_logger = {
        'train': {
            'loss': [],
            'pvalue': [],
            'cindex': [],
            'surv_acc': [],
            'grad_acc': []
        },
        'test': {
            'loss': [],
            'pvalue': [],
            'cindex': [],
            'surv_acc': [],
            'grad_acc': []
        }
    }

    for epoch in tqdm(range(opt.epoch_count, opt.niter + opt.niter_decay + 1)):

        if opt.finetune == 1:
            unfreeze_unimodal(opt, model, epoch)

        model.train()
        # Epoch accumulators; the risk/censor/time arrays feed the C-Index.
        risk_pred_all, censor_all, survtime_all = np.array([]), np.array([]), np.array([])
        loss_epoch, grad_acc_epoch = 0, 0

        for batch_idx, (x_path, x_grph, x_omic, censor, survtime,
                        grade) in enumerate(train_loader):

            censor = censor.to(device) if "surv" in opt.task else censor
            grade = grade.to(device) if "grad" in opt.task else grade
            _, pred = model(x_path=x_path.to(device),
                            x_grph=x_grph.to(device),
                            x_omic=x_omic.to(device))

            loss_cox = CoxLoss(survtime, censor, pred,
                               device) if opt.task == "surv" else 0
            loss_reg = define_reg(opt, model)
            loss_nll = F.nll_loss(pred, grade) if opt.task == "grad" else 0
            loss = (opt.lambda_cox * loss_cox + opt.lambda_nll * loss_nll +
                    opt.lambda_reg * loss_reg)
            loss_epoch += loss.item()

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            if opt.task == "surv":
                risk_pred_all = np.concatenate(
                    (risk_pred_all, pred.detach().cpu().numpy().reshape(-1)
                     ))  # Logging Information
                censor_all = np.concatenate(
                    (censor_all, censor.detach().cpu().numpy().reshape(-1)
                     ))  # Logging Information
                survtime_all = np.concatenate(
                    (survtime_all, survtime.detach().cpu().numpy().reshape(-1)
                     ))  # Logging Information
            elif opt.task == "grad":
                pred = pred.argmax(dim=1, keepdim=True)
                grad_acc_epoch += pred.eq(grade.view_as(pred)).sum().item()

            if opt.verbose > 0 and opt.print_every > 0 and (
                    batch_idx % opt.print_every == 0
                    or batch_idx + 1 == len(train_loader)):
                print("Epoch {:02d}/{:02d} Batch {:04d}/{:d}, Loss {:9.4f}".
                      format(epoch + 1, opt.niter + opt.niter_decay,
                             batch_idx + 1, len(train_loader), loss.item()))

        scheduler.step()
        # lr = optimizer.param_groups[0]['lr']
        #print('learning rate = %.7f' % lr)

        if opt.measure or epoch == (opt.niter + opt.niter_decay - 1):
            loss_epoch /= len(train_loader)

            if opt.task == 'surv':
                cindex_epoch = CIndex_lifeline(risk_pred_all, censor_all, survtime_all)
                pvalue_epoch = cox_log_rank(risk_pred_all, censor_all, survtime_all)
                surv_acc_epoch = accuracy_cox(risk_pred_all, censor_all)
            else:
                cindex_epoch = pvalue_epoch = surv_acc_epoch = None
            grad_acc_epoch = (grad_acc_epoch / len(train_loader.dataset)
                              if opt.task == 'grad' else None)
            loss_test, cindex_test, pvalue_test, surv_acc_test, grad_acc_test, pred_test = test(
                opt, model, data, 'test', device)

            metric_logger['train']['loss'].append(loss_epoch)
            metric_logger['train']['cindex'].append(cindex_epoch)
            metric_logger['train']['pvalue'].append(pvalue_epoch)
            metric_logger['train']['surv_acc'].append(surv_acc_epoch)
            metric_logger['train']['grad_acc'].append(grad_acc_epoch)

            metric_logger['test']['loss'].append(loss_test)
            metric_logger['test']['cindex'].append(cindex_test)
            metric_logger['test']['pvalue'].append(pvalue_test)
            metric_logger['test']['surv_acc'].append(surv_acc_test)
            metric_logger['test']['grad_acc'].append(grad_acc_test)

            pred_path = os.path.join(
                opt.checkpoints_dir, opt.exp_name, opt.model_name,
                '%s_%d%s%d_pred_test.pkl' % (opt.model_name, k, use_patch, epoch))
            with open(pred_path, 'wb') as f:  # close the file handle explicitly
                pickle.dump(pred_test, f)

            if opt.verbose > 0:
                if opt.task == 'surv':
                    print('[{:s}]\t\tLoss: {:.4f}, {:s}: {:.4f}'.format(
                        'Train', loss_epoch, 'C-Index', cindex_epoch))
                    print('[{:s}]\t\tLoss: {:.4f}, {:s}: {:.4f}\n'.format(
                        'Test', loss_test, 'C-Index', cindex_test))
                elif opt.task == 'grad':
                    print('[{:s}]\t\tLoss: {:.4f}, {:s}: {:.4f}'.format(
                        'Train', loss_epoch, 'Accuracy', grad_acc_epoch))
                    print('[{:s}]\t\tLoss: {:.4f}, {:s}: {:.4f}\n'.format(
                        'Test', loss_test, 'Accuracy', grad_acc_test))

            if opt.task == 'grad' and loss_epoch < opt.patience:
                print("Early stopping at Epoch %d" % epoch)
                break

    return model, optimizer, metric_logger
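A hedged sketch of driving this trainer for one cross-validation fold, assuming opt is the parsed argparse namespace used throughout this code; load_fold is a hypothetical stand-in for however the per-fold data dict is actually produced:

import torch

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
data = load_fold(opt, fold=1)  # hypothetical helper returning the per-fold data dict
model, optimizer, metric_logger = train(opt, data, device, k=1)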
Example #5
    pat_train = (pnas_splits.index[pnas_splits[k] == 'Train']
                 if opt.make_all_train == 0 else pnas_splits.index)
    pat_test = pnas_splits.index[pnas_splits[k] == 'Test']
    cv_splits[int(k)] = {}

    model = None
    if opt.use_vgg_features:
        load_path = os.path.join(opt.checkpoints_dir, opt.exp_name,
                                 opt.model_name,
                                 '%s_%s.pt' % (opt.model_name, k))
        model_ckpt = torch.load(load_path, map_location=device)
        model_state_dict = model_ckpt['model_state_dict']
        if hasattr(model_state_dict, '_metadata'):
            del model_state_dict._metadata
        model = define_net(opt, None)
        if isinstance(model, torch.nn.DataParallel):
            model = model.module
        print('Loading the model from %s' % load_path)
        model.load_state_dict(model_state_dict)
        model.eval()

    train_x_patname, train_x_path, train_x_grph, train_x_omic, train_e, train_t, train_g = getAlignedMultimodalData(
        opt, model, device, all_dataset, pat_train, pat2img)
    test_x_patname, test_x_path, test_x_grph, test_x_omic, test_e, test_t, test_g = getAlignedMultimodalData(
        opt, model, device, all_dataset, pat_test, pat2img)

    train_x_omic = np.array(train_x_omic).squeeze(axis=1)
    train_e = np.array(train_e, dtype=np.float64)
    train_t = np.array(train_t, dtype=np.float64)
    test_x_omic = np.array(test_x_omic).squeeze(axis=1)
    test_e = np.array(test_e, dtype=np.float64)
    test_t = np.array(test_t, dtype=np.float64)
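The fragment above builds aligned per-fold arrays; a hedged sketch of how they might be packed into the cv_splits dict that this snippet initializes (the key names are illustrative):

cv_splits[int(k)] = {
    'train': {'x_omic': train_x_omic, 'e': train_e, 't': train_t},
    'test': {'x_omic': test_x_omic, 'e': test_e, 't': test_t}
}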
Example #6
import torch

import networks

opt = {"scale": 4}
opt.update({
    "which_model": "RDN",
    "num_features": 64,
    "in_channels": 3,
    "out_channels": 3,
    "num_blocks": 16,
    "num_layers": 8
})

net = networks.define_net(opt)
print(net)

x = torch.randn(1, 3, 40, 40)
with torch.no_grad():
    ret = net(x)

print("type:", type(ret), len(ret))
for t in ret:
    print(t.shape)