Example #1
# Imports inferred from usage; `mean_squared_error` is assumed to come from
# scikit-learn. `get_fnames`, `rUNet`, `inference_phase_rUNet` and
# `saved_models` are project-level names defined elsewhere in the module.
import os

import numpy as np
import torch
from sklearn.metrics import mean_squared_error


def mse_vs_epochs(coeff, key, data_loaders, data_lengths, only_test=True):
    """Evaluate each saved checkpoint for `coeff` and collect the MSE per epoch."""
    mse = []
    f_list, epochs = get_fnames(coeff)
    # Skip the first 20 checkpoints, so the returned epochs line up with the
    # MSE values actually computed below.
    f_list, epochs = f_list[20:], epochs[20:]
    for fname, e in zip(f_list, epochs):
        print(fname, e)

        torch.cuda.empty_cache()
        model = rUNet(out_size=1)
        checkpoint = torch.load(os.path.join(saved_models,
                                             fname))['model_state_dict']
        model.load_state_dict(checkpoint)
        y_true, y_pred = inference_phase_rUNet(model,
                                               data_loaders,
                                               data_lengths,
                                               batch_size=16,
                                               notebook=False,
                                               test=only_test)
        # Save true/predicted values under a filename built from fields of
        # the checkpoint name.
        np.savez_compressed(os.path.join(
            saved_models, '_'.join([
                'predicted', key,
                fname.split('_')[3],
                fname.split('_')[5],
                fname.split('_')[6]
            ]) + '.npz'),
                            true=y_true,
                            pred=y_pred)
        mse.append(mean_squared_error(y_true, y_pred))

    return mse, epochs
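
A minimal sketch of how this helper might be driven; the coefficient and key below are placeholders, the loaders are assumed to come from the project's `define_dataset` helper (see Example #3), and `matplotlib` availability is an assumption:

import matplotlib.pyplot as plt

# Hypothetical driver: evaluate the checkpoints for one loss coefficient
# and plot MSE against epoch. All argument values are illustrative only.
mse, epochs = mse_vs_epochs(coeff=0.5, key='test',
                            data_loaders=data_loaders,
                            data_lengths=data_lengths,
                            only_test=True)
plt.plot(epochs, mse, marker='o')
plt.xlabel('epoch')
plt.ylabel('MSE')
plt.show()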
Example #2
def predict_dist(coeff,
                 epoch,
                 data_loaders,
                 data_lengths,
                 key,
                 only_test=True):
    """Run inference with the checkpoint saved at `epoch` for `coeff` and
    store the true/predicted values in an .npz file."""
    filelist, epochs = get_fnames(coeff)
    try:
        # Locate the checkpoint saved at the requested epoch.
        sel_epoch = np.argwhere(np.asarray(epochs) == epoch)[0].item()
        model_fname = filelist[sel_epoch]
    except IndexError as ie:
        # No checkpoint exists for this (coeff, epoch) combination.
        print(str(ie), coeff, epoch)
        return

    print(model_fname)

    torch.cuda.empty_cache()
    model = rUNet(out_size=1)
    try:
        checkpoint_file = torch.load(root_path(model_fname))
        checkpoint = checkpoint_file['model_state_dict']
    except RuntimeError as rte:
        # Corrupt or incompatible checkpoint file.
        print(str(rte), root_path(model_fname))
    except FileNotFoundError as fnf:
        print(str(fnf), root_path(model_fname))
    except KeyError as ke:
        # Checkpoint is missing the 'model_state_dict' entry.
        print(str(ke), root_path(model_fname))
    else:
        out_filename = '_'.join([
            'predicted', key,
            model_fname.split('_')[3],
            model_fname.split('_')[5],
            model_fname.split('_')[6]
        ]) + '.npz'
        out_filepath = os.path.join(saved_models, out_filename)
        if os.path.exists(out_filepath):
            # Predictions for this checkpoint were already saved; skip it.
            print('out file {} exists!'.format(out_filepath))
            return

        model.load_state_dict(checkpoint)

        y_true, y_pred = inference_phase_rUNet(model,
                                               data_loaders,
                                               data_lengths,
                                               batch_size=8,
                                               notebook=False,
                                               test=only_test)

        print('saving to {}'.format(out_filename))

        np.savez_compressed(out_filepath, true=y_true, pred=y_pred)

        mse = mean_squared_error(y_true, y_pred)
        print(mse)
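
Both examples above call a `get_fnames` helper that is not shown on this page. Below is a plausible reconstruction, assuming checkpoint names follow the `Trained_..._{n}epochs_{coeff}coeff_...` pattern visible in Example #3 and live under `saved_models`; the regex and sort order are guesses, not the project's actual code:

import os
import re

import numpy as np


def get_fnames(coeff):
    # Hypothetical reconstruction: collect the checkpoints whose filename
    # embeds this loss coefficient, sorted by their epoch number.
    pattern = re.compile(r'_(\d+)epochs_{}coeff'.format(re.escape(str(coeff))))
    found = []
    for fname in os.listdir(saved_models):
        match = pattern.search(fname)
        if match:
            found.append((int(match.group(1)), fname))
    found.sort()
    filelist = [fname for _, fname in found]
    epochs = np.array([epoch for epoch, _ in found])
    return filelist, epochs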
Example #3
            ToTensor()
        ]
        print("Load dataset")

        data_loaders, data_length = define_dataset(
            root_folder=ROOT_DIR,
            base_transformers=base_transformers,
            train_transformers=train_transformers,
            batch_size=16,
            excluded_list=EXCLUDED,
            alldata=False,
            multi_processing=4)
        print("combined loss: {}*dice_loss + {} mse".format(coef, 1.0 - coef))
        torch.cuda.empty_cache()
        print("Train model")
        model = rUNet(out_size=1)
        optimizer = optim.Adam(model.parameters(), lr=1e-4)
        checkpoint_file = os.path.join(
            SRC_DIR, 'saved_models', 'trained_6positions_multi_loss',
            'Trained_rUNet_pytorch_6positions_dataset_100epochs_{}coeff_mask.pkl'
            .format(coef))
        # Sanity check: show which keys the checkpoint stores.
        print(torch.load(checkpoint_file).keys())
        history = retrain_rUNet_multi_loss(
            model=model,
            optimizer=optimizer,
            criterion_dist=nn.MSELoss(),
            criterion_mask=dice_loss,
            loss_coeff=coef,
            data_loaders=data_loaders,
            data_lengths=data_length,
            checkpoint_file=checkpoint_file,
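
Example #3 retrains with the combined objective printed above, `coef*dice_loss + (1 - coef)*mse`. The `dice_loss` passed as `criterion_mask` is not shown on this page; below is a generic soft-Dice formulation of the kind commonly paired with a sigmoid mask head, not necessarily the project's own implementation:

import torch


def dice_loss(pred, target, eps=1e-6):
    # Generic soft Dice loss: 1 - 2|X ∩ Y| / (|X| + |Y|), averaged over the
    # batch. `pred` is expected to hold probabilities in [0, 1] (e.g. after
    # a sigmoid); `target` is the binary ground-truth mask.
    pred = pred.contiguous().view(pred.size(0), -1)
    target = target.contiguous().view(target.size(0), -1)
    intersection = (pred * target).sum(dim=1)
    union = pred.sum(dim=1) + target.sum(dim=1)
    dice = (2.0 * intersection + eps) / (union + eps)
    return 1.0 - dice.mean()

With a definition like this, the total loss inside `retrain_rUNet_multi_loss` would take the form `coef * dice_loss(pred_mask, mask) + (1 - coef) * MSELoss()(pred_dist, dist)`, matching the print statement above.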