Example #1
import json
import os

import torch

import model


def load_model(target, model_name='Unet', device='cpu', args=None):
    model_path = os.path.join('pre-train-model', model_name)
    if not os.path.exists(model_path):
        raise NameError('Model does not exist')
    else:

        target_model_path = os.path.join(model_path, target + ".pth")
        state = torch.load(target_model_path, map_location=device)

        with open(os.path.join(model_path, target + ".json"),
                  'r') as json_file:
            results = json.load(json_file)

        unmix = model.Unet(
            n_fft=results['args']['nfft'],
            n_hop=results['args']['nhop'],
            nb_channels=1,  # results['args']['nb_channels'],
            hidden_size=results['args']['hidden_size'],
            max_bin=1487,
            args=args)
        model_dict = unmix.state_dict()

        pretrained_dict = {k: v for k, v in state.items() if k in model_dict}
        model_dict.update(pretrained_dict)
        unmix.load_state_dict(model_dict)
        unmix.stft.center = True
        unmix.eval()
        unmix.to(device)
        return unmix
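
A minimal usage sketch; the target name 'vocals' and the dummy input shape are assumptions, not part of the snippet:

device = 'cuda' if torch.cuda.is_available() else 'cpu'
unmix = load_model('vocals', model_name='Unet', device=device)

with torch.no_grad():
    # dummy mono waveform: (batch, channels, samples); the exact input format
    # expected by this Unet is an assumption
    audio = torch.randn(1, 1, 44100, device=device)
    estimate = unmix(audio)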
Example #2
def trainer(args):
    """Training loop.

    Handles model, optimizer, loss, and sampler generation,
    data loading, I/O, and checkpoint loading.

    Parameters
    ----------
    args : argparse.Namespace
        Miscellaneous training parameters
    """

    ###############  Dataset ########################
    loader = data.load_denoising(args.data_root,
                                 train=True,
                                 batch_size=args.batch_size,
                                 transform=None)

    val_loader = data.load_denoising(args.data_root,
                                     train=False,
                                     batch_size=args.batch_size,
                                     transform=None)
    ##################################################

    ##### Model, Optimizer, Loss ############
    num_classes = 30
    network = model.Unet(args.unet_hidden, num_classes).to(args.device)

    optimizer = torch.optim.Adam(network.parameters(), lr=args.lr)
    criterion = nn.CrossEntropyLoss()
    ##################################################

    ############## Training loop #####################
    for epoch in range(args.load_checkpoint + 1, args.epochs + 1):
        print('\nEpoch %d/%d' % (epoch, args.epochs))

        # Train
        network, optimizer, train_epoch_loss, train_epoch_dice = train(
            network, loader, criterion, optimizer, args.device)
        # Validate
        network, val_epoch_loss, val_epoch_dice = validate(
            network, val_loader, criterion, args.device)

        # Save checkpoints
        utils.save_checkpoint(epoch, network.state_dict(), optimizer.state_dict(),
                              train_epoch_loss, val_epoch_loss, args.filename,
                              args.log_interval)
        utils.save_loss(epoch, train_epoch_loss, val_epoch_loss, args.run_dir)
        utils.save_loss(epoch, train_epoch_dice, val_epoch_dice, args.run_dir,
                        'dice')
        print("Epoch {}: test loss: {:.6f}, test dice: {:.6f}".format(
            epoch, val_epoch_loss, val_epoch_dice))
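
A hypothetical configuration to drive trainer(); the attribute names mirror exactly what the function reads, the values are placeholders:

from types import SimpleNamespace

args = SimpleNamespace(
    data_root='data/denoising',  # placeholder path
    batch_size=8,
    unet_hidden=64,
    lr=1e-3,
    device='cuda',
    load_checkpoint=0,           # epoch to resume from; 0 starts from scratch
    epochs=100,
    filename='unet_denoise',
    log_interval=10,
    run_dir='runs/unet_denoise',
)
trainer(args)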
Example #3
def run_experiment():
    print('-- Current file: test.py -- testing experiment')

    # Load data
    if cfg.experiment == 'scene_split':
        X, y, X_test, y_test = data.scene_split(verbose=False)
    elif cfg.experiment == 'image_split':
        X, y = data.image_split(verbose=False)
        X, y, X_test, y_test = data.randomize_and_generate_test_split(X, y)
    elif cfg.experiment == 'static_image_split':
        X, y, X_test, y_test = data.static_image_split(verbose=False)
        X_test = X
        y_test = y

    # X and y hold the training data, which is no longer needed here
    del X
    del y

    # Compute the model
    if cfg.architecture == 'EURNet':
        model = m.EURNet()
    elif cfg.architecture == 'Unet_HC':
        model = m.Unet_HC()
    elif cfg.architecture == 'SUnet':
        model = m.SUnet()
    elif cfg.architecture == 'Unet':
        model = m.Unet()
    elif cfg.architecture == 'HC':
        model = m.HC()
    else:
        raise ValueError('Unknown architecture: %s' % cfg.architecture)

    # Compute and save predictions to disk
    if cfg.save_predictions:
        compute_predictions_and_save_to_disk(X_test, y_test, model)

    # Evaluate the previously predicted images
    return evaluate_predicted_images()
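
run_experiment() is driven entirely by the imported cfg module; a hypothetical stand-in covering the fields the function reads (values are placeholders):

class cfg:  # stand-in for the imported cfg module
    experiment = 'scene_split'  # or 'image_split' / 'static_image_split'
    architecture = 'Unet'       # any branch from the selection above
    save_predictions = True

score = run_experiment()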
Example #4
def my_main(params):
    print("***** Starting Programm *****")
    # initialize some variables
    train_loss = []
    val_loss = []
    best_loss = 10
    global udevice

    if params["cpu"]:
        udevice = torch.device("cpu")

    # use cudnn for better speed, if available
    if udevice.type == "cuda":
        cudnn.benchmark = True

    # 1: design model
    model = md.Unet().to(udevice)

    # 2: Construct loss and optimizer

    # Using a softmax layer at the end, applying the log, and using NLLLoss()
    # gives the same loss as using no softmax layer and computing CrossEntropyLoss();
    # the difference is in the output image of the model.
    # To use CrossEntropyLoss(), remove the softmax layer and the torch.log() at the loss.

    criterion = nn.NLLLoss(
        weight=torch.tensor(params["classweight"])).to(udevice)

    optimizer = optim.SGD(model.parameters(),
                          lr=params["learningrate"],
                          momentum=params["momentum"],
                          weight_decay=params["weightdecay"])

    # Reduce the learning rate when a metric has stopped improving
    # (the scheduler step must also be enabled in the epoch loop below)
    # scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min', patience=50, verbose=True)

    # load the ISBI 2012 training data
    # the CTC 2015 dataset loader is not finished yet; the dataset length
    # has to be set manually
    # ISBIDataset(gloob_dir_train, gloob_dir_label, length, is_pad, evaluate, totensor)

    trainset = ISBI.ISBIDataset("./ISBI 2012/Train-Volume/train-volume-*.tif",
                                "./ISBI 2012/Train-Labels/train-labels-*.tif",
                                length=22,
                                is_pad=params["padding"],
                                evaluate=False,
                                totensor=True)

    if not params["evaluate"]:
        valset = ISBI.ISBIDataset("./ISBI 2012/Val-Volume/train-volume-*.tif",
                                  "./ISBI 2012/Val-Labels/train-labels-*.tif",
                                  length=8,
                                  is_pad=params["padding"],
                                  evaluate=True,
                                  totensor=True)
    else:
        valset = ISBI.ISBIDataset("./ISBI 2012/Test-Volume/test-volume-*.tif",
                                  "./ISBI 2012/Test-Volume/test-volume-*.tif",
                                  length=30,
                                  is_pad=params["padding"],
                                  evaluate=True,
                                  totensor=True)

    # num_workers can match the number of CPU cores; pinned memory is page-locked memory.
    # Disable pin_memory if the system freezes or swap is used heavily:
    # https://discuss.pytorch.org/t/what-is-the-disadvantage-of-using-pin-memory/1702
    # batch size is 1 for validation, to get a single output for the loss and not a mean

    # shuffle the input data with replacement
    # (in PyTorch 0.4.1 only WeightedRandomSampler can do this, so create a list of
    # ones matching the length of trainset and draw 30 samples per epoch)
    listones = [1] * trainset.length
    randomsampler = torch.utils.data.WeightedRandomSampler(listones, 30, True)
    trainloader = dl.DataLoader(trainset,
                                sampler=randomsampler,
                                batch_size=params["batch_size"],
                                num_workers=params["workers"],
                                pin_memory=True)
    valloader = dl.DataLoader(valset,
                              batch_size=1,
                              num_workers=params["workers"],
                              pin_memory=True)

    # 3: Training cycle forward, backward , update

    # load the model if set
    if params["resume"]:
        if os.path.isfile(params["resume"]):
            print("=> loading checkpoint '{}'".format(params["resume"]))
            checkpoint = torch.load(params["resume"])
            params["startepoch"] = checkpoint['epoch']
            best_loss = checkpoint['best_loss']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            train_loss = checkpoint['train_loss']
            val_loss = checkpoint['val_loss']
            # scheduler.last_epoch = args.start_epoch
            print("=> loaded checkpoint '{}' (epoch {})".format(
                params["resume"], checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(params["resume"]))
            sys.exit(1)  # non-zero exit code to signal the error

    # print some info for console
    print('Dataset      :  ISBI 2012')
    print('Start Epoch  : ' + str(params["startepoch"]))
    print('End Epoch    : ' + str(params["epochs"]))
    print('Learning rate: ' + str(params["learningrate"]))
    print('Momentum     : ' + str(params["momentum"]))
    print('Weight decay : ' + str(params["weightdecay"]))
    print('Use padding  : ' + str(params["padding"]))

    #  save a txt file with the console info
    if params["txtinfo"] and not params["evaluate"]:
        with open(str(params["savedir"]) + "txtinfo.txt", "a") as myfile:
            myfile.write('Dataset      : ISBI2012')
            myfile.write('\n')
            myfile.write('Start Epoch  : ' + str(params["startepoch"]))
            myfile.write('\n')
            myfile.write('End Epoch    : ' + str(params["epochs"]))
            myfile.write('\n')
            myfile.write('Learning rate: ' + str(params["learningrate"]))
            myfile.write('\n')
            myfile.write('Momentum     : ' + str(params["momentum"]))
            myfile.write('\n')
            myfile.write('Weight decay : ' + str(params["weightdecay"]))
            myfile.write('\n')
            myfile.write('Use padding  : ' + str(params["padding"]))
            myfile.write('\n')

    if params["evaluate"]:
        predict(valloader, model)
        print("evaluation finished")
    else:
        print("***** Start Training *****")

        breakloss = 0
        # train_loss and val_loss were initialized as empty lists above
        for epoch in range(params["startepoch"], params["epochs"]):
            start_time = time.time()

            train_loss.append(
                train(trainloader, model, criterion, optimizer, epoch))
            val_loss.append(evaluate(valloader, model, criterion))
            end_time = time.time()

            print(
                'Epoch [%5d] train_loss: %.4f val_loss: %.4f loop time: %.5f' %
                (epoch + 1, train_loss[epoch], val_loss[epoch],
                 end_time - start_time))
            if params["txtinfo"]:
                with open(str(params["savedir"]) + "txtinfo.txt",
                          "a") as myfile:
                    myfile.write(
                        'Epoche [%5d] train_loss: %.4f val_loss: %.4f loop time: %.5f'
                        % (epoch + 1, train_loss[epoch], val_loss[epoch],
                           end_time - start_time))
                    myfile.write('\n')
                    myfile.close()

            # see info at criterion above
            # scheduler.step(val_loss)
            # Data Augmentation

            # a train loss stuck at ln(2) ≈ 0.6931 means the net predicts both
            # classes with probability 0.5; abort after 7 such epochs in a row
            if 0.6931 < train_loss[epoch] < 0.6932:
                breakloss += 1
                if breakloss > 7:
                    sys.exit()
            else:
                breakloss = 0

            # save best loss
            is_best_loss = val_loss[epoch] < best_loss
            best_loss = min(val_loss[epoch], best_loss)

            if is_best_loss:
                ex.log_scalar('best_epoch', epoch + 1)

            ex.log_scalar('val_loss', val_loss[epoch])
            ex.log_scalar('train_loss', train_loss[epoch])

            # save model
            filename = 'best_loss.pth.tar' if is_best_loss else 'current.pth.tar'
            save_checkpoint(
                {
                    'epoch': epoch + 1,
                    'state_dict': model.state_dict(),
                    'best_loss': best_loss,
                    'train_loss': train_loss,
                    'val_loss': val_loss,
                    'optimizer': optimizer.state_dict(),
                },
                filename=filename)

        print("*****   End  Programm   *****")
Example #5
def one_headed_model(X, y, X_val, y_val):  # one-headed case
    import single_io_augmentation as iaug

    # Generate weights path if it does not exist
    if not os.path.isfile(cfg.path_save_weights) and not os.path.isdir(
            cfg.path_save_weights):
        os.mkdir(cfg.path_save_weights)
    kfold_weights_path = os.path.join(cfg.path_save_weights, 'checkpoint.hdf5')

    # Generate callbacks to save the best validation loss
    callbacks = [
        ModelCheckpoint(kfold_weights_path,
                        monitor='val_loss',
                        save_best_only=True,
                        verbose=0),
        EarlyStopping(monitor='val_loss', patience=20, verbose=0, mode='auto')
    ]

    # Generate the model
    if cfg.architecture == 'SUnet':
        model = m.SUnet()
    elif cfg.architecture == 'Double_SUnet':
        model = m.Double_SUnet()
    elif cfg.architecture == 'Unet':
        model = m.Unet()
    elif cfg.architecture == 'HC':
        model = m.HC()
    else:
        raise ValueError('Unknown architecture: %s' % cfg.architecture)

    if cfg.fit_model:
        # this will do preprocessing and realtime data augmentation
        datagen = iaug.ImageDataGenerator(featurewise_center=False,
                                          samplewise_center=False,
                                          featurewise_std_normalization=False,
                                          samplewise_std_normalization=False,
                                          zca_whitening=False,
                                          rotation_range=8.0,
                                          width_shift_range=0.0,
                                          height_shift_range=0.0,
                                          rescale=1. / 255,
                                          shear_range=0.0,
                                          zoom_range=0.0,
                                          horizontal_flip=True,
                                          vertical_flip=False,
                                          fill_mode='nearest',
                                          rdm_crop=True)

        print('-- Fitting the model...')
        # Keras 1-style arguments (samples_per_epoch / nb_epoch)
        model.fit_generator(
            datagen.flow(X, y, batch_size=cfg.batch_size),
            samples_per_epoch=len(X),
            nb_epoch=cfg.epoch,
            #validation_split               = 0.2,
            validation_data=(X_val, y_val),
            callbacks=callbacks,
            verbose=1)

        # Save the weights corresponding to the last epoch
        last_weight_path = os.path.join(
            cfg.path_save_weights, 'last_epoch_' + str(cfg.epoch) + '.hdf5')
        model.save_weights(last_weight_path)

    return model
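
A usage sketch with dummy arrays; the shapes (RGB inputs, single-channel masks) are assumptions, not taken from the snippet:

import numpy as np

X = np.random.rand(32, 256, 256, 3).astype('float32')
y = np.random.rand(32, 256, 256, 1).astype('float32')
X_val = np.random.rand(8, 256, 256, 3).astype('float32')
y_val = np.random.rand(8, 256, 256, 1).astype('float32')

trained_model = one_headed_model(X, y, X_val, y_val)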
Example #6
print "***** Starting Programm *****"

# 1: design model

# check if cuda is available

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

if args.cpu:
    device = torch.device("cpu")

# .to(device) sends the data to the given device ( cuda or cpu )
if args.bn:
    model = model_bn.Unet().to(device)
else:
    model = model.Unet().to(device)

# use cudnn for better speed, if available
if device.type == "cuda":
    cudnn.benchmark = True

# 2: Construct loss and optimizer

# Using a softmax layer at the end, applying the log, and using NLLLoss()
# gives the same loss as using no softmax layer and computing CrossEntropyLoss();
# the difference is in the output image of the model.
# To use CrossEntropyLoss(), remove the softmax layer and the torch.log() at the loss.

# criterion = nn.CrossEntropyLoss().to(device)
criterion = nn.NLLLoss().to(device)
optimizer = optim.SGD(model.parameters(),
                      lr=args.lr)  # the excerpt is truncated here; the lr argument is assumed
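
The comment block above claims that log(softmax(x)) fed to NLLLoss() matches CrossEntropyLoss() on raw logits; a quick check of that equivalence:

import torch
import torch.nn as nn
import torch.nn.functional as F

logits = torch.randn(2, 3, 4, 4)         # (batch, classes, H, W)
target = torch.randint(0, 3, (2, 4, 4))  # one class index per pixel

ce = nn.CrossEntropyLoss()(logits, target)
nll = nn.NLLLoss()(torch.log(F.softmax(logits, dim=1)), target)
assert torch.allclose(ce, nll, atol=1e-6)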
Example #7
# from piq import ssim, SSIMLoss
import model
import unit

input_dir = './data/Sony/short/'
gt_dir = './data/Sony/long/'
result_dir = './result/9/'
model_PATH = './save/weights_2999.pth'

# set device
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# get test IDs
test_fns = glob.glob(gt_dir + '/1*.ARW')
test_ids = [int(os.path.basename(test_fn)[0:5]) for test_fn in test_fns]

model = model.Unet(4)
model.load_state_dict(torch.load(model_PATH, map_location=device))
model.to(device)
model.eval()

for test_id in test_ids:
    # test the first image in each sequence
    in_files = glob.glob(input_dir + '%05d_00*.ARW' % test_id)
    print(in_files)
    for in_path in in_files:
        in_fn = os.path.basename(in_path)
        gt_files = glob.glob(gt_dir + '%05d_00*.ARW' % test_id)
        gt_path = gt_files[0]
        gt_fn = os.path.basename(gt_path)
        in_exposure = float(in_fn[9:-5])
        gt_exposure = float(gt_fn[9:-5])
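
The excerpt ends here; in the Learning-to-See-in-the-Dark pipeline this loop typically continues by amplifying the short exposure by the exposure ratio. A sketch of that step (rawpy and the pack_raw helper are assumptions, not shown in the snippet):

import numpy as np
import rawpy

ratio = min(gt_exposure / in_exposure, 300)  # cap the amplification factor
raw = rawpy.imread(in_path)
# pack_raw: hypothetical helper that packs the Bayer mosaic into 4 half-resolution channels
input_full = np.expand_dims(pack_raw(raw), axis=0) * ratio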
Example #8
                    type=int,
                    help='number of iterations')
args = parser.parse_args()

best_acc = 0  # best test accuracy
start_epoch = 0  # start from epoch 0 or last checkpoint epoch

# Data
print('==> Preparing data..')
Train_data_loader = data_processing.get_dataloader('./Train_data/')
Val_data_loader = data_processing.get_dataloader('./Val_data/')
print('==>done')

#Model
print('==> Building model..')
net = model.Unet(in_dim=1, out_dim=1, num_filters=4)
net = net.cuda()
optimizer = optim.Adam(net.parameters(),
                       lr=args.lr,
                       weight_decay=5e-3,
                       betas=(0.9, 0.999))
print('==>done')


def get_weight(target):
    target_array = target.data.cpu().numpy()
    fg_num = np.sum(target_array)
    bg_num = target_array.size - fg_num
    ratio = fg_num / bg_num
    weight = torch.ones(2)
    weight[0] = 1 / (1 - ratio)
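
get_weight is cut off before the foreground weight and the return; a plausible completion as a separate sketch. Note it redefines the ratio as the foreground fraction of all pixels so that both inverse weights are well defined (the excerpt's fg/bg ratio would make 1 / (1 - ratio) behave oddly), and it assumes both classes are present:

import numpy as np
import torch

def get_weight_sketch(target):
    # inverse-frequency class weights for a binary segmentation target
    target_array = target.data.cpu().numpy()
    fg_ratio = np.sum(target_array) / target_array.size
    weight = torch.ones(2)
    weight[0] = 1 / (1 - fg_ratio)  # background weight
    weight[1] = 1 / fg_ratio        # foreground weight
    return weight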
Example #9
        "_" + name_loss, date + "_256_" + str(training_num) + "_" +
        name_model[2] + "_" + name_loss, date + "_256_" + str(training_num) +
        "_" + name_model[3] + "_" + name_loss, date + "_256_" +
        str(training_num) + "_" + name_model[4] + "_" + name_loss, date +
        "_256_" + str(training_num) + "_" + name_model[5] + "_" + name_loss
    ]

    print("Loading data.")

    for i in range(len(name)):
        print("Building model.")
        input_shape = (256, 256, 3)

        if name_model[i] == "UNet":

            model_select = model.Unet(size=input_shape)  # 搭建新的模型
            model_select.load_weights(
                ".\\result\\model_record\\20210118_256_49461_UNet_CE.h5"
            )  # 載入現有完成訓練的權重
            batch = 10
            train_flag = 0
        elif name_model[i] == "SegNet":
            continue
            model_select = model.segnet(input_shape=input_shape,
                                        n_labels=1,
                                        kernel=3,
                                        pool_size=(2, 2),
                                        output_mode="sigmoid")
            model_select.load_weights(
                ".\\result\\model_record\\20210118_256_49461_SegNet_CE.h5")
            batch = 10
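
The excerpt is truncated inside the (skipped) SegNet branch. Once a model is selected and its weights are loaded, the typical next step is batch inference; a minimal sketch where the test batch and the output shape are assumptions:

import numpy as np

X_test = np.random.rand(4, 256, 256, 3).astype('float32')  # matches input_shape
masks = model_select.predict(X_test, batch_size=batch)     # e.g. (4, 256, 256, 1) sigmoid masks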
Example #10
def test():
    img_dir = "../dataset/Test/image/"
    gt_dir = "../dataset/Test/annot/"

    testloader = DataLoader(
        test_data(img_dir, gt_dir),
        batch_size=1, shuffle=False)

    model1 = model.Unet()
    model1.load_state_dict(torch.load("./epoch_2DUnet.pth"))
    model1.eval()
    # IoU: flatten the GT and the prediction, multiply them, and threshold the
    # output at 0.5 to extract the mask
    total = 0
    correct = 0
    i = 0
    RoadmIoU = 0
    WatermIoU = 0
    result_path = "./result/"
    with torch.no_grad():
        for data in testloader:
            images, labels = data
            images = images.squeeze(0)
            org = Image.fromarray(images.byte().cpu().numpy())
            # org.save(result_path + "image_%d.png" % i)

            images = org  # reuse the PIL image created above
            preprocess = transforms.Compose([
                transforms.ToTensor(),
                transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
            ])
            images = preprocess(images)
            images = images.unsqueeze(0)
            if torch.cuda.is_available():
                images = images.to('cuda')
                model1.to('cuda')
                labels = labels.to('cuda')
            outputs = model1(images)[0]
            output_predictions = outputs.argmax(0)
            # color palette (only used by the commented-out prediction save below)
            palette = torch.tensor([2 ** 25 - 1, 2 ** 15 - 1, 2 ** 21 - 1])
            colors = torch.as_tensor([i for i in range(21)])[:, None] * palette
            colors = (colors % 255).numpy().astype("uint8")
            pred = output_predictions.cpu().numpy()
            p_water = (pred == 1) * 1.0
            p_road = (pred == 2) * 1.0
            labels = labels.cpu().numpy()
            water = (labels == 1) * 1.0
            road = (labels == 2) * 1.0

            RoadIoU = get_IoU(road, p_road)
            WaterIoU = get_IoU(water, p_water)
            RoadmIoU += RoadIoU
            WatermIoU += WaterIoU
            # predict = Image.fromarray(output_predictions.byte().cpu().numpy())
            # predict.putpalette(colors)
            # # plt.imsave(result_path + "image_%d_0.png" %i, predict)
            i += 1
    RoadmIoU = RoadmIoU / i
    WatermIoU = WatermIoU / i
    print("RoadmIoU = %f" % RoadmIoU)
    print("WatermIoU = %f" % WatermIoU)

    return WatermIoU, RoadmIoU
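
get_IoU is called above but not defined in this excerpt; a minimal binary-IoU implementation it could correspond to (an assumption):

import numpy as np

def get_IoU(gt, pred, eps=1e-7):
    # intersection over union for 0/1 float masks
    inter = np.logical_and(gt > 0.5, pred > 0.5).sum()
    union = np.logical_or(gt > 0.5, pred > 0.5).sum()
    return inter / (union + eps)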
Example #11
def compute_iou_batch(outputs, labels, classes=None):
    '''computes mean iou for a batch of ground truth masks and predicted masks'''
    ious = []
    preds = np.copy(outputs)  # the copy is important
    labels = np.array(labels)  # tensor to np
    for pred, label in zip(preds, labels):
        ious.append(np.nanmean(compute_ious(pred, label, classes)))
    iou = np.nanmean(ious)
    return iou


import model

model = model.Unet("resnet18",
                   encoder_weights="imagenet",
                   classes=4,
                   activation=None)


class Trainer(object):
    '''This class takes care of training and validation of our model'''
    def __init__(self, model):
        self.num_workers = 6
        self.batch_size = {"train": 2, "val": 2}
        self.accumulation_steps = 32 // self.batch_size['train']
        self.lr = 1e-5
        self.num_epochs = 60
        self.best_loss = float("inf")
        self.phases = ["train", "val"]
        self.device = torch.device("cuda:1")
        torch.set_default_tensor_type("torch.cuda.FloatTensor")
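
The Unet constructor here takes an encoder name, pretrained weights, a class count, and an activation, which matches the segmentation_models.pytorch API; assuming that is the library behind import model, a forward pass looks like this (the input size is a placeholder):

import torch

x = torch.randn(2, 3, 256, 256)
with torch.no_grad():
    logits = model(x)  # expected shape: (2, 4, 256, 256), one channel per class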
Example #12

# Wrap our existing data loader and move each batch to the chosen device as it
# is read. We don't need to extend an existing class to create a PyTorch data
# loader: an __iter__ method that yields the batches and a __len__ method that
# returns the number of batches are enough.
class DeviceDataLoader():
    def __init__(self, dl, device):
        self.dl = dl
        self.device = device

    def __iter__(self):
        for b in self.dl:
            yield to_device(b, self.device)

    def __len__(self):
        return len(self.dl)


# move the data onto the GPU so that subsequent computation runs there
train_dl = DeviceDataLoader(train_dl, dev)
valid_dl = DeviceDataLoader(valid_dl, dev)

model = model.Unet(1, 1)  # instantiate the CNN model used for training
to_device(model, dev)  # move the model and its parameters to GPU memory as well

loss_func = loss_functions.loss_func()

# choose the optimizer
opt = optim.RMSprop(model.parameters(), lr=lr, weight_decay=1e-8, momentum=0.9)

fit.fit(epochs, model, loss_func, opt, train_dl, valid_dl)
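
DeviceDataLoader relies on a to_device helper that the excerpt does not show; the conventional implementation of that helper looks like this:

def to_device(data, device):
    # move tensor(s) to the chosen device, recursing into lists and tuples
    if isinstance(data, (list, tuple)):
        return [to_device(x, device) for x in data]
    return data.to(device, non_blocking=True)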