def closure(i):
    # Optionally perturb the convolutional weights (4-D parameter tensors)
    # with scaled Gaussian noise as a regularizer.
    if param_noise:
        for n in [x for x in net.parameters() if len(x.size()) == 4]:
            n.data += n.data.clone().normal_() * n.data.std() / 50

    out = net(net_input)

    # Masked reconstruction loss: only the known pixels contribute.
    total_loss = mse(out * mask_var, img_var * mask_var)
    total_loss.backward()

    psnr = compare_psnr(utils.var_to_np(out), img_np)
    psnr_history.append(psnr)

    if report:
        print('Iteration %05d    Loss %f   PSNR %.3f' % (i, total_loss.item(), psnr), '\r', end='')
        if i % show_every == 0:
            out_np = utils.var_to_np(out)
            utils.plot_image_grid([np.clip(out_np, 0, 1)], factor=figsize, nrow=1)

    return total_loss
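
A minimal driver sketch for this closure (the same pattern appears in evaluate_model below); `net`, `num_iter` and `LR` are assumed from the surrounding setup:

# Assumes `net`, `closure`, `num_iter` and LR are defined as in the snippets here.
optimizer = torch.optim.Adam(net.parameters(), lr=LR)
for j in range(num_iter):
    optimizer.zero_grad()   # clear gradients from the previous step
    closure(j)              # forward pass + backward() inside the closure
    optimizer.step()        # apply the gradient update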
Example #2
def test_batch_with_labels(net, file, resize=False, batch_size=10, image_size=384, smooth=1.0, lam=1.0):
    '''
    Test on a validation dataset (here we only consider BCE loss instead of focal loss).
    No TTA or ensembling is used in this case.
    Parameters:
        @net: the network model object.
        @file: root directory of the validation dataset.
        @resize: boolean flag for image resizing.
        @batch_size: batch size.
        @image_size: the size that the image is converted to.
        @smooth: number added to the numerator and denominator when computing the dice loss.
        @lam: weight balancing the dice loss in the final combined loss.
    Return:
        average loss (BCE + dice) over batches
        F1 score of the test
    '''

    # keep original data
    data_augment = False
    rotate = False
    change_color = False
    test_dataset = utils.MyDataset(file, resize, data_augment, image_size, rotate, change_color)
    dataloader = utils_data.DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=False)
    epoch_loss = 0.0
    numer = 0.0
    denom = 0.0
    gamma = 0.0
    loss_type = 'bce'
    Loss = utils.loss(smooth, lam, gamma, loss_type)
    for i, batch in enumerate(dataloader):
        print('Test on batch %d' % i)
        image = utils.np_to_var(batch['image'])
        mask = utils.np_to_var(batch['mask'])
        pred = net(image)

        loss = Loss.final_loss(pred, mask)
        # weight by the actual batch size (the last batch may be smaller)
        epoch_loss += loss.item() * image.size(0)

        mask = utils.var_to_np(mask)
        pred = utils.var_to_np(pred)
        numer += (mask * (pred > 0.5)).sum()
        denom += mask.sum() + (pred > 0.5).sum()
        
    epoch_loss /= len(test_dataset)
    f1 = 2.0 * numer / denom
    return epoch_loss, f1
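
A hypothetical usage sketch; the `model` object and the './val_data' path are assumptions:

# Hypothetical usage: `model` is a trained segmentation net and './val_data'
# a validation root laid out the way utils.MyDataset expects.
val_loss, val_f1 = test_batch_with_labels(model, './val_data', resize=True)
print('validation loss %.4f, F1 %.4f' % (val_loss, val_f1))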
def evaluate_model(net, net_input, img_np, img_noisy_np, num_iter=6000,
                   show_every=500, report=True, figsize=10):

    loss_fn = torch.nn.MSELoss().type(dtype)
    input_noise = True
    LR = 0.01
    reg_noise_std = 1. / 30.

    net_input_saved = net_input.data.clone()
    noise = net_input.data.clone()
    img_noisy_var = utils.np_to_var(img_noisy_np).type(dtype)

    psnr_history = []

    def closure(i):
        # Re-perturb the fixed input with fresh Gaussian noise each step
        # (the regularization trick from the deep-image-prior setup).
        if input_noise:
            net_input.data = net_input_saved + (noise.normal_() * reg_noise_std)

        out = net(net_input)
        total_loss = loss_fn(out, img_noisy_var)
        total_loss.backward()

        psnr = compare_psnr(utils.var_to_np(out), img_np)
        psnr_history.append(psnr)

        if report:
            print('Iteration %05d    Loss %f   PSNR %.3f' % (i, total_loss.item(), psnr), '\r', end='')
            if i % show_every == 0:
                out_np = utils.var_to_np(out)
                utils.plot_image_grid([np.clip(out_np, 0, 1)], factor=figsize, nrow=1)

        return total_loss

    print('Starting optimization with ADAM')
    optimizer = torch.optim.Adam(net.parameters(), lr=LR)

    for j in range(num_iter):
        optimizer.zero_grad()
        closure(j)
        optimizer.step()

    if report:
        out_np = utils.var_to_np(net(net_input))
        utils.plot_image_grid([np.clip(out_np, 0, 1), img_np], factor=13)

        data = {}
        data['psnr_history'] = psnr_history
        pickle.dump(data, open('denoising_psnr.p', 'wb'))

    max_index, max_value = max(enumerate(psnr_history), key=operator.itemgetter(1))
    return max_index, max_value
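
A hedged usage sketch for the denoising evaluation; the image arrays and `net`/`net_input` are assumed to come from the usual deep-image-prior setup:

# Assumed setup: img_np is the clean reference and img_noisy_np the noisy
# observation, both float arrays in [0, 1]; net/net_input as usual.
best_iter, best_psnr = evaluate_model(net, net_input, img_np, img_noisy_np,
                                      num_iter=6000, show_every=500)
print('best PSNR %.3f at iteration %d' % (best_psnr, best_iter))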
Example #5
def validate(score, rate_num, user_item_matrix_test):
    # Softmax over the rate_num rating levels, then take the expected
    # rating as the prediction.
    sm = nn.Softmax(dim=0)
    score = sm(score)
    score_list = torch.split(score, rate_num)
    pred = 0
    for i in range(rate_num):
        pred += (i + 1) * score_list[0][i]

    pred = utils.var_to_np(pred)

    test_mask = user_item_matrix_test > 0

    square_err = (pred * test_mask - user_item_matrix_test) ** 2
    mse = square_err.sum() / test_mask.sum()
    test_rmse = np.sqrt(mse)
    
    return test_rmse
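
For intuition, a tiny worked example of the expected-rating step above, with made-up softmax probabilities:

# Made-up softmax probabilities over 5 rating levels for one user-item pair:
probs = np.array([0.05, 0.10, 0.20, 0.40, 0.25])
expected_rating = sum((i + 1) * probs[i] for i in range(5))
print(expected_rating)  # 3.7 = 1*0.05 + 2*0.10 + 3*0.20 + 4*0.40 + 5*0.25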
Example #7
def test_single_image(net, file, size=384, resize=True):
    '''
    Test a single image with the trained model.
    Parameters:
        @net: the network model object.
        @file: name of the image file to test.
        @size: the size that the image is converted to.
        @resize: boolean flag for image resizing.
    Return:
        predicted segmentation mask
        original image overlaid with a red mask
    '''
    
    uint_image = io.imread(file)
    test_image_origin = np.array(uint_image).astype(np.float32) / 255.0
    
    # convert the (optionally resized) image to a Torch tensor with batch size 1
    if resize:
        test_image_origin = transform.resize(test_image_origin, (size, size), mode='constant', anti_aliasing=True)
        test_image = np.moveaxis(test_image_origin, 2, 0).astype(np.float32)  # CHW tensor format
    else:
        test_image = test_image_origin
        test_image = np.moveaxis(test_image, 2, 0).astype(np.float32)  # CHW tensor format
    test_image = np.expand_dims(test_image, axis=0)
    test_image = utils.np_to_var(torch.from_numpy(test_image))
    
    pred_test = net(test_image)
    pred_np = utils.var_to_np(pred_test)[0][0]  # predicted confidence map
    
    # convert prediction to binary segmentation mask
    new_mask = (pred_np >= 0.5)

    # make a red cover on the original image for visualization of segmentation result
    channel = test_image_origin[:, :, 0]
    channel[new_mask] = 1.0
    test_image_origin[:, :, 0] = channel
    mask = new_mask * 255
    return mask.astype(np.uint8), test_image_origin
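
A hypothetical usage sketch ('sample.png' and `model` are assumptions):

# Run a trained net on one image and save both outputs.
mask, overlay = test_single_image(model, 'sample.png', size=384, resize=True)
io.imsave('sample_mask.png', mask)                                  # uint8 mask, values {0, 255}
io.imsave('sample_overlay.png', (overlay * 255).astype(np.uint8))  # red-overlaid image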
Example #8
def test_single_with_TTA(net, file, size=384, resize=True):
    '''
    Test a single image with the trained model, using test-time augmentation (TTA).
    Parameters:
        @net: the network model object.
        @file: name of the image file to test.
        @size: the size that the image is converted to.
        @resize: boolean flag for image resizing.
    Return:
        predicted segmentation mask
        original image overlaid with a red mask
    '''

    uint_image = io.imread(file)
    test_image_origin = np.array(uint_image).astype(np.float32) / 255.0

    # resize
    if resize:
        test_image = transform.resize(test_image_origin, (size, size), mode='constant', anti_aliasing=True)
    else:
        test_image = test_image_origin

    # collect the 8 images generated by TTA: decompose i into three binary
    # flags b1, b2, b3 that select the flip/rotation combination
    image_set = []
    for i in range(8):
        b1 = i // 4
        b2 = (i - b1 * 4) // 2
        b3 = i - b1 * 4 - b2 * 2

        # flip and rotate the image according to the chosen flags
        tem_image = utils.flip_rotate(test_image, b1, b2, b3, inverse=False)

        # convert to CHW tensor format
        tem_image = np.moveaxis(tem_image, 2, 0).astype(np.float32)

        image_set.append(tem_image)
    image_tensor = np.array(image_set)
    
    # convert to Torch tensor variable
    image_tensor = utils.np_to_var(torch.from_numpy(image_tensor))

    pred_test = net(image_tensor)
    pred_np = utils.var_to_np(pred_test)

    pred = np.squeeze(pred_np, axis=1)
    
    # inversely transform each prediction back to the original orientation
    for i in range(8):
        b1 = i // 4
        b2 = (i - b1 * 4) // 2
        b3 = i - b1 * 4 - b2 * 2
        pred[i] = utils.flip_rotate(pred[i], b1, b2, b3, inverse=True)
        
    # merge into one prediction
    pred = np.median(pred, axis = 0)

    # convert prediction to binary segmentation mask
    new_mask = (pred >= 0.5)

    # make a red cover on the original image for visualization of segmentation result
    channel = test_image_origin[:, :, 0]
    channel[new_mask] = 1.0
    test_image_origin[:, :, 0] = channel
    mask = new_mask * 255
    return mask.astype(np.uint8), test_image_origin
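
The index arithmetic in the TTA loops is just a 3-bit decomposition; an equivalent form, shown for clarity:

# Equivalent 3-bit decomposition of the TTA index (same b1, b2, b3 as above):
for i in range(8):
    b1, b2, b3 = (i >> 2) & 1, (i >> 1) & 1, i & 1
    assert (b1, b2, b3) == (i // 4, (i - b1 * 4) // 2, i - b1 * 4 - b2 * 2)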
Example #9
def main(args):

    # get arguments
    rate_num = args.rate_num
    use_side_feature = args.use_side_feature
    lr = args.lr
    weight_decay = args.weight_decay
    num_epochs = args.num_epochs
    hidden_dim = args.hidden_dim
    side_hidden_dim = args.side_hidden_dim
    out_dim = args.out_dim
    drop_out = args.drop_out
    split_ratio = args.split_ratio
    save_steps = args.save_steps
    log_dir = args.log_dir
    saved_model_folder = args.saved_model_folder
    use_data_whitening = args.use_data_whitening
    use_laplacian_loss = args.use_laplacian_loss
    laplacian_loss_weight = args.laplacian_loss_weight

    # mark the training run and save the training arguments for future analysis
    post_fix = '/' + time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime())
    log_dir = log_dir + post_fix
    writer = SummaryWriter(log_dir=log_dir)
    with open(log_dir + '/test.txt', 'a') as f:
        f.write(str(vars(args)))

    print(log_dir)

    # get the prepared data
    feature_u, feature_v, feature_dim, all_M_u, all_M_v, side_feature_u, side_feature_v, all_M, mask, user_item_matrix_train, user_item_matrix_test, laplacian_u, laplacian_v = prepare(
        args)

    if not os.path.exists(saved_model_folder):
        os.makedirs(saved_model_folder)
    weights_name = saved_model_folder + post_fix + '_weights'

    net = utils.create_models(feature_u, feature_v, feature_dim, hidden_dim,
                              rate_num, all_M_u, all_M_v, side_hidden_dim,
                              side_feature_u, side_feature_v, use_side_feature,
                              out_dim, drop_out)
    net.train()  # in train mode

    # create AMSGrad optimizer
    optimizer = optim.Adam(net.parameters(), lr=lr, weight_decay=weight_decay,
                           amsgrad=True)
    Loss = utils.loss(all_M, mask, user_item_matrix_train,
                      laplacian_loss_weight)
    iter_bar = tqdm(range(num_epochs), desc='Iter (loss=X.XXX)')
    for epoch in iter_bar:

        optimizer.zero_grad()

        score = net.forward()

        if use_laplacian_loss:
            loss = Loss.laplacian_loss(score, laplacian_u, laplacian_v)
        else:
            loss = Loss.loss(score)

        loss.backward()

        optimizer.step()

        with torch.no_grad():
            rmse = Loss.rmse(score)

            val_rmse = validate(score, rate_num, user_item_matrix_test)
            iter_bar.set_description(
                'Iter (loss=%5.3f, rmse=%5.3f, val_rmse=%5.5f)' %
                (loss.item(), rmse.item(), val_rmse.item()))

            writer.add_scalars('scalar', {'loss': loss.item()}, epoch)

        if epoch % save_steps == 0:
            torch.save(net.state_dict(), weights_name)

    rmse = Loss.rmse(score)
    print('Final training RMSE: ', rmse.data.item())
    torch.save(net.state_dict(), weights_name)

    sm = nn.Softmax(dim=0)
    score = sm(score)
    score_list = torch.split(score, rate_num)
    pred = 0
    for i in range(rate_num):
        pred += (i + 1) * score_list[0][i]

    pred = utils.var_to_np(pred)

    # test the performance on held-out (nonzero) entries
    test_mask = user_item_matrix_test > 0

    square_err = (pred * test_mask - user_item_matrix_test)**2
    mse = square_err.sum() / test_mask.sum()
    test_rmse = np.sqrt(mse)
    print('Test RMSE: ', test_rmse)
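
The masked RMSE at the end only counts rated entries; a toy check with made-up numbers:

# Toy check of the masked RMSE: zeros in the test matrix mean "unrated"
# and are excluded from the error.
truth = np.array([[5.0, 0.0], [0.0, 3.0]])
guess = np.array([[4.0, 2.0], [1.0, 3.0]])
m = truth > 0
print(np.sqrt(((guess * m - truth) ** 2).sum() / m.sum()))  # sqrt(1/2) ~ 0.707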
def evaluate_model(net, net_input, img_np, img_mask_np, num_iter=6000,
                   show_every=500, report=True, figsize=10):
    LR = 0.01
    param_noise = False

    # Loss
    mse = torch.nn.MSELoss().type(dtype)

    img_var = utils.np_to_var(img_np).type(dtype)
    mask_var = utils.np_to_var(img_mask_np).type(dtype)

    psnr_history = []
    def closure(i):
        # Optionally perturb the convolutional weights (4-D parameter tensors)
        # with scaled Gaussian noise as a regularizer.
        if param_noise:
            for n in [x for x in net.parameters() if len(x.size()) == 4]:
                n.data += n.data.clone().normal_() * n.data.std() / 50

        out = net(net_input)

        # Masked reconstruction loss: only the known pixels contribute.
        total_loss = mse(out * mask_var, img_var * mask_var)
        total_loss.backward()

        psnr = compare_psnr(utils.var_to_np(out), img_np)
        psnr_history.append(psnr)

        if report:
            print('Iteration %05d    Loss %f   PSNR %.3f' % (i, total_loss.item(), psnr), '\r', end='')
            if i % show_every == 0:
                out_np = utils.var_to_np(out)
                utils.plot_image_grid([np.clip(out_np, 0, 1)], factor=figsize, nrow=1)

        return total_loss
    
    print('Starting optimization with ADAM')
    optimizer = torch.optim.Adam(net.parameters(), lr=LR)

    for j in range(num_iter):
        optimizer.zero_grad()
        closure(j)
        optimizer.step()
        
    if report:
        out_np = utils.var_to_np(net(net_input))
        utils.plot_image_grid([np.clip(out_np, 0, 1), img_np], factor=13)

        data = {}
        data['psnr_history'] = psnr_history
        pickle.dump(data, open('inpainting_validation_psnr.p', 'wb'))

    max_index, max_value = max(enumerate(psnr_history), key=operator.itemgetter(1))
    return max_index, max_value
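
A hedged usage sketch for the inpainting variant; the mask convention is an assumption based on the masked loss above:

# Assumed setup: img_mask_np is a binary mask (1 = known pixel, 0 = hole)
# with the same shape as img_np; net/net_input as in the other snippets.
best_iter, best_psnr = evaluate_model(net, net_input, img_np, img_mask_np,
                                      num_iter=6000, show_every=500)
print('best PSNR %.3f at iteration %d' % (best_psnr, best_iter))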
Example #11
def main(args):
    
    # get the arguments
    rate_num = args.rate_num
    use_side_feature = args.use_side_feature  # whether to use side features
    use_GAT = args.use_GAT
    lr = args.lr
    weight_decay = args.weight_decay
    num_epochs = args.num_epochs
    hidden_dim = args.hidden_dim
    side_hidden_dim = args.side_hidden_dim
    out_dim = args.out_dim
    drop_out = args.drop_out
    split_ratio = args.split_ratio
    save_steps = args.save_steps
    saved_model_folder = args.saved_model_folder
    laplacian_loss_weight = args.laplacian_loss_weight

    post_fix = '/' + time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime())

    # preprocess the data
    feature_u, feature_v, feature_dim, all_M_u, all_M_v, side_feature_u, side_feature_v, all_M, mask,\
    user_item_matrix_train, user_item_matrix_test, laplacian_u, laplacian_v = prepare(args)

    if not os.path.exists(saved_model_folder):
        os.makedirs(saved_model_folder)  
    weights_name = saved_model_folder + post_fix + '_weights'

    net = utils.create_models(feature_u, feature_v, feature_dim, hidden_dim, rate_num, all_M_u, all_M_v,
                              side_hidden_dim, side_feature_u, side_feature_v,
                              use_side_feature, use_GAT, out_dim, user_item_matrix_train, drop_out)
    net.train()

    optimizer = optim.Adam(net.parameters(), lr=lr, weight_decay=weight_decay)
    Loss = utils.loss(all_M, mask, user_item_matrix_train, laplacian_loss_weight)
    iter_bar = tqdm(range(num_epochs), desc='Iter (loss=X.XXX)')

    for epoch in iter_bar:

        optimizer.zero_grad()

        score = net.forward()

        loss = Loss.loss(score)

        loss.backward()

        optimizer.step()

        with torch.no_grad():
            rmse = Loss.rmse(score)
            
            val_rmse = validate(score, rate_num, user_item_matrix_test)
            iter_bar.set_description('Iter (loss=%5.3f, rmse=%5.3f, val_rmse=%5.5f)'%(loss.item(), rmse.item(), val_rmse.item()))


        if epoch % save_steps == 0:
            torch.save(net.state_dict(), weights_name)

    rmse = Loss.rmse(score)
    print('Final training RMSE: ', rmse.data.item())        
    torch.save(net.state_dict(), weights_name)
    
    sm = nn.Softmax(dim=0)
    score = sm(score)
    score_list = torch.split(score, rate_num)
    pred = 0
    for i in range(rate_num):
        pred += (i + 1) * score_list[0][i]

    pred = utils.var_to_np(pred)

    test_mask = user_item_matrix_test > 0

    square_err = (pred * test_mask - user_item_matrix_test) ** 2
    mse = square_err.sum() / test_mask.sum()
    test_rmse = np.sqrt(mse)
    print('Test RMSE: ', test_rmse)
Example #12
    # Compute loss
    loss = cross_entropy(torch.squeeze(output), labels)

    # Do back-propagation to get gradients of weights w.r.t. loss
    loss.backward()

    # Ask the optimizer to adjust the parameters in the direction of lower loss
    optimizer.step()

    # Every 10th iteration - print training loss
    if i % 10 == 0:
        network.eval()

        # Log training loss/accuracy
        print("Iteration:", i, "Training loss:", utils.var_to_np(loss))
        if LOG_TENSORBOARD:
            logger.log_scalar("training_loss", utils.var_to_np(loss), i)
        for k, v in utils.compute_accuracy(torch.argmax(output, 1), labels).items():
            if LOG_TENSORBOARD:
                logger.log_scalar("training_" + k, v, i)
            print(" -", k, v, "%")

    # every 100th iteration
    if i % 100 == 0 and LOG_TENSORBOARD:
        network.eval()

        # Output predicted train/validation class/probability images
        for class_img in train_class_imgs + val_class_imgs:

            slice = class_img[1]