Example #1
# d_cor_xx = file["d_cor_xx"]
#
# d_cor_xy_ordered = np.argsort(d_cor_xy)
# XTrain = np.delete(XTrain, d_cor_xy_ordered[:24],axis = 1)
# XTest = np.delete(XTest, d_cor_xy_ordered[:24],axis = 1)
linearRegression = linear_model.LinearRegression(fit_intercept=False, n_jobs=-1)
count = 0
# grow the saved feature set until enough informative features are found
# or the active set is full
while len(num_informative) < n_informative and len(saved_indexes) < active_set:
    losses = np.array([])
    betas = np.array([])
    coeffs = np.array([])
    corrs = np.array([])
    blocks_generated = generate_samples(num_blocks, XTrain.shape[1], active_set, r, saved_indexes)
    # fit a linear model on each random feature block and record its test loss
    for i in range(0, num_blocks):
        x_train_i, x_test_i = get_current_data(XTrain, XTest, blocks_generated[i, :])
        new_loss, beta, corr = compute_mse(linearRegression, x_train_i, YTrain, x_test_i, YTest)
        losses = np.append(losses, new_loss)

        if len(betas) == 0:
            betas = beta
            corrs = corr
        else:
            betas = np.append(betas, beta, axis=1)
            corrs = np.append(corrs, corr, axis=1)
    ordered_losses = np.argsort(losses)
    orderd_losses_ = np.sort(losses)
    ordered_loss_ten = extract_losses(ordered_losses)

    weights_indexes = get_common_indexes(ordered_loss_ten, blocks_generated, n_features, betas)
    ordered_weights_indexes = np.argsort(weights_indexes)[::-1]
    ordered_weights_indexes_values = np.sort(weights_indexes)[::-1]
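
Helpers such as `compute_mse` and `get_current_data` are defined elsewhere in this project. The sketch below shows one plausible shape for them, inferred purely from the call sites above; the signatures and return layout are assumptions, not the project's actual code:

import numpy as np

def get_current_data(XTrain, XTest, block):
    # Hypothetical: select the feature columns of the current block
    # (works whether `block` holds column indices or a boolean mask).
    return XTrain[:, block], XTest[:, block]

def compute_mse(model, x_train, y_train, x_test, y_test):
    # Hypothetical: fit on the block, then return the test MSE, the fitted
    # coefficients as a column vector, and per-feature correlations with y.
    model.fit(x_train, y_train)
    mse = np.mean((model.predict(x_test) - y_test) ** 2)
    beta = model.coef_.reshape(-1, 1)
    corr = np.array([np.corrcoef(x_train[:, j], y_train)[0, 1]
                     for j in range(x_train.shape[1])]).reshape(-1, 1)
    return mse, beta, corr

Returning `beta` and `corr` as column vectors matches the `np.append(..., axis=1)` accumulation in the loop above.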
Example #2
def test(args, logger, device_ids):
    logger.info("Loading network")
    model = AdaMatting(in_channel=4)
    # load on CPU first; the model is moved to the target device below
    ckpt = torch.load("./ckpts/ckpt_best_alpha.tar", map_location="cpu")
    model.load_state_dict(ckpt["state_dict"])
    if args.cuda:
        device = torch.device("cuda:{}".format(device_ids[0]))
        if len(device_ids) > 1:
            logger.info("Loading with multiple GPUs")
            model = torch.nn.DataParallel(model, device_ids=device_ids)
        # model = model.cuda(device=device_ids[0])
    else:
        device = torch.device("cpu")
    model = model.to(device)
    torch.set_grad_enabled(False)
    model.eval()

    test_names = gen_test_names()

    with open(os.path.join(args.raw_data_path, "Combined_Dataset/Test_set/test_fg_names.txt")) as f:
        fg_files = f.read().splitlines()
    with open(os.path.join(args.raw_data_path, "Combined_Dataset/Test_set/test_bg_names.txt")) as f:
        bg_files = f.read().splitlines()

    out_path = os.path.join(args.raw_data_path, "pred/")
    if not os.path.exists(out_path):
        os.makedirs(out_path)

    logger.info("Start testing")
    avg_sad = AverageMeter()
    avg_mse = AverageMeter()
    for index, name in enumerate(test_names):
        # file names
        fcount = int(name.split('.')[0].split('_')[0])
        bcount = int(name.split('.')[0].split('_')[1])
        img_name = fg_files[fcount]
        bg_name = bg_files[bcount]
        merged_name = bg_name.split(".")[0] + "!" + img_name.split(".")[0] + "!" + str(fcount) + "!" + str(index) + ".png"
        trimap_name = img_name.split(".")[0] + "_" + str(index % 20) + ".png"

        # read files
        merged = os.path.join(args.raw_data_path, "test/merged/", merged_name)
        alpha = os.path.join(args.raw_data_path, "test/mask/", img_name)
        trimap = os.path.join(args.raw_data_path, "Combined_Dataset/Test_set/Adobe-licensed images/trimaps/", trimap_name)
        merged = cv.imread(merged)
        # merged = cv.resize(merged, None, fx=0.75, fy=0.75)
        merged = cv.cvtColor(merged, cv.COLOR_BGR2RGB)
        trimap = cv.imread(trimap)
        # trimap = cv.resize(trimap, None, fx=0.75, fy=0.75)
        alpha = cv.imread(alpha, 0)
        # alpha = cv.resize(alpha, None, fx=0.75, fy=0.75)

        # process merged image
        merged = transforms.ToPILImage()(merged)
        out_merged = merged.copy()
        merged = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ])(merged)
        h, w = merged.shape[1:3]
        # crop height and width down to the nearest multiples of 16
        h_crop = h - h % 16 if h >= 16 else h
        h_margin = (h - h_crop) // 2
        w_crop = w - w % 16 if w >= 16 else w
        w_margin = (w - w_crop) // 2

        # write cropped gt alpha
        alpha = alpha[h_margin : h_margin + h_crop, w_margin : w_margin + w_crop]
        cv.imwrite(out_path + "{:04d}_gt_alpha.png".format(index), alpha)

        # generate and write cropped gt trimap
        gt_trimap = np.zeros(alpha.shape)
        gt_trimap.fill(128)
        gt_trimap[alpha <= 0] = 0
        gt_trimap[alpha >= 255] = 255
        cv.imwrite(out_path + "{:04d}_gt_trimap.png".format(index), gt_trimap)

        # assemble the 4-channel input (RGB + trimap) and crop it to feed the network properly
        x = torch.zeros((1, 4, h, w), dtype=torch.float)
        x[0, 0:3, :, :] = merged
        x[0, 3, :, :] = torch.from_numpy(trimap[:, :, 0] / 255.)
        x = x[:, :, h_margin : h_margin + h_crop, w_margin : w_margin + w_crop]

        # write cropped input images
        out_merged = transforms.ToTensor()(out_merged)
        out_merged = out_merged[:, h_margin : h_margin + h_crop, w_margin : w_margin + w_crop]
        out_merged = transforms.ToPILImage()(out_merged)
        out_merged.save(out_path + "{:04d}_input_merged.png".format(index))
        out_trimap = transforms.ToPILImage()(x[0, 3, :, :])
        out_trimap.save(out_path + "{:04d}_input_trimap.png".format(index))

        # test
        x = x.type(torch.FloatTensor).to(device)
        _, pred_trimap, pred_alpha, _, _ = model(x)

        cropped_trimap = x[0, 3, :, :].unsqueeze(dim=0).unsqueeze(dim=0)
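        # the trimap fixes the known regions: background -> 0, foreground -> 1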
        pred_alpha[cropped_trimap <= 0] = 0.0
        pred_alpha[cropped_trimap >= 1] = 1.0

        # output predicted images
        pred_trimap = (pred_trimap.type(torch.FloatTensor) / 2).unsqueeze(dim=1)
        pred_trimap = transforms.ToPILImage()(pred_trimap[0, :, :, :])
        pred_trimap.save(out_path + "{:04d}_pred_trimap.png".format(index))
        out_pred_alpha = transforms.ToPILImage()(pred_alpha[0, :, :, :].cpu())
        out_pred_alpha.save(out_path + "{:04d}_pred_alpha.png".format(index))
        
        sad = compute_sad(pred_alpha, alpha)
        mse = compute_mse(pred_alpha, alpha, trimap)
        avg_sad.update(sad.item())
        avg_mse.update(mse.item())
        logger.info("{:04d}/{} | SAD: {:.1f} | MSE: {:.3f} | Avg SAD: {:.1f} | Avg MSE: {:.3f}".format(index, len(test_names), sad.item(), mse.item(), avg_sad.avg, avg_mse.avg))
    
    logger.info("Average SAD: {:.1f} | Average MSE: {:.3f}".format(avg_sad.avg, avg_mse.avg))
Example #3
        lasso_cv.fit(x_train_saved, YTrain_)
        best_alpha = lasso_cv.alpha_
        print(best_alpha)
        model = linear_model.Lasso(fit_intercept=False, alpha=best_alpha)
        flag_linear = 0
    else:
        model = linear_model.LinearRegression(fit_intercept=False)
        flag_linear = 1
    blocks_generated, active_set, num_blocks = generate_samples_dynamic_set(num_blocks, n_features_transf, r, saved_indexes, r1, min_set, max_set, active_set, max_active_set)

    for i in range(0, num_blocks):
        x_train_i, x_val_i = get_current_data(XTrain_, XVal_, blocks_generated[i, :])
        # score each block on a random subsample of the validation set
        rand_vect = r1.choice(n_samples_val, active_set_samples, replace=False)
        x_val_i = x_val_i[rand_vect, :]
        YVal_i = YVal_[rand_vect]
        new_loss, beta = compute_mse(model, x_train_i, YTrain_, x_val_i, YVal_i, score)
        losses = np.append(losses, new_loss)

        if len(betas) == 0:
            betas = beta
        else:
            betas = np.append(betas, beta, axis=1)
    ordered_losses = np.argsort(losses)
    orderd_losses_ = losses[ordered_losses]
    #losses_to_select = r3.choice(np.arange(100,200), 1, replace=False)[0]
    #print("loss scelte", losses_to_select)

    standard_deviation = np.std(orderd_losses_)
    mean_weights = np.mean(orderd_losses_)

    # keep only the losses at least one standard deviation below the mean
    chosen_losses = len(orderd_losses_[orderd_losses_ + standard_deviation <= mean_weights])
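
The selection rule above keeps a block's loss only if it sits at least one standard deviation below the mean loss. A tiny self-contained illustration with invented values:

import numpy as np

losses = np.sort(np.array([0.8, 0.9, 1.0, 1.5, 2.0, 2.3]))  # invented values
std, mean = np.std(losses), np.mean(losses)
# a loss l is kept when l + std <= mean, i.e. at least one std below the mean
chosen = len(losses[losses + std <= mean])
print(chosen)  # -> 1 with these values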
Example #4
r1 = np.random.RandomState(12)
r2 = np.random.RandomState(13)
final_active_set = 500
active_set_samples = int(8. / 9. * n_samples_test)

saved_indexes = np.array([],dtype = "int64")
num_informative = np.array([])
coeffs = np.array([])
saved_indexes_list = []
mses = []
num_informative_list = []
weights_list = []
del_indexes = np.zeros(n_features)
linearRegression = linear_model.LinearRegression(fit_intercept=False, n_jobs=-1)
# baseline MSE using all features
mse, _, _ = compute_mse(linearRegression, XTrain, YTrain, XTest, YTest)
print("start_mse", mse)
weights_indexes = np.zeros(n_features)
index = 1000
deleted_indexes = np.where(del_indexes>index)[0]
iter = 0
r3 = np.random.RandomState(14)

while len(saved_indexes) <= final_active_set - len(deleted_indexes):
    losses = np.array([])
    betas = np.array([])
    coeffs = np.array([])
    corrs = np.array([])

    blocks_generated = generate_samples_dynamic_set(num_blocks, n_features, r, saved_indexes, r1, deleted_indexes)
    for i in range(0, num_blocks):
ordered_final_weights = np.argsort(final_weights)[::-1]
if verbose:
    print("-------------")
    print("ranking of the featues:", ordered_final_weights)
    print("-------------")
ordered_indexes = np.argsort(weights_data)[::-1]
if verbose:
    print("position informarives", np.where(np.in1d(ordered_indexes, informative_indexes)==True)[0])


lasso_cv = linear_model.LassoCV(fit_intercept=False, max_iter=10000, n_jobs=-1)
lasso_cv.fit(XTrain, YTrain)
best_alpha = lasso_cv.alpha_

model = linear_model.Lasso(fit_intercept=False, alpha=best_alpha)
new_loss, beta, _ = compute_mse(model, XTrain, YTrain, XTest, YTest)
beta = np.abs(beta[:, 0])
beta_ord = np.sort(beta)[::-1]
# keep only the coefficients with absolute value >= 1.0
beta_ordered = beta_ord[beta_ord >= 1.0]
len_div_zero = len(beta_ordered)
beta_indexes = np.argsort(np.abs(beta))[::-1][:len_div_zero]

real_indexes = []

# for b in beta_indexes:
#     values_ = dict_.values()
#     count=0
#     for dict_value in values_:
#         if b in dict_value:
#             real_indexes.append(count)
#         count+=1
            best_alpha = lasso_cv.alpha_
            print(best_alpha)
        else:
            countIter += 1
        cv_flag = False
        model = linear_model.Lasso(fit_intercept=False, alpha=best_alpha)
    else:
        model = linear_model.LinearRegression(fit_intercept=False)
    blocks_generated = generate_samples_dynamic_set(num_blocks, n_features, r, saved_indexes, r1, deleted_indexes)

    for i in range(0, num_blocks):
        x_train_i, x_test_i = get_current_data(XTrain, XTest, blocks_generated[i, :])
        # evaluate each block on a random subsample of the test set
        rand_vect = r1.choice(n_samples_test, active_set_samples, replace=False)
        x_test_i = x_test_i[rand_vect, :]
        YTest_i = YTest[rand_vect]
        new_loss, beta, _ = compute_mse(model, x_train_i, YTrain, x_test_i, YTest_i)
        losses = np.append(losses, new_loss)

        if len(betas) == 0:
            betas = beta
        else:
            betas = np.append(betas, beta, axis=1)
    ordered_losses = np.argsort(losses)
    orderd_losses_ = np.sort(losses)
    #losses_to_select = r3.choice(np.arange(100,200), 1, replace=False)[0]
    #print("loss scelte", losses_to_select)
    first_loss = orderd_losses_[0]
    # keep every loss within a factor of 7/6 of the best one, with a floor of 10
    chosen_losses = len(orderd_losses_[orderd_losses_ <= first_loss * 7. / 6])
    if chosen_losses < 10:
        chosen_losses = 10
    print("chosen losses", chosen_losses)