Example #1
import numpy as np
from sklearn.metrics import accuracy_score


def cross_validation(X, Y, boost):
    """Estimate the accuracy of `boost` with 5-fold cross-validation."""
    y_classifier = []
    y_actual = []

    # Shuffle the sample indices and split them into 5 folds.
    ids = np.arange(len(Y))
    np.random.shuffle(ids)
    ids_batches = np.array_split(ids, 5)

    for test_num in range(len(ids_batches)):
        # Train on all folds except the held-out one.
        X_train, Y_train = data.train_dataset(X, Y, ids_batches, test_num)
        boost.fit(X_train, Y_train)

        # Classify every sample of the held-out fold.
        X_test, Y_test = X[ids_batches[test_num]], Y[ids_batches[test_num]]
        for i in range(len(ids_batches[test_num])):
            y_prediction = boost.classify(X_test[i])
            y_classifier.append(y_prediction)
            y_actual.append(Y_test[i])

    return accuracy_score(y_actual, y_classifier)
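
A minimal usage sketch, assuming `data.read_data` loads a feature matrix and label vector and that the booster exposes `fit()` / `classify()` (every name below is a hypothetical stand-in, not from the example):

X, Y = data.read_data('dataset.csv')   # hypothetical loader
booster = AdaBoost()                   # hypothetical estimator with fit()/classify()
print(cross_validation(X, Y, booster))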
Example #2
import torch
import torchvision.utils as vutils
from torch.autograd import Variable


def main():
    args = get_args()
    Atest, Btest = data.train_dataset(args.dir, args.batch_size,
                                      args.image_size, 1)
    # Grab one test batch from each domain (Python 3: next(), not .next()).
    B_test_iter = iter(Btest)
    A_test_iter = iter(Atest)
    B_test = Variable(next(B_test_iter)[0])
    A_test = Variable(next(A_test_iter)[0])

    G_12 = model.Generator(64)
    G_21 = model.Generator(64)

    checkpoint = torch.load(args.state_dict)
    G_12.load_state_dict(checkpoint['G_12_state_dict'])
    G_21.load_state_dict(checkpoint['G_21_state_dict'])

    if torch.cuda.is_available():
        B_test = B_test.cuda()
        A_test = A_test.cuda()
        G_12 = G_12.cuda()
        G_21 = G_21.cuda()

    G_12.eval()
    G_21.eval()

    # Translate each domain with the opposite generator and save a grid.
    generate_A_image = G_21(B_test.float())
    grid = vutils.make_grid(generate_A_image, nrow=8, normalize=True)
    vutils.save_image(grid, "generate_A_image.png")

    generate_B_image = G_12(A_test.float())
    grid = vutils.make_grid(generate_B_image, nrow=8, normalize=True)
    vutils.save_image(grid, "generate_B_image.png")

    # Peak signal-to-noise ratio between the generated and real images.
    loss = PSNR.PSNR()

    estimate_loss_generate_A = loss(generate_A_image, A_test)
    estimate_loss_generate_B = loss(generate_B_image, B_test)

    print(estimate_loss_generate_A)
    print(estimate_loss_generate_B)
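
`PSNR` here is a project-local module that is not shown; a minimal sketch of what such a metric conventionally computes (this assumes images scaled to [0, 1], and is not the repository's verified code):

import torch

class PSNR:
    """Peak signal-to-noise ratio, assuming inputs in [0, 1]."""
    def __call__(self, prediction, target):
        mse = torch.mean((prediction - target) ** 2)
        return 10 * torch.log10(1.0 / mse)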
Example #3
import torch
import torchvision.utils as vutils
from torch.autograd import Variable


def main():
    args = get_args()
    Atest, Btest = data.train_dataset(args.dir, args.batch_size,
                                      args.image_size, 1000)
    B_test_iter = iter(Btest)
    A_test_iter = iter(Atest)
    
    G_21 = model.Generator(args.batch_size)
    #G_21 = residual_model.Generator(1,1)

    checkpoint = torch.load(args.state_dict)
    G_21.load_state_dict(checkpoint['G_21_state_dict'])

    # Move the generator to the GPU once and switch to inference mode.
    if torch.cuda.is_available():
        G_21 = G_21.cuda()
    G_21.eval()

    loss = PSNR.PSNR()
    estimate_loss_generate = 0

    # Average the PSNR over 1000 test batches.
    for i in range(1000):
        B_test = Variable(next(B_test_iter)[0])
        A_test = Variable(next(A_test_iter)[0])
        grid = vutils.make_grid(B_test, nrow=8)
        vutils.save_image(grid, "B_image.png")

        if torch.cuda.is_available():
            B_test = B_test.cuda()
            A_test = A_test.cuda()

        generate_A_image = G_21(B_test.float())
        grid = vutils.make_grid(generate_A_image, nrow=8)
        vutils.save_image(grid, "generate_A_image.png")

        estimate_loss_generate = estimate_loss_generate + loss(generate_A_image, A_test)

    estimate_loss_generate = estimate_loss_generate / 1000

    print(estimate_loss_generate)
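
Since the loop above only runs inference, wrapping the forward passes in `torch.no_grad()` would skip autograd bookkeeping and reduce memory; a minimal pattern (not in the original snippet):

with torch.no_grad():
    generate_A_image = G_21(B_test.float())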
Example #4
import itertools

import torch
import wandb


def main(args):
    #writer = SummaryWriter(log_dir='/content/cycleGAN_seismic_noise/runs/'+ datetime.now().strftime('%b%d_%H-%M-%S'))
    writer = ""  # TensorBoard writer disabled; wandb is used for logging instead
    wandb.login()
    wandb.init(project="cycleGAN_seismic_noise")
    wandb.watch_called = False

    #=== run with model package ===#
    G_12 = model.Generator(args.batch_size)
    G_21 = model.Generator(args.batch_size)
    D_1 = model.Discriminator(args.batch_size)
    D_2 = model.Discriminator(args.batch_size)

    G_12.weight_init(mean=0.0, std=0.02)
    G_21.weight_init(mean=0.0, std=0.02)
    D_1.weight_init(mean=0.0, std=0.02)
    D_2.weight_init(mean=0.0, std=0.02)

    if torch.cuda.is_available():
        G_12 = G_12.cuda()
        G_21 = G_21.cuda()
        D_1 = D_1.cuda()
        D_2 = D_2.cuda()

    #=== optimizer & learning rate schedulers
    optimizer_G = torch.optim.Adam(itertools.chain(G_12.parameters(),
                                                   G_21.parameters()),
                                   lr=args.lr,
                                   betas=(args.beta1, 0.999))
    optimizer_D_A = torch.optim.Adam(D_1.parameters(),
                                     lr=args.lr,
                                     betas=(args.beta1, 0.999))
    optimizer_D_B = torch.optim.Adam(D_2.parameters(),
                                     lr=args.lr,
                                     betas=(args.beta1, 0.999))

    lr_scheduler_G = torch.optim.lr_scheduler.LambdaLR(
        optimizer_G,
        lr_lambda=residual_model.LambdaLR(args.num_epochs, 0,
                                          round(args.num_epochs / 2)).step)
    lr_scheduler_D_A = torch.optim.lr_scheduler.LambdaLR(
        optimizer_D_A,
        lr_lambda=residual_model.LambdaLR(args.num_epochs, 0,
                                          round(args.num_epochs / 2)).step)
    lr_scheduler_D_B = torch.optim.lr_scheduler.LambdaLR(
        optimizer_D_B,
        lr_lambda=residual_model.LambdaLR(args.num_epochs, 0,
                                          round(args.num_epochs / 2)).step)

    # load seismic dataset
    A_data, B_data = data.train_dataset(args.dir, args.batch_size,
                                        args.image_size, args.num_iter_train)

    model_state = args.state_dict

    if model_state != "":
        checkpoint = torch.load(model_state)
        G_12.load_state_dict(checkpoint['G_12_state_dict'])
        G_21.load_state_dict(checkpoint['G_21_state_dict'])
        D_1.load_state_dict(checkpoint['D_1_state_dict'])
        D_2.load_state_dict(checkpoint['D_2_state_dict'])
        optimizer_G.load_state_dict(checkpoint['optimizer_G'])
        optimizer_D_A.load_state_dict(checkpoint['optimizer_D_A'])
        optimizer_D_B.load_state_dict(checkpoint['optimizer_D_B'])
        lr_scheduler_G.load_state_dict(checkpoint['lr_scheduler_G'])
        lr_scheduler_D_A.load_state_dict(checkpoint['lr_scheduler_D_A'])
        lr_scheduler_D_B.load_state_dict(checkpoint['lr_scheduler_D_B'])
        cur_epoch = checkpoint['epoch']
    else:
        cur_epoch = 0

    train(G_12, G_21, D_1, D_2, optimizer_G, optimizer_D_A, optimizer_D_B,
          lr_scheduler_G, lr_scheduler_D_A, lr_scheduler_D_B, args.batch_size,
          cur_epoch, args.num_epochs, A_data, B_data, writer,
          args.num_iter_train)
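
`residual_model.LambdaLR` is project-local and not shown. The scheduler construction above matches the linear-decay helper popularized by CycleGAN reference implementations; a sketch of that convention (an assumption, not the repository's verified code):

class LambdaLR:
    """Multiplicative LR factor: constant, then linear decay to zero."""
    def __init__(self, num_epochs, offset, decay_start_epoch):
        self.num_epochs = num_epochs
        self.offset = offset
        self.decay_start_epoch = decay_start_epoch

    def step(self, epoch):
        # Factor consumed by torch.optim.lr_scheduler.LambdaLR.
        return 1.0 - max(0, epoch + self.offset - self.decay_start_epoch) / (
            self.num_epochs - self.decay_start_epoch)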
Example #5
    # This snippet begins mid-function: `x`, `xx1`, `cmap`, and `colors`
    # come from earlier lines that are not shown.
    classes = classifier.classify(x)
    classes = classes.reshape(xx1.shape)

    plt.contourf(x, classes, alpha=0.4, cmap=cmap)

    # Scatter the samples, colored by class label (-1 vs. +1).
    for elem_x, elem_y in zip(x, y):
        if elem_y == -1:
            plt.scatter(elem_x[0], elem_x[1], s=40, color=colors[0])
        else:
            plt.scatter(elem_x[0], elem_x[1], s=40, color=colors[1])

    plt.title(title)
    plt.show()


# NOTE: `kermels` below is the spelling of the project's own kernel module.
for path in ['chips', 'geyser']:
    X, Y = data.read_data(TEST_PATH + path + '.csv')
    ids_batches = data.split_indices_data(len(Y))
    X_train, Y_train = data.train_dataset(X, Y, ids_batches, 0)

    classifier = core.SVM(X_train, Y_train,
                          kermels.linear_kernel(1.0)).evaluate(C)
    plot_decision(X, Y, classifier, path + ", linear kernel")

    classifier = core.SVM(X_train, Y_train,
                          kermels.polynomial_kernel(2)).evaluate(C)
    plot_decision(X, Y, classifier, path + ", polynomial kernel")

    classifier = core.SVM(X_train, Y_train,
                          kermels.gaussian_kernel(2)).evaluate(C)
    plot_decision(X, Y, classifier, path + ", gaussian kernel")
Example #6
import tensorflow as tf

# NOTE: this example uses the TensorFlow 1.x graph API
# (make_one_shot_iterator, tf.variable_scope, tf.AUTO_REUSE).
CALC_ACC_EVERY = 500  # evaluate validation accuracy every 500 steps

###########################################################
###  Get Food-101 dataset as train/validation lists
###  of img file paths and labels
###########################################################

train_data, val_data, classes = data.food101(DATASET_ROOT)
num_classes = len(classes)

###########################################################
###  Build training and validation data pipelines
###########################################################

train_ds, train_iters = data.train_dataset(train_data, TRAIN_BATCH_SIZE,
                                           TRAIN_EPOCHS, INPUT_SIZE,
                                           RANDOM_CROP_MARGIN)
train_ds_iterator = train_ds.make_one_shot_iterator()
train_x, train_y = train_ds_iterator.get_next()

val_ds, val_iters = data.val_dataset(val_data, VAL_BATCH_SIZE, INPUT_SIZE)
val_ds_iterator = val_ds.make_initializable_iterator()
val_x, val_y = val_ds_iterator.get_next()

###########################################################
###  Construct training and validation graphs
###########################################################

with tf.variable_scope('', reuse=tf.AUTO_REUSE):
    train_logits = model.vgg_19(train_x, num_classes, is_training=True)
    val_logits = model.vgg_19(val_x, num_classes, is_training=False)
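
The snippet ends before a session is created. A minimal sketch of how the two pipelines would typically be driven under the TensorFlow 1.x API (the training op and accuracy computation are placeholders, not from the source):

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for step in range(train_iters):
        # ... sess.run a training op built from train_logits / train_y ...
        if step % CALC_ACC_EVERY == 0:
            sess.run(val_ds_iterator.initializer)  # restart the validation pipeline
            # ... sess.run accuracy over val_logits / val_y for val_iters batches ...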