Example #1
import configparser
import os


def train(configPath, name):
    # gnumpy falls back to the CPU only when GNUMPY_USE_GPU is explicitly "no"
    useGpu = os.environ.get('GNUMPY_USE_GPU', 'auto')
    if useGpu == "no":
        mode = "cpu"
    else:
        mode = "gpu"

    print('========================================================')
    print('train %s' % name)
    print('the program is on %s' % mode)
    print('========================================================')

    config = configparser.ConfigParser(interpolation=configparser.ExtendedInterpolation())
    config.read(configPath)
    model_name = config.get(name, 'model')
    if model_name == "ae":
        from ae import AE
        model = AE(config, name)
    elif model_name == "lae":
        from lae import LAE
        model = LAE(config, name)
    elif model_name == "pae":
        from pae import PAE
        model = PAE(config, name)
    elif model_name== "sae":
        from sae import SAE
        model=SAE(config, name)
    elif model_name== "msae":
        from msae import MSAE
        model=MSAE(config, name)

    model.train()
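The `train()` function above reads an INI-style file with `configparser` and picks the autoencoder variant from the `model` key of the section named by `name`. Below is a minimal sketch of building such a config; the section and file names (`mnist_sae`, `train.cfg`) are illustrative assumptions, not from the original.

import configparser

# Hypothetical config matching what train() reads: one section per experiment,
# with a 'model' key choosing among "ae", "lae", "pae", "sae" and "msae".
config = configparser.ConfigParser(interpolation=configparser.ExtendedInterpolation())
config['mnist_sae'] = {
    'model': 'sae',  # selects `from sae import SAE` inside train()
}
with open('train.cfg', 'w') as f:
    config.write(f)

# train('train.cfg', 'mnist_sae') would then construct SAE(config, 'mnist_sae')
# and call its train() method.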
Example #2
def main(_):
    #print(FLAGS.__flags)
    file_name = 'm[' + FLAGS.model + ']_lr[' + str(FLAGS.learning_rate) + ']_b[' + str(FLAGS.batch_size) + \
                ']_ae' + FLAGS.ae_h_dim_list + '_z[' + str(FLAGS.z_dim) + ']_dis' + FLAGS.dis_h_dim_list
    logger.info(file_name)

    with tf.device('/gpu:%d' % FLAGS.gpu_id):
        ### ===== Build model ===== ###
        if FLAGS.model == "AE":
            logger.info("Build AE model")
            model = AE(logger, FLAGS.learning_rate, FLAGS.input_dim, FLAGS.z_dim, eval(FLAGS.ae_h_dim_list))

        elif FLAGS.model == "VAE":
            logger.info("Build VAE model")

        elif FLAGS.model == "VAE_GAN":
            logger.info("Build VAE_GAN model")


        ### ===== Train/Test ===== ###

        if FLAGS.is_train:
            #logger.info("Start training")
            train_data = load_data(os.path.join(FLAGS.data_dir, 'train_data.npy'))
            val_data = load_data(os.path.join(FLAGS.data_dir, 'val_data.npy'))
            #print(train_data.shape)
            model.train(train_data, FLAGS.batch_size)
        else:
            logger.info("Start testing")
            test_data = load_data(os.path.join(FLAGS.data_dir, 'test_data.npy'))
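The snippet above relies on a `FLAGS` object that is not shown. Here is a minimal sketch of how those flags could be declared with TensorFlow 1.x's `tf.app.flags`; every default value is an assumption, and `ae_h_dim_list` must hold a Python list literal as a string because the code passes it through `eval()`.

import tensorflow as tf

flags = tf.app.flags
flags.DEFINE_string('model', 'AE', 'one of AE, VAE, VAE_GAN')
flags.DEFINE_float('learning_rate', 1e-3, 'optimizer learning rate')
flags.DEFINE_integer('batch_size', 128, 'mini-batch size')
flags.DEFINE_integer('input_dim', 784, 'flattened input dimensionality')
flags.DEFINE_integer('z_dim', 20, 'latent code size')
flags.DEFINE_string('ae_h_dim_list', '[512, 256]', 'encoder hidden sizes, eval()-ed to a list')
flags.DEFINE_string('dis_h_dim_list', '[256]', 'discriminator hidden sizes (used in the log file name)')
flags.DEFINE_integer('gpu_id', 0, 'GPU the graph is placed on')
flags.DEFINE_boolean('is_train', True, 'train if True, otherwise test')
flags.DEFINE_string('data_dir', './data', 'directory holding train/val/test .npy files')
FLAGS = flags.FLAGS

if __name__ == '__main__':
    tf.app.run()  # parses the flags, then dispatches to main(_)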
Example #3
train_dataset = datasets.MNIST('../data', train=True, transform=transforms.ToTensor())
test_dataset = datasets.MNIST('../data', train=False, transform=transforms.ToTensor())
train_loader = DataLoader(train_dataset, batch_size=batchsz, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=batchsz, shuffle=True)

net = AE()
criterion = nn.MSELoss()
optimizer = optim.Adam(net.parameters(), lr=lr)
scheduler = optim.lr_scheduler.StepLR(optimizer, 5)
net.to(device)
criterion.to(device)
train_loss = []
viz = Visdom()
for epoch in range(epochs):
    train_loss.clear()
    net.train()
    for step, (x, _) in enumerate(train_loader):
        x = x.to(device)
        x_hat = net(x)

        loss = criterion(x_hat, x)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        train_loss.append(loss.item())

        if step % 50 == 0:
            print('epoch:{} batch:{} loss:{:.6f}'.format(epoch, step, loss.item()))

    scheduler.step()
    net.eval()
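The `AE` class instantiated above is not shown in this example. Below is a minimal sketch of a fully connected autoencoder that would fit this loop: it must accept and return tensors shaped like the [B, 1, 28, 28] MNIST batches, since the loss is MSE against the input. The hidden and latent sizes are assumptions.

import torch
from torch import nn

class AE(nn.Module):
    """Minimal fully connected autoencoder sketch for 28x28 MNIST images."""

    def __init__(self, h_dim=128, z_dim=20):
        super().__init__()
        self.encoder = nn.Sequential(
            nn.Flatten(),                       # [B, 1, 28, 28] -> [B, 784]
            nn.Linear(28 * 28, h_dim), nn.ReLU(),
            nn.Linear(h_dim, z_dim),
        )
        self.decoder = nn.Sequential(
            nn.Linear(z_dim, h_dim), nn.ReLU(),
            nn.Linear(h_dim, 28 * 28), nn.Sigmoid(),  # pixels in [0, 1], matching ToTensor()
        )

    def forward(self, x):
        z = self.encoder(x)
        x_hat = self.decoder(z).view(x.size(0), 1, 28, 28)  # back to image shape
        return x_hat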
Example #4
    # Train loop
    for epoch in range(train_stage["num_epoch"]):
        print("Epoch {} of {}:".format(epoch + 1, train_stage["num_epoch"]))
        send_slack_notif("Epoch {} of {} Started!".format(epoch + 1, train_stage["num_epoch"]))

        for i, param_group in enumerate(optimizer.param_groups):
            print("Current LR: {} of {}th group".format(param_group['lr'], i))

        train_mse = 0.0
        train_msssim = 0.0
        train_hash = 0.0
        train_loss = 0.0

        with tqdm(total=len(train_loader), desc="Batches") as pbar:
            for i, data in enumerate(train_loader):
                model.train()

                img, label = data
                label = label.to(device)
                label = make_one_hot(label)
                img = img.to(device)

                _, output, hashed_layer = model(img)

                if (i % 100 == 0 and epoch == 0) or (i % 500 == 0 and epoch > 0):

                    # PATH ASSUMES ONLY 1 STAGE
                    save_image(torch.cat((img, output)), "../results/ae_hash/images/train_check/{}_{}.jpg".format(epoch, i), nrow=batch_size)

                loss, mse, msssim = criterion1(output, img)
                loss_hash = criterion2(hashed_layer, label)
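`make_one_hot` is called in the loop above but not defined in this excerpt. A minimal sketch of such a helper follows, assuming integer class labels and ten classes (both assumptions, not stated in the original).

import torch.nn.functional as F

def make_one_hot(labels, num_classes=10):
    # Hypothetical helper: [B] integer labels -> [B, num_classes] one-hot floats,
    # kept on the same device as `labels`.
    return F.one_hot(labels.long(), num_classes=num_classes).float()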