Example #1
def main():
    # Load the track data and split it into train/validation/test partitions
    train, validation, test = fromCsv(
        TRACKS_DATA_FILE,
        2000,
        2000,
        skipHeader=0,
    )
    (auto, encoder, decoder) = autoencoder(
        train,
        validation,
        16,
        activation='selu',
        batchSize=4,
        epochs=100,
        hiddenDimension=22,
        learningRate=0.0005,
    )

    # Export the trained models in TensorFlow.js format
    tfjs.converters.save_keras_model(
        auto,
        MODEL_SAVE_PATH + 'auto',
    )
    tfjs.converters.save_keras_model(
        encoder,
        MODEL_SAVE_PATH + 'encoder',
    )
    tfjs.converters.save_keras_model(
        decoder,
        MODEL_SAVE_PATH + 'decoder',
    )
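
The autoencoder() helper called here is project-specific and not shown on this page. Below is a minimal sketch of what such a factory might look like, assuming a plain dense Keras autoencoder whose third positional argument is the bottleneck size; the layer stack, loss, and optimizer are illustrative assumptions, not the original implementation.

# Hypothetical sketch of the autoencoder() factory used above; the real
# project code may differ.
from tensorflow import keras

def autoencoder(train, validation, encodingDimension,
                activation='relu', batchSize=32, epochs=10,
                hiddenDimension=None, learningRate=0.001):
    inputDimension = train.shape[1]
    hiddenDimension = hiddenDimension or inputDimension

    inputs = keras.Input(shape=(inputDimension,))
    hidden = keras.layers.Dense(hiddenDimension, activation=activation)(inputs)
    encoded = keras.layers.Dense(encodingDimension, activation=activation)(hidden)
    decodedHidden = keras.layers.Dense(hiddenDimension, activation=activation)(encoded)
    outputs = keras.layers.Dense(inputDimension, activation='sigmoid')(decodedHidden)

    auto = keras.Model(inputs, outputs)
    encoder = keras.Model(inputs, encoded)

    # The decoder reuses the trained decoding layers on a fresh latent input.
    latent = keras.Input(shape=(encodingDimension,))
    x = latent
    for layer in auto.layers[-2:]:
        x = layer(x)
    decoder = keras.Model(latent, x)

    auto.compile(optimizer=keras.optimizers.Adam(learning_rate=learningRate),
                 loss='mse')
    auto.fit(train, train,
             validation_data=(validation, validation),
             batch_size=batchSize, epochs=epochs)
    return auto, encoder, decoder
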
Example #2
def main():
    train, validation, test = fromCsv(
        TRACKS_DATA_FILE,
        40000,
        40000,
    )
    auto, encoder, decoder = autoencoder(
        train,
        validation,
        13,
        batchSize=64,
        epochs=10,
        learningRate=0.0005,
    )

    tfjs.converters.save_keras_model(
        auto,
        MODEL_SAVE_PATH + 'auto',
    )
    tfjs.converters.save_keras_model(
        encoder,
        MODEL_SAVE_PATH + 'encoder',
    )
    tfjs.converters.save_keras_model(
        decoder,
        MODEL_SAVE_PATH + 'decoder',
    )
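
fromCsv() is likewise not shown. One plausible reading, assuming the two numeric arguments are the sizes of the validation and test partitions and the file is a plain numeric CSV (the default skipHeader value here is a guess):

# Hypothetical sketch of fromCsv(); argument semantics are assumptions.
import numpy as np

def fromCsv(path, validationSize, testSize, skipHeader=1):
    data = np.genfromtxt(path, delimiter=',', skip_header=skipHeader)
    np.random.shuffle(data)
    validation = data[:validationSize]
    test = data[validationSize:validationSize + testSize]
    train = data[validationSize + testSize:]
    return train, validation, test
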
Example #3
def main():
    # Create network
    model = autoencoder(INPUT_SIZE[0], INPUT_SIZE[1], batch_size=8)
    # Specify the epoch of the model snapshot to load
    load_weights_test(model.encoder['output_encoder'],
                      path='weights_bigan_all_test15/biGAN_G_',
                      epochtoload=12000)

    test(path_to_images='/media/yuandy/COCO_dataset/train_images/images',
         path_to_saliency='/media/yuandy/COCO_dataset/temp_test_max1.5',
         path_output_imgs='../test',
         model_to_test=model)
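
load_weights_test() is not shown either. Since the model exposes its encoder as a dictionary of layers, it looks like a Lasagne-style network; a plausible implementation, assuming snapshots are stored as .npz files named "<path><epoch>.npz", is:

# Hypothetical sketch of load_weights_test(); file naming is an assumption.
import numpy as np
import lasagne

def load_weights_test(output_layer, path, epochtoload):
    with np.load('{}{}.npz'.format(path, epochtoload)) as f:
        param_values = [f['arr_%d' % i] for i in range(len(f.files))]
    lasagne.layers.set_all_param_values(output_layer, param_values)
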
Example #4
def train():
    """
    Train both generator and discriminator
    :return:
    """
    # Load data
    print('Loading training data...')
    with open(
            '/home/yuandy/COCO_dataset/processed_data/128x128/trainData_resize_pool2gs.pickle',
            'rb') as f:
        # with open(TRAIN_DATA_DIR, 'rb') as f:
        train_data = pickle.load(f)
    print('-->done!')

    print('Loading real data pair...')
    with open(
            '/home/yuandy/COCO_dataset/processed_data/128x128/realData_resize_pool2gs.pickle',
            'rb') as f:
        # with open(TRAIN_DATA_DIR, 'rb') as f:
        real_data = pickle.load(f)
    print('-->done!')

    # Create network
    if flag == 'auto':
        model = autoencoder(INPUT_SIZE[0], INPUT_SIZE[1])
        load_weights(model.decoder, path='scripts/gen_', epochtoload=90)
        #load_weights_test(model.encoder['output_encoder'], path='weights_content_new/auto_', epochtoload=21)
        load_weights_test(model.encoder['output_encoder'],
                          path='weights_auto_new_-9/auto_',
                          epochtoload=24)
        autoencoder_batch_iterator_separate(model, train_data)
    elif flag == 'bigan':
        print('ok')
        model = biGAN(INPUT_SIZE)
        load_weights(model.autoencoder.decoder,
                     path='scripts/gen_',
                     epochtoload=90)
        #load_weights_test(model.D, path='weights_bigan_noise_test10/biGAN_D_', epochtoload=29600)
        load_weights_test(model.autoencoder.encoder['output_encoder'],
                          path='weights_auto_new_-9/auto_',
                          epochtoload=24)
        #load_weights_test(model.autoencoder.encoder['output_encoder'], path='weights_content_new/auto_', epochtoload=24)
        #load_weights_test(model.autoencoder.encoder['output_encoder'], path='weights_bigan_noise_test10/biGAN_G_', epochtoload=29600)
        biGAN_batch_iterator(model, train_data, real_data)
    else:
        print('missing or unknown flag: expected "auto" or "bigan"')
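
The `flag` variable is read from the enclosing module and is not defined in this snippet. A small driver like the following could supply it from the command line (the argument handling is an assumption, not the project's actual entry point):

# Hypothetical command-line driver for train(); assumes 'flag' is a
# module-level variable chosen before train() runs.
import sys

if __name__ == '__main__':
    flag = sys.argv[1] if len(sys.argv) > 1 else ''
    train()
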
Example #5
from models.autoencoder import autoencoder
from tools.create_data import create_stack
from models.custom_loss import Bayes_BCE_Loss_With_Logits
import torch

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(device)
model = autoencoder()
model = model.to(device)
img, seed, conf, gt = create_stack(10531)
img = torch.tensor(img, dtype=torch.float, requires_grad=True).unsqueeze(0)
img = img.to(device)
seed = torch.tensor(seed, dtype=torch.float, requires_grad=True).unsqueeze(0)
seed = seed.to(device)
conf = torch.tensor(conf, dtype=torch.float, requires_grad=True).unsqueeze(0)
conf = conf.to(device)
gt = torch.tensor(gt, dtype=torch.float).unsqueeze(0).unsqueeze(1)
gt = gt.to(device)

#loss_fn = torch.nn.BCEWithLogitsLoss()
loss_fn = Bayes_BCE_Loss_With_Logits.apply

logits, sigma = model(img, seed, conf)
print("Model ran")
loss = loss_fn(logits, gt, sigma)
print("Loss forward ran")
#print(loss)
loss.backward()
print("Loss backward ran")
Example #6
        nn.init.normal_(m.weight.data, 1.0, 0.02)
        nn.init.constant_(m.bias.data, 0)


def to_img(x):
    x = 0.5 * (x + 1)
    x = x.clamp(0, 1)
    x = x.view(x.size(0), 1, 28, 28)
    return x


checkpoint = torch.load(args.checkpoint, map_location="cuda:0")
netE = AE.encoder_(args).cuda()
netDA = AE.decoder_(args).cuda()
netDB = AE.decoder_(args).cuda()
modelA = AE.autoencoder(args, netE, netDA)
modelB = AE.autoencoder(args, netE, netDB)

modelA.load_state_dict(checkpoint['modelA_state_dict'])
modelA.to(device)

modelB.load_state_dict(checkpoint['modelB_state_dict'])
modelB.to(device)

real_img = real_batch[0].to(device)[:32]
#real_img = torch.unsqueeze(real_img, 0)
out_imgA = modelA(real_img)
out_imgB = modelB(real_img)
print("saving.....")
save_image(out_imgA, './fake_imgB_from_modelA.png')
save_image(out_imgB, './fake_imgB_from_modelB.png')
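
The variables `device`, `real_batch`, and `save_image` come from earlier in the script and are not shown, as do the `AE` module and `args`. Given the 1x28x28 reshape in to_img, a plausible setup uses an MNIST DataLoader; the dataset, transform, and batch size here are assumptions.

# Hypothetical setup for the names used above (device, dataloader, real_batch).
import torch
from torch.utils.data import DataLoader
from torchvision import datasets, transforms
from torchvision.utils import save_image

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
dataset = datasets.MNIST('./data', train=False, download=True,
                         transform=transforms.Compose([
                             transforms.ToTensor(),
                             transforms.Normalize((0.5,), (0.5,)),
                         ]))
dataloader = DataLoader(dataset, batch_size=64, shuffle=True)
real_batch = next(iter(dataloader))
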
Example #7
    # feature-numbers, day,
    model_name = FLAGS.model_name
    datestr = datetime.datetime.now().__str__()
    FLAGS.run_name = (model_name
                      + "_".join(str(x) for x in FLAGS.indices)
                      + '_' + strftime("%Y_%m_%d_%H_%M_%S", gmtime()))
    model = None
    if model_name == "rnn_inherited":
        model = rnn_inherited(FLAGS)
    elif model_name == 'rnn_bigger_inherited':
        model = rnn_bigger_inherited(FLAGS)
    elif model_name == 'rnn_autoencoder_inherited':
        model = rnn_autoencoder_inherited(FLAGS)
    elif model_name == "multiplicative_LSTM_rnn_inherited":
        model = multiplicative_LSTM_rnn_inherited(FLAGS)
    elif model_name == "multiplicative_LSTM_rnn_bigger_inherited":
        model = multiplicative_LSTM_rnn_bigger_inherited(FLAGS)
    elif model_name == "multiplicative_LSTM_rnn_state_classifier_inherited":
        model = multiplicative_LSTM_rnn_state_classifier_inherited(FLAGS)
    elif model_name == 'convolutional_inherited':
        model = convolutional_inherited(FLAGS)
    elif model_name == 'convolutional_multiplicative_inherited':
        model = convolutional_multiplicative_inherited(FLAGS)
    elif model_name == 'convolutional_multiplicative_bigger_inherited':
        model = convolutional_multiplicative_bigger_inherited(FLAGS)
    elif model_name == 'autoencoder':
        model = autoencoder(FLAGS)

    model.createModel()
    model.train()
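
FLAGS is defined elsewhere in the project. One way to provide an object with the attributes the snippet uses (model_name, indices, and a later-assigned run_name) is an argparse namespace; only the attribute names are taken from the snippet, everything else below is an assumption.

# Hypothetical FLAGS definition; defaults and flag spellings are assumptions.
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--model_name', default='autoencoder')
parser.add_argument('--indices', type=int, nargs='+', default=[0])
FLAGS = parser.parse_args()
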
Example #8
File: train.py Project: Mombin/Mom_Net
        nn.init.normal_(m.weight.data, 1.0, 0.02)
        nn.init.constant_(m.bias.data, 0)


def to_img(x):
    x = 0.5 * (x + 1)
    x = x.clamp(0, 1)
    x = x.view(x.size(0), 1, 28, 28)
    return x


netE = AE.encoder_(args).cuda()
netDA = AE.decoder_(args).cuda()
netDB = AE.decoder_(args).cuda()

modelA = AE.autoencoder(args, netE, netDA).cuda()
modelB = AE.autoencoder(args, netE, netDB).cuda()

criterion = nn.MSELoss()

optimizerA = torch.optim.Adam(modelA.parameters(),
                              lr=args.lr,
                              weight_decay=1e-5)
optimizerB = torch.optim.Adam(modelB.parameters(),
                              lr=args.lr,
                              weight_decay=1e-5)

for epoch in range(args.num_epochs):
    start_time = time.time()
    for i, (data, _) in enumerate(dataloaderA):
        img = data
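        # --- The original snippet is truncated at this point. ---
        # A typical continuation for this kind of MSE autoencoder loop is
        # sketched below; it is a generic guess, not the project's actual code.
        img = img.cuda()
        reconA = modelA(img)
        lossA = criterion(reconA, img)
        optimizerA.zero_grad()
        lossA.backward()
        optimizerA.step()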