Example #1
def Train(Odir, TAdir, Mdir, ep, sv):
    # load both face sets and normalise pixel values to [0, 1]
    imgsA = loadImgs(Odir) / 255.0
    imgsB = loadImgs(TAdir) / 255.0
    # shift set A's per-channel mean to match set B (simple colour matching)
    imgsA += imgsB.mean(axis=(0, 1, 2)) - imgsA.mean(axis=(0, 1, 2))
    try:
        encoder.load_weights(Mdir + "/encoder.h5")
        decoder_A.load_weights(Mdir + "/decoder_A.h5")
        decoder_B.load_weights(Mdir + "/decoder_B.h5")
        print("loaded existing model")
    except Exception:
        print("No existing model")

    for epoch in range(int(ep)):
        # get next training batch
        batch_size = 64
        warped_A, target_A = get_training_data(imgsA, batch_size)
        warped_B, target_B = get_training_data(imgsB, batch_size)

        # train and calculate loss
        loss_A = autoencoder_A.train_on_batch(warped_A, target_A)
        loss_B = autoencoder_B.train_on_batch(warped_B, target_B)

        if epoch % int(sv) == 0:
            print("Training loss " + str(epoch) + " :")
            print(loss_A, loss_B)

            # save the model weights every sv steps
            save_model_weights(Mdir)
            test_A = target_A[0:14]
            test_B = target_B[0:14]
            # create image and write to disk

    # save our model after training has finished
    save_model_weights(Mdir)
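
A save_model_weights helper is called above but never shown. A minimal sketch of what it could look like for Example #1, assuming the same encoder, decoder_A and decoder_B Keras models that the try block loads (the helper body is an assumption, not the original code):

def save_model_weights(Mdir):
    # mirrors the load_weights calls above; overwrites the three weight files
    encoder.save_weights(Mdir + "/encoder.h5")
    decoder_A.save_weights(Mdir + "/decoder_A.h5")
    decoder_B.save_weights(Mdir + "/decoder_B.h5")
    print("saved model weights")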
Example #2
while True:
    pbar = tqdm(range(1000000))
    for epoch in pbar:
        # sample a warped/target/mask batch for each face set
        warped_A, target_A, mask_A = get_training_data(images_A, landmarks_A, landmarks_B, batch_size)
        warped_B, target_B, mask_B = get_training_data(images_B, landmarks_B, landmarks_A, batch_size)

        # all-ones 64x64 mask (defined here but not used in this excerpt)
        omask = numpy.ones((target_A.shape[0], 64, 64, 1), float)

        # each autoencoder reconstructs its own identity's faces together with the mask
        loss_A = autoencoder_A.train_on_batch([warped_A, mask_A], [target_A, mask_A])
        loss_B = autoencoder_B.train_on_batch([warped_B, mask_B], [target_B, mask_B])

        pbar.set_description("Loss A [{}] Loss B [{}]".format(loss_A, loss_B))

        if epoch % 100 == 0:
            save_model_weights()
            # keep only the RGB channels of the first eight targets for the preview
            test_A = target_A[0:8, :, :, :3]
            test_B = target_B[0:8, :, :, :3]

            test_A_i = []
            test_B_i = []

            for i in test_A:
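                # The original listing is cut off above this point; the loop body and the
                # lines below are an illustrative sketch only, not the original code. They
                # assume cv2 (OpenCV) is imported and that each autoencoder takes an
                # [image, mask] input pair, as the train_on_batch calls above suggest.
                test_A_i.append(cv2.resize(i.astype(numpy.float32), (64, 64), interpolation=cv2.INTER_AREA))
            for i in test_B:
                test_B_i.append(cv2.resize(i.astype(numpy.float32), (64, 64), interpolation=cv2.INTER_AREA))

            # run both autoencoders on the preview batch, feeding an all-ones mask
            test_A_i = numpy.array(test_A_i)
            test_B_i = numpy.array(test_B_i)
            ones_mask = numpy.ones((test_A_i.shape[0], 64, 64, 1), float)
            recon_A = autoencoder_A.predict([test_A_i, ones_mask])[0]  # A reconstructed by A
            swap_A = autoencoder_B.predict([test_A_i, ones_mask])[0]   # A swapped towards B
            # (the original presumably tiles these into a preview image, as Examples 3 and 4 do)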
Example #3
images_A = get_image_paths( "/input/data/data/trump" )
images_B = get_image_paths( "/input/data/data/cage"  )
images_A = load_images( images_A ) / 255.0
images_B = load_images( images_B ) / 255.0

# shift set A's per-channel mean towards set B's so colours roughly match
images_A += images_B.mean( axis=(0,1,2) ) - images_A.mean( axis=(0,1,2) )

print( "press 'q' to stop training and save model" )

for epoch in range(1000000):
    batch_size = 64
    warped_A, target_A = get_training_data( images_A, batch_size )
    warped_B, target_B = get_training_data( images_B, batch_size )

    loss_A = autoencoder_A.train_on_batch( warped_A, target_A )
    loss_B = autoencoder_B.train_on_batch( warped_B, target_B )
    print( loss_A, loss_B )

    if epoch % 100 == 0:
        save_model_weights()
        test_A = target_A[0:14]
        test_B = target_B[0:14]

    # preview columns: original, same-identity reconstruction, swapped reconstruction
    figure_A = numpy.stack([
        test_A,
        autoencoder_A.predict( test_A ),
        autoencoder_B.predict( test_A ),
        ], axis=1 )
    figure_B = numpy.stack([
        test_B,
Example #4
images_A = get_image_paths( "data/trump" )
images_B = get_image_paths( "data/cage"  )
images_A = load_images( images_A ) / 255.0
images_B = load_images( images_B ) / 255.0

images_A += images_B.mean( axis=(0,1,2) ) - images_A.mean( axis=(0,1,2) )

print( "press 'q' to stop training and save model" )

for epoch in range(1000000):
    batch_size = 64
    warped_A, target_A = get_training_data( images_A, batch_size )
    warped_B, target_B = get_training_data( images_B, batch_size )

    loss_A = autoencoder_A.train_on_batch( warped_A, target_A )
    loss_B = autoencoder_B.train_on_batch( warped_B, target_B )
    print( loss_A, loss_B )

    if epoch % 100 == 0:
        save_model_weights()
        test_A = target_A[0:14]
        test_B = target_B[0:14]

    figure_A = numpy.stack([
        test_A,
        autoencoder_A.predict( test_A ),
        autoencoder_B.predict( test_A ),
        ], axis=1 )
    figure_B = numpy.stack([
        test_B,
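        # --- Illustrative continuation: the listing is cut off here. The two predict calls
        # --- below mirror figure_A by symmetry, and the preview display is a sketch that
        # --- assumes cv2 (OpenCV) is imported; it is not the original code.
        autoencoder_B.predict( test_B ),
        autoencoder_A.predict( test_B ),
        ], axis=1 )

    # tile the two (14, 3, H, W, C) stacks into a single preview image
    figure = numpy.concatenate([figure_A, figure_B], axis=0)
    rows, cols, h, w, c = figure.shape
    figure = figure.transpose(0, 2, 1, 3, 4).reshape(rows * h, cols * w, c)
    figure = numpy.clip(figure * 255, 0, 255).astype(numpy.uint8)

    # show the preview and honour the "press 'q' to stop training" hint printed above
    cv2.imshow("preview", figure)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        save_model_weights()
        break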