Example #1
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision.datasets as dset
import torchvision.transforms as transforms
from torch.utils.data import DataLoader

# Generator, Discriminator, and train are assumed to be defined elsewhere in
# this project. ngpu and device are referenced below but never defined in the
# original snippet, so they are stubbed here.
ngpu = 1
device = torch.device("cuda:0" if (torch.cuda.is_available() and ngpu > 0) else "cpu")


def main(args, logger):

    # Create the dataset using ImageFolder (extra credit: write a custom Dataset)
    # and remember to preprocess the images with the torchvision transforms below.
    # https://pytorch.org/docs/stable/torchvision/datasets.html#imagefolder
    # https://pytorch.org/docs/stable/torchvision/transforms.html?highlight=totensor#torchvision.transforms.ToTensor
    dataset = dset.ImageFolder(args.dataroot,
                               transform=transforms.Compose([
                                   transforms.Resize(64),
                                   transforms.CenterCrop(64),
                                   transforms.ToTensor(),
                                   transforms.Normalize((0.5, 0.5, 0.5),
                                                        (0.5, 0.5, 0.5)),
                               ]))
    # Create the dataloader
    dataloader = DataLoader(dataset, batch_size=4, shuffle=True)

    # Create the generator and the discriminator, initialize them,
    # and send them to the device
    generator = Generator(ngpu).to(device)
    discriminator = Discriminator(ngpu).to(device)

    # Setup optimizers for both G and D and setup criterion at the same time
    optimizer_g = optim.Adam(generator.parameters(), lr=args.lr)
    optimizer_d = optim.Adam(discriminator.parameters(), lr=args.lr)
    criterion = nn.BCELoss()

    # Start training

    train(dataloader, generator, discriminator, optimizer_g, optimizer_d,
          criterion, args.num_epochs, logger)
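
The `train` function called above is not shown in this excerpt. For reference, a minimal DCGAN-style loop matching the call signature might look like the sketch below; the latent size `nz`, the real/fake label convention, and the `logger.info` call are assumptions rather than part of the original.

def train(dataloader, generator, discriminator, optimizer_g, optimizer_d,
          criterion, num_epochs, logger):
    nz = 100  # assumed latent vector size
    for epoch in range(num_epochs):
        for real, _ in dataloader:
            real = real.to(device)
            b_size = real.size(0)
            real_labels = torch.ones(b_size, device=device)
            fake_labels = torch.zeros(b_size, device=device)

            # Discriminator step on a real batch and a fake batch
            # (assumes the discriminator ends in a sigmoid, as in DCGAN)
            discriminator.zero_grad()
            d_loss_real = criterion(discriminator(real).view(-1), real_labels)
            noise = torch.randn(b_size, nz, 1, 1, device=device)
            fake = generator(noise)
            d_loss_fake = criterion(discriminator(fake.detach()).view(-1), fake_labels)
            d_loss = d_loss_real + d_loss_fake
            d_loss.backward()
            optimizer_d.step()

            # Generator step: try to make the discriminator output "real"
            generator.zero_grad()
            g_loss = criterion(discriminator(fake).view(-1), real_labels)
            g_loss.backward()
            optimizer_g.step()

        logger.info('epoch %d: d_loss=%.4f g_loss=%.4f',
                    epoch, d_loss.item(), g_loss.item())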
Example #2
def main(args):
    # print("num_class:", args.num_class)
    # data_loader     = DataLoader(args)
    data_loader     = Data_Loader(args)
    args.embeddings = data_loader.embeddings
    args.doc_size   = data_loader.doc_size

    model           = Model(args)
    generator       = Generator()
    # "Discrimitor" is the class name as spelled in this project's Model module
    discrimitor     = Discrimitor(args)

    train(model, data_loader, generator, discrimitor, args)
Example #3
def __init__(self, batch_size):
    self.BS = batch_size
    self.G = Generator().cuda()
    self.D = Discriminator().cuda()
    # RMSprop is the optimizer used in the original WGAN paper, which
    # suggests a WGAN-style setup here
    self.optim_D = optim.RMSprop(self.D.parameters(), lr=0.0001)
    self.optim_G = optim.RMSprop(self.G.parameters(), lr=0.0001)
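
Only the constructor appears in this excerpt. Given the RMSprop choice, the surrounding class is probably a WGAN trainer, so a weight-clipped critic update using these attributes could be a method like the sketch below; the 100-dim latent size and the 0.01 clip value are assumptions, not the project's actual code.

def train_D_step(self, real_batch):
    # One critic update for a weight-clipped WGAN (sketch only)
    self.optim_D.zero_grad()
    z = torch.randn(self.BS, 100).cuda()   # assumed 100-dim latent input to G
    fake = self.G(z).detach()
    # maximize D(real) - D(fake) by minimizing its negation
    loss_D = -(self.D(real_batch).mean() - self.D(fake).mean())
    loss_D.backward()
    self.optim_D.step()
    # weight clipping keeps the critic approximately Lipschitz-bounded
    for p in self.D.parameters():
        p.data.clamp_(-0.01, 0.01)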
Example #4
                         drop_last=True)
loader_cartoon = DataLoader(data_cartoon,
                            batch_size=batch_size,
                            shuffle=True,
                            drop_last=True)
loader_no_edge = DataLoader(data_no_edge,
                            batch_size=batch_size,
                            shuffle=True,
                            drop_last=True)
loader_validation = DataLoader(data_validation,
                               batch_size=1,
                               shuffle=False,
                               drop_last=False)

# Models ======================================================================
G = Generator()
D = Discriminator()
P_VGG19 = VGG19(weight_PATH=vgg_PATH)

G.to(device)
D.to(device)
P_VGG19.to(device)

G.train()
D.train()
P_VGG19.eval()

# Loss ========================================================================
BCE_loss = nn.BCELoss().to(device)
L1_loss = nn.L1Loss().to(device)
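
VGG19 is kept frozen in eval mode while G and D train, which points to a perceptual content loss of the kind used in CartoonGAN-style training. A minimal sketch of that loss under this setup (that `P_VGG19` returns a feature map is an assumption; this function is not part of the original excerpt):

def content_loss(photo, generated):
    # Compare frozen VGG19 feature maps of the real photo and the
    # generated image with L1
    with torch.no_grad():
        feat_real = P_VGG19(photo)
    feat_fake = P_VGG19(generated)
    return L1_loss(feat_fake, feat_real)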
Example #5
import csv
import pickle

import torch
import torch.nn as nn
from torch.autograd import Variable

from Config import opt
from Model import Generator, LinearClassifier
from helpfunction.helper import listsumavg


with open('pickle/pointx.pickle', 'rb') as f:
    pointlistx = pickle.load(f)

# rescale each point by a factor of 1/200
for i in range(len(pointlistx)):
    pointlistx[i] = pointlistx[i] / 200

D = LinearClassifier(input_dim=1, hidden_dim=50, output_dim=1)
G = Generator(input_dim=5, hidden_dim=50, output_dim=1, num_layers=2)
if torch.cuda.is_available():
    D = D.cuda()
    G = G.cuda()

criterion = nn.BCELoss()
opt_G = torch.optim.Adam(G.parameters(), lr=opt.LR_G)
opt_D = torch.optim.Adam(D.parameters(), lr=opt.LR_D)

Filename = 'resultdata/dmlp.csv'
outputFile = open(Filename, 'w', newline='')  # newline='' avoids blank rows with csv.writer
outputWriter = csv.writer(outputFile)
outputWriter.writerow(['Epoch', 'Dloss', 'Gloss'])
for epoch in range(opt.Epoches):
    dloss_list = []
    gloss_list = []
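    # The excerpt ends here. A plausible continuation, consistent with the
    # CSV header written above, would let the inner training steps append
    # per-batch losses to dloss_list / gloss_list and then log the epoch
    # averages with the imported listsumavg helper (a sketch; the real loop
    # body is not shown):
    #
    #     outputWriter.writerow([epoch,
    #                            listsumavg(dloss_list),
    #                            listsumavg(gloss_list)])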
Example #6
def main(alpha_path, trimap_path, gt_path, val_alpha_path, val_trimap_path,
         val_gt_path):

    LEARNING_RATE = opt.learning_rate
    BATCH_SIZE = opt.batch_size
    NUM_EPOCHS = opt.num_epoch
    # Batch size 1 is used because the image sizes vary between samples; see:
    # https://medium.com/@yvanscher/pytorch-tip-yielding-image-sizes-6a776eb4115b
    train_set = TrainDatasetFromFolder(alpha_path, trimap_path, gt_path)
    train_loader = DataLoader(dataset=train_set,
                              num_workers=4,
                              batch_size=1,
                              shuffle=True)
    #val_set = ValDatasetFromFolder(val_alpha_path, val_trimap_path, val_gt_path)
    #val_loader = DataLoader(dataset=train_set, num_workers=4, batch_size=1, shuffle=True)

    in_channel = 4  # RGB image plus trimap
    os = 8          # likely the output stride; note the name shadows the `os` module
    netG = Generator(in_channel, os)
    netD = Discriminator()

    ad_loss = AD_Loss()

    if torch.cuda.is_available():
        print('Cuda!!!!')
        netG.cuda()
        netD.cuda()
        #generator_criterion.cuda()

    optimizerG = optim.Adam(netG.parameters(), lr=opt.learning_rate)
    optimizerD = optim.Adam(netD.parameters(), lr=opt.learning_rate)

    # Evaluation metrics accumulated over training
    results = {'d_loss': [], 'g_loss': [], 'd_score': [], 'g_score': []}

    for epoch in range(1, NUM_EPOCHS + 1):
        # Each batch yields: batch_RGBsT, batch_trimapsT, batch_alphasT,
        # batch_BGsT, batch_FGsT, RGBs_with_meanT
        train_bar = tqdm(train_loader)

        running_results = {
            'batch_sizes': 0,
            'd_loss': 0,
            'g_loss': 0,
            'd_score': 0,
            'g_score': 0
        }
        # each iteration over train_bar yields one batch
        a = 0.
        b = 0.
        for g_input, g_lable, g_alpha, g_trimap, g_image in train_bar:

            #print('g_input', g_input.size())
            #print('g_lable', g_lable.size())
            #print('g_alpha', g_alpha.size())
            #print('g_image', g_image.size())
            #print('g_trimap', g_trimap.size())

            g_input = g_input.permute(0, 3, 1, 2)
            g_lable = g_lable.permute(0, 3, 1, 2)
            #g_alpha = g_alpha.permute(0, 3, 1, 2)
            g_image = g_image.permute(0, 3, 1, 2)
            #g_trimap = g_trimap.permute(0, 3, 1, 2)

            batch_size = opt.batch_size
            running_results['batch_sizes'] += batch_size

            g_alpha = Variable(g_alpha)
            g_image = Variable(g_image)
            g_trimap = Variable(g_trimap)
            g_lable = Variable(g_lable)  # image label
            g_input = Variable(g_input)  # image data

            ############################
            # (1) Update D network: minimize 1 - D(x) + D(G(z))
            ###########################

            real_image = Variable(g_alpha)
            real_image = real_image.float()
            real_image = real_image.unsqueeze_(-1)
            real_image = real_image.permute(0, 3, 1, 2)

            if torch.cuda.is_available():
                g_alpha = g_alpha.cuda()
                g_image = g_image.cuda()
                g_trimap = g_trimap.cuda()
                g_input = g_input.cuda()
                g_lable = g_lable.cuda()
                real_image = real_image.cuda()

            combine_loss, _pred_rgb, fake_image = netG(g_input, g_alpha,
                                                       g_trimap, g_image)

            netD.zero_grad()
            real_out = netD(real_image).mean()
            fake_out = netD(fake_image).mean()
            d_loss = 1 - real_out + fake_out
            d_loss.backward(retain_graph=True)
            optimizerD.step()

            #############################
            # (2) Update G network: minimize the composite loss and alpha loss
            #############################

            netG.zero_grad()
            combined_loss, p_rgb, _ = netG(
                g_input, g_alpha, g_trimap,
                g_image)  # the generator produces an imperfect alpha
            #combined_loss, alpha_loss, p_rgb, _ = netG(g_input, g_alpha, g_trimap, g_image)
            # bug fix: backpropagate the freshly computed combined_loss, not
            # the stale combine_loss left over from the discriminator step
            combined_loss.backward()
            optimizerG.step()
            combine_loss, _pred_rgb, fake_image = netG(g_input, g_alpha,
                                                       g_trimap, g_image)
            fake_out = netD(fake_image).mean()

            d_loss = 1 - real_out + fake_out

            #############################
            # Update result here
            #
            #############################

            running_results['d_loss'] += d_loss.item() * batch_size

            if not math.isnan(combined_loss.item()):
                #print('#'*10, alpha_loss.item())
                a = combined_loss.item() * batch_size
                #print('#'*10, a)
                running_results['g_loss'] += a
                b = b + a
                #print('#'*10, b)

            running_results['d_score'] += real_out.item() * batch_size
            running_results['g_score'] += fake_out.item() * batch_size

            train_bar.set_description(
                desc=
                '[%d/%d] Loss_D: %.4f Loss_G: %.4f D(x): %.4f D(G(z)): %.4f' %
                (epoch, NUM_EPOCHS,
                 running_results['d_loss'] / running_results['batch_sizes'],
                 running_results['g_loss'] / running_results['batch_sizes'],
                 running_results['d_score'] / running_results['batch_sizes'],
                 running_results['g_score'] / running_results['batch_sizes']))

        epoch_path = 'epochs_den_rc/'
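        # The excerpt ends here; a typical continuation would checkpoint both
        # networks once per epoch (a sketch, not part of the original code):
        torch.save(netG.state_dict(), epoch_path + 'netG_epoch_%d.pth' % epoch)
        torch.save(netD.state_dict(), epoch_path + 'netD_epoch_%d.pth' % epoch)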
Example #7
    if save:
        plt.savefig(path)

    if show:
        plt.show()
    else:
        plt.close()


if __name__ == "__main__":
    # Create the generator
    device = torch.device("cuda:0" if (
        torch.cuda.is_available() and Args.num_gpu > 0) else "cpu")

    netG = Generator(Args.num_gpu).to(device)

    # Handle multi-gpu if desired
    if (device.type == 'cuda') and (Args.num_gpu > 1):
        netG = nn.DataParallel(netG, list(range(Args.num_gpu)))

    # Apply the weights_init function to randomly initialize all weights
    #  to mean=0, stdev=0.02 (the DCGAN convention)
    netG.apply(weights_init)

    # Print the model
    # print(netG)

    # Create the Discriminator
    netD = Discriminator(Args.num_gpu).to(device)
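
The `weights_init` function used above is not part of this excerpt. A sketch of the standard DCGAN initializer that the comment describes (normal conv weights with std 0.02, as in the DCGAN paper):

def weights_init(m):
    # DCGAN-style initialization: conv weights ~ N(0, 0.02), batch-norm
    # weights ~ N(1, 0.02) with zero bias
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        nn.init.normal_(m.weight.data, 0.0, 0.02)
    elif classname.find('BatchNorm') != -1:
        nn.init.normal_(m.weight.data, 1.0, 0.02)
        nn.init.constant_(m.bias.data, 0)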
Example #8
          'cv2_k':cv2_k,
          'cv2_s':cv2_s,
          'p2_k':p2_k,
          'p2_s':p2_s,
          'num_epoch':num_epoch,
          'D_rounds': D_rounds,
          'G_rounds': G_rounds,  
          'learning_rate' : learning_rate
         }

  json = js.dumps(dict)  # note: `dict` and `json` shadow Python built-ins here
  with open(path + "/settings.json", "w") as f:
      f.write(json)

  generator_1 = Generator(seq_length,
                          sample_size,
                          hidden_dim=hidden_nodes_g,
                          tanh_output=tanh_layer).to(device)
  discriminator_1 = Discriminator(seq_length,
                                  sample_size,
                                  minibatch_normal_init=minibatch_normal_init_,
                                  minibatch=minibatch_layer,
                                  num_cv=num_cvs,
                                  cv1_out=cv1_out,
                                  cv1_k=cv1_k,
                                  cv1_s=cv1_s,
                                  p1_k=p1_k,
                                  p1_s=p1_s,
                                  cv2_out=cv2_out,
                                  cv2_k=cv2_k,
                                  cv2_s=cv2_s,
                                  p2_k=p2_k,
                                  p2_s=p2_s).to(device)
  # Loss function
  loss_1 = torch.nn.BCELoss()

  generator_1.train()
  discriminator_1.train()

  d_optimizer_1 = torch.optim.Adam(discriminator_1.parameters(), lr=learning_rate)
  g_optimizer_1 = torch.optim.Adam(generator_1.parameters(), lr=learning_rate)

  G_losses = []
  D_losses = []
  mmd_list = []
  series_list = np.zeros((1,seq_length))
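
The `D_rounds` and `G_rounds` settings recorded above imply a training loop that alternates several discriminator updates with several generator updates per epoch. A rough sketch of that pattern; the `next_real_batch` helper and the (sample_size, seq_length) noise shape are assumptions, not from the original:

for epoch in range(num_epoch):
    for _ in range(D_rounds):
        # discriminator update on one real batch and one fake batch
        d_optimizer_1.zero_grad()
        real = next_real_batch().to(device)  # assumed helper
        noise = torch.randn(sample_size, seq_length).to(device)
        fake = generator_1(noise).detach()
        d_real = discriminator_1(real)
        d_fake = discriminator_1(fake)
        d_loss = (loss_1(d_real, torch.ones_like(d_real)) +
                  loss_1(d_fake, torch.zeros_like(d_fake)))
        d_loss.backward()
        d_optimizer_1.step()
    for _ in range(G_rounds):
        # generator update: make fakes look real to the discriminator
        g_optimizer_1.zero_grad()
        noise = torch.randn(sample_size, seq_length).to(device)
        d_out = discriminator_1(generator_1(noise))
        g_loss = loss_1(d_out, torch.ones_like(d_out))
        g_loss.backward()
        g_optimizer_1.step()
    D_losses.append(d_loss.item())
    G_losses.append(g_loss.item())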
Example #9
        'p2_s': p2_s,
        'num_epoch': num_epoch,
        'D_rounds': D_rounds,
        'G_rounds': G_rounds,
        'learning_rate': learning_rate
    }
    # Print the settings used to a file
    json = js.dumps(dict)  # note: `dict` and `json` shadow Python built-ins here
    with open(path + "/settings.json", "w") as f:
        f.write(json)

    # Initialising the generator and discriminator
    generator_1 = Generator(seq_length,
                            sample_size,
                            hidden_dim=hidden_nodes_g,
                            tanh_output=tanh_layer,
                            bidirectional=bidir).cuda()
    discriminator_1 = Discriminator(
        seq_length,
        sample_size,
        minibatch_normal_init=minibatch_normal_init_,
        minibatch=minibatch_layer,
        num_cv=num_cvs,
        cv1_out=cv1_out,
        cv1_k=cv1_k,
        cv1_s=cv1_s,
        p1_k=p1_k,
        p1_s=p1_s,
        cv2_out=cv2_out,
        cv2_k=cv2_k,
        cv2_s=cv2_s,
        p2_k=p2_k,
        p2_s=p2_s).cuda()  # trailing arguments reconstructed from the identical call in Example #8
Example #10
import torch
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision.datasets import ImageFolder

from Model import Generator, Discriminator, VGG19

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
if torch.backends.cudnn.enabled:
    torch.backends.cudnn.benchmark = True

Generator_PATH = "./Saved_model/pretrained_generator.pth"

src_PATH = "./Translate/src"
dst_PATH = "./Translate/dst"

G = Generator(weight_PATH=Generator_PATH)
G.to(device)

transform = transforms.Compose([
    transforms.Resize(size=(256, 256)),
    transforms.ToTensor(),
    transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)),
])

data_set = ImageFolder(src_PATH, transform)
loader = DataLoader(data_set, batch_size=1, shuffle=False, drop_last=False)

with torch.no_grad():
    G.eval()

    for idx, (x, _) in enumerate(loader):
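        # The loop body is cut off in the original. A plausible sketch:
        # translate each source image with G and save it to dst_PATH,
        # undoing the (0.5, 0.5, 0.5) normalization first.
        from torchvision.utils import save_image  # local import, for this sketch only
        x = x.to(device)
        out = G(x)
        save_image(out * 0.5 + 0.5, "{}/{}.png".format(dst_PATH, idx))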
Example #11
from Model import Generator
import numpy as np

# Load the prepared training arrays
x = np.load('data/x.npy')
y = np.load('data/y.npy')

generator = Generator()

generator.batch_size(50)

# Resume from a saved model, train for 500 more epochs, then save the result
generator.load_model('models/poem_1000_model_2.h5')

generator.fit(x, y, number_of_epochs=500, plotTrainingHistory=False)
generator.save_model('models/poem_1500_model_2.h5')

print(generator.predict(input('Enter seed text : '), seed=100))