# --- Training configuration: constants, model build, and data generator setup ---

# Input image size expected by the network.
WIDTH, HEIGHT = 256, 256

# Root directory of the crowdai dataset.
train_data_dir = './crowdai/'

N_CLASSES = 38            # number of output categories
nb_train_samples = 4125   # total number of training images
batch_size = 8
epochs = 10

# Build and compile the classifier.
model = create_model(N_CLASSES)
model.compile(
    loss="categorical_crossentropy",
    # NOTE(review): 0.1 is an unusually high learning rate for Adam
    # (typical defaults are ~1e-3) — confirm this is intentional.
    optimizer=optimizers.Adam(lr=0.1),
    metrics=["accuracy"],
)
# BUG FIX: summary() prints the model itself and returns None, so the
# original `print(model.summary())` emitted a stray "None" line.
model.summary()

# Augmentation is currently disabled; to re-enable, pass the previously
# commented options (horizontal_flip, zoom_range=0.3, width/height shift
# of 0.3, rotation_range=30, fill_mode="nearest") to ImageDataGenerator.
train_datagen = ImageDataGenerator()
def main(batch_size, epochs, learning_rate, beta1, beta2, data_path, num_workers):
    """Train a CycleGAN on the summer<->winter image domains and plot losses.

    Parameters mirror the usual GAN hyperparameters; `data_path` is the root
    directory containing the 'summer' and 'winter' image folders.
    `num_workers` is currently accepted but unused in this body.
    """
    # Create train and test dataloaders for images from the two domains X and Y.
    # image_type = directory names for our data.
    dataloader_X, test_dataloader_X = get_data_loader(
        image_type='summer', image_dir=data_path, batch_size=batch_size)
    dataloader_Y, test_dataloader_Y = get_data_loader(
        image_type='winter', image_dir=data_path, batch_size=batch_size)

    # Build the two generators and two discriminators, then show their layout.
    G_XtoY, G_YtoX, D_X, D_Y = create_model()
    print_models(G_XtoY, G_YtoX, D_X, D_Y)

    # Both generators share a single optimizer; each discriminator gets its own.
    g_params = list(G_XtoY.parameters()) + list(G_YtoX.parameters())
    g_optimizer = optim.Adam(g_params, learning_rate, [beta1, beta2])
    d_x_optimizer = optim.Adam(D_X.parameters(), learning_rate, [beta1, beta2])
    d_y_optimizer = optim.Adam(D_Y.parameters(), learning_rate, [beta1, beta2])

    # Train the network.
    losses = training_loop(G_XtoY, G_YtoX, D_X, D_Y,
                           g_optimizer, d_x_optimizer, d_y_optimizer,
                           dataloader_X, dataloader_Y,
                           test_dataloader_X, test_dataloader_Y,
                           epochs=epochs)

    # Plot per-iteration losses; columns are [D_X loss, D_Y loss, generator loss].
    fig, ax = plt.subplots(figsize=(12, 8))
    losses = np.array(losses)
    print(losses)
    plt.plot(losses.T[0], label='Discriminator, X', alpha=0.5)
    plt.plot(losses.T[1], label='Discriminator, Y', alpha=0.5)
    plt.plot(losses.T[2], label='Generators', alpha=0.5)
    plt.title("Training Losses")
    plt.legend()


import matplotlib.image as mpimg


# helper visualization code
def view_samples(iteration, sample_dir='samples_cyclegan'):
    """Display the saved X->Y and Y->X sample images for one training iteration.

    Samples are named by iteration, e.g. 'sample-000100-X-Y.png'. Prints a
    message and returns early if the files for `iteration` do not exist.
    """
    path_XtoY = os.path.join(sample_dir, 'sample-{:06d}-X-Y.png'.format(iteration))
    path_YtoX = os.path.join(sample_dir, 'sample-{:06d}-Y-X.png'.format(iteration))
    # read in those samples
    try:
        x2y = mpimg.imread(path_XtoY)
        y2x = mpimg.imread(path_YtoX)
    except (FileNotFoundError, OSError):
        # BUG FIX: the original used a bare `except:` and then fell through to
        # use the never-assigned x2y/y2x, raising NameError. Catch only file
        # read errors and bail out early instead.
        print('Invalid number of iterations.')
        return

    fig, (ax1, ax2) = plt.subplots(figsize=(18, 20), nrows=2, ncols=1,
                                   sharey=True, sharex=True)
    ax1.imshow(x2y)
    ax1.set_title('X to Y')
    ax2.imshow(y2x)
    ax2.set_title('Y to X')


# View samples at iteration 1 (the original comment said 4000, which
# contradicted the actual call).
view_samples(1, 'samples_cyclegan')
def generateNewModel():
    """Build, train, persist, and return a freshly trained model.

    Relies on the module-level `database_path` and `save_path` variables.
    Returns the model reloaded from `save_path` after training.
    """
    model = create_model()
    train(model=model, database_path=database_path, save_path=save_path)
    # Reload the weights that `train` persisted, so the caller receives the
    # saved model rather than the in-memory training state.
    model = keras.models.load_model(save_path)
    # BUG FIX: the original loaded the model and then discarded it — the
    # function implicitly returned None, making the load pointless.
    return model
mask = mask.unsqueeze(1) mask = mask.type(torch.FloatTensor) ZT = ZT.add_(z * mask.cuda()) del mask, z, category_map return ZT if __name__=='__main__': print(args) print('loading Dataset') train_data = SGNDataset(args) train_loader = data.DataLoader(train_data,batch_size=args.batch_size, shuffle=True,num_workers = args.num_threads) print('Connecting nodes , fabicrating network') if(not args.r): G, D = create_model(args) else: G,D = torch.load( args.save_filename + "_G_latest" ) , torch.save( args.save_filename + "_D_latest" ) start_epoch = 0 if(args.resume_train): rf = open('log.txt','r') log = rf.readline() log = log.split(' ') start_epoch = int(log[0]) print('loading last trained step') pretrained_dict = torch.load(args.save_filename + "_G_latest") model_dict = G.state_dict() for k,v in pretrained_dict.items(): if k in model_dict and v.size() == model_dict[k].size(): model_dict[k] = v else:
def create_seq_and_model(txt_filename, seq_filename, model_filename, units, epochs):
    """Derive a sequence file from raw text, then train and save a model on it.

    Step 1 converts `txt_filename` into `seq_filename` via `gen_seq`;
    step 2 fits a model (`units`, `epochs`) on that sequence file and
    writes it to `model_filename`.
    """
    # Step 1: produce the training sequence from the raw text.
    gen_seq(txt_filename, seq_filename)
    # Step 2: build/train the model on the sequence and persist it.
    create_model(seq_filename, model_filename, units, epochs)