Example #1
def main():
    # cleaning
    utils.remove_all_files_inside_folder('./results/')
    utils.remove_all_files_inside_folder('./training_checkpoints/')
    # prepare dataset
    (train_images, _), (_, _) = utils.get_fmnist_data()
    train_dataset = utils.normalize_data(train_images)
    # create models
    generator = utils.Generator()
    discriminator = utils.Discriminator()
    # Wrapping call in defun gives a ~10 s/epoch speedup
    # (TF 1.x API; tf.function is the TF 2 equivalent)
    generator.call = tf.contrib.eager.defun(generator.call)
    discriminator.call = tf.contrib.eager.defun(discriminator.call)
    # training helpers
    checkpoint = utils.setup_checkpoint(generator, discriminator)
    random_vector = utils.generate_constant_random_vector(
        NOISE_DIM, NUM_EXAMPLES_TO_GENERATE)
    # training
    history = utils.train(dataset=train_dataset, epochs=EPOCHS, noise_dim=NOISE_DIM, generator=generator,
                          discriminator=discriminator, checkpoint=checkpoint, random_vector=random_vector)
    # reporting
    generator.summary()
    discriminator.summary()
    utils.plot_loss(history)
    utils.create_gif()
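
Note: utils.create_gif is a project-local helper that is not shown. A minimal sketch of what such a helper could look like, assuming the training loop saved one PNG per epoch under ./results/ (the file pattern and output name are assumptions, not the original implementation):

import glob
import imageio

def create_gif(frame_dir='./results/', gif_path='dcgan.gif'):
    # Hypothetical: collect the per-epoch PNGs and write an animated GIF.
    frames = [imageio.imread(f)
              for f in sorted(glob.glob(frame_dir + 'image_at_epoch_*.png'))]
    imageio.mimsave(gif_path, frames, duration=0.5)  # 0.5 s per frame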
Example #2
def run(flags_obj):
    data_aug_args = dict(rotation_range=0.2,
                         width_shift_range=0.05,
                         height_shift_range=0.05,
                         shear_range=0.05,
                         zoom_range=0.05,
                         horizontal_flip=True,
                         fill_mode='nearest')

    train_gene = train_generator(flags_obj, data_aug_args)

    model = unet(flags_obj, n_filters=64)

    model.compile(
        optimizer=tf.keras.optimizers.Adam(learning_rate=flags_obj.learning_rate),
        loss=tf.keras.losses.BinaryCrossentropy(),
        metrics=['accuracy'])

    example = load_example(flags_obj)
    example_img = imageio.imread('data/membrane/test/image/0.png')
    # Save first prediction without training.
    save_prediction(model, example_img, example, 0)

    test_ds = load_test_dataset()

    # Note: fit_generator is deprecated in TF >= 2.1, where model.fit
    # accepts generators directly.
    history = model.fit_generator(train_gene,
                                  epochs=flags_obj.epoch,
                                  steps_per_epoch=flags_obj.steps_per_epoch,
                                  validation_data=test_ds,
                                  callbacks=[DisplayCallback(model, example)])

    create_gif()
    plot_history(history, flags_obj.epoch)
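
Note: DisplayCallback is project-specific and not shown here. A plausible minimal sketch, assuming it saves the model's prediction for the fixed example at the end of every epoch (the body and output path are assumptions, not the original implementation):

import tensorflow as tf

class DisplayCallback(tf.keras.callbacks.Callback):
    # Hypothetical: render the prediction for a fixed example after each
    # epoch, so that training progress can later be stitched into a GIF.
    def __init__(self, model, example):
        super().__init__()
        self.unet = model
        self.example = example

    def on_epoch_end(self, epoch, logs=None):
        pred = self.unet.predict(self.example[tf.newaxis, ...])[0]
        tf.keras.preprocessing.image.save_img(
            'results/epoch_{:03d}.png'.format(epoch + 1), pred)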
Example #3
def main():
    """"""
    op = Options()
    args = op.parse()
    train_loader, test_loader = load_data(args)

    # visualize(train_loader)
    data_loader = {'train': train_loader, 'test': test_loader}
    model = SphereFace20()
    optimizer = optim.SGD(model.parameters(),
                          lr=args.lr,
                          momentum=args.momentum,
                          weight_decay=args.weight_decay,
                          nesterov=True)

    criterion = AngularSoftmaxWithLoss()
    with SummaryWriter(args.log_dir) as writer:
        # Variable is a no-op wrapper since PyTorch 0.4; a plain tensor suffices.
        input_data = torch.rand(32, 1, 28, 28)
        writer.add_graph(model, (input_data, ))
        for epoch in range(1, args.epochs + 1):
            adjust_learning_rate(args, optimizer, epoch - 1)
            iter_count = len(train_loader) * (epoch - 1)
            train(args, model, data_loader, optimizer, epoch, criterion,
                  writer, iter_count)

    # Draw feature map.
    gif_name = {
        'train': './data/train/train_features.gif',
        'test': './data/test/test_features.gif'
    }
    filepath = {'train': './data/train/', 'test': './data/test/'}
    for i in ['train', 'test']:
        create_gif(gif_name[i], filepath[i])
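
Note: adjust_learning_rate is not shown. A common PyTorch pattern it could follow is step decay; a minimal sketch, assuming a hypothetical args.lr_step flag that sets the decay interval:

def adjust_learning_rate(args, optimizer, epoch):
    # Hypothetical step decay: divide the base LR by 10 every args.lr_step epochs.
    lr = args.lr * (0.1 ** (epoch // args.lr_step))
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr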
Example #4
def main(env_name=None):
    ENV_NAME = env_name or 'wumpus-v0'

    MODEL_DIR = f'models/{ENV_NAME}-dqn'
    MODEL_FILE = f'{ENV_NAME}-dqn.h5'
    CHECKPOINTS_DIR = f'models/{ENV_NAME}-dqn/checkpoints'
    TEST_IMG_DIR = f'tests/{ENV_NAME}-dqn'

    env = gym.make(ENV_NAME)
    env.reset()

    agent = Agent(learning_rate=0.01, gamma=0.95,
                  state_shape=env.observation_space.shape, actions=7,
                  batch_size=64,
                  epsilon_initial=0.0, epsilon_decay=0, epsilon_final=0.0,
                  replay_buffer_capacity=1000000,
                  model_name=MODEL_FILE, model_dir=MODEL_DIR,
                  ckpt_dir=CHECKPOINTS_DIR)
    agent.load_model()

    done = False
    score = 0
    steps_per_episode = 0
    state = env.reset()
    images = [env.render('rgb_array')]
    while not done:
        # Choose action according to policy, and execute
        action = agent.select_action(state)
        state, reward, done, _ = env.step(action)

        score += reward
        steps_per_episode += 1
        images.append(env.render('rgb_array'))

    # Generate GIF for the execution
    create_gif(
        f'{ENV_NAME}.gif',
        np.array(images),
        fps=1.0
    )

    print(f"Model '{ENV_NAME}', score {score}, steps {steps_per_episode}")
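
Note: here create_gif receives the rendered frames directly instead of reading them from disk. A minimal sketch matching that call signature, assuming imageio (the body is an assumption):

import imageio

def create_gif(filename, images, fps=1.0):
    # Hypothetical: write a stack of RGB frames to an animated GIF.
    imageio.mimsave(filename, list(images), duration=1.0 / fps)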
Example #5
def main():
    op = Options()
    args = op.parse()
    train_loader, test_loader = load_data(args)
    data_loader = {'train': train_loader, 'test': test_loader}

    model = SphereFace4(m=4).cuda()
    criterion = AngularSoftmaxWithLoss().cuda()
    gif_name = {'train': 'sphereface_train.gif', 'test': 'sphereface_test.gif'}

    # model = CosSphereFace4().cuda()
    # criterion = MarginCosineSoftmaxWithLoss().cuda()
    # gif_name = {
    #     'train': 'cosface_train.gif',
    #     'test': 'cosface_test.gif'
    # }

    # model = ArcSphereFace4().cuda()
    # criterion = ArcMarginSoftmaxWithLoss().cuda()
    # gif_name = {
    #     'train': 'arcface_train.gif',
    #     'test': 'arcface_test.gif'
    # }

    optimizer = optim.SGD(model.parameters(),
                          lr=args.lr,
                          momentum=args.momentum,
                          weight_decay=args.weight_decay,
                          nesterov=True)
    vis_record = dict()
    with SummaryWriter(args.log_dir) as writer:
        for epoch in range(1, args.epochs + 1):
            adjust_learning_rate(args, optimizer, epoch - 1)
            iter_count = len(train_loader) * (epoch - 1)
            train(args, model, data_loader, optimizer, epoch, criterion,
                  writer, iter_count, vis_record)
            save_model(model, 'runs/iter_{}.pth'.format(epoch))

    visual_feature_space_record(vis_record)
    filepath = {'train': 'train/', 'test': 'test/'}

    for phase in ['train', 'test']:
        create_gif(gif_name[phase], filepath[phase], duration=0.5)
Example #6
    def generate_image(self, num_steps, output_file, gif_file, save_every_n):
        with tf.Session() as session:
            gif_frames = []

            session.run(tf.global_variables_initializer())
            pretrained_vgg_weights = VGG19(include_top=False,
                                           weights='imagenet').get_weights()
            self.vgg_content_shaped.set_weights(pretrained_vgg_weights)
            self.vgg_style_shaped.set_weights(pretrained_vgg_weights)

            if save_every_n is not None:
                intermediate_results_dir = os.path.splitext(
                    output_file)[0] + '_intermediate'
                if not os.path.exists(intermediate_results_dir):
                    os.makedirs(intermediate_results_dir)

            for i in range(1, num_steps + 1):
                _, image, content_loss, style_loss, denoising_loss = session.run(
                    [
                        self.train_step, self.image, self.content_loss,
                        self.style_loss, self.denoising_loss
                    ])
                print(
                    'iter %s: content loss %s, style loss %s, denoising loss %s'
                    % (i, content_loss, style_loss, denoising_loss))

                image = postprocess(image)
                image = np.clip(image, 0, 255).astype(np.uint8)

                if save_every_n is not None and i % save_every_n == 0:
                    file = os.path.join(intermediate_results_dir,
                                        'iter_%d.png' % i)
                    print('Saving intermediate result to %s' % file)
                    scipy.misc.imsave(file, image)

                if gif_file is not None and i % 10 == 0:
                    gif_frames.append(image)

            scipy.misc.imsave(output_file, image)
            if gif_file is not None:
                create_gif(gif_frames, gif_file)
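
Note: scipy.misc.imsave was deprecated in SciPy 1.0 and removed in 1.2. On current SciPy, the two save calls above can be replaced with imageio:

import imageio

imageio.imwrite(file, image)         # instead of scipy.misc.imsave(file, image)
imageio.imwrite(output_file, image)  # instead of scipy.misc.imsave(output_file, image)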
Example #7
def main():
    # Use argparse to decide if user wants to re-train VAE
    parser = argparse.ArgumentParser()
    parser.add_argument("-train", action='store_true')
    args = parser.parse_args()

    z_dim = 50
    epochs = 500
    learning_rate = 0.0003
    batch_size = 50

    mnist_path = 'mnist/mnist_28.pkl.gz'
    # Uses anglpy module from original paper (linked at top) to load the dataset
    train_x, train_y, valid_x, valid_y, test_x, test_y = mnist.load_numpy(mnist_path, binarize_y=True)
    x_all = np.concatenate([train_x.T, valid_x.T, test_x.T], axis=0)
    y_all = np.concatenate([train_y.T, valid_y.T, test_y.T], axis=0)

    x_dim = x_all.shape[1]

    # Visualize how results change over time in the form of a GIF
    utils.create_gif("M1_model.gif")

    # Specify model path and setup VAE object
    M1_model_path = "./model/VAE.ckpt"
    vae = M1(x_dim=x_dim, z_dim=z_dim)

    # Train if user specifies with keyword
    if args.train:
        vae.train(x=x_all, epochs=epochs, batch_size=batch_size, learning_rate=learning_rate, plot=True)

    # Visualize the latent space
    tf.reset_default_graph()
    with vae.session:
        vae.saver.restore(vae.session, M1_model_path)
        [sample, latent] = vae.session.run([vae.decoder_xhat, vae.z], feed_dict={vae.x: x_all, vae.phase: True})

        # Plot meshgrid at Epoch 500
        utils.plot_meshgrid(x_all[0:25,:], sample[0:25,:], epochs)
Example #8
    def train(
        self,
        epoches,
        log_interval=100,
        output_dir='',
        verbose=True,
        save_checkpoints=True
    ):
        # Sets the module in training mode.
        self.NetG.train()
        self.NetD.train()
        viz_z = torch.zeros(
            (self.data_loader.batch_size, self.latent_dim), 
            device=self.device
        )
        viz_noise = torch.randn(
            self.data_loader.batch_size, 
            self.latent_dim, 
            device = self.device
        )
        nrows = self.data_loader.batch_size // 8
        print(f'nrows:{nrows}, batch_size:{self.data_loader.batch_size}')
        # generate fixed labels for the image-synthesis preview grid
        viz_label = torch.LongTensor(
            np.array([num for _ in range(nrows) for num in range(8)])
        ).to(self.device)
        # transform label into one-hot vector
        viz_onehot = self._to_onehot(
            viz_label, 
            dim=self.classes
        )
        viz_code = torch.zeros(
            (self.data_loader.batch_size, self.code_dim), 
            device=self.device
        )
        #####  Set up training related parameters  #####
        n_train = len(self.data_loader.dataset)
        total_time = time.time()

        ##### logging training information #####
        logging.info(f'''Start training:
            Epochs:          {epoches}
            Batch size:      {self.data_loader.batch_size}
            Learning rate:   {self.learning_rate}
            Training size:   {n_train}
            Checkpoints:     {save_checkpoints}
            Device:          {self.device.type}
            Verbose:         {verbose}
        ''')
        #####  Set up training loss record parameters #####
        train_hist = {}
        if self.name == 'cgan':
            train_hist['D_losses'] = []
            train_hist['G_losses'] = []
        elif self.name == 'infogan':
            train_hist['D_losses'] = []
            train_hist['G_losses'] = []
            train_hist['Info_losses'] = []
        #####  Core optimization loop  #####
        for epoch in range(epoches):
            batch_time = time.time()
            if self.name == 'cgan':
                D_losses = []
                G_losses = []
            elif self.name == 'infogan':
                D_losses = []
                G_losses = []
                Info_losses = []
            with tqdm(total=n_train, desc=f'Epoch {epoch + 1}/{epoches}', unit='img') as pbar:
                for batch_idx, (data, target) in enumerate(self.data_loader):
                    data, target = data.to(self.device), target.to(self.device)
                    batch_size = data.size(0)
                    real_label = torch.full((batch_size, 1), 1., device=self.device)
                    fake_label = torch.full((batch_size, 1), 0., device=self.device)
                    
                    # Train Generator
                    self.NetG.zero_grad()
                    z_noise = torch.randn(batch_size, self.latent_dim, device=self.device)
                    # Sample a label c from a uniform distribution and convert
                    # it to a one-hot vector; the generator then maps (z, c)
                    # to an image.
                    x_fake_labels = torch.randint(0, self.classes, (batch_size,), device=self.device)
                    if self.name == 'cgan':
                        x_fake = self.NetG(z_noise, x_fake_labels)
                        y_fake_g = self.NetD(x_fake, x_fake_labels)
                        g_loss = self.NetD.loss(y_fake_g, real_label)
                    elif self.name == 'infogan':
                        labels_onehot = self._to_onehot(x_fake_labels, dim=self.classes)
                        z_code = torch.zeros((batch_size, self.code_dim), device=self.device).normal_()
                        
                        x_fake = self.NetG(z_noise, labels_onehot, z_code)
                        y_fake_g, _, _ = self.NetD(x_fake)
                        g_loss = self.NetD.adv_loss(y_fake_g, real_label)

                    G_losses.append(g_loss.item()) # record generator loss
                    g_loss.backward()
                    self.optim_G.step()

                    # Train Discriminator
                    self.NetD.zero_grad()
                    if self.name == 'cgan':
                        y_real = self.NetD(data, target)
                        d_real_loss = self.NetD.loss(y_real, real_label)

                        y_fake_d = self.NetD(x_fake.detach(), x_fake_labels)
                        d_fake_loss = self.NetD.loss(y_fake_d, fake_label)
                    elif self.name == 'infogan':
                        y_real, _, _ = self.NetD(data)  # unsupervised head
                        d_real_loss = self.NetD.adv_loss(y_real, real_label)

                        y_fake_d, _, _ = self.NetD(x_fake.detach())
                        d_fake_loss = self.NetD.adv_loss(y_fake_d, fake_label)
                        
                    d_loss = (d_real_loss + d_fake_loss) / 2
                    D_losses.append(d_loss.item()) # record discriminator loss
                    pbar.set_postfix(
                        **{
                        'Generator loss (batch)': g_loss.item(),
                        'Discriminator loss (batch)': d_loss.item()
                        }
                    )
                    d_loss.backward()
                    self.optim_D.step()
                    pbar.update(batch_size)
                    # update infogan's mutual information
                    if self.name == 'infogan':
                        self.optim_info.zero_grad()
                        z_noise.normal_()
                        x_fake_labels = torch.randint(0, self.classes, (batch_size,), device=self.device)
                        labels_onehot = self._to_onehot(x_fake_labels, dim=self.classes)
                        z_code.normal_()
                        x_fake = self.NetG(z_noise, labels_onehot, z_code)
                        _, label_fake, code_fake = self.NetD(x_fake)
                        info_loss = (self.NetD.class_loss(label_fake, x_fake_labels) +
                                     self.NetD.continuous_loss(code_fake, z_code))
                        Info_losses.append(info_loss.item()) # record mutual information loss
                        info_loss.backward()
                        self.optim_info.step()

                    if verbose and batch_idx % log_interval == 0 and batch_idx > 0:
                        if self.name == 'cgan':
                            print('\nEpoch {} [{}/{}] loss_D: {:.4f} loss_G: {:.4f} time: {:.2f}'.format(
                                epoch, batch_idx, len(self.data_loader),
                                d_loss.mean().item(),
                                g_loss.mean().item(),
                                time.time() - batch_time)
                            )
                        elif self.name == 'infogan':
                            print('\nEpoch {} [{}/{}] loss_D: {:.4f} loss_G: {:.4f} loss_I: {:.4f} time: {:.2f}'.format(
                                epoch, batch_idx, len(self.data_loader),
                                d_loss.mean().item(),
                                g_loss.mean().item(),
                                info_loss.mean().item(),
                                time.time() - batch_time)
                            )
                        vutils.save_image(data, os.path.join(output_dir, 'real_samples.png'), normalize=True)
                    with torch.no_grad():
                        if self.name == 'cgan':
                            viz_sample = self.NetG(viz_noise, viz_label)
                            vutils.save_image(viz_sample, os.path.join(output_dir, 'fake_samples_{}.png'.format(epoch+1)), nrow=8, normalize=True)
                        elif self.name == 'infogan':
                            viz_sample = self.NetG(viz_noise, viz_onehot, viz_code)
                            vutils.save_image(viz_sample, os.path.join(output_dir, 'fake_samples_{}.png'.format(epoch+1)), nrow=8, normalize=True)
                    batch_time = time.time()
                if save_checkpoints:
                    self.save_to(path=output_dir, name=self.name, verbose=True)
            # record epoch-average training losses
            if self.name == 'cgan':
                train_hist['D_losses'].append(torch.mean(torch.FloatTensor(D_losses)))
                train_hist['G_losses'].append(torch.mean(torch.FloatTensor(G_losses)))
                plot_cgan_loss(
                    d_loss=train_hist['D_losses'], 
                    g_loss=train_hist['G_losses'], 
                    num_epoch=epoch + 1, 
                    epoches=epoches, 
                    save_dir=output_dir
                )
            elif self.name == 'infogan':
                train_hist['D_losses'].append(torch.mean(torch.FloatTensor(D_losses)))
                train_hist['G_losses'].append(torch.mean(torch.FloatTensor(G_losses)))
                train_hist['Info_losses'].append(torch.mean(torch.FloatTensor(Info_losses)))
                plot_infogan_loss(
                    d_loss=train_hist['D_losses'], 
                    g_loss=train_hist['G_losses'], 
                    info_loss=train_hist['Info_losses'],
                    num_epoch=epoch + 1, 
                    epoches=epoches, 
                    save_dir=output_dir
                )
        # build a GIF of the training-loss plots and synthesized images
        create_gif(
            epoches=epoches, 
            save_dir=output_dir,
            gan_name_prefix=self.name
        )
        if verbose:
            logging.info('Total train time: {:.2f} s'.format(time.time() - total_time))
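
Note: the _to_onehot helper used throughout is not shown. A minimal sketch of the method, consistent with how it is called above (the body is an assumption):

import torch

def _to_onehot(self, labels, dim):
    # Hypothetical: scatter 1.0 into a zero matrix at each label's column.
    onehot = torch.zeros((labels.size(0), dim), device=labels.device)
    onehot.scatter_(1, labels.unsqueeze(1), 1.0)
    return onehot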
Example #9
    meta["name"] = (args.act_fn + str(args.layers) + args.kernel +
                    str(datetime.datetime.now()))
    meta["input_shape"], meta["output"], *dataset = utils.get_data(
        args.dataset)

    # build model
    model = utils.build_model(
        args.layers,
        args.act_fn,
        args.kernel,
        args.bias,
        meta["input_shape"],
        meta["output"],
    )
    model.compile(optimizer=args.opt, loss=args.loss, metrics=["accuracy"])

    if args.pretrain:
        model = utils.pretrain(
            model,
            dataset[0][0][:500],
            dataset[1][0][:N_EXAMPLES],
            100,
            meta["batch_size"],
            meta["opt"],
            meta["name"],
            utils.build_distribution,
            1,
        )
    utils.train(model, meta, dataset)
    utils.create_gif("./images/{}".format(meta["name"]))
Example #10

    LSTM_INPUTS = LSTM_OUTPUTS[0].detach(), LSTM_OUTPUTS[1].detach()

    # Perform one step of the optimization (on the target network)
    optim_out = optimize_model()

    if optim_out is not None:
        _, loss_huber = optim_out
        losses.append(loss_huber)

    # Update the target network, copying all weights and biases in DQN
    if i_step % TARGET_UPDATE == 0:
        target_net.load_state_dict(policy_net.state_dict())

    # actually does the rendering
    # will save the rendering png in this function call
    env.render()

    if (i_step + 1) % 500 == 0:
        print('Creating GIF')
        create_gif(i_step + 1)
        print('Done\n')

print('Completed unrolling with gradient updates and rendering')

################################################################################
################################ SAVE FINAL MODEL ##############################
################################################################################

torch.save(policy_net.state_dict(), './lstm_jbw')
# policy_net.load_state_dict(torch.load('./lstm_jbw'))
Example #11
        train_acc /= train_batchs
        print("epoch %2d---------------------------train accuracy:%.4f" %(epoch+1, train_acc))
        visualize(embeddings, nlabels, epoch, train_acc, picname="./image/%d/%d.jpg"%(loss_type, epoch))
    # testing process
    test_acc = 0.
    embeddings = np.zeros((test_batchs*batch_size, embedding_dim), dtype=np.float32)
    nlabels = np.zeros(shape=(test_batchs*batch_size,), dtype=np.int32)
    for batch in range(test_batchs):
        i, j = batch * batch_size, (batch + 1) * batch_size
        batch_images, batch_labels = mnist.test.next_batch(batch_size)
        feed_dict = {images: batch_images, labels: batch_labels}
        # Do not run train_op here: evaluation must not update the weights.
        batch_loss, batch_acc, embeddings[i:j, :] = sess.run(
            [loss, accuracy, network.embeddings], feed_dict)
        nlabels[i:j] = batch_labels
        test_acc += batch_acc
    test_acc /= test_batchs
    print("test accuracy: %.4f" %test_acc)
    return test_acc, embeddings, nlabels



if __name__ == "__main__":

    gif = ['original_softmax_loss.gif', 'modified_softmax_loss.gif', 'angular_softmax_loss.gif']
    path = './image/%d/' %loss_type
    gif_name = './image/%s' %gif[loss_type]
    train(loss_type=loss_type)
    create_gif(gif_name, path)



Example #12
import sys

sys.path.insert(0, "")

import argparse
import utils

# Set the default folder path here.
default = ""
parser = argparse.ArgumentParser(description="Create animation with the plots")
parser.add_argument("path",
                    default=default,
                    type=str,
                    help="path to folder",
                    nargs="?")
args = parser.parse_args()

if __name__ == "__main__":
    utils.create_gif(args.path)
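
Assuming the script above is saved as create_animation.py (filename hypothetical), it is run with the plots folder as its single optional argument:

python create_animation.py path/to/plots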
Example #13

    def algorithm(self, algorithm, color):
        if self.start is None or self.goal is None:
            self.curr_grid = self.clean_grid.copy()
            return

        self.reset_world('soft', new_run=True, keep_paths=True)

        # avoid double-counting runs when the same algorithm is applied twice on the same world
        if not self.applied_this_run[algorithm]:
            self.algorithm_runs[algorithm] += 1
        self.applied_this_run[algorithm] = True

        start = self.start
        goal = self.goal
        h = heuristic(self.clean_grid, goal)
        data_algo = {
            'h': h,
            'frontier': {start},
            'inner': set(),
            'g_score': {start: 0},
            'f_score': {start: h[start]},
            'come_from': {start: None}
        }
        steps = 0

        self.curr_grid = self.clean_grid.copy()

        while len(data_algo['frontier']) > 0:

            if self.step_by_step:

                for event in pygame.event.get():
                    # QUIT must be checked on its own; a KEYDOWN event can
                    # never also be a QUIT event.
                    if event.type == pygame.QUIT:
                        sys.exit(0)

                    if event.type == pygame.KEYDOWN:

                        if event.key == self.control["R"]:
                            self.paths = []
                            self.curr_grid = self.clean_grid
                            return

                        # update only when SPACE is pressed
                        if event.key == self.control["SPACE"]:

                            # apply one step of the selected algorithm
                            data_algo, done = self.step(
                                data_algo, algorithm, color)
                            steps += 1

                            if done:
                                self.print_this, self.printed_infos = \
                                    build_print_line(algorithm, steps, self.paths, self.run_num, self.printed_infos)
                                self.caption = self.print_this
                                return

            else:

                data_algo, done = self.step(data_algo, algorithm, color)
                steps += 1

                if done:
                    self.print_this, self.printed_infos = \
                        build_print_line(algorithm, steps, self.paths, self.run_num, self.printed_infos)
                    self.caption = self.print_this
                    self.update_screen()
                    if self.gif:
                        pygame.image.save(self.screen,
                                          './temp/{}.bmp'.format(steps))
                        create_gif(self.run_num, algorithm)

                    return

            self.update_screen()
            if self.gif:
                pygame.image.save(self.screen, './temp/{}.bmp'.format(steps))
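
Note: heuristic above returns a per-cell lookup (h[start] seeds the f-score). A minimal sketch, assuming a Manhattan-distance heuristic over a 2-D grid (the distance metric is an assumption):

def heuristic(grid, goal):
    # Hypothetical: Manhattan distance from every cell to the goal.
    rows, cols = len(grid), len(grid[0])
    return {(r, c): abs(r - goal[0]) + abs(c - goal[1])
            for r in range(rows) for c in range(cols)}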
Example #14
results_log_file_path = output_dir + "results_log({}).json".format(
    output_file_name)
tmp_img_path = output_dir + "tmp_" + output_file_name + ".png"

img = utils.read_img(inp_img_path)

algo = EA(img,
          small_imgs_assets_path=assets_dir,
          small_imgs_num=small_imgs_num,
          format_str="{:05d}.png")

init_time = time()
progress_imgs, out_img, score = algo.train(tmp_img_path=tmp_img_path)
process_time = time() - init_time

print("----------------------- Finish -----------------------")
print("Total processing time: {:}".format(process_time))
print("------------------------------------------------------")

with open(results_log_file_path, "w+") as f:
    results_dict = {
        "Error_score": score,
        "output_name": output_file_name,
        "Total_time": process_time
    }
    json.dump(results_dict, f, indent=4)

utils.preview_img(out_img)
utils.create_gif(out_gif_path, progress_imgs)
utils.write_img(out_img_path, out_img)