def train_cycle_gan(data_root, semi_supervised=False):
    opt = get_opts()

    ensure_dir(models_prefix)
    ensure_dir(images_prefix)

    cycle_gan = CycleGAN(device,
                         models_prefix,
                         opt["lr"],
                         opt["b1"],
                         train=True,
                         semi_supervised=semi_supervised)
    data = DataLoader(data_root=data_root,
                      image_size=(opt['img_height'], opt['img_width']),
                      batch_size=opt['batch_size'])

    total_images = len(data.names)
    print("Total Training Images", total_images)

    total_batches = int(ceil(total_images / opt['batch_size']))

    for epoch in range(cycle_gan.epoch_tracker.epoch, opt['n_epochs']):
        for iteration in range(total_batches):

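            # Resume support: skip batches already covered by the
            # epoch/iteration recorded in the EpochTracker checkpoint.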
            if (epoch == cycle_gan.epoch_tracker.epoch
                    and iteration < cycle_gan.epoch_tracker.iter):
                continue

            y, x = next(data.data_generator(iteration))

            real_A = Variable(x.type(Tensor))
            real_B = Variable(y.type(Tensor))

            cycle_gan.set_input(real_A, real_B)
            cycle_gan.train()

            message = (
                "\r[Epoch {}/{}] [Batch {}/{}] [DA:{}, DB:{}] [GA:{}, GB:{}, cycleA:{}, cycleB:{}, G:{}]"
                .format(epoch, opt["n_epochs"], iteration, total_batches,
                        cycle_gan.loss_disA.item(), cycle_gan.loss_disB.item(),
                        cycle_gan.loss_genA.item(), cycle_gan.loss_genB.item(),
                        cycle_gan.loss_cycle_A.item(),
                        cycle_gan.loss_cycle_B.item(), cycle_gan.loss_G))
            print(message)
            logger.info(message)

            if iteration % opt['sample_interval'] == 0:
                cycle_gan.save_progress(images_prefix, epoch, iteration)
        cycle_gan.save_progress(images_prefix,
                                epoch,
                                total_batches,
                                save_epoch=True)


def test_cycle_gan(data_root, semi_supervised=True):
    opt = get_opts()

    ensure_dir(models_prefix)
    ensure_dir(images_prefix)

    cycle_gan = CycleGAN(device,
                         models_prefix,
                         opt["lr"],
                         opt["b1"],
                         train=False,
                         semi_supervised=semi_supervised)
    data = DataLoader(data_root=data_root,
                      image_size=(opt['img_height'], opt['img_width']),
                      batch_size=1,
                      train=False)

    total_images = len(data.names)
    print("Total Testing Images", total_images)

    loss_A = 0.0
    loss_B = 0.0
    name_loss_A = []
    name_loss_B = []

    for i in range(total_images):
        print(i, "/", total_images)
        x, y = next(data.data_generator(i))
        name = data.names[i]

        real_A = Variable(x.type(Tensor))
        real_B = Variable(y.type(Tensor))

        cycle_gan.set_input(real_A, real_B)
        cycle_gan.test()
        cycle_gan.save_image(images_prefix, name)
        loss_A += cycle_gan.test_A
        loss_B += cycle_gan.test_B
        name_loss_A.append((cycle_gan.test_A, name))
        name_loss_B.append((cycle_gan.test_B, name))

    info = "Average Loss A:{} B :{}".format(loss_A / (1.0 * total_images),
                                            loss_B / (1.0 * total_images))
    print(info)
    logger.info(info)
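    # sort ascending so the lowest-loss images come first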
    name_loss_A = sorted(name_loss_A)
    name_loss_B = sorted(name_loss_B)
    print("top 10 images")
    print(name_loss_A[:10])
    print(name_loss_B[:10])
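These two functions rely on module-level context the snippet does not show. A minimal sketch of the assumed globals, with names inferred from the calls above and from the import block in Example 5 below; the path prefixes and the CycleGAN module location are placeholders, not from the original:

from math import ceil

import torch
from torch.autograd import Variable

from cycle_gan import CycleGAN  # assumed module layout
from data_loader import DataLoader
from utils import ensure_dir, get_opts
from logger import logger

device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
Tensor = torch.cuda.FloatTensor if torch.cuda.is_available() else torch.FloatTensor

models_prefix = 'saved_models/'  # placeholder paths
images_prefix = 'saved_images/'
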
Example 3

def train_cycle_gan(data_root, semi_supervised=False):
    opt = get_opts()

    ensure_dir(models_prefix)
    ensure_dir(images_prefix)

    cycle_gan = CycleGAN(device, models_prefix, opt["lr"], opt["b1"],
                         train=True, semi_supervised=semi_supervised)
    # data = DataLoader(data_root=data_root,
    #                   image_size=(opt['img_height'], opt['img_width']),
    #                   batch_size=opt['batch_size'])
    dataset = TrainDataSet(data_root=data_root, image_size=(opt['img_height'], opt['img_width']))
    print("dataset : ", dataset)
    dataLoader = DataLoader(dataset, batch_size=1)

    total_images = len(dataset.names)
    print("Total Training Images", total_images)

    # number of batches produced by the torch DataLoader (batch_size=1 above)
    total_batches = len(dataLoader)

    for epoch in range(5):
        for i, data in enumerate(dataLoader):
            real_A, real_B = data

            real_A = Variable(real_A.type(Tensor))
            real_B = Variable(real_B.type(Tensor))

            cycle_gan.set_input(real_A, real_B)
            cycle_gan.train()

            message = (
                "\r[Epoch {}/{}] [Batch {}/{}] [DA:{}, DB:{}] [GA:{}, GB:{}, cycleA:{}, cycleB:{}, G:{}]"
                .format(epoch, opt["n_epochs"], i, total_batches,
                        cycle_gan.loss_disA.item(), cycle_gan.loss_disB.item(),
                        cycle_gan.loss_genA.item(), cycle_gan.loss_genB.item(),
                        cycle_gan.loss_cycle_A.item(),
                        cycle_gan.loss_cycle_B.item(), cycle_gan.loss_G))
            print(message)
            logger.info(message)
Example 4

import discord
import logging
from discord.ext import commands
import utils
import sys
import os
import random

CONFIG_FILE = 'discordbot.config'

options = utils.get_opts(sys.argv[1:])

if not utils.check_dir('logs'):
    os.mkdir('logs')
logger = logging.getLogger('discord')
logger.setLevel(logging.INFO)  # Change this to get DEBUG info if necessary
handler = logging.FileHandler(filename='logs/discordbot.log',
                              encoding='utf-8',
                              mode='w')
handler.setFormatter(
    logging.Formatter('%(asctime)s:%(levelname)s:%(name)s: %(message)s'))
logger.addHandler(handler)

# Read configuration file/command line options
if options.config:
    config = utils.read_config(file=options.config)
else:
    config = utils.read_config()
logger.info(f'Reading Configuration file: {config}')

# Instantiate Bot
Example 5

import os
import numpy as np
import torch
from torch.autograd import Variable
from data_loader import DataLoader
from networks import GeneratorUNet, GeneratorResNet, Discriminator, ResNetBlock
from utils import ensure_dir, get_opts, weights_init_normal, sample_images
from logger import logger

device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
Tensor = torch.cuda.FloatTensor if torch.cuda.is_available() else torch.FloatTensor

data = DataLoader(data_root='../gta/', image_size=(512, 512), batch_size=16)
opt = get_opts()

ensure_dir('saved_images/%s' % 'GTA')
ensure_dir('saved_models/%s' % 'GTA')

criterion_GAN = torch.nn.MSELoss().to(device)
criterion_pixelwise = torch.nn.L1Loss().to(device)

lambda_pixel = 10

generator = GeneratorUNet().to(device)
discriminator = Discriminator().to(device)

generator = torch.nn.DataParallel(generator,
                                  list(range(torch.cuda.device_count())))
discriminator = torch.nn.DataParallel(discriminator,
                                      list(range(torch.cuda.device_count())))
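The snippet ends before its training loop. A minimal sketch of how a pix2pix-style step could combine criterion_GAN, criterion_pixelwise, and lambda_pixel; the Adam hyperparameters and the discriminator's two-argument (input, condition) call signature are assumptions, not taken from the original:

# Hypothetical continuation -- optimizer settings and the discriminator's
# conditional call signature are assumptions.
optimizer_G = torch.optim.Adam(generator.parameters(),
                               lr=opt['lr'], betas=(opt['b1'], 0.999))
optimizer_D = torch.optim.Adam(discriminator.parameters(),
                               lr=opt['lr'], betas=(opt['b1'], 0.999))

for i in range(len(data.names) // opt['batch_size']):
    # pairing order mirrors the custom DataLoader usage in the first example
    real_B, real_A = next(data.data_generator(i))
    real_A, real_B = real_A.type(Tensor), real_B.type(Tensor)

    # generator step: fool the discriminator while staying close to the target
    optimizer_G.zero_grad()
    fake_B = generator(real_A)
    pred_fake = discriminator(fake_B, real_A)
    loss_GAN = criterion_GAN(pred_fake, torch.ones_like(pred_fake))
    loss_pixel = criterion_pixelwise(fake_B, real_B)
    loss_G = loss_GAN + lambda_pixel * loss_pixel
    loss_G.backward()
    optimizer_G.step()

    # discriminator step: real pairs vs. detached fake pairs
    optimizer_D.zero_grad()
    pred_real = discriminator(real_B, real_A)
    pred_fake = discriminator(fake_B.detach(), real_A)
    loss_D = 0.5 * (criterion_GAN(pred_real, torch.ones_like(pred_real)) +
                    criterion_GAN(pred_fake, torch.zeros_like(pred_fake)))
    loss_D.backward()
    optimizer_D.step()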
Example 6

import os
import torch

from easydict import EasyDict

from data_loader import CubDataset, FlickrDataset, DefaultCaptionTokenizer, BertCaptionTokenizer
from data_preprocess import CubDataPreprocessor, get_preprocessor
from self_attn.self_attn_gan import SelfAttnGAN, SelfAttnBert
from utils import get_opts

torch.set_default_tensor_type(torch.cuda.FloatTensor)

device = "cuda" if torch.cuda.is_available() else "cpu"
MAX_CAPTION_SIZE = 30

opts = EasyDict(get_opts("config/bert_attn_flickr.yaml"))

output_dir = os.path.join("checkpoints/", opts.CHECKPOINTS_DIR)
data_dir = 'dataset/'
epoch_file = "epoch.txt"
log_file = "logs.log"

print("Dataset: ", opts.DATASET_NAME, opts.DATA_DIR)
preprocessor = get_preprocessor(opts.DATASET_NAME, opts.DATA_DIR)

if opts.TEXT.ENCODER == "lstm":
    ixtoword = preprocessor.get_idx_to_word()
    tokenizer = DefaultCaptionTokenizer(preprocessor.get_word_to_idx(),
                                        MAX_CAPTION_SIZE)
else:
    tokenizer = BertCaptionTokenizer(MAX_CAPTION_SIZE)
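The snippet stops after tokenizer selection. A plausible continuation, mirroring the dataset and loader construction in Example 7 below; the FlickrDataset constructor is assumed to match CubDataset's:

train_set = FlickrDataset(preprocessor, opts, tokenizer, mode='train')  # signature assumed
train_loader = torch.utils.data.DataLoader(train_set,
                                           batch_size=opts.TRAIN.BATCH_SIZE,
                                           shuffle=True, drop_last=True)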
Example 7

import torch

from easydict import EasyDict

from attnGan.attn_gan import AttnGAN
from data_loader import CubDataset, DefaultCaptionTokenizer

from data_preprocess import CubDataPreprocessor, get_preprocessor
from utils import get_opts

device = "cuda" if torch.cuda.is_available() else "cpu"

output_dir = "checkpoints/attnGAN"
data_dir = 'dataset/'
epoch_file = "epoch.txt"
log_file = "logs.log"

opts = EasyDict(get_opts("config/bird.yaml"))
MAX_CAPTION_SIZE = 30

preprocessor = get_preprocessor(opts.DATASET_NAME, opts.DATA_DIR)
ixtoword = preprocessor.get_idx_to_word()
tokenizer = DefaultCaptionTokenizer(preprocessor.get_word_to_idx(), MAX_CAPTION_SIZE)
train_set = CubDataset(preprocessor, opts, tokenizer, mode='train')
val_set = CubDataset(preprocessor, opts, tokenizer, mode='val')
train_loader = torch.utils.data.DataLoader(train_set, batch_size=opts.TRAIN.BATCH_SIZE, shuffle=True, drop_last=True)
val_loader = torch.utils.data.DataLoader(val_set, batch_size=opts.TRAIN.BATCH_SIZE, shuffle=True, drop_last=True)

attn_gan = AttnGAN(device, output_dir, opts, ixtoword, train_loader, val_loader)
attn_gan.train()
Example 8

import torch

from easydict import EasyDict

from damsm.damsm import Damsm
from damsm.damsm_bert import DamsmBert

from data_loader import CubDataset, DefaultCaptionTokenizer, BertCaptionTokenizer, FlickrDataset
from data_preprocess import get_preprocessor
from utils import get_opts, make_dir, save_checkpoint, EpochTracker

device = "cuda" if torch.cuda.is_available() else "cpu"

output_directory = "checkpoints"
epoch_file = "epoch.txt"
log_file = "logs.log"

UPDATE_INTERVAL = 5
MAX_CAPTION_SIZE = 30
opts = EasyDict(get_opts("config/damsm_bert_bird.yaml"))


def create_loader(opts):
    print("Dataset: ", opts.DATASET_NAME)
    preprocessor = get_preprocessor(opts.DATASET_NAME, opts.DATA_DIR)

    if opts.TEXT.ENCODER != 'bert':
        ixtoword = preprocessor.get_idx_to_word()
        tokenizer = DefaultCaptionTokenizer(preprocessor.get_word_to_idx(),
                                            MAX_CAPTION_SIZE)
    else:
        tokenizer = BertCaptionTokenizer(MAX_CAPTION_SIZE)
        ixtoword = tokenizer.tokenizer.ids_to_tokens

    if opts.DATASET_NAME == "cub":
        pass  # the original snippet is truncated at this point

Example 9

def main(config):
    # ... (the model, optimizer, and train/test loaders are constructed
    # earlier in the original source; that part of the snippet is missing)
    model_trainer = Trainer(model, model.criterion, optimizer, config,
                            train_loader, config.n)

    final_train_loss = model_trainer.train_model()
    final_test_loss = model_trainer.eval_model(test_loader,
                                               directory="test",
                                               save_data=True)

    print("Final loss ->",
          "\t Train loss: ",
          final_train_loss,
          "\t Test loss: ",
          final_test_loss,
          "\n",
          "best_model_index: ",
          model_trainer.best_model_epoch,
          ", val loss: ",
          model_trainer.min_val_loss,
          " final epoch: ",
          model_trainer.last_epoch,
          " Run time: ",
          model_trainer.train_time,
          file=open("train.log", "a"),
          flush=True)


if __name__ == '__main__':

    config = get_opts()
    main(config)
Example 10

def main():
    global best_result
    model = DORN()
    opts = utils.get_opts()
    epoch_tracker = utils.EpochTracker(epoch_file)

    train_loader, val_loader = create_loader(opts)

    name = output_directory + 'checkpoint-' + str(
        epoch_tracker.epoch) + '.pth.tar'
    if os.path.exists(name):
        checkpoint = torch.load(name)

        start_epoch = checkpoint['epoch'] + 1
        best_result = checkpoint['best_result']
        optimizer = checkpoint['optimizer']
        iteration = checkpoint['iteration']

        # solve 'out of memory'
        model = checkpoint['model']

        print("=> loaded checkpoint (epoch {})".format(checkpoint['epoch']))

        # clear memory
        del checkpoint
        # del model_dict
        if torch.cuda.is_available():
            torch.cuda.empty_cache()
    else:
        start_epoch = 0
        iteration = 0
        # different modules have different learning rate
        train_params = [{
            'params': model.get_1x_lr_params(),
            'lr': opts.lr
        }, {
            'params': model.get_10x_lr_params(),
            'lr': opts.lr * 10
        }]

        optimizer = torch.optim.SGD(train_params,
                                    lr=opts.lr,
                                    momentum=opts.momentum,
                                    weight_decay=opts.weight_decay)

        # You can use DataParallel() whether you use Multi-GPUs or not
        if torch.cuda.is_available():
            model = nn.DataParallel(model).cuda()

    # when training, use reduceLROnPlateau to reduce learning rate
    scheduler = lr_scheduler.ReduceLROnPlateau(optimizer,
                                               'min',
                                               patience=opts.lr_patience)

    # loss function
    criterion = criteria.ordLoss(device)

    if not os.path.exists(output_directory):
        os.makedirs(output_directory)

    best_txt = os.path.join(output_directory, 'best.txt')
    config_txt = os.path.join(output_directory, 'config.txt')

    # create log
    log_path = os.path.join(
        output_directory, 'logs',
        datetime.now().strftime('%b%d_%H-%M-%S') + '_' + socket.gethostname())
    if os.path.isdir(log_path):
        shutil.rmtree(log_path)
    os.makedirs(log_path)
    logger = SummaryWriter(log_path)

    for epoch in range(start_epoch, opts.epochs):

        # remember change of the learning rate
        for i, param_group in enumerate(optimizer.param_groups):
            old_lr = float(param_group['lr'])
            logger.add_scalar('Lr/lr_' + str(i), old_lr, epoch)

        train(train_loader, model, criterion, optimizer, epoch, logger, device,
              opts, iteration)  # train for one epoch
        result, img_merge = validate(val_loader, model, epoch, logger,
                                     opts)  # evaluate on validation set
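        # reset the resume counter: epochs after the resumed one start at batch 0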
        iteration = 0
        # remember best rmse and save checkpoint
        is_best = result.rmse < best_result.rmse
        if is_best:
            best_result = result
            with open(best_txt, 'w') as txtfile:
                txtfile.write(
                    "epoch={}, rmse={:.3f}, absrel={:.3f}, log10={:.3f}, d1={:.3f}, d2={:.3f}, d3={:.3f}, "
                    "t_gpu={:.4f}".format(epoch, result.rmse, result.absrel,
                                          result.lg10, result.delta1,
                                          result.delta2, result.delta3,
                                          result.gpu_time))
            if img_merge is not None:
                img_filename = output_directory + '/comparison_best.png'
                utils.save_image(img_merge, img_filename)

        # save checkpoint for each epoch
        utils.save_checkpoint(
            {
                'args': opts,
                'epoch': epoch,
                'model': model,
                'best_result': best_result,
                'optimizer': optimizer,
                'iteration': iteration
            }, is_best, epoch, output_directory)

        epoch_tracker.write(epoch)
        # reduce the learning rate when absrel stops improving
        scheduler.step(result.absrel)

    logger.close()