Example #1
def generate(config):
    # Create a new DCGAN object
    dcgan = DCGAN(config)

    # Load existing model from saved_models folder (you can pass different indexes to see the effect on the generated signal)
    dcgan.load()  #loads the last trained generator
    #dcgan.load(500)
    #dcgan.load(1000)
    #dcgan.load(2000)
    #dcgan.load(3000)

    # Create a DataLoader utility object
    data_loader = DataLoader(config)

    #
    # Generate a batch of new fake signals and evaluate them against the discriminator
    #

    # Select a random batch of signals
    signals = data_loader.get_training_batch()

    # Generate latent noise for generator
    noise = dcgan.generate_noise(signals)

    # Generate prediction
    gen_signal = dcgan.generator.predict(noise)

    # Evaluate prediction
    validated = dcgan.critic.predict(gen_signal)

    # Plot and save prediction
    plot_prediction(gen_signal)
    gen_signal = np.reshape(gen_signal,
                            (gen_signal.shape[0], gen_signal.shape[1]))
    np.savetxt('./output/generated_signal.csv', gen_signal, delimiter=",")
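The snippet assumes DCGAN, DataLoader and plot_prediction are importable and that a config object is built elsewhere; a minimal driver might look like the following sketch (the JSON config location is an assumption, not part of the original code):

# Hypothetical entry point; the config file path is an assumption, not from the original repo.
import json

if __name__ == "__main__":
    with open("config.json") as f:
        config = json.load(f)
    generate(config)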
Example #2
"""
# Interactive Robotics and Vision Lab (http://irvlab.cs.umn.edu/)
# Any part of this repo can be used for academic and educational purposes only
"""
## python libs
import os
import numpy as np

## local libs
from utils.data_utils import DataLoader
from nets.funieGAN_up import FUNIE_GAN_UP
from utils.plot_utils import save_val_samples_unpaired

## configure data-loader
data_dir = "/mnt/data1/color_correction_related/datasets/EUVP/"
dataset_name = "Unpaired"
data_loader = DataLoader(os.path.join(data_dir, dataset_name), dataset_name)

## create dir for log and (sampled) validation data
samples_dir = os.path.join("data/samples/funieGAN_up/", dataset_name)
checkpoint_dir = os.path.join("checkpoints/funieGAN_up/", dataset_name)
if not os.path.exists(samples_dir): os.makedirs(samples_dir)
if not os.path.exists(checkpoint_dir): os.makedirs(checkpoint_dir)

## hyper-params
num_epoch = 50
batch_size = 4
val_interval = 2000
N_val_samples = 2
save_model_interval = data_loader.num_train // batch_size
num_step = num_epoch * save_model_interval
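The last two hyper-parameters derive the training schedule from the dataset size; as a quick sanity check of that arithmetic (the dataset size below is a placeholder, not the real EUVP count):

# Worked example of the schedule arithmetic above; num_train is an assumed placeholder.
num_train = 11000                                # stand-in for data_loader.num_train
batch_size = 4
num_epoch = 50
save_model_interval = num_train // batch_size    # 2750 optimizer steps per epoch
num_step = num_epoch * save_model_interval       # 137500 total training steps
print(save_model_interval, num_step)             # -> 2750 137500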
Example #3
    cc = Configs()
    print("Loading stored model")
    model = SegnetConvLSTM(cc.hidden_dims,
                           decoder_out_channels=2,
                           lstm_nlayers=len(cc.hidden_dims),
                           vgg_decoder_config=cc.decoder_config)
    model = model.to(device)
    tu.load_model_checkpoint(model,
                             args.model_path,
                             inference=False,
                             map_location=device)
    print("Model loaded")
    # create dataloader
    tu_test_dataset = TUSimpleDataset(config.ts_root, config.ts_subdirs,
                                      config.ts_flabels)
    tu_dataloader = DataLoader(tu_test_dataset, batch_size=2, shuffle=True)
    model.train()
    with torch.no_grad():

        for batchno, (frames, targets) in enumerate(tu_dataloader):
            frames = [f.to(device) for f in frames]

            output = model(frames)
            targets_ = targets.squeeze(1).long().to(device)

            print(
                "Loss:",
                nn.CrossEntropyLoss(
                    weight=torch.FloatTensor(cc.loss_weights).to(device))(
                        output, targets_))
            output = (torch.sigmoid(output[:, 1, :, :]) > .5).float()
Example #4
        decoded = self.decoder(output, unpool_indices, unpool_sizes)

        # return a probability map of the same size of each frame input to the model
        return decoded  # (NOTE: softmax is applied inside loss for efficiency)


# this won't work if not run in parent directory
if __name__ == '__main__':
    root = '/Users/nick/Desktop/train_set/clips/'
    subdirs = ['0601', '0531', '0313-1', '0313-2']
    flabels = [
        '/Users/nick/Desktop/train_set/label_data_0601.json',
        '/Users/nick/Desktop/train_set/label_data_0531.json',
        '/Users/nick/Desktop/train_set/label_data_0313.json'
    ]

    tu_dataset = TUSimpleDataset(root, subdirs, flabels, shuffle_seed=9)

    # build data loader
    tu_dataloader = DataLoader(tu_dataset,
                               batch_size=3,
                               shuffle=True,
                               num_workers=2)
    model = SegnetConvLSTM()
    for batch_no, (list_batched_samples,
                   batched_targets) in enumerate(tu_dataloader):
        with torch.no_grad():
            out = model(list_batched_samples)
            print(out.size())
        if batch_no == 1:
            break
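Note that the model receives a list of per-frame batches rather than a single stacked tensor: PyTorch's default collate_fn zips each sample's frame list into one batched tensor per time step. A toy illustration with made-up shapes (not the TUSimple dimensions):

import torch
from torch.utils.data import DataLoader, Dataset

# Toy dataset whose samples are (list_of_frames, target), mirroring the structure
# consumed above. All shapes here are made up for illustration.
class ToyClipDataset(Dataset):
    def __len__(self):
        return 8

    def __getitem__(self, idx):
        frames = [torch.randn(3, 64, 64) for _ in range(5)]   # 5 frames per clip
        target = torch.zeros(1, 64, 64)
        return frames, target

loader = DataLoader(ToyClipDataset(), batch_size=3)
frames, targets = next(iter(loader))
print(len(frames), frames[0].shape, targets.shape)
# -> 5 torch.Size([3, 3, 64, 64]) torch.Size([3, 1, 64, 64])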
Example #5
def main(args):
    np.random.seed(config.RANDOM_SEED)
    torch.manual_seed(config.RANDOM_SEED)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(config.RANDOM_SEED)
        torch.backends.cudnn.deterministic = True

    data_path = config.DATASET[args.data_name]
    train_path = os.path.join(data_path, "train.txt")
    valid_path = os.path.join(data_path, "valid.txt")
    test_path = os.path.join(data_path, "test.txt")
    vocab_path = os.path.join(data_path, "vocab.pkl")
    max_len_delta = 40 if "penn" in args.data_name else 20
    train = DataLoader(train_path, vocab_path, max_len_delta, args.mode)
    valid = DataLoader(valid_path, vocab_path, max_len_delta, args.mode)
    test = DataLoader(test_path, vocab_path, max_len_delta, args.mode)

    save_path = '{}-{}-{}'.format(args.save, args.data_name, time.strftime("%Y%m%d-%H%M%S"))
    save_path = os.path.join(config.CHECKPOINT_DIR, save_path)

    if not os.path.exists(save_path):
        os.mkdir(save_path)
    script_path = os.path.join(save_path, 'scripts')
    scripts_to_save = [
        'train_lm.py', 'models/language_models.py', 'models/base_network.py',
        'utils/data_utils.py', 'config.py']

    if not os.path.exists(script_path):
        os.mkdir(script_path)
        for script in scripts_to_save:
            dst_file = os.path.join(script_path, os.path.basename(script))
            shutil.copyfile(script, dst_file)

    def logging(s, print_=True, log_=True):
        if print_:
            print(s)
        if log_:
            with open(os.path.join(save_path, 'log.txt'), 'a+') as f_log:
                f_log.write(s + '\n')

    if "penn" in args.data_name:
        hparams = config.HPARAMS["penn"]
    else:
        hparams = config.HPARAMS[args.data_name]

    hparams["bsz1"] = args.bsz1
    hparams["small_bsz1"] = args.small_bsz1
    hparams["bsz2"] = args.bsz2
    hparams["contrastive1"] = args.contrastive1
    hparams["contrastive2"] = args.contrastive2
    hparams["contrastive2_rl"] = args.contrastive2_rl
    hparams["rml"] = args.rml

    kwargs = {
        "train": train,
        "valid": valid,
        "test": test,
        "save_path": save_path,
        "data": None,
        "hparams": hparams
    }

    logging(str(kwargs))

    lm = LanguageModel(**kwargs)
    lm.init()
    if args.checkpoint != 'NA':
        lm.load(args.checkpoint)
    try:
        val_epoch, val_loss, val_acc1, val_acc2 = lm.fit()
        logging("val epoch: {}".format(val_epoch))
        logging("val loss : {}".format(val_loss))
        logging("val ppl  : {}".format(np.exp(val_loss)))
        logging("val acc1 : {}".format(val_acc1))
        logging("val acc2 : {}".format(val_acc2))
    except KeyboardInterrupt:
        logging("Exiting from training early")

    lm.load(lm.save_path)
    test_loss, test_acc1, test_acc2 = lm.evaluate(lm.test_dataloader, 1, args.bsz2)

    logging("test loss: {}".format(test_loss))
    logging("test ppl : {}".format(np.exp(test_loss)))
    logging("test acc1: {}".format(test_acc1))
    logging("test acc2: {}".format(test_acc2))
Example #6
epochs = cc.epochs
init_lr = cc.init_lr
batch_size = cc.batch_size
workers = cc.workers
momentum = cc.momentum
weight_decay = cc.weight_decay
hidden_dims = cc.hidden_dims
decoder_config = cc.decoder_config

# **DATA**

tu_tr_dataset = TUSimpleDataset(config.tr_root, config.tr_subdirs, config.tr_flabels, shuffle=False)#, shuffle_seed=9)
# tu_test_dataset = TUSimpleDataset(config.ts_root, config.ts_subdirs, config.ts_flabels, shuffle=False)#, shuffle_seed=9)

# build data loader
tu_train_dataloader = DataLoader(tu_tr_dataset, batch_size=batch_size, shuffle=True, num_workers=workers)
# tu_test_dataloader = DataLoader(tu_test_dataset, batch_size=cc.test_batch, shuffle=False, num_workers=4)


# **MODEL**
# output size must have dimension (B, C..), where C = number of classes
model = SegnetConvLSTM(hidden_dims, decoder_out_channels=2, lstm_nlayers=len(hidden_dims), vgg_decoder_config=decoder_config)
if cc.load_model:
    trainu.load_model_checkpoint(model, '../train-results/model.torch', inference=False, map_location=device)

model.to(device)

# define loss function (criterion) and optimizer
# using crossentropy for weighted loss on background and lane classes
criterion = nn.CrossEntropyLoss(weight=torch.FloatTensor([0.02, 1.02])).to(device)
# criterion = nn.BCEWithLogitsLoss(pos_weight=torch.FloatTensor([17.])).to(device)
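The [0.02, 1.02] weights make an error on a lane pixel count roughly 51 times more than an error on a background pixel. A toy illustration of that effect, using reduction='none' so the per-element contributions are visible (the logits are made up):

import torch
import torch.nn as nn

# Illustrative only: shows how the class weights scale per-element cross-entropy losses.
criterion_none = nn.CrossEntropyLoss(weight=torch.FloatTensor([0.02, 1.02]),
                                     reduction='none')

logits = torch.tensor([[3.0, 0.0],    # lane pixel wrongly predicted as background
                       [0.0, 3.0]])   # background pixel wrongly predicted as lane
targets = torch.tensor([1, 0])

print(criterion_none(logits, targets))   # ~[3.11, 0.06]: the lane miss costs ~51x more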
Example #7
def train(config):

    # Create a new DCGAN object
    dcgan = DCGAN(config, training=True)

    # Create a DataLoader utility object
    data_loader = DataLoader(config)

    # Adversarial ground truths
    valid = np.ones((config["batch_size"], 1))
    fake = np.zeros((config["batch_size"], 1))

    metrics = []

    for epoch in range(config["epochs"]):

        # Select a random batch of signals
        signals = data_loader.get_training_batch()

        # Generate latent noise for generator
        noise = dcgan.generate_noise(signals)

        # Generate a batch of new fake signals and evaluate them against the discriminator
        gen_signal = dcgan.generator.predict(noise)
        validated = dcgan.critic.predict(gen_signal)

        #Sample real and fake signals

        # ---------------------
        #  Calculate metrics
        # ---------------------

        # Calculate metrics on best fake data
        metrics_index = np.argmax(validated)

        #Calculate metrics on first fake data
        #metrics_index = 0

        generated = gen_signal[metrics_index].flatten()
        reference = signals[metrics_index].flatten()
        fft_metric, fft_ref, fft_gen = loss_fft(reference, generated)
        dtw_metric = dtw_distance(reference, generated)
        cc_metric = cross_correlation(reference, generated)

        # ---------------------
        #  Train Discriminator
        # ---------------------
        d_loss_real = dcgan.critic.model.train_on_batch(
            signals, valid)  #train on real data
        d_loss_fake = dcgan.critic.model.train_on_batch(
            gen_signal, fake)  #train on fake data
        d_loss = 0.5 * np.add(d_loss_fake, d_loss_real)  #mean loss

        # ---------------------
        #  Train Generator
        # ---------------------

        g_loss = dcgan.combined.train_on_batch(noise,
                                               valid)  #train combined model

        # Plot the progress
        print(
            "%d [D loss: %f, acc: %f] [G loss: %f] [FFT Metric: %f] [DTW Metric: %f] [CC Metric: %f]"
            % (epoch, d_loss[0], d_loss[1], g_loss, fft_metric, dtw_metric,
               cc_metric[0]))
        metrics.append([[d_loss[0]], [g_loss], [fft_metric], [dtw_metric],
                        [cc_metric[0]]])

        # If at save interval => save generated image samples
        if epoch % config["sample_interval"] == 0:
            if config["save_sample"]:
                dcgan.save_sample(epoch, signals)

            if config["plot_losses"]:
                plot_losses(metrics, epoch)

            if config["save_models"]:
                dcgan.save_critic(epoch)
                dcgan.save_generator(epoch)

    dcgan.save_sample(epoch, signals)
    dcgan.save_critic()
    dcgan.save_generator()
    plot_losses(metrics, epoch)
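The keys this function reads from config ("batch_size", "epochs", "sample_interval", "save_sample", "plot_losses", "save_models") suggest a dictionary along these lines; the values below are placeholders, not the repository's defaults:

# Hypothetical config for train(); the keys are the ones read above, the values are placeholders.
config = {
    "batch_size": 32,
    "epochs": 3000,
    "sample_interval": 100,    # epochs between saved samples / plots / checkpoints
    "save_sample": True,
    "plot_losses": True,
    "save_models": True,
}

train(config)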