Example #1
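# Builds train/gallery/query DataLoaders, switching between data.DATA2, data.DATA, and data_ng.DATA based on args.grouping and args.random_sampling.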
def loader(args):
    if args.grouping:
        if args.random_sampling:
            train_loader = torch.utils.data.DataLoader(
                data.DATA2(args, mode='train'),
                batch_size=args.train_batch,
                num_workers=args.workers,
                shuffle=True)
            gallery_loader = torch.utils.data.DataLoader(
                data.DATA2(args, mode='gallery'),
                batch_size=40,
                num_workers=args.workers,
                shuffle=False)
            query_loader = torch.utils.data.DataLoader(
                data.DATA2(args, mode='query'),
                batch_size=args.test_batch,
                num_workers=args.workers,
                shuffle=False)

        else:
            train_loader = torch.utils.data.DataLoader(
                data.DATA(args, mode='train'),
                batch_size=args.train_batch,
                num_workers=args.workers,
                shuffle=True)
            gallery_loader = torch.utils.data.DataLoader(
                data.DATA(args, mode='gallery'),
                batch_size=40,
                num_workers=args.workers,
                shuffle=False)
            query_loader = torch.utils.data.DataLoader(
                data.DATA(args, mode='query'),
                batch_size=args.test_batch,
                num_workers=args.workers,
                shuffle=False)
    else:
        train_loader = torch.utils.data.DataLoader(
            data_ng.DATA(args, mode='train'),
            batch_size=args.train_batch,
            num_workers=args.workers,
            shuffle=True)
        gallery_loader = torch.utils.data.DataLoader(
            data_ng.DATA(args, mode='gallery'),
            batch_size=40,
            num_workers=args.workers,
            shuffle=False)
        query_loader = torch.utils.data.DataLoader(
            data_ng.DATA(args, mode='query'),
            batch_size=args.test_batch,
            num_workers=args.workers,
            shuffle=False)

    return train_loader, gallery_loader, query_loader
Example #2
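# Pre-trains the ADDA source model (feature extractor + label predictor) with cross-entropy, saving the checkpoint with the best validation accuracy.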
def main(args):
    src_train_loader = torch.utils.data.DataLoader(data.DATA(
        args, mode='train', dataset=args.source_dataset),
                                                   batch_size=args.train_batch,
                                                   num_workers=args.workers,
                                                   shuffle=True)
    src_test_loader = torch.utils.data.DataLoader(data.DATA(
        args, mode='test', dataset=args.source_dataset),
                                                  batch_size=args.train_batch,
                                                  num_workers=args.workers,
                                                  shuffle=False)

    feature_extractor, label_predictor, adda_discriminator = models.ADDA(args)
    feature_extractor.cuda(), label_predictor.cuda(), adda_discriminator.cuda()
    optim = torch.optim.Adam(
        list(feature_extractor.parameters()) +
        list(label_predictor.parameters()))
    lr_schedule = torch.optim.lr_scheduler.ReduceLROnPlateau(optim,
                                                             patience=1,
                                                             verbose=True)
    criterion = torch.nn.CrossEntropyLoss()

    best_accuracy = 0
    for epoch in range(1, args.epochs + 1):
        feature_extractor.train(), label_predictor.train()
        train_loss, train_accuracy = do_epoch(feature_extractor,
                                              label_predictor,
                                              src_train_loader,
                                              criterion,
                                              optim=optim)

        feature_extractor.eval(), label_predictor.eval()
        with torch.no_grad():
            val_loss, val_accuracy = do_epoch(feature_extractor,
                                              label_predictor,
                                              src_test_loader,
                                              criterion,
                                              optim=None)

        print(
            f'EPOCH {epoch:03d}: train_loss={train_loss:.4f}, train_accuracy={train_accuracy:.4f} '
            f'val_loss={val_loss:.4f}, val_accuracy={val_accuracy:.4f}')

        if val_accuracy > best_accuracy:
            print('Saving model...')
            best_accuracy = val_accuracy
            torch.save(
                feature_extractor.state_dict(),
                os.path.join(args.save_dir, 'source_feature_extractor.pt'))
            torch.save(label_predictor.state_dict(),
                       os.path.join(args.save_dir, 'label_predictor.pt'))

        lr_schedule.step(val_loss)
Example #3
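# TF1 GAN training loop: alternates discriminator/generator updates and periodically saves sample grids and model checkpoints.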
def train():
    with tf.Graph().as_default():

        global_step = tf.Variable(0, trainable=False)

        z_h, images_h = Holder()

        D_logits_real, D_logits_fake, D_logits_fake_for_G = \
          arch.inference(images_h, z_h)

        sampler = getattr(arch, FLAGS.G_type)(z_h, reuse=True, bn_train=False)

        if FLAGS.loss == 'lsgan':
            G_loss, D_loss = arch.loss_l2(D_logits_real, D_logits_fake,
                                          D_logits_fake_for_G)
        else:
            G_loss, D_loss = arch.loss_sigmoid(D_logits_real, D_logits_fake,
                                               D_logits_fake_for_G)

        G_vars, D_vars = GetVars()

        G_train_op, D_train_op = arch.train(G_loss, D_loss, G_vars, D_vars,
                                            global_step)

        data_set = data.DATA(FLAGS.batch_size).load()

        sess = sess_init()

        tf.train.start_queue_runners(sess=sess)

        saver = tf.train.Saver()

        for step in range(FLAGS.max_steps):
            z_v, images_v = GenValsForHolder(data_set, sess)

            _, errD = sess.run([D_train_op, D_loss],
                               feed_dict={
                                   z_h: z_v,
                                   images_h: images_v
                               })

            _, errG = sess.run([G_train_op, G_loss], feed_dict={z_h: z_v})

            if step % 100 == 0:
                print "step = %d, errD = %f, errG = %f" % (step, errD, errG)

            if step % 1000 == 0:
                samples = sess.run(sampler, feed_dict={z_h: z_v})
                save_images(samples, [8, 8],
                            './samples/train_{:d}.png'.format(step))
                if step <= 10000:
                    save_images(images_v, [8, 8],
                                './samples_real/train_{:d}.png'.format(step))

            if step % 10000 == 0:
                saver.save(sess,
                           '{0}/{1}.model'.format(FLAGS.checkpoint_dir,
                                                  step), global_step)
Example #4
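# Sampling script: restores the latest checkpoint, then generates and saves 100 grids of images from the generator.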
def train():
    with tf.Graph().as_default():

        global_step = tf.Variable(0, trainable=False)

        z_h, images_h = Holder()

        D_logits_real, D_logits_fake, D_logits_fake_for_G = \
          arch.inference(images_h, z_h)

        sampler = getattr(arch, FLAGS.G_type)(z_h, reuse=True, bn_train=False)

        G_loss, D_loss = arch.loss_l2(D_logits_real, D_logits_fake,
                                      D_logits_fake_for_G)

        G_vars, D_vars = GetVars()

        G_train_op, D_train_op = arch.train(G_loss, D_loss, G_vars, D_vars,
                                            global_step)

        data_set = data.DATA(FLAGS.batch_size).load()

        sess = sess_init()

        tf.train.start_queue_runners(sess=sess)

        saver = tf.train.Saver()
        checkpoint_dir = FLAGS.checkpoint_dir
        ckpt = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir)
        if ckpt and ckpt.model_checkpoint_path:
            ckpt_name = os.path.basename(ckpt.model_checkpoint_path)
            saver.restore(sess, os.path.join(checkpoint_dir, ckpt_name))

        for step in range(100):
            z_v, images_v = GenValsForHolder(data_set, sess)

            samples = sess.run(sampler, feed_dict={z_h: z_v})
            save_images(samples, [8, 8],
                        './samples/train_{:d}.png'.format(step))
Example #5
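# Test-time script: builds the test DataLoader and restores a trained baseline segmentation model from args.resume.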
    #         gts.append(gt)

    # gts, preds = np.concatenate(gts), np.concatenate(preds)
    
    # return mean_iou_score(preds, gts)

if __name__ == '__main__':
    
    args = parser.arg_parse()

    ''' setup GPU '''
    torch.cuda.set_device(args.gpu)

    ''' prepare data_loader '''
    print('===> prepare data loader ...')
    test_loader = torch.utils.data.DataLoader(data.DATA(args, mode='test'),  # so the TA can check my code
                                              batch_size=args.test_batch, 
                                              num_workers=args.workers,
                                              shuffle=True)
    # test_loader = torch.utils.data.DataLoader(data.DATA(args, mode='val'),
    #                                           batch_size=args.train_batch, 
    #                                           num_workers=args.workers,
    #                                           shuffle=True)
    ''' prepare model '''
    model = models.Baseline_model(args).cuda()
    # model = models.deeplabv3p(input_channel=3, num_class=9, output_stride=16).cuda()


    ''' resume saved model '''
    checkpoint = torch.load(args.resume)
    model.load_state_dict(checkpoint)
Example #6
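# Inference setup: builds a DataLoader over the 'save' split and restores a trained Net or Net_improved checkpoint.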
import data

import numpy as np
# import torch.nn as nn
# import torch.nn.functional as F
# import torch.optim as optim

from PIL import Image

if __name__ == '__main__':
    args = parser.arg_parse()
    ''' setup GPU '''
    torch.cuda.set_device(args.gpu)
    ''' prepare data_loader '''
    print('===> prepare data loader ...')
    save_loader = torch.utils.data.DataLoader(data.DATA(args, mode='save'),
                                              batch_size=args.test_batch,
                                              num_workers=args.workers,
                                              shuffle=False)
    ''' prepare model '''
    if args.model == 'Net':
        model = models.Net(args)
        model.cuda()  # load model to gpu
    elif args.model == 'Net_improved':
        model = models.Net_improved(args)
        model.cuda()  # load model to gpu
    ''' resume saved model '''
    checkpoint = torch.load(args.resume)
    model.load_state_dict(checkpoint)

    model.eval()
Example #7
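# Tail of a discriminator definition, followed by multi-GPU session setup that splits the real-data placeholder across DEVICES.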
    if bn:
        output = Normalize('Discriminator.BN4', [0, 2, 3], output)
    output = nonlinearity(output)

    output = tf.reshape(output, [-1, 4 * 4 * 8 * dim])
    output = lib.ops.linear.Linear('Discriminator.Output', 4 * 4 * 8 * dim, 1,
                                   output)

    lib.ops.conv2d.unset_weights_stdev()
    lib.ops.deconv2d.unset_weights_stdev()
    lib.ops.linear.unset_weights_stdev()

    return tf.reshape(output, [-1])


data_set = data.DATA(64).load()

Generator, Discriminator = GeneratorAndDiscriminator()

config = tf.ConfigProto(allow_soft_placement=True)
config.gpu_options.allow_growth = True
with tf.Session(config=config) as session:

    all_real_data_conv = tf.placeholder(tf.int32,
                                        shape=[BATCH_SIZE, 3, 64, 64])
    if tf.__version__.startswith('1.'):
        split_real_data_conv = tf.split(all_real_data_conv, len(DEVICES))
    else:
        split_real_data_conv = tf.split(0, len(DEVICES), all_real_data_conv)
    gen_costs, disc_costs = [], []
Example #8
"""

import os
os.environ["CUDA_VISIBLE_DEVICES"]="1"
import data
import model
import config
import datetime

if __name__ == "__main__":
    log_name = "{}_{}_{}.txt".format(
        datetime.datetime.now().strftime("%Y%m%d"), config.BATCH_SIZE,
        config.NUM_EPOCHS)
    with open(os.path.join(config.LOG_DIR, log_name), "w") as log:
        log.write(str(datetime.datetime.now()) + "\n")
        log.write("Use Pretrained Weights: " + str(config.USE_PRETRAINED) + "\n")
        log.write("Pretrained Model: " + config.PRETRAINED + "\n")
        # READ DATA
        train_data = data.DATA(config.TRAIN_DIR)
        print("Train Data Loaded")
        # BUILD MODEL
        model = model.MODEL()
        print("Model Initialized")
        model.build()
        print("Model Built")
        # TRAIN MODEL
        model.train(train_data, log)
        print("Model Trained")
        # TEST MODEL
        test_data = data.DATA(config.TEST_DIR)
        print("Test Data Loaded")
        model.test(test_data, log)
        print("Image Reconstruction Done")
Example #9
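# Trains an AutoEncoder on motion data with Adam, logging training info to TensorBoard via SummaryWriter.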
def save_model(model, save_path):
    torch.save(model.state_dict(), save_path)

save_dir = 'model_dir'
random_seed = 0
gpu_id = 0
epochs = 100

if __name__=='__main__':
    torch.cuda.set_device(gpu_id)
    np.random.seed(random_seed)
    torch.manual_seed(random_seed)
    torch.cuda.manual_seed(random_seed)
    print('=====>Prepare dataloader ...')
    train_loader = torch.utils.data.DataLoader(data.DATA(mode='train'),
                                               batch_size=1,
                                               shuffle=True)
    print('=====>Prepare model ...')
    model = network_new.AutoEncoder()
    model.cuda()
    # optimizer and log writer
    optimizer = torch.optim.Adam(model.parameters(), lr=0.0001) 
    writer = SummaryWriter(os.path.join(save_dir, 'train_info'))
    print('=====>Start training ...')
    iters = 0
    for epoch in range(1, epochs+1):     
        model.train()
        for idx, motion in enumerate(train_loader):
            train_info = 'Epoch: [{0}][{1}/{2}]'.format(epoch, idx+1, len(train_loader))
Example #10
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 26 17:56:05 2018

@author: rahul
"""

import data
import model
import config
import utilities


if __name__ == "__main__":
    # READ DATA
    train_data = data.DATA()
    train_data.build_dataset(config.TRAIN_FILENAME)
    # BUILD MODEL
    net = model.MODEL()
    net.build()
    # TRAIN MODEL
    net.train(train_data)
    # PLOT EMBEDDINGS
    utilities.plot_with_labels(net.embeddings, train_data)
Example #11
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 26 14:52:40 2019

@author: ashima.garg
"""
#Classifying Names with a Character Level RNN

import data
import model
import config

if __name__ == "__main__":
    data_obj = data.DATA()
    data_obj.read()
    print("Train Data Loaded")
    # BUILD MODEL
    rnn = model.RNN(data_obj.n_letters, config.N_HIDDEN, data_obj.n_categories)
    modeloperator = model.Operator(rnn, config.LEARNING_RATE)
    # TRAIN MODEL
    modeloperator.train(data_obj)
    print("Model Trained")
    # TEST MODEL
    print("all categories: ", len(data_obj.categories))
    modeloperator.predict(data_obj, 'Dovesky')
    modeloperator.predict(data_obj, 'Satoshi')
    modeloperator.predict(data_obj, 'Jackson')
Example #12
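# GAN training setup: seeds RNGs, builds the DataLoader, moves generator/discriminator to GPU, and prepares fixed noise for visualization.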
    args = parser.arg_parse()

    device = torch.device("cuda:" + str(args.gpu) if (
        torch.cuda.is_available()) else "cpu")
    '''create directory to save trained model and other info'''
    if not os.path.exists(args.save_dir):
        os.makedirs(args.save_dir)
    ''' setup GPU '''
    torch.cuda.set_device(args.gpu)
    ''' setup random seed '''
    np.random.seed(args.random_seed)
    torch.manual_seed(args.random_seed)
    torch.cuda.manual_seed(args.random_seed)
    ''' load dataset and prepare data loader '''
    print('===> prepare dataloader ...')
    train_loader = torch.utils.data.DataLoader(data.DATA(args),
                                               batch_size=args.train_batch,
                                               num_workers=args.workers,
                                               shuffle=True)
    ''' load model '''
    print('===> prepare model ...')
    netG, netD = models.GAN(args)
    netG, netD = netG.cuda(), netD.cuda()
    ''' define loss '''
    adversarial_loss = nn.BCELoss()

    # Create batch of latent vectors that we will use to visualize
    #  the progression of the generator
    fixed_noise = torch.randn(args.samples_num, args.nz, 1, 1, device=device)

    # Establish convention for real and fake labels during training
Example #13
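# ADDA adaptation: alternately trains the domain discriminator and the target feature extractor (with flipped labels), keeping the target model with the best test accuracy.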
def main(args):
    src_feature_extractor, label_predictor, adda_discriminator = models.ADDA(
        args)
    src_feature_extractor.cuda()
    label_predictor.cuda()
    adda_discriminator.cuda()
    src_feature_extractor.load_state_dict(
        torch.load(os.path.join(args.src_model,
                                'source_feature_extractor.pt')))
    label_predictor.load_state_dict(
        torch.load(os.path.join(args.src_model, 'label_predictor.pt')))
    src_feature_extractor.eval(), label_predictor.eval()
    set_requires_grad(src_feature_extractor, requires_grad=False)
    set_requires_grad(label_predictor, requires_grad=False)

    tar_feature_extractor, _, _ = models.ADDA(args)
    tar_feature_extractor.cuda()
    tar_feature_extractor.load_state_dict(
        torch.load(os.path.join(args.src_model,
                                'source_feature_extractor.pt')))

    src_train_loader = torch.utils.data.DataLoader(data.DATA(
        args, mode='train', dataset=args.source_dataset),
                                                   batch_size=args.train_batch,
                                                   num_workers=args.workers,
                                                   shuffle=True)
    tar_train_loader = torch.utils.data.DataLoader(data.DATA(
        args, mode='train', dataset=args.target_dataset),
                                                   batch_size=args.train_batch,
                                                   num_workers=args.workers,
                                                   shuffle=True)
    tar_test_loader = torch.utils.data.DataLoader(data.DATA(
        args, mode='test', dataset=args.target_dataset),
                                                  batch_size=args.train_batch,
                                                  num_workers=args.workers,
                                                  shuffle=False)

    discriminator_optim = torch.optim.Adam(adda_discriminator.parameters(),
                                           lr=args.lr_adda_d)
    target_optim = torch.optim.Adam(tar_feature_extractor.parameters(),
                                    lr=args.lr_tar)
    criterion = nn.BCEWithLogitsLoss()

    best_acc = 0.0

    for epoch in range(1, args.epochs + 1):
        batch_iterator = zip(loop_iterable(src_train_loader),
                             loop_iterable(tar_train_loader))
        tar_feature_extractor.train(), adda_discriminator.train()

        total_loss = 0
        total_accuracy = 0
        for _ in range(args.iterations):
            # Train discriminator
            set_requires_grad(tar_feature_extractor, requires_grad=False)
            set_requires_grad(adda_discriminator, requires_grad=True)
            for _ in range(args.k_disc):
                (source_x, _), (target_x, _) = next(batch_iterator)
                source_x, target_x = source_x.cuda(), target_x.cuda()

                source_features = src_feature_extractor(source_x).view(
                    source_x.shape[0], -1)
                target_features = tar_feature_extractor(target_x).view(
                    target_x.shape[0], -1)

                discriminator_x = torch.cat([source_features, target_features])
                discriminator_y = torch.cat([
                    torch.ones(source_x.shape[0]).cuda(),
                    torch.zeros(target_x.shape[0]).cuda()
                ])

                preds = adda_discriminator(discriminator_x).squeeze()
                loss = criterion(preds, discriminator_y)

                discriminator_optim.zero_grad()
                loss.backward()
                discriminator_optim.step()

                total_loss += loss.item()
                total_accuracy += ((preds > 0).long() ==
                                   discriminator_y.long()).float().mean().item()

            # Train classifier
            set_requires_grad(tar_feature_extractor, requires_grad=True)
            set_requires_grad(adda_discriminator, requires_grad=False)
            for _ in range(args.k_clf):
                _, (target_x, _) = next(batch_iterator)
                target_x = target_x.cuda()
                target_features = tar_feature_extractor(target_x).view(
                    target_x.shape[0], -1)

                # flipped labels
                discriminator_y = torch.ones(target_x.shape[0]).cuda()

                preds = adda_discriminator(target_features).squeeze()
                loss = criterion(preds, discriminator_y)

                target_optim.zero_grad()
                loss.backward()
                target_optim.step()

        mean_loss = total_loss / (args.iterations * args.k_disc)
        mean_accuracy = total_accuracy / (args.iterations * args.k_disc)
        print(f'EPOCH {epoch:03d}: discriminator_loss={mean_loss:.4f}, '
              f'discriminator_accuracy={mean_accuracy:.4f}')

        # Create the full target model and save it
        acc = eval(tar_feature_extractor, label_predictor, tar_test_loader)
        print("accuracy: ", acc)
        if acc > best_acc:
            torch.save(tar_feature_extractor.state_dict(),
                       os.path.join(args.save_dir, 'tar_feature_extractor.pt'))
            best_acc = acc
Example #14
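# Saves predicted images to args.predictions in test mode; the main block then restores a trained Net and builds the TA test DataLoader.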
    elif mode == "test":
        if not os.path.exists(args.predictions):
            os.makedirs(args.predictions)
        for idx in range(len(outputs_list)):
            img = Image.fromarray(outputs_list[idx])
            img_path = os.path.join(args.predictions,
                                    img_name_list[idx] + '.jpg')
            img.save(img_path)
        return 'Images saved'


if __name__ == "__main__":
    args = argparser.arg_parse()

    # Load test images
    test_dataset = data.DATA(mode="test", train_status="TA")
    dataloader_test = torch.utils.data.DataLoader(
        dataset=test_dataset,
        batch_size=args.batch_size_test,
        shuffle=False,
        num_workers=argparser.n_cpu)

    # Set device
    device = torch.device(
        "cuda") if torch.cuda.is_available() else torch.device("cpu")

    model = model.Net()
    checkpoint = torch.load(args.resume)
    model.load_state_dict(checkpoint)

    if torch.cuda.is_available():
Example #15
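# Data selection: MNIST loaders in this branch, or Human3.6M motion/pose loaders built through data.DATA in the branches below.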
    transform = transforms.Compose([
        transforms.ToTensor(),
    ])

    train_loader = DataLoader(
        MNIST('.', train=True, download=False, transform=transform),
        batch_size=train_batch_size,
        shuffle=True)
    val_loader = DataLoader(
        MNIST('.', train=False, download=False, transform=transform),
        batch_size=train_batch_size,
        shuffle=True)
    input_n = 784
elif opt.motion:
    ### Human Motion Data
    data = data.DATA("h3.6m_3d", "h3.6m/dataset/")
    timepoints = opt.timepoints
    out_of_distribution = data.get_dct_and_sequences(input_n=timepoints, output_n=0, sample_rate=opt.sample_rate, dct_n=10, out_of_distribution_action=None)
    train_loader, val_loader, OoD_val_loader, test_loader = data.get_dataloaders(train_batch=train_batch_size, test_batch=test_batch_size)
    #input_n=data.node_n*timepoints
    #input_n=96*timepoints
    input_n = [96, timepoints]
else:
    ### Human PoseData
    data = data.DATA("h3.6m_3d", "h3.6m/dataset/")
    out_of_distribution = data.get_poses(input_n=1, output_n=1, sample_rate=opt.sample_rate, dct_n=2, out_of_distribution_action=None)
    train_loader, val_loader, OoD_val_loader, test_loader = data.get_dataloaders(train_batch=train_batch_size, test_batch=test_batch_size)
    input_n = data.node_n
print(">>> data loaded !")
# ===============================================================
# Instantiate the model and the methods used for training and validation
Example #16
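# Feature-visualization prep: restores a trained DANN feature extractor and collects source/target test features for t-SNE.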
import models
from matplotlib import pyplot as plt
import data
import parser
import torch
#torch.multiprocessing.set_sharing_strategy('file_system')
import numpy as np
from sklearn.manifold import TSNE


args = parser.arg_parse()
tar_test_loader = torch.utils.data.DataLoader(data.DATA(args, mode='test', dataset=args.target_dataset),
                                               batch_size=args.train_batch,
                                               num_workers=args.workers,
                                               shuffle=False)
src_test_loader = torch.utils.data.DataLoader(data.DATA(args, mode='test', dataset=args.source_dataset),
                                               batch_size=args.train_batch,
                                               num_workers=args.workers,
                                               shuffle=False)

feature_extractor, label_predictor, domain_classifier = models.DANN(args)
feature_extractor.cuda(), label_predictor.cuda()
feature_extractor.load_state_dict(torch.load(args.resume_folder + 'feature_extractor.pth.tar'))
label_predictor.load_state_dict(torch.load(args.resume_folder + 'label_predictor.pth.tar'))
feature_extractor.eval(), label_predictor.eval()


tar_features, tar_labels, src_features, src_labels = [], [], [], []
with torch.no_grad():  # gradients are not needed during evaluation
    for idx, (imgs, classes) in enumerate(tar_test_loader):
        imgs = imgs.cuda()
Example #17
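# Loads pretrained GloVe embeddings, reads train/test data, and builds the training model inside a shared variable scope.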
import config
import data
import model
import utils
import os
import tensorflow as tf

if __name__ == "__main__":
    # LOAD EMBEDDING
    word_to_index, index_to_word, word_to_vec, emb_matrix = utils.read_glove_vecs(
        os.path.join(config.EMBEDDING_DIR, config.EMBEDDING_PATH))
    print("Pretrained Embedding Loaded")

    # LOAD CONFIG
    train_config = config.TrainConfig()
    test_config = config.TestConfig()
    # LOAD DATA
    train_data = data.DATA(train_config)
    train_data.read_file(config.TRAIN_PATH, word_to_index)
    print("Train data Loaded")
    test_data = data.DATA(test_config)
    test_data.read_file(config.TEST_PATH, word_to_index)
    print("Test data Loaded")

    # BUILD MODEL
    #initializer = tf.random_uniform_initializer(train_config.init_scale, train_config.init_scale)
    with tf.name_scope("Train"):
        with tf.variable_scope("Model", reuse=None):
            train_model = model.MODEL(train_config,
                                      len(word_to_index),
                                      training=True)
            train_model.build()
Example #18
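# End of a collate helper, then cross-validation setup: the three splits other than args.val_split are concatenated into the training set.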
    return padded_sequence, label, n_frames


args = parser.arg_parse()
'''create directory to save trained model and other info'''
if not os.path.exists(args.save_dir):
    os.makedirs(args.save_dir)
''' setup GPU '''
torch.cuda.set_device(args.gpu)
''' setup random seed '''
np.random.seed(args.random_seed)
torch.manual_seed(args.random_seed)
torch.cuda.manual_seed(args.random_seed)
''' load dataset and prepare data loader '''
print('===> prepare data ...')
data_loader = data.DATA(args, split='split0')

splits = ['split0', 'split1', 'split2', 'split3']
splits.remove(args.val_split)

train_data = torch.utils.data.ConcatDataset([
    data.DATA(args, split=splits[0]),
    data.DATA(args, split=splits[1]),
    data.DATA(args, split=splits[2])
])
val_data = data.DATA(args, split=args.val_split)

train_loader = torch.utils.data.DataLoader(train_data,
                                           batch_size=args.train_batch,
                                           num_workers=4,
                                           shuffle=True)
Example #19
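# Raises the open-file limit, then builds train/test DataLoaders and the P1 feature extractor.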
import resource

rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
resource.setrlimit(resource.RLIMIT_NOFILE, (4096, rlimit[1]))

if __name__ == '__main__':

    args = parser.arg_parse()
    '''create directory to save trained model and other info'''
    if not os.path.exists(args.save_dir):
        os.makedirs(args.save_dir)
    ''' setup GPU '''
    torch.cuda.set_device(args.gpu)
    ''' load dataset and prepare data loader '''
    print('===> prepare dataloader ...')
    train_loader = torch.utils.data.DataLoader(data.DATA(args, mode='train'),
                                               batch_size=args.data_batch,
                                               num_workers=args.workers,
                                               shuffle=False)

    val_loader = torch.utils.data.DataLoader(data.DATA(args, mode='test'),
                                             batch_size=args.data_batch,
                                             num_workers=args.workers,
                                             shuffle=False)
    ''' load model '''
    print('===> prepare model ...')
    feature_extractor, FC = models.P1()
    ''' extract feature '''
    #print('===> extract features for every videos ...')
    #utils.extract_feature_p1(feature_extractor, train_loader, val_loader, args)
    ''' define loss '''
Example #20
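# Split-based setup (same pattern as Example #18): seeds RNGs and builds train/val loaders from the three non-validation splits.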
    os.makedirs(args.save_dir)


''' setup GPU '''
torch.cuda.set_device(args.gpu)


''' setup random seed '''
np.random.seed(args.random_seed)
torch.manual_seed(args.random_seed)
torch.cuda.manual_seed(args.random_seed)


''' load dataset and prepare data loader '''
print('===> prepare data ...')
data_loader = data.DATA(args, split='split0')

splits = ['split0', 'split1', 'split2', 'split3']
splits.remove(args.val_split)

train_data = torch.utils.data.ConcatDataset([data.DATA(args, split=splits[0]),
                                             data.DATA(args, split=splits[1]),
                                             data.DATA(args, split=splits[2])])
val_data = data.DATA(args, split=args.val_split)

train_loader = torch.utils.data.DataLoader(train_data,
                                           batch_size=args.train_batch,
                                           num_workers=4,
                                           shuffle=True)

val_loader = torch.utils.data.DataLoader(val_data,
Example #21
# -*- coding: utf-8 -*-
"""
Created on Thu Feb  8 20:00:55 2018

@author: rahul.ghosh
"""

import data
import model
import config

if __name__ == "__main__":
    # READ DATA
    data = data.DATA()
    data.read(config.TRAIN_FILENAME)
    # BUILD MODEL
    model = model.MODEL()
    model.build()
    # TRAIN MODEL
    model.train(data)
    # TEST MODEL
    data.read(config.TEST_FILENAME)
    model.test(data)
Example #22
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 12 17:55:53 2019

@author: ashima.garg
"""
import os
import data
import model
import tensorflow as tf
import utils

if __name__ == "__main__":
    # READ DATA
    data_ = data.DATA()
    data_.read()
    print("Train Data Loaded")
    # BUILD MODEL
    model = model.MODEL()
    print("Model Initialized")

    with tf.variable_scope("siamese") as scope:
        model.output_1 = model.build(model.inputs_1)
        scope.reuse_variables()
        model.output_2 = model.build(model.inputs_2)

    print("Model Built")
    # TRAIN MODEL
    model.train(data_)
    print("Model Trained")
    # TEST MODEL
Example #23
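# Test entry point: picks the val or TA test DataLoader based on input_dir, then builds the chosen baseline model.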

if __name__ == '__main__':

    args = parser.arg_parse()

    # get input and output directory
    input_dir = args.input_dir

    ''' setup GPU '''
    torch.cuda.set_device(args.gpu)

    ''' prepare data_loader '''
    print('===> prepare data loader ...')
    if input_dir == "val_test":
        test_loader = torch.utils.data.DataLoader(data.DATA(args, mode='val'),
                                                  batch_size=args.test_batch,
                                                  num_workers=args.workers,
                                                  shuffle=False)
    else:
        test_loader = torch.utils.data.DataLoader(data_test.DATA_TEST(args, mode='test'),
                                                  batch_size=args.test_batch,
                                                  num_workers=args.workers,
                                                  shuffle=False)
    ''' prepare model '''
    if args.model == "simple_baseline":
        model = simple_baseline_model.SimpleBaselineModel(args).cuda()
    else:
        model = baseline_model.BaselineModel(args).cuda()

    ''' resume saved model '''
Example #24
File: train.py Project: caleb-llh/dlhw4
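# Trains models A, B, and C, saves each mode's best checkpoint by validation accuracy, evaluates on the test set, and pickles all metrics.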
def main():
    ''' setup '''
    torch.manual_seed(args.random_seed)
    ''' load dataset and prepare data loader '''
    print('===> prepare dataloader ...')
    train_set = data.DATA(args, mode='train')
    val_set = data.DATA(args, mode='val')
    test_set = data.DATA(args, mode='test')
    train_loader = torch.utils.data.DataLoader(train_set,
                                               batch_size=args.train_batch,
                                               num_workers=args.workers,
                                               shuffle=True)
    val_loader = torch.utils.data.DataLoader(val_set,
                                             batch_size=args.test_batch,
                                             num_workers=args.workers,
                                             shuffle=False)
    test_loader = torch.utils.data.DataLoader(test_set,
                                              batch_size=args.test_batch,
                                              num_workers=args.workers,
                                              shuffle=False)
    ''' load models '''
    print('===> prepare models ...')
    A = Net('A')
    B = Net('B')
    C = Net('C')
    models = {'A': A, 'B': B, 'C': C}
    ''' define loss '''
    criterion = nn.CrossEntropyLoss()

    best_acc = {'A': 0, 'B': 0, 'C': 0}
    best_epoch = {'A': 0, 'B': 0, 'C': 0}
    train_losses = {'A': [], 'B': [], 'C': []}
    val_losses = {'A': [], 'B': [], 'C': []}
    val_accs = {'A': [], 'B': [], 'C': []}
    test_losses = {'A': 0, 'B': 0, 'C': 0}
    test_accs = {'A': 0, 'B': 0, 'C': 0}
    ''' training and validation iterations '''
    for mode in ['A', 'B', 'C']:
        model = models[mode]
        ''' setup optimizer '''
        optimizer = torch.optim.SGD(model.parameters(), lr=args.lr)
        if torch.cuda.is_available():
            model.cuda()

        for epoch in range(args.epoch):
            ''' train model and get averaged epoch loss '''
            train_loss = train(mode, epoch, model, train_loader, criterion,
                               optimizer)
            ''' evaluate the model '''
            val_loss, val_acc = test(epoch, model, val_loader, criterion)
            train_losses[mode].append(train_loss)
            val_losses[mode].append(val_loss)
            val_accs[mode].append(val_acc)
            print(
                '\nMode: {} Epoch: [{}] TRAIN_LOSS: {} VAL_LOSS: {} VAL_ACC:{}'
                .format(mode, epoch, train_loss, val_loss, val_acc))
            ''' save best model '''
            if val_acc > best_acc[mode]:
                save_model(
                    model,
                    os.path.join(args.save_dir,
                                 'model_best_{}.pth.tar'.format(mode)))
                best_acc[mode] = val_acc
                best_epoch[mode] = epoch
        print("Mode: {} Best acc: {}, epoch {}".format(mode, best_acc[mode],
                                                       best_epoch[mode]))
    ''' testing (best model) '''
    for mode in ['A', 'B', 'C']:
        model = models[mode]
        if torch.cuda.is_available():
            model.load_state_dict(
                torch.load(
                    os.path.join(args.save_dir,
                                 'model_best_{}.pth.tar'.format(mode))))
            model.cuda()
        else:
            model.load_state_dict(
                torch.load(os.path.join(args.save_dir,
                                        'model_best_{}.pth.tar'.format(mode)),
                           map_location=torch.device('cpu')))

        test_loss, test_acc = test(0, model, test_loader, criterion)
        test_losses[mode] = test_loss
        test_accs[mode] = test_acc
        print('Mode: {} TEST_LOSS:{} TEST_ACC:{}'.format(
            mode, test_loss, test_acc))
    ''' save train/val/test information as pickle files '''
    with open(os.path.join(args.save_dir, 'train_losses.pkl'), 'wb') as f:
        pickle.dump(train_losses, f)
    with open(os.path.join(args.save_dir, 'val_losses.pkl'), 'wb') as f:
        pickle.dump(val_losses, f)
    with open(os.path.join(args.save_dir, 'val_accs.pkl'), 'wb') as f:
        pickle.dump(val_accs, f)
    with open(os.path.join(args.save_dir, 'test_losses.pkl'), 'wb') as f:
        pickle.dump(test_losses, f)
    with open(os.path.join(args.save_dir, 'test_accs.pkl'), 'wb') as f:
        pickle.dump(test_accs, f)