Code Example #1
def main():
    """
    Testing the convolutional example on the MNIST dataset.
    """

    dataset = ConvMNIST(64)
    print(dataset.get_train().x.shape)

    inputs = Value(type=tf.float32, shape=(None, 28, 28, 1), cls=None)
    targets = Value(type=tf.int64, shape=(None), cls=10)
    learning_rate = 0.0001

    fc_hidden = [1024, 500]
    c_h = [(3, 3, 1, 32), (3, 3, 32, 64)]
    conv_hidden = ConvHidden(conv_weights=c_h, fc_weights=fc_hidden)

    config = Config(inputs, targets, conv_hidden, learning_rate)

    network = ConvNetworkBuilder(config)
    hidden = FFConvHiddenBuilder()
    _ = network.build_network(hidden)

    train_config = TrainerConfig(epochs=EPOCHS,
                                 display_after=DISPLAY_STEP,
                                 keep_prob=KEEP_PROB,
                                 checkpoint_path=None,
                                 summary_path=None)

    trainer = Trainer(network, train_config)
    trainer.train(dataset)
Code Example #2
def main():
    """
    Testing the recurrent example on the MNIST dataset.
    """

    dataset = RNNMNIST(BATCH_SIZE)
    print(dataset.get_train().y.shape)

    in_shape = (None, N_STEPS, N_INPUT)

    inputs = Value(type=tf.float32, shape=in_shape, cls=None)
    targets = Value(type=tf.int32, shape=(None), cls=10)

    fc_hidden = [500, 150]
    rnn_config = RNNHidden(rnn_weights=RNN_HIDDEN,
                           depth=1,
                           fc_weights=fc_hidden)
    config = Config(inputs, targets, rnn_config, LEARNING_RATE)

    network = ConvNetworkBuilder(config)
    hidden = SimpleRNNBuilder()
    _ = network.build_network(hidden)

    train_config = TrainerConfig(epochs=EPOCHS,
                                 display_after=DISPLAY_STEP,
                                 keep_prob=KEEP_PROB,
                                 checkpoint_path=None,
                                 summary_path=None)

    trainer = Trainer(network, train_config)
    trainer.train(dataset)
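Note: Examples #1, #2, and #7 refer to module-level constants (EPOCHS, DISPLAY_STEP, KEEP_PROB, BATCH_SIZE, LEARNING_RATE, N_STEPS, N_INPUT, RNN_HIDDEN) that are defined elsewhere in their projects. A minimal sketch of plausible definitions, with assumed values, so the snippets can run:

# All values below are assumptions; the real constants live elsewhere
# in each project.
EPOCHS = 10
DISPLAY_STEP = 100       # print progress every 100 batches
KEEP_PROB = 0.5          # dropout keep probability
BATCH_SIZE = 64
LEARNING_RATE = 0.0001
N_STEPS = 28             # one MNIST row per RNN time step
N_INPUT = 28             # pixels per row
RNN_HIDDEN = 128         # hidden units in the recurrent layer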
Code Example #3
File: main.py Project: wonlee2019/EGAD
import os
from os import path
import pickle as pkl


def start_exp(args):
    trainer = Trainer(dataset_dict[args.dataset], args.cuda)
    results = trainer.train_model(args)
    if not path.exists('results'):
        os.mkdir('results')
    f = open(
        "results/" + args.dataset + '_teacher_emb_' +
        str(args.teacher_embed_size) + '_teacher_heads_' +
        str(args.teacher_n_heads) + '_window_' + str(args.window) +
        '_student_emb_' + str(args.student_emb) + '_student_heads_' +
        str(args.student_heads) + '_distillation_' + str(args.distillation) +
        '.pkl', "wb")
    pkl.dump(results, f)
    f.close()
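The results filename above is assembled by string concatenation; an equivalent f-string version (same fields, no behavior change) is easier to read, and the with-statement closes the file automatically:

fname = (f"results/{args.dataset}_teacher_emb_{args.teacher_embed_size}"
         f"_teacher_heads_{args.teacher_n_heads}_window_{args.window}"
         f"_student_emb_{args.student_emb}_student_heads_{args.student_heads}"
         f"_distillation_{args.distillation}.pkl")
with open(fname, "wb") as f:
    pkl.dump(results, f)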
Code Example #4
    def get_trainer(self, config, model, optimizer, loss, train_loader,
                    test_loader):
        if self.trainer_id == 0:
            return Trainer(config, model, optimizer, loss, train_loader,
                           test_loader)
        else:
            raise NameError('Trainer id unknown {}'.format(self.trainer_id))
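A hypothetical call site for this factory method; trainer_factory and the argument names are assumptions for illustration, not part of the original:

# trainer_factory is an assumed instance of the factory class above
trainer = trainer_factory.get_trainer(config, model, optimizer, loss,
                                      train_loader, test_loader)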
Code Example #5
def main():
    opt = TrainOptions().parse()

    # create dataloaders for each phase
    dataloaders = create_dataloader(opt)

    print("type of subset: ", type(dataloaders[0]))

    # Create model
    model = create_model(opt)
    # regular setup: load and print networks; create schedulers
    model.setup(opt)
    # create a visualizer that displays/saves images and plots
    visualizer = Visualizer(opt)

    # initialize trainer
    trainer = Trainer(dataloaders, model, visualizer, opt)
    trainer.train()
Code Example #6
def main():
    args = get_args()
    m_config = process_config(args.config)

    config = tf.ConfigProto(log_device_placement=False)
    config.gpu_options.allow_growth = True

    with tf.Session(config=config) as sess:
        # create_dirs([config.summary_dir, config.checkpoint_dir])
        data_loader = SiameseDataLoader(config=m_config)
        model = ConvNet(data_loader=data_loader, config=m_config)
        logger = Logger(sess=sess, config=m_config)

        trainer = Trainer(sess=sess,
                          model=model,
                          config=m_config,
                          logger=logger,
                          data_loader=data_loader)

        trainer.train()
Code Example #7
File: fftest.py Project: unwosu/ThinkDeep
def main():
    """
    Testing the feedforward framework on the mnist dataset.
  """
    dataset = MNIST(BATCH_SIZE)

    inputs = Value(type=tf.float32, shape=(None, 784), cls=None)
    targets = Value(type=tf.int64, shape=(None), cls=10)
    fc_hidden = FCHidden(weights=[300, 150])

    config = Config(inputs, targets, fc_hidden, LEARNING_RATE)

    network_builder = FFNetworkBuilder(config)
    hidden_builder = FFHiddenBuilder()
    _ = network_builder.build_network(hidden_builder)

    train_config = TrainerConfig(epochs=EPOCHS,
                                 display_after=DISPLAY_STEP,
                                 keep_prob=KEEP_PROB,
                                 checkpoint_path=None,
                                 summary_path=None)
    trainer = Trainer(network_builder, train_config)
    trainer.train(dataset)
Code Example #8
        all_gram_idx = (torch.arange(self.n_gram).cuda()
                        if use_cuda else torch.arange(self.n_gram))
        all_vocab_idx = (torch.arange(self.vocab_size).cuda()
                         if use_cuda else torch.arange(self.vocab_size))
        position_matrix = self.C(all_gram_idx).reshape(-1, self.embedding_dim,
                                                       self.embedding_dim)
        context_features = torch.tensordot(context_word_features,
                                           position_matrix)
        all_word = self.embeddings_word_output(all_vocab_idx)
        decoded = torch.mm(
            context_features,
            all_word.T) + self.embedding_bias(all_vocab_idx).view(-1)
        logits = F.log_softmax(decoded, dim=1)
        return logits


TEXT, train_iter, val_iter, test_iter = get_iter(args.batch_size)
model = LBLModel(TEXT=TEXT,
                 embedding_dim=args.embedding_dim,
                 batch_size=args.batch_size,
                 n_gram=args.n_gram)
if use_cuda:
    model.cuda()
trainer = Trainer(train_iter=train_iter,
                  val_iter=val_iter,
                  TEXT=TEXT,
                  lr=args.lr,
                  n_gram=args.n_gram)
trainer.train_model(model=model, num_epochs=args.num_epochs)
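Examples #8, #9, and #11 repeat the "t.cuda() if use_cuda else t" ternary. A small helper, sketched here as a possible refactor (not part of the original code), would remove the duplication:

def maybe_cuda(t):
    # move a tensor to the GPU only when CUDA is enabled
    return t.cuda() if use_cuda else t

# usage inside the model's forward pass:
# all_vocab_idx = maybe_cuda(torch.arange(self.vocab_size))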
Code Example #9
        all_vocab_idx = (torch.arange(self.vocab_size).cuda()
                         if use_cuda else torch.arange(self.vocab_size))

        context_word_boxes.data = torch.mean(context_word_boxes.data,
                                             dim=1).view(
                                                 -1, 1, 2, self.embedding_dim)

        all_word = self.embeddings_word(all_vocab_idx * self.n_gram)
        all_word.data = all_word.data.view(1, self.vocab_size, 2,
                                           self.embedding_dim)

        dec = all_word.intersection_log_soft_volume(context_word_boxes)
        decoded = dec + self.embedding_bias(all_vocab_idx).view(-1)
        logits = F.log_softmax(decoded, dim=1)
        return logits


TEXT, train_iter, val_iter, test_iter = get_iter(args.batch_size)
model = BoxModel(TEXT=TEXT,
                 embedding_dim=args.embedding_dim,
                 batch_size=args.batch_size,
                 n_gram=args.n_gram)
if use_cuda:
    model.cuda()
trainer = Trainer(train_iter=train_iter,
                  val_iter=val_iter,
                  TEXT=TEXT,
                  lr=args.lr,
                  n_gram=args.n_gram)
trainer.train_model(model=model, num_epochs=args.num_epochs)
Code Example #10
trainset = PixWiseDataset(root_dir=cfg['dataset']['root'],
                          csv_file=cfg['dataset']['train_set'],
                          map_size=cfg['model']['map_size'],
                          transform=train_transform,
                          smoothing=cfg['model']['smoothing'])

testset = PixWiseDataset(root_dir=cfg['dataset']['root'],
                         csv_file=cfg['dataset']['test_set'],
                         map_size=cfg['model']['map_size'],
                         transform=test_transform,
                         smoothing=cfg['model']['smoothing'])

trainloader = torch.utils.data.DataLoader(
    dataset=trainset,
    batch_size=cfg['train']['batch_size'],
    shuffle=True,
    num_workers=0)

testloader = torch.utils.data.DataLoader(dataset=testset,
                                         batch_size=cfg['test']['batch_size'],
                                         shuffle=True,
                                         num_workers=0)

trainer = Trainer(cfg=cfg,
                  network=network,
                  optimizer=optimizer,
                  loss=loss,
                  lr_scheduler=None,
                  device=device,
                  trainloader=trainloader,
                  testloader=testloader,
                  writer=writer)

trainer.train()
writer.close()
Code Example #11
                                          position_matrix,
                                          dim=1)

        if args.sep_output:
            all_word = self.embeddings_word_output(all_vocab_idx)
        else:
            all_word = self.embeddings_word(all_vocab_idx)

        decoded = torch.mm(
            context_features,
            all_word.T) + self.embedding_bias(all_vocab_idx).view(-1)
        logits = F.log_softmax(decoded, dim=1)
        return logits


TEXT, train_iter, val_iter, test_iter = get_iter(args.batch_size, args.dataset)
model = LBLModel(TEXT=TEXT,
                 embedding_dim=args.embedding_dim,
                 batch_size=args.batch_size,
                 n_gram=args.n_gram)
if use_cuda:
    model.cuda()
trainer = Trainer(train_iter=train_iter,
                  val_iter=val_iter,
                  TEXT=TEXT,
                  lr=args.lr,
                  n_gram=args.n_gram)
trainer.train_model(model=model,
                    num_epochs=args.num_epochs,
                    path=wandb.run.dir)
Code Example #12
if args["multiG_overwrite"] is True:
    DICT_DIR_train = f"{DIR}/data/dict/{id2lang[0]}/{id2lang[0]}-{id2lang[1]}.0-5000.txt"
    DICT_DIR_test = f"{DIR}/data/dict/{id2lang[0]}/{id2lang[0]}-{id2lang[1]}.5000-6500.txt"
    KG1 = KG()
    KG2 = KG()
    KG1.load_data(dir=KG_DIR, id=0, emb_path=args["emb_ew1"])
    KG2.load_data(dir=KG_DIR, id=1, emb_path=args["emb_ew2"])
    multiG1 = multiG(KG1, KG2, id2lang[0], id2lang[1])
    multiG1.load_align(KG_DIR=KG_DIR)
    multiG1.load_bidict(DICT_DIR_train=DICT_DIR_train,
                        DICT_DIR_test=DICT_DIR_test)
    # multiG1.save(f"{DIR}/data/KG/{DATASET}/{id2lang[0]}_{id2lang[1]}")
else:
    multiG1 = multiG()
    multiG1.load(args["multiG"])

if args["restore"] == False:
    this_trainer = Trainer(multiG1, args["SG_corpus0"], args["SG_corpus1"],
                           args, args_SG)
else:
    this_model = torch.load(args["load_path"])
    this_trainer = Trainer(multiG1,
                           args["SG_corpus0"],
                           args["SG_corpus1"],
                           args,
                           args_SG,
                           this_model,
                           restore=True)

this_trainer.train_all()
Code Example #13
    def __init__(self):
        Trainer.__init__(self)
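The same constructor written with super(), the idiomatic Python 3 spelling for single inheritance:

    def __init__(self):
        super().__init__()  # equivalent to Trainer.__init__(self) here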
Code Example #14
from model.network import build_d
from model.vunet.vae_unet import *
import yaml
from trainer.Trainer import Trainer
import os
from generator.dataIter import DataIter

os.environ['CUDA_VISIBLE_DEVICES'] = '0'

if __name__ == '__main__':
    with open('config/config.yaml') as f:
        config = yaml.load(f, Loader=yaml.FullLoader)
    vunet = Vunet()
    g_net = vunet.net
    d_net = build_d(config)

    g_net.summary()
    d_net.summary()

    dataIter = DataIter(
        '/media/newbot/software/Pose-Transfer-master/fashion_data/train/',
        '/media/newbot/software/Pose-Transfer-master/fashion_data/trainK/',
        '/media/newbot/software/Pose-Transfer-master/fashion_data/fasion-resize-pairs-train.csv',
        batch_size=config['batch_size'])

    trainer = Trainer(config, g_net, d_net, dataIter)
    # sanity check that the dataset root exists before training
    if not os.path.exists(
            '/media/newbot/software/Pose-Transfer-master/fashion_data'):
        raise FileNotFoundError('fashion_data directory not found')
    trainer.train()