Example #1
def main(args):
    with open(args.config_path, "r") as conf:
        config = yaml.load(conf, Loader=yaml.FullLoader)
    config["action"] = "evaluation"
    config["config_path"] = args.config_path
    config["load_from"] = args.load_from
    config["data_path"] = args.data_path
    config["resume_from"] = None
    # Overwrite values
    check_args(args)
    config["evaluation"]["post_processing"] = args.post_processing
    if args.confidence_threshold is not None:
        config["evaluation"][
            "confidence_threshold"] = args.confidence_threshold

    if args.save_csv_path is None:
        model_dir, _ = os.path.split(args.load_from)
        args.save_csv_path = os.path.join(model_dir, "prediction_results.csv")
    config["save_csv_path"] = args.save_csv_path

    # Initialize the trainer
    trainer = Trainer(config)

    # Start evaluation
    trainer.eval()
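For context, a minimal sketch of the command-line wiring this `main` assumes; the flag names are hypothetical, inferred from the attributes read above:

if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser()
    # Hypothetical flags; main() only depends on these attribute names.
    parser.add_argument("--config_path", required=True)
    parser.add_argument("--load_from", required=True)
    parser.add_argument("--data_path", required=True)
    parser.add_argument("--post_processing", default=None)
    parser.add_argument("--confidence_threshold", type=float, default=None)
    parser.add_argument("--save_csv_path", default=None)
    main(parser.parse_args())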
Example #2
def train(args):
    tf.get_logger().setLevel(logging.ERROR)

    mnist = MNIST()
    stylealae = StyleMNIST()

    modelname = args.name
    summary_path = os.path.join(args.summarydir, modelname)
    if not os.path.exists(summary_path):
        os.makedirs(summary_path)
    
    ckpt_path = os.path.join(args.ckptdir, modelname)
    if not os.path.exists(ckpt_path):
        os.makedirs(ckpt_path)

    controller = LevelController(NUM_LAYERS, EPOCHS_PER_LEVEL)
    trainer = Trainer(summary_path, ckpt_path, callback=controller)
    trainer.train(
        stylealae,
        args.epochs,
        mnist.datasets(
            args.batch_size, padding=2, flatten=False),
        mnist.datasets(
            args.batch_size, padding=2, flatten=False, train=False),
        trainlen=len(mnist.x_train) // args.batch_size)

    return 0
Example #3
    def _setup(self, config):

        # one iteration is five training epochs, one test epoch
        self.epochs = EPOCHS // TUNE_EPOCH_CHUNKS

        print(config)

        args = Namespace(**config)
        self.traindataloader, self.validdataloader = prepare_dataset(args)

        nclasses = self.traindataloader.dataset.nclasses
        seqlength = self.traindataloader.dataset.sequencelength
        input_dims = self.traindataloader.dataset.ndims

        self.model, self.optimizer = prepare_model_and_optimizer(
            args, input_dims, seqlength, nclasses)

        self.criterion = prepare_loss_criterion(args)

        if torch.cuda.is_available():
            self.model = self.model.cuda()

        if "model" in config.keys():
            config.pop('model', None)
        #trainer = Trainer(self.model, self.traindataloader, self.validdataloader, **databases)

        self.trainer = Trainer(self.model,
                               self.traindataloader,
                               self.validdataloader,
                               self.optimizer,
                               self.criterion,
                               store=args.local_dir,
                               test_every_n_epochs=999,
                               visdomlogger=None)
Example #4
    def train_session(idx, train_index, val_index):
        os.environ['CUDA_VISIBLE_DEVICES'] = str(idx)
        train_list = [imgs[x] for x in train_index]
        val_list = [imgs[x] for x in val_index]
        train_loader = get_loader(train_list, config, 'train')
        val_loader = get_loader(val_list, config, 'val')
        model = models.__dict__[config['model']](pretrained=config['pretrain'])
        model, last_layer, feature_layer = finetune(model, config)
        model = torch.nn.DataParallel(model).cuda()
        criterion = nn.CrossEntropyLoss().cuda()
        optimizer = torch.optim.Adam([{
            'params': last_layer.parameters(),
            'lr': 1e-3
        }, {
            'params': feature_layer.parameters(),
            'lr': 1e-4
        }])
        trainer = Trainer(model,
                          optimizer,
                          criterion,
                          config,
                          train_loader,
                          val_loader,
                          regime=None)

        trainer.run()
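A plausible driver for `train_session` is a cross-validation loop; a minimal sketch assuming scikit-learn and the `imgs` list from the enclosing scope (the fold count is an assumption):

from sklearn.model_selection import KFold

kfold = KFold(n_splits=4, shuffle=True, random_state=0)
for idx, (train_index, val_index) in enumerate(kfold.split(imgs)):
    # one session per fold; the GPU is selected via CUDA_VISIBLE_DEVICES
    # inside train_session
    train_session(idx, train_index, val_index)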
Example #5
def main():

    parser = argparse.ArgumentParser()
    parser.add_argument('--name', type=str, default='')
    parser.add_argument('--lr', type=float, default=1.0e-3)
    parser.add_argument('--weight-decay', type=float, default=0.0005)
    parser.add_argument('--momentum',
                        type=float,
                        default=0.99,
                        help='momentum')
    parser.add_argument('--device', type=str, default='cuda:0')
    parser.add_argument('--in', type=int, default=5)
    parser.add_argument('--out', type=int, default=1)
    parser.add_argument('--batchsize', type=int, default=4)
    args = vars(parser.parse_args())
    if not os.path.exists('./model_logs'):
        os.makedirs('./model_logs')
    save_dir = './model_logs/logs_' + args['name']
    config = {
        'DEVICE': torch.device(args['device']),
        'IN_LEN': int(args['in']),
        'OUT_LEN': int(args['out']),
        'BATCH_SIZE': int(args['batchsize']),
        'SCALE': 0.2,
        'TASK': 'reg',
        'DIM': '3D',
    }
    torch.cuda.manual_seed(1337)

    # 1. dataset

    data_loader = DataGenerator(data_path=global_config['DATA_PATH'],
                                config=config)

    # 2. model
    model = UNet3D(in_channels=1,
                   out_channels=1,
                   final_sigmoid=False,
                   layer_order='gcr',
                   is_segmentation=False,
                   num_levels=4,
                   pool_kernel_size=(1, 2, 2))
    model = torch.nn.DataParallel(model, device_ids=[0, 3])
    model = model.to(config['DEVICE'])

    weight_path = '/home/warit/fcn/experiments/unet3d/model_logs/logs_5_5_04140057/model_7500.pth'
    model.load_state_dict(torch.load(weight_path, map_location='cuda'))

    # 3. optimizer

    optim = torch.optim.Adam(model.parameters(), lr=args['lr'])

    # 4. train
    trainer = Trainer(config=config,
                      model=model,
                      optimizer=optim,
                      data_loader=data_loader,
                      save_dir=save_dir)
    trainer.train()
Example #6
    def test_Trainer_Conv1D_TwoPatterns(self):
        cleanup()

        try:
            traindataset = UCRDataset("TwoPatterns",
                                      partition="train",
                                      ratio=.75,
                                      randomstate=0,
                                      augment_data_noise=.1)
            validdataset = UCRDataset("TwoPatterns",
                                      partition="valid",
                                      ratio=.75,
                                      randomstate=0)
            nclasses = traindataset.nclasses
            traindataloader = torch.utils.data.DataLoader(traindataset,
                                                          batch_size=8,
                                                          shuffle=True,
                                                          num_workers=0,
                                                          pin_memory=True)

            validdataloader = torch.utils.data.DataLoader(validdataset,
                                                          batch_size=8,
                                                          shuffle=False,
                                                          num_workers=0,
                                                          pin_memory=True)

            model = ConvShapeletModel(num_layers=3,
                                      hidden_dims=50,
                                      ts_dim=1,
                                      n_classes=nclasses)

            if torch.cuda.is_available():
                model = model.cuda()

            config = dict(epochs=2,
                          learning_rate=1e-3,
                          earliness_factor=.75,
                          visdomenv="unittest",
                          switch_epoch=1,
                          loss_mode="loss_cross_entropy",
                          show_n_samples=0,
                          store="/tmp")

            trainer = Trainer(model, traindataloader, validdataloader,
                              **config)
            trainer.fit()

        except Exception as e:
            logging.exception(e)
            self.fail(str(e))

        self.assertEqual(trainer.get_phase(), EARLINESS_PHASE_NAME)

        # should have written two model files
        self.assertTrue(
            os.path.exists(
                "/tmp/model_{}.pth".format(CLASSIFICATION_PHASE_NAME)))
        self.assertTrue(
            os.path.exists("/tmp/model_{}.pth".format(EARLINESS_PHASE_NAME)))
Example #7
def run_code2vec(dataset_path: str):

    dataset_save_path = prepare_dataset(dataset_path, "code2vec.txt")

    trainer = Trainer(dataset_save_path)
    trainer.train_model()
    trainer.evaluate_model()

    code.interact(local=locals())
Example #8
    def __init__(self, sess, components, params):

        self.sess = sess
        self.params = params
        self.trainer = Trainer(self.sess, components, self.params)
        self.__init_session(components)
        self.validator = Validator(self.sess, components, params)
        self.validation_loss = MAX_LOSS
        self.validation_rate_step = params['validation_rate_step']
Example #9
def load_trainer(config_path, load_from):
    with open(config_path, "r") as conf:
        config = yaml.load(conf, Loader=yaml.FullLoader)
    config["action"] = "predict"
    config["config_path"] = config_path
    config["load_from"] = load_from
    config["resume_from"] = None
    trainer = Trainer(config)
    return trainer
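Usage then reduces to a single call (the paths here are placeholders); since `load_trainer` sets `config["action"]` to "predict", the returned trainer is intended for inference:

trainer = load_trainer("configs/experiment.yaml", "checkpoints/best_model.pth")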
Example #10
    def _setup(self, config):

        if config["dataset"] == "BavarianCrops":
            region = "HOLL_2018_MT_pilot"
            root = "/home/marc/data/BavarianCrops"
            nsamples = None
            traindataset = BavarianCropsDataset(root=root,
                                                region=region,
                                                partition="train",
                                                nsamples=nsamples)
            validdataset = BavarianCropsDataset(root=root,
                                                region=region,
                                                partition="valid",
                                                nsamples=nsamples)
        else:
            traindataset = UCRDataset(config["dataset"],
                                      partition="train",
                                      ratio=.8,
                                      randomstate=config["fold"],
                                      silent=False,
                                      augment_data_noise=0)

            validdataset = UCRDataset(config["dataset"],
                                      partition="valid",
                                      ratio=.8,
                                      randomstate=config["fold"],
                                      silent=False)

        nclasses = traindataset.nclasses

        self.epochs = config["epochs"]

        # handles multithreaded batching and shuffling
        self.traindataloader = torch.utils.data.DataLoader(
            traindataset,
            batch_size=config["batchsize"],
            shuffle=True,
            num_workers=config["workers"],
            pin_memory=False)
        self.validdataloader = torch.utils.data.DataLoader(
            validdataset,
            batch_size=config["batchsize"],
            shuffle=False,
            num_workers=config["workers"],
            pin_memory=False)

        self.model = DualOutputRNN(input_dim=traindataset.ndims,
                                   nclasses=nclasses,
                                   hidden_dims=config["hidden_dims"],
                                   num_rnn_layers=config["num_layers"])

        if torch.cuda.is_available():
            self.model = self.model.cuda()

        self.trainer = Trainer(self.model, self.traindataloader,
                               self.validdataloader, **config)
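`_setup(self, config)` follows the old Ray Tune `Trainable` API; a minimal sketch of how such a class might be launched with `tune.run` (the class name and config values are hypothetical, the keys mirror those read above):

from ray import tune

tune.run(
    RNNTrainable,  # hypothetical Trainable subclass defining _setup above
    config={
        "dataset": "TwoPatterns",
        "fold": 0,
        "epochs": 10,
        "batchsize": 32,
        "workers": 2,
        "hidden_dims": 32,
        "num_layers": 2,
    })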
Example #11
def train(args, config):
    data_root = config['data_root']
    device = torch.device('cuda:0' if args.gpu else 'cpu')

    # get model
    model = PoseNet(config, device)

    # multi-GPU training
    if args.gpu:
        gpu_ids = list(range(torch.cuda.device_count()))
        model = WappedDataParallel(model,
                                   device_ids=gpu_ids,
                                   output_device=gpu_ids[0])
    model = model.to(device)

    # get dataset and set data loader
    transf = transforms.Compose([
        transforms.Resize((config['img_size'], config['img_size'])),
        transforms.ToTensor()
    ])

    train_data_loader = DataLoader(Dataset6Dof(data_root,
                                               n_class=config['n_class'],
                                               split='train',
                                               transform=transf),
                                   batch_size=config['batch_size'],
                                   shuffle=True,
                                   num_workers=config['num_workers'],
                                   drop_last=True)

    val_data_loader = DataLoader(Dataset6Dof(data_root,
                                             n_class=config['n_class'],
                                             split='test',
                                             transform=transf),
                                 batch_size=config['batch_size'],
                                 shuffle=True,
                                 num_workers=config['num_workers'],
                                 drop_last=True)

    # get optimizer and trainer
    optimizer = optim.Adam(model.parameters(),
                           lr=config['lr'],
                           betas=(config['beta1'], config['beta2']),
                           eps=config['eps'])

    test_data = next(iter(val_data_loader))
    trainer = Trainer(config=config,
                      train_data_loader=train_data_loader,
                      val_data_loader=val_data_loader,
                      test_data=test_data,
                      device=device,
                      model=model,
                      optimizer=optimizer)
    trainer.train()
Example #12
def main():

    parser = argparse.ArgumentParser()
    parser.add_argument('--name', type=str, default='')
    parser.add_argument('--lr', type=float, default=1.0e-3)
    parser.add_argument('--weight-decay', type=float, default=0.0005)
    parser.add_argument('--momentum',
                        type=float,
                        default=0.99,
                        help='momentum')
    parser.add_argument('--device', type=str, default='cuda:0')
    parser.add_argument('--in', type=int, default=5)
    parser.add_argument('--out', type=int, default=1)
    parser.add_argument('--batchsize', type=int, default=4)
    args = vars(parser.parse_args())

    if not os.path.exists('./model_logs'):
        os.makedirs('./model_logs')
    save_dir = './model_logs/logs_' + args['name']
    config = {
        'DEVICE': torch.device(args['device']),
        'IN_LEN': int(args['in']),
        'OUT_LEN': int(args['out']),
        'BATCH_SIZE': int(args['batchsize']),
        'SCALE': 0.25,
        'DIM': 'RR',
    }
    torch.cuda.manual_seed(1337)

    # 1. dataset

    data_loader = DataGenerator(data_path=global_config['DATA_PATH'],
                                config=config)

    # 2. model
    config['IN_HEIGHT'] = int(config['SCALE'] * global_config['DATA_HEIGHT'])
    config['IN_WIDTH'] = int(config['SCALE'] * global_config['DATA_WIDTH'])
    model = RRNet(config, 8)
    # model = torch.nn.DataParallel(model, device_ids=[0, 3])
    model = model.to(config['DEVICE'])

    # 3. optimizer

    optim = torch.optim.Adam(model.parameters(), lr=args['lr'])

    # 4. train
    trainer = Trainer(config=config,
                      model=model,
                      optimizer=optim,
                      data_loader=data_loader,
                      save_dir=save_dir)
    trainer.train()
Example #13
def main(args):
    with open(args.config_path, "r") as conf:
        config = yaml.load(conf, Loader=yaml.FullLoader)
    config["action"] = "training"
    config["resume_from"] = args.resume_from
    config["load_from"] = args.load_from
    config["config_path"] = args.config_path

    # Initialize the trainer
    trainer = Trainer(config)

    # Start training
    trainer.train()
Example #14
    def test_Trainer_TwoPatterns(self):

        try:
            traindataset = UCRDataset("TwoPatterns",
                                      partition="train",
                                      ratio=.75,
                                      randomstate=0,
                                      augment_data_noise=.1)
            validdataset = UCRDataset("TwoPatterns",
                                      partition="valid",
                                      ratio=.75,
                                      randomstate=0)
            nclasses = traindataset.nclasses
            traindataloader = torch.utils.data.DataLoader(traindataset,
                                                          batch_size=8,
                                                          shuffle=True,
                                                          num_workers=0,
                                                          pin_memory=True)

            validdataloader = torch.utils.data.DataLoader(validdataset,
                                                          batch_size=8,
                                                          shuffle=False,
                                                          num_workers=0,
                                                          pin_memory=True)

            model = DualOutputRNN(input_dim=1,
                                  nclasses=nclasses,
                                  hidden_dims=20,
                                  num_rnn_layers=1,
                                  dropout=.2)

            if torch.cuda.is_available():
                model = model.cuda()

            config = dict(
                epochs=2,
                learning_rate=1e-3,
                earliness_factor=.75,
                visdomenv="unittest",
                switch_epoch=1,
                loss_mode="twophase_linear_loss",
                show_n_samples=0,
                store="/tmp",
                overwrite=True,
            )

            trainer = Trainer(model, traindataloader, validdataloader,
                              **config)
            trainer.fit()
        except Exception as e:
            logging.exception(e)
            self.fail(str(e))
Example #15
def main(params):
    logging.info("Loading the datasets...")
    train_iter, dev_iter, test_iterator, DE, EN = load_dataset(
        params.data_path, params.train_batch_size, params.dev_batch_size)
    de_size, en_size = len(DE.vocab), len(EN.vocab)
    logging.info("[DE Vocab Size]: {}, [EN Vocab Size]: {}".format(
        de_size, en_size))
    logging.info("- done.")

    params.src_vocab_size = de_size
    params.tgt_vocab_size = en_size
    params.sos_index = EN.vocab.stoi["<s>"]
    params.pad_token = EN.vocab.stoi["<pad>"]
    params.eos_index = EN.vocab.stoi["</s>"]
    params.itos = EN.vocab.itos
    params.SRC = DE
    params.TRG = EN

    # make the Seq2Seq model
    model = make_seq2seq_model(params)

    # default optimizer
    optimizer = optim.Adam(model.parameters(), lr=params.lr)

    if params.model_type == "Transformer":
        criterion = LabelSmoothingLoss(params.label_smoothing,
                                       params.tgt_vocab_size,
                                       params.pad_token).to(params.device)
        optimizer = ScheduledOptimizer(optimizer=optimizer,
                                       d_model=params.hidden_size,
                                       factor=2,
                                       n_warmup_steps=params.n_warmup_steps)
        scheduler = None
    else:
        criterion = nn.NLLLoss(reduction="sum", ignore_index=params.pad_token)
        scheduler = optim.lr_scheduler.ReduceLROnPlateau(
            optimizer, patience=params.patience, factor=.1, verbose=True)

    # initialize the Trainer
    trainer = Trainer(model, optimizer, scheduler, criterion, train_iter,
                      dev_iter, params)

    if params.restore_file:
        restore_path = os.path.join(params.model_dir + "/checkpoints/",
                                    params.restore_file)
        logging.info("Restoring parameters from {}".format(restore_path))
        Trainer.load_checkpoint(model, restore_path, optimizer)

    # train the model
    trainer.train()
Example #16
def run(args):
    np.random.seed(args.seed)
    random.seed(args.seed)

    train_data, train_data_label, test_data, test_data_label, label_dict = (
        load_data(args.shared_nums)
        if args.not_use_c else load_data_c(args.shared_nums))
    print(
        f"Feature nums:{train_data[0].shape[-1]}, Label nums:{len(label_dict.keys())}"
    )
    crf = LinearChainCRF(len(label_dict.keys()), train_data[0].shape[-1],
                         label_dict)

    trainer = Trainer(args.lr, args.batch_size, args.epoch, args.lamda)
    trainer.train(crf, train_data, train_data_label, test_data,
                  test_data_label)
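The `args` object `run` expects could come from a parser like the following; the flag names and defaults are assumptions inferred from the attributes used above:

import argparse

parser = argparse.ArgumentParser()
# Hypothetical flags; run() only depends on these attribute names.
parser.add_argument("--seed", type=int, default=0)
parser.add_argument("--shared_nums", type=int, default=2)
parser.add_argument("--not_use_c", action="store_true")
parser.add_argument("--lr", type=float, default=1e-2)
parser.add_argument("--batch_size", type=int, default=16)
parser.add_argument("--epoch", type=int, default=10)
parser.add_argument("--lamda", type=float, default=1e-2)
run(parser.parse_args())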
Example #17
def train(cfg, start_epoch):
    torch.manual_seed(cfg.SEED)
    device = torch.device('cuda' if cfg.GPU[0] >= 0 else 'cpu')
    if start_epoch == 1:
        train_log = open(os.path.join(cfg.LOG_DIR, "train_log.csv"), 'w')
        train_log_title = "epoch,total_loss,hm_loss,wh_loss"
        val_log = open(os.path.join(cfg.LOG_DIR, "val_log.csv"), 'w')
        val_log_title = "epoch,precision,recall\n"
        if cfg.USE_OFFSET:
            train_log_title += ",offset_loss\n"
        else:
            train_log_title += "\n"
        train_log.write(train_log_title)
        train_log.flush()
        val_log.write(val_log_title)
        val_log.flush()
    else:
        train_log = open(os.path.join(cfg.LOG_DIR, "train_log.csv"), 'a')
        val_log = open(os.path.join(cfg.LOG_DIR, "val_log.csv"), 'a')

    print('Creating model...')
    model = create_model(cfg, 'res_18')
    if start_epoch != 1:
        model = load_model(
            model, 'log/weights/model_epoch_{}.pth'.format(start_epoch - 1))
    optimizer = torch.optim.Adam(model.parameters(), cfg.LR)

    trainer = Trainer(cfg, model, optimizer)
    trainer.set_device(cfg.GPU, device)
    print('Setting up data...')
    train_loader = DataLoader(TrainCircleDataset(cfg),
                              batch_size=cfg.BATCH_SIZE,
                              shuffle=True,
                              num_workers=cfg.NUM_WORKERS,
                              pin_memory=True,
                              drop_last=True)
    val_loader = ValCircleDataset()
    print('Starting training...')
    epoch = start_epoch
    for epoch in range(start_epoch, start_epoch + cfg.NUM_EPOCHS):
        trainer.train(epoch, train_loader, train_log)
        model_path = os.path.join(cfg.WEIGHTS_DIR,
                                  'model_epoch_{}.pth'.format(epoch))
        save_model(model_path, epoch, model, optimizer)
        trainer.val(epoch, model_path, val_loader, val_log, cfg)

    save_model(os.path.join(cfg.WEIGHTS_DIR, 'model_last.pth'), epoch, model,
               optimizer)
Example #18
    def _setup(self, config):

        traindataset = UCRDataset(config["dataset"],
                                  partition="train",
                                  ratio=.8,
                                  randomstate=config["fold"],
                                  silent=True,
                                  augment_data_noise=0)

        validdataset = UCRDataset(config["dataset"],
                                  partition="valid",
                                  ratio=.8,
                                  randomstate=config["fold"],
                                  silent=True)

        self.epochs = config["epochs"]

        nclasses = traindataset.nclasses

        # handles multithreaded batching and shuffling
        self.traindataloader = torch.utils.data.DataLoader(
            traindataset,
            batch_size=config["batchsize"],
            shuffle=True,
            num_workers=config["workers"],
            pin_memory=False)
        self.validdataloader = torch.utils.data.DataLoader(
            validdataset,
            batch_size=config["batchsize"],
            shuffle=False,
            num_workers=config["workers"],
            pin_memory=False)

        self.model = ConvShapeletModel(
            num_layers=config["num_layers"],
            hidden_dims=config["hidden_dims"],
            ts_dim=1,
            n_classes=nclasses,
            use_time_as_feature=True,
            drop_probability=config["drop_probability"],
            scaleshapeletsize=False,
            shapelet_width_increment=config["shapelet_width_increment"])

        if torch.cuda.is_available():
            self.model = self.model.cuda()

        self.trainer = Trainer(self.model, self.traindataloader,
                               self.validdataloader, **config)
Example #19
def main():
    # getting the customized configurations from the command-line arguments.
    args = KGEArgParser().get_args(sys.argv[1:])

    # Prepare the data and cache it for later usage
    knowledge_graph = KnowledgeGraph(dataset=args.dataset_name, negative_sample=args.sampling)
    knowledge_graph.prepare_data()

    # Extracting the corresponding model config and definition from Importer().
    config_def, model_def = Importer().import_model_config(args.model_name.lower())
    config = config_def(args=args)
    model = model_def(config)

    # Create, compile, and train the model; several evaluations are performed during training.
    trainer = Trainer(model=model, debug=args.debug)
    trainer.build_model()
    trainer.train_model()
Example #20
    def _setup(self, config):

        # one iteration is five training epochs, one test epoch
        self.epochs = RAY_TEST_EVERY

        print(config)

        args = Namespace(**config)
        self.traindataloader, self.validdataloader = prepare_dataset(args)

        args.nclasses = self.traindataloader.dataset.nclasses
        args.seqlength = self.traindataloader.dataset.sequencelength
        args.input_dims = self.traindataloader.dataset.ndims

        self.model = getModel(args)

        if torch.cuda.is_available():
            self.model = self.model.cuda()

        if "model" in config.keys():
            config.pop('model', None)
        #trainer = Trainer(self.model, self.traindataloader, self.validdataloader, **config)

        if args.experiment == "transformer":
            optimizer = ScheduledOptim(
                optim.Adam(filter(lambda x: x.requires_grad,
                                  self.model.parameters()),
                           betas=(0.9, 0.98),
                           eps=1e-09,
                           weight_decay=args.weight_decay,
                           lr=args.learning_rate), self.model.d_model,
                args.warmup)
        else:
            optimizer = optim.Adam(filter(lambda x: x.requires_grad,
                                          self.model.parameters()),
                                   betas=(0.9, 0.999),
                                   eps=1e-08,
                                   weight_decay=args.weight_decay,
                                   lr=args.learning_rate)

        self.trainer = Trainer(self.model,
                               self.traindataloader,
                               self.validdataloader,
                               optimizer=optimizer,
                               **config)
Example #21
def train():
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    transform = get_transform((args.height, args.width), 0.5, 10,
                              (args.height, args.width), True, True)
    dataset = TrainKBatch(args.image_root, args.pkl_file, transform,
                          args.image_num, args.use_val)
    model = MGN(num_classes=len(dataset.pids), cnn=args.cnn, ckpt=args.ckpt)
    optimizer = optim.Adam(model.parameters(),
                           args.lr,
                           weight_decay=args.weight_decay)
    scheduler = optim.lr_scheduler.StepLR(optimizer, args.lr_decay_epoch,
                                          args.gamma)
    celoss = CrossEntropyLoss()
    triloss = TripletLoss(margin=args.margin, mode=args.type).to(device)

    trainer = Trainer(args, dataset, model, optimizer, scheduler)

    def cal_loss(data, model):
        images, labels = data
        b, k, c, h, w = images.size()
        images, labels = images.view(-1, c, h,
                                     w).to(device), labels.view(-1).to(device)
        embs, logits = model(images)

        emb_loss = 0
        for emb in embs:
            emb_loss += triloss(emb, labels)[0]

        class_loss = 0
        for logit in logits:
            class_loss += celoss(logit, labels)[0]

        loss = emb_loss + class_loss

        log = ['loss', loss, 'classloss', class_loss, 'embloss', emb_loss]

        return loss, log

    trainer.train(cal_loss)
Example #22
def main():

    batch_size = 10

    # generating our data
    train_inputs, train_targets = generate_data(1000, 2)
    test_inputs, test_targets = generate_data(1000, 2)

    # creating our loaders for training and test sets
    train_loader = DataLoader(train_inputs, train_targets, batch_size)
    test_loader = DataLoader(test_inputs, test_targets, batch_size)

    # defining our layers
    layers = [
        Linear(input_dim=train_inputs[0].shape[0], output_dim=25),
        Relu(),
        Linear(input_dim=25, output_dim=25),
        Relu(),
        Linear(input_dim=25, output_dim=2),
        Tanh()
    ]

    # creating our model
    model = Sequential(layers)

    # init our optimizer
    optimizer = SGD(model.get_params(), lr=0.01)

    # init our trainer
    trainer = Trainer(model=model,
                      optimizer=optimizer,
                      epochs=500,
                      loss=LossMSE(),
                      train_loader=train_loader,
                      test_loader=test_loader)

    # starting the training session
    trainer.train()

    return 0
Example #23
def train(args):
    lsunbed = LsunBed(args.dataset)
    lsunbed_eval = LsunBed(args.evalset)
    stylealae = StyleLsunBed()

    modelname = args.name
    summary_path = os.path.join(args.summarydir, modelname)
    if not os.path.exists(summary_path):
        os.makedirs(summary_path)

    ckpt_path = os.path.join(args.ckptdir, modelname)
    if not os.path.exists(args.ckptdir):
        os.makedirs(args.ckptdir)

    controller = LevelController(NUM_LAYERS, EPOCHS_PER_LEVEL)
    trainer = Trainer(summary_path, ckpt_path, args.ckpt_interval, controller)
    trainer.train(stylealae, args.epochs,
                  lsunbed.datasets(bsize=args.batch_size),
                  lsunbed_eval.datasets(bsize=args.batch_size),
                  lsunbed.count // args.batch_size)

    return 0
Example #24
def main(config, modelParam):
    # create an instance of the model you want
    model = Model(config, modelParam)

    # create an instance of the saver and restorer class
    saveRestorer = SaverRestorer(config, modelParam)
    model        = saveRestorer.restore(model)

    # create your data generator
    dataLoader = DataLoaderWrapper(config, modelParam)

    # here you train your model
    if not modelParam['inference']:
        # create trainer and pass all the previous components to it
        trainer = Trainer(model, modelParam, config, dataLoader, saveRestorer)
        trainer.train()

    # plotImagesAndCaptions
    if modelParam['inference']:
        plotImagesAndCaptions(model, modelParam, config, dataLoader)

    return
Example #25
def train(args):
    mnist = MNIST()
    mlpalae = MnistAlae()

    modelname = args.name
    summary_path = os.path.join(args.summarydir, modelname)
    if not os.path.exists(summary_path):
        os.makedirs(summary_path)
    
    ckpt_path = os.path.join(args.ckptdir, modelname)
    if not os.path.exists(ckpt_path):
        os.makedirs(ckpt_path)

    trainer = Trainer(summary_path, ckpt_path)
    trainer.train(
        mlpalae,
        args.epochs,
        mnist.datasets(bsize=args.batch_size, flatten=True, condition=True),
        mnist.datasets(bsize=args.batch_size, flatten=True, condition=True, train=False),
        len(mnist.x_train) // args.batch_size)

    return 0
Example #26
def train():
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    transform = get_transform((args.height, args.width), 0.5, 10,
                              (args.height, args.width), True, True)
    dataset = TrainKBatch(args.image_root, args.pkl_file, transform,
                          args.image_num, args.use_val)
    model = CNN(num_classes=len(dataset.pids),
                cnn=args.cnn,
                stride=args.stride)
    optimizer = optim.Adam(model.parameters(),
                           args.lr,
                           weight_decay=args.weight_decay)
    scheduler = optim.lr_scheduler.StepLR(optimizer, args.lr_decay_epoch,
                                          args.gamma)
    celoss = CrossEntropyLoss()
    triloss = TripletLoss(margin=args.margin, mode=args.type).to(device)

    trainer = Trainer(args, dataset, model, optimizer, scheduler)

    def cal_loss(data, model):
        images, labels = data
        b, k, c, h, w = images.size()
        images, labels = images.view(-1, c, h,
                                     w).to(device), labels.view(-1).to(device)
        embs, logits = model(images)
        loss1, prec = celoss(logits, labels)
        loss2, top1 = triloss(embs, labels)
        loss = loss1 + loss2
        log = [
            'loss',
            loss.item(), 'classloss',
            loss1.item(), 'triloss',
            loss2.item(), 'top1', top1, 'prec', prec
        ]
        return loss, log

    trainer.train(cal_loss)
Example #27
def train():
    device = torch.device('cuda' if cfg.GPU[0] >= 0 else 'cpu')

    start_epoch = 1
    if start_epoch == 1:
        train_log = open(os.path.join(cfg.LOG_DIR, "train_log.csv"), 'w')
        train_log_title = "epoch,total_loss,classify_loss,angle_loss,iou_loss\n"
        train_log.write(train_log_title)
        train_log.flush()
    else:
        train_log = open(os.path.join(cfg.LOG_DIR, "train_log.csv"), 'a')

    print('Creating model...')
    model = create_model()
    if start_epoch != 1:
        model = load_model(
            model, 'logs/weights/model_epoch_{}.pth'.format(start_epoch - 1))
    optimizer = torch.optim.Adam(model.parameters(), cfg.LR)

    trainer = Trainer(model, optimizer)
    trainer.set_device(device)
    print('Setting up data...')
    train_loader = DataLoader(LatexDataset(),
                              batch_size=cfg.BATCH_SIZE,
                              shuffle=True,
                              num_workers=cfg.NUM_WORKERS,
                              pin_memory=True,
                              drop_last=True)
    print('Starting training...')
    epoch = start_epoch
    for epoch in range(start_epoch, start_epoch + cfg.EPOCHS):
        trainer.train(epoch, train_loader, train_log)
        if epoch % 5 == 0:
            save_model('logs/weights/model_epoch_{}.pth'.format(epoch), epoch,
                       model)

    save_model(os.path.join(cfg.WEIGHTS_DIR, 'model_last.pth'), epoch, model)
Example #28
def _create_trainer(config, model, optimizer, lr_scheduler, loss_criterion, eval_criterion, loaders, logger):
    assert 'trainer' in config, 'Could not find trainer configuration'
    trainer_config = config['trainer']

    resume = trainer_config.get('resume', None)
    pre_trained = trainer_config.get('pre_trained', None)
    validate_iters = trainer_config.get('validate_iters', None)

    inference_config = trainer_config.get('inference', None)

    if resume is not None:
        # continue training from a given checkpoint
        return Trainer.from_checkpoint(
            resume, model, optimizer, lr_scheduler, loss_criterion,
            eval_criterion, config['device'], loaders,
            logger=logger, inference_config=inference_config)
    elif pre_trained is not None:
        # fine-tune a given pre-trained model
        return Trainer.from_pretrained(
            pre_trained, model, optimizer, lr_scheduler, loss_criterion,
            eval_criterion, device=config['device'], loaders=loaders,
            max_num_epochs=trainer_config['epochs'],
            max_num_iterations=trainer_config['iters'],
            validate_after_iters=trainer_config['validate_after_iters'],
            log_after_iters=trainer_config['log_after_iters'],
            eval_score_higher_is_better=trainer_config['eval_score_higher_is_better'],
            logger=logger, validate_iters=validate_iters,
            inference_config=inference_config)
    else:
        # start training from scratch
        return Trainer(
            model, optimizer, lr_scheduler, loss_criterion, eval_criterion,
            config['device'], loaders, trainer_config['checkpoint_dir'],
            max_num_epochs=trainer_config['epochs'],
            max_num_iterations=trainer_config['iters'],
            validate_after_iters=trainer_config['validate_after_iters'],
            log_after_iters=trainer_config['log_after_iters'],
            eval_score_higher_is_better=trainer_config['eval_score_higher_is_better'],
            logger=logger, validate_iters=validate_iters,
            inference_config=inference_config)
Example #29
def main(config, modelParam):
    if config['network'] == 'CartPole_v1_image':
        env = EnvironmentWrapper_image(modelParam)
    else:
        env = EnvironmentWrapper(modelParam)

    # create an instance of the model you want
    model = Model(config, modelParam, env)

    # create an instance of the saver and restorer class
    saveRestorer = SaverRestorer(config, modelParam)
    model        = saveRestorer.restore(model)

    # here you train your model
    if not modelParam['play']:
        trainer = Trainer(model, modelParam, config, saveRestorer, env)
        trainer.train()

    # play
    if modelParam['play']:
        player = Player(model, modelParam, config, saveRestorer, env)
        player.play_episode()

    return
Example #30
def train(config):
    trainer = Trainer(config)
    trainer.run()