Code Example #1
File: test_trainer.py Project: camjw/robot_pong
def setUp(self):
    gameMock = mock.Mock()
    memoryMock = mock.Mock()
    networkMock = mock.Mock()
    competitorMock = mock.Mock()
    self.trainer = Trainer(gameMock, memoryMock, networkMock,
                           competitorMock, 1, 0, 0.5, 0.9, 0.9, 1.1)
Code Example #2
def main():
    params = vars(
        train_parse_args(hyperparameters_default=HYPERPARAMETERS,
                         paths_default=PATHS))

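    # Choose the compute device once; code below should consistently use params['device'].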
    if not params['disable_cuda'] and torch.cuda.is_available():
        params['device'] = torch.device('cuda:0')
    else:
        params['device'] = torch.device('cpu')

    print(torch.cuda.is_available(), params['device'])

    loaders = get_data_loaders(imgs_dir=params['imgs_dir'],
                               labels_filename=params['labels_filename'],
                               batch_size=params['batch_size'],
                               n_imgs=params['n_imgs'])

    model = ResNet()
    # model = SimpleClassifier()
    model.to(params['device'])  # honor the device chosen above instead of assuming CUDA
    optimizer = Adam(model.parameters(), lr=3e-4)
    criterion = BCELoss()
    trainer = Trainer(params, model, optimizer, criterion)

    trainer.run(loaders)
Code Example #3
    def test_XOR_operation(self):
        network = Perceptron(input=2, hidden=[20], output=1)
        trainer = Trainer(network)
        error, epoch = trainer.XOR()
        self.assertTrue(error < 0.1)

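        # A trained network should reproduce the XOR truth table within the 0.1 tolerance.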
        self.assertTrue(abs(sum(network.activate([0, 0])) - 0) < 0.1)
        self.assertTrue(abs(sum(network.activate([0, 1])) - 1) < 0.1)
        self.assertTrue(abs(sum(network.activate([1, 0])) - 1) < 0.1)
        self.assertTrue(abs(sum(network.activate([1, 1])) - 0) < 0.1)
Code Example #4
File: ucn_trainer.py Project: zeta1999/open-ucn
    def __init__(
        self,
        config,
        data_loader,
        val_data_loader=None,
        test_data_loader=None,
    ):

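        # Delegate the shared setup to the base Trainer; the attributes below are UCN-specific.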
        Trainer.__init__(self, config, data_loader, val_data_loader)
        self.best_val_metric = 'hit_ratio'
        self.train_max_iter = config.train_max_iter
        self.sift = cv2.xfeatures2d.SIFT_create()
Code Example #5
File: test_speed.py Project: nghiattran/mentality
def test_from_json(self):
    return  # test disabled: delete this early return to run the speed benchmark
    fn = os.path.join(os.path.dirname(__file__), './test_network.json')
    settings = {
        'momentum': 0.99,
        'epoch': 5000,
        'log': 100,
        'error': 0.1,
        'rate': 0.2,
        'cost': Cost.SE
    }
    with open(fn) as data_file:
        network_json = json.load(data_file)
        network = Network.from_json(network_json)
        trainer = Trainer(network)
        start = time.time()
        error, epoch = trainer.XOR(settings)
        stop = time.time()
        print(stop - start)
Code Example #6
def main(args):
    logger = Logger(args.output_dir)
    args.logger = logger
    trainer = Trainer(args)
    evaluator = Evaluator(trainer)
    for i_epoch in range(0, args.epoch + 1):

        # train
        log_dict = {
            'i_epoch': i_epoch,
            'train_losses': [],  # one entry per batch
            'test_bleus': []  # one entry per sample
        }
        trainer.train_one_epoch(log_dict)

        # evaluation and logging
        logger.log('epoch %d' % i_epoch)
        evaluator.bleu(log_dict)
        evaluator.sample_translation()
        log_dict_mean = {
            'i_epoch': log_dict['i_epoch'],
            'train_loss': np.mean(log_dict['train_losses']),
            'test_bleu': np.mean(log_dict['test_bleus'])
        }
        logger.dump(log_dict_mean)
        trainer.save_best(log_dict_mean)
        logger.log('-' * 10)
Code Example #7
def main(args):
    """
    Read config file and run the experiment.

    Parameters
    ----------
    config : str
        path to config JSON file.

    Returns
    -------
    None

    Notes
    -----
    See configs/example.json
    """
    with open(args.config, 'r') as f:
        config = json.load(f)

    mode = config['mode']

    trainer = Trainer(config)

    if mode == 'train':
        trainer.train()

    elif mode == 'test':
        trainer.test()

    return
Code Example #8
def main(_):

    prepare_dirs(config)

    rng = np.random.RandomState(config.random_seed)
    tf.set_random_seed(config.random_seed)

    model = JMOD2(config)
    trainer = Trainer(config, model, rng)

    if config.is_train:
        if config.resume_training:
            trainer.resume_training()
        else:
            trainer.train()
    else:
        trainer.test(showFigure=True)
Code Example #9
(x_train, t_train), (x_test, t_test) = load_mnist(normalize=True,
                                                  flatten=True,
                                                  one_hot_label=True)

# Feed in 28x28 images and predict which digit (0-9) each one shows
network = MyNeuralNet(cross_entropy_error)

# Add layers ("he" presumably selects He weight initialization for the ReLU stacks)
network.add_layer(Affine(28 * 28, 50, "he"))
network.add_layer(BatchNormalization())
network.add_layer(Relu())
network.add_layer(Affine(50, 100, "he"))
network.add_layer(BatchNormalization())
network.add_layer(Relu())
network.add_layer(Affine(100, 100, "he"))
network.add_layer(BatchNormalization())
network.add_layer(Relu())
network.add_layer(Affine(100, 100, "he"))
network.add_layer(BatchNormalization())
network.add_layer(Relu())
network.add_layer(Affine(100, 100, "he"))
network.add_layer(BatchNormalization())
network.add_layer(Relu())
network.add_layer(Affine(100, 10, "he"))
network.add_layer(Softmax())

# Train
trainer = Trainer(network, x_train, t_train, x_test, t_test)
trainer.train(lr=0.1, epoch_num=20, batch_size=100)
trainer.savefig("batch_normalization", "../data/batch_normalization.png")
Code Example #10
    parser.add_argument('--speed',
                        type=int,
                        default=defaultSpeed,
                        help=speedHelp)
    # nargs='?' makes these positionals optional so their defaults can take effect
    parser.add_argument('inmodel',
                        type=str,
                        nargs='?',
                        default=defaultInput,
                        help=inputHelp)
    parser.add_argument('outmodel',
                        type=str,
                        nargs='?',
                        default=defaultOutput,
                        help=outputHelp)
    args = parser.parse_args()

    # create a trainer
    trainer = Trainer(args.inmodel)

    # sleep - so keras and tensorflow will allow multiple models...
    time.sleep(1)

    # create an agent to train
    agent = Agent(args.outmodel + '/model-session{}.h5',
                  maxrecall=args.maxrecall)

    # sleep - so keras and tensorflow will allow multiple models...
    time.sleep(1)

    # Define environment/simulation
    sim = Drive(agent.width, agent.height, args.speed)

    # train in the environment
Code Example #11
def run_training(H):
    # torch.cuda.is_available = lambda : False
    # torch.backends.cudnn.enabled=False
    torch.backends.cudnn.deterministic = True
    # torch.backends.cudnn.benchmark = True

    create_logger(H)

    random.seed(H.SEED)
    np.random.seed(H.SEED)
    torch.manual_seed(H.SEED)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(H.SEED)
        torch.cuda.manual_seed_all(H.SEED)

    logger.info("Training start.")
    logger.info(repr(H))

    train_loader, valid_loader, vocab = create_data_pipelines(H)

    logger.info(train_loader.dataset)
    logger.info(valid_loader.dataset)

    m = Metric([('train_loss', np.inf), ('train_score', np.inf),
                ('valid_loss', np.inf), ('valid_score', 0), ('train_lr', 0),
                ('valid_cer', np.inf)])

    model = SpeechCNN(len(vocab),
                      input_size=256,
                      hidden_size=H.CNN_HIDDEN_SIZE,
                      dropout=H.CNN_DROPOUT,
                      initialize=torch_weight_init)
    if H.USE_CUDA:
        model.cuda()

    if H.PRELOAD_MODEL_PATH:
        path = os.path.join(H.EXPERIMENT, H.PRELOAD_MODEL_PATH)
        state = torch.load(path)
        model.load_state_dict(state)
        print("Preloaded model: {}".format(path))

    criterion = PytorchCTCLoss(vocab)

    optimizer = optim.SGD(list(
        filter(lambda p: p.requires_grad, model.parameters())),
                          lr=H.LR,
                          weight_decay=H.WEIGHT_DECAY,
                          momentum=H.MOMENTUM,
                          nesterov=H.NESTEROV)

    stopping = Stopping(model, patience=H.STOPPING_PATIENCE)

    scheduler = optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=[H.LR_LAMBDA])

    ctc_decoder = CTCGreedyDecoder(vocab)

    scorer = Scorer(reduction='sum')

    tlogger = TensorboardLogger(root_dir=H.EXPERIMENT,
                                experiment_dir=H.TIMESTAMP)  # PytorchLogger()

    checkpoint = Checkpoint(model,
                            optimizer,
                            stopping,
                            m,
                            root_dir=H.EXPERIMENT,
                            experiment_dir=H.TIMESTAMP,
                            restore_from=-1,
                            interval=H.CHECKPOINT_INTERVAL,
                            verbose=0)

    trainer = Trainer(model, train_loader, optimizer, scheduler, criterion,
                      ctc_decoder, scorer, H.MAX_GRAD_NORM)

    evaluator = Evaluator(model, valid_loader, criterion, ctc_decoder, scorer)

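    # Optionally resume training from the most recent checkpoint.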
    epoch_start = 1
    if H.CHECKPOINT_RESTORE:
        epoch_start = checkpoint.restore() + 1
        train_loader.batch_sampler.shuffle(epoch_start)

    epoch = epoch_start
    try:
        epoch_itr = tlogger.set_itr(range(epoch_start, H.MAX_EPOCHS + 1))

        for epoch in epoch_itr:

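            # DelayedKeyboardInterrupt defers Ctrl-C until the block exits, keeping saved state consistent.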
            with DelayedKeyboardInterrupt():

                m.train_loss, m.train_score, m.train_lr = trainer(epoch)

                m.valid_loss, m.valid_score = evaluator()

                if checkpoint:
                    checkpoint.step(epoch)

                stopping_flag = stopping.step(epoch, m.valid_loss,
                                              m.valid_score)

                epoch_itr.log_values(m.train_loss, m.train_score, m.train_lr,
                                     m.valid_loss, m.valid_score,
                                     stopping.best_score_epoch,
                                     stopping.best_score)

                if stopping_flag:
                    logger.info(
                        "Early stopping at epoch: %d, score %f" %
                        (stopping.best_score_epoch, stopping.best_score))
                    break

                train_loader.batch_sampler.shuffle(epoch)

    except KeyboardInterrupt:
        logger.info("Training interrupted at: {}".format(epoch))

    checkpoint.create(epoch)

    model.load_state_dict(stopping.best_score_state)
    torch.save(model.state_dict(),
               os.path.join(H.EXPERIMENT, H.MODEL_NAME + '.tar'))

    logger.info(repr(tlogger))
    logger.info(repr(stopping))
    logger.info(repr(checkpoint))

    logger.info("Training end.")
Code Example #12
from lib.my_neural_net import MyNeuralNet
from lib.layers import *
from lib.trainer import Trainer

# Load the dataset
(x_train, t_train), (x_test, t_test) = load_mnist(normalize=True,
                                                  flatten=True,
                                                  one_hot_label=True)

# Feed in 28x28 images and predict which digit (0-9) each one shows
network = MyNeuralNet(cross_entropy_error)

# Add layers
network.add_layer(Affine(28 * 28, 50, "he"))
network.add_layer(Relu())
network.add_layer(Affine(50, 100, "he"))
network.add_layer(Relu())
network.add_layer(Affine(100, 100, "he"))
network.add_layer(Relu())
network.add_layer(Affine(100, 100, "he"))
network.add_layer(Relu())
network.add_layer(Affine(100, 100, "he"))
network.add_layer(Relu())
network.add_layer(Affine(100, 10, "he"))
network.add_layer(Softmax())

# Train
trainer = Trainer(network, x_train, t_train, x_test, t_test)
trainer.train(lr=0.1, epoch_num=20, batch_size=100)
trainer.savefig("five_layer_network", "../data/five_layer_network.png")
Code Example #13
File: tester.py Project: z80020100/OverlapPredator
def __init__(self, args):
    Trainer.__init__(self, args)
Code Example #14
File: test_trainer.py Project: camjw/robot_pong
class TrainerTest(unittest.TestCase):
    def setUp(self):
        gameMock = mock.Mock()
        memoryMock = mock.Mock()
        networkMock = mock.Mock()
        competitorMock = mock.Mock()
        self.trainer = Trainer(gameMock, memoryMock, networkMock,
                               competitorMock, 1, 0, 0.5, 0.9, 0.9, 1.1)

    def test_competitor_action(self):
        self.trainer.competitor.batch_prediction.return_value = [1, 0, 0]
        state = {
            'first': 0,
            'second': 1,
            'champion-paddle-y': 0,
            'ball-position-y': 0.5
        }
        self.assertEqual(self.trainer.competitor_action(state), -1)

    def test_calculate_reward_return(self):
        self.trainer.game.last_hit = 0
        self.trainer.game.collision = True
        state = {'score': 0, 'champion-paddle-y': 0, 'ball-position-y': 0.5}
        self.assertEqual(self.trainer.calculate_reward(state), 5)

    def test_calculate_reward_winner(self):
        self.trainer.game.last_hit = 1
        self.trainer.game.collision = False
        state = {'score': 1, 'champion-paddle-y': 0, 'ball-position-y': 0.5}
        self.assertEqual(self.trainer.calculate_reward(state), 5)

    def test_calculate_reward_negative(self):
        self.trainer.game.collision = False
        state = {'score': -100, 'champion-paddle-y': 0, 'ball-position-y': 0.5}
        self.assertEqual(self.trainer.calculate_reward(state), -5)

    def test_calculate_reward_neutral(self):
        self.trainer.game.collision = False
        state = {'score': 0, 'champion-paddle-y': 0, 'ball-position-y': 0.5}
        self.assertEqual(self.trainer.calculate_reward(state), 0)

    def test_calculate_reward_in_line(self):
        self.trainer.game.collision = False
        state = {'score': 0, 'champion-paddle-y': 0, 'ball-position-y': 0.05}
        self.assertEqual(self.trainer.calculate_reward(state), 0.1)

    def test_update_epsilon(self):
        initial_epsilon = self.trainer.epsilon
        self.trainer.total_steps += 1
        self.trainer.update_epsilon()
        after_epsilon = self.trainer.epsilon
        self.assertTrue(after_epsilon < initial_epsilon)

    def test_add_sample_done(self):
        memory = [{
            'x': 1,
            'y': 2,
            'z': 3
        }, {
            'x': 2,
            'y': 3,
            'z': 4
        }, 1, 2, True]
        self.trainer.add_sample(*memory)
        self.trainer.memory.add_memory.assert_called()

    def test_add_sample_not_done(self):
        memory = [{
            'x': 1,
            'y': 2,
            'z': 3
        }, {
            'x': 2,
            'y': 3,
            'z': 4
        }, 1, 2, False]
        self.trainer.add_sample(*memory)
        self.trainer.memory.add_memory.assert_called()

    def test_reward_predictions(self):
        self.trainer.champion.batch_prediction.return_value = [0.1, 0.8, 0.1]
        self.trainer.reward_predictions([1, 2, 3], [2, 3, 4])
        self.trainer.champion.batch_prediction.assert_called()

    def test_champion_action_random(self):
        self.trainer.epsilon = 2
        self.trainer.game.POSSIBLE_MOVES = ['a', 'b', 'c']
        self.assertTrue(
            self.trainer.champion_action({'x': 1}) in
            self.trainer.game.POSSIBLE_MOVES)

    def test_champion_action_prediction(self):
        self.trainer.epsilon = 0
        self.trainer.champion.batch_prediction.return_value = np.array(
            [1, 0, 0])
        self.trainer.champion_action({'x': 1, 'y': 2, 'z': 3})
        self.trainer.champion.batch_prediction.assert_called()

    def test_run_game(self):
        self.trainer.train_model = lambda: None
        self.trainer.update_epsilon = lambda: None
        self.trainer.add_sample = lambda a, b, c, d, e: None
        self.trainer.game.return_champion_state = lambda: {
            'x': 1,
            'y': 2,
            'z': 3,
            'champion-paddle-y': 0,
            'ball-position-y': 0.5
        }
        self.trainer.game.return_competitor_state = lambda: None
        self.trainer.calculate_reward = lambda x: None
        self.trainer.champion_action = lambda x: None
        self.trainer.competitor_action = lambda x: None
        self.trainer.game.reset_game = lambda: None
        self.trainer.game.game_over = True
        self.trainer.game.step = lambda a, b: None
        self.trainer.memory.buffer = [1]
        self.trainer.run_game()
        self.assertEqual(
            [self.trainer.total_steps, self.trainer.current_score], [1, 0])

    def test_test_game(self):
        self.trainer.train_model = lambda: None
        self.trainer.game.return_champion_state = lambda: {
            'x': 1,
            'y': 2,
            'z': 3,
            'score': 1,
            'champion-paddle-y': 0,
            'ball-position-y': 0.5
        }
        self.trainer.game.return_competitor_state = lambda: None
        self.trainer.champion_action = lambda x: None
        self.trainer.competitor_action = lambda x: None
        self.trainer.game.reset_game = lambda: None
        self.trainer.game.game_over = True
        self.trainer.game.step = lambda a, b: None
        self.assertEqual(self.trainer.test_game(), 1)

    def test_train_model(self):
        self.trainer.memory.sample_memory.return_value = [[1, 2, 3, 0]]
        self.trainer.reward_predictions = lambda x, y: [[[1]], [[1]]]
        self.trainer.champion.no_inputs = self.trainer.champion.no_actions = 1
        self.trainer.champion.batch_train.return_value = [1]
        self.trainer.train_model()
        self.trainer.champion.batch_train.assert_called()

    def test_train_model_done(self):
        self.trainer.memory.sample_memory.return_value = [[1, None, 3, 0]]
        self.trainer.reward_predictions = lambda x, y: [[[1]], [[1]]]
        self.trainer.champion.no_inputs = self.trainer.champion.no_actions = 1
        self.trainer.champion.batch_train.return_value = [1]
        self.trainer.train_model()
        self.trainer.champion.batch_train.assert_called()
Code Example #15
def run_training(H):
    # torch.cuda.is_available = lambda : False
    # torch.backends.cudnn.enabled=False
    torch.backends.cudnn.deterministic = True
    # torch.backends.cudnn.benchmark = True

    create_logger(H)

    random.seed(H.SEED)
    np.random.seed(H.SEED)
    torch.manual_seed(H.SEED)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(H.SEED)
        torch.cuda.manual_seed_all(H.SEED)

    logger.info("Training start.")
    logger.info(repr(H))

    train_loader, valid_loader, vocab = create_data_pipelines(H)

    logger.info(train_loader.dataset)
    logger.info(valid_loader.dataset)

    m = Metric([('train_loss', np.inf), ('train_score', np.inf),
                ('valid_loss', np.inf), ('valid_score', 0), ('train_lr', 0),
                ('valid_cer', np.inf)])

    model = NeuralSpeechRecognizer(
        vocab,
        train_loader.dataset.max_seq_length,
        rnn_hidden_size=H.RNN_HIDDEN_SIZE,
        rnn_num_layers=H.RNN_NUM_LAYERS,
        rnn_dropout=H.RNN_DROPOUT,
        cnn_dropout=H.CNN_DROPOUT,
        teacher_forcing_ratio=H.TEACHER_FORCING_RATIO,
        sample_rate=H.AUDIO_SAMPLE_RATE,
        window_size=H.SPECT_WINDOW_SIZE,
        initialize=torch_weight_init)
    if H.USE_CUDA:
        model.cuda()

    logger.info(model_summary(model, line_length=100))

    if H.PRELOAD_MODEL_PATH:
        path = os.path.join(H.EXPERIMENT, H.PRELOAD_MODEL_PATH)
        state = torch.load(path)
        model.load_state_dict(state)
        logging.info("Preloaded model: {}".format(path))

    criterion = LabelSmoothingLoss(padding_idx=0,
                                   label_smoothing=H.LABEL_SMOOTHING)

    sts_decoder = STSDecoder(vocab)

    scorer = Scorer()

    optimizer = optim.Adam(list(
        filter(lambda p: p.requires_grad, model.parameters())),
                           amsgrad=False,
                           betas=(0.9, 0.999),
                           eps=1e-08,
                           lr=H.LR,
                           weight_decay=H.WEIGHT_DECAY)

    stopping = Stopping(model, patience=H.STOPPING_PATIENCE)

    scheduler = optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=[H.LR_LAMBDA])

    tlogger = TensorboardLogger(root_dir=H.EXPERIMENT,
                                experiment_dir=H.TIMESTAMP)  # PytorchLogger()

    checkpoint = Checkpoint(model,
                            optimizer,
                            stopping,
                            m,
                            root_dir=H.EXPERIMENT,
                            experiment_dir=H.TIMESTAMP,
                            restore_from=-1,
                            interval=H.CHECKPOINT_INTERVAL,
                            verbose=0)

    trainer = Trainer(model, train_loader, optimizer, scheduler, criterion,
                      sts_decoder, scorer, H.MAX_GRAD_NORM)

    evaluator = Evaluator(model, valid_loader, criterion, sts_decoder, scorer)

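    # Optionally resume training from the most recent checkpoint.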
    epoch_start = 1
    if H.CHECKPOINT_RESTORE:
        epoch_start = checkpoint.restore() + 1
        train_loader.batch_sampler.shuffle(epoch_start)

    epoch = epoch_start
    try:
        epoch_itr = tlogger.set_itr(range(epoch_start, H.MAX_EPOCHS + 1))

        for epoch in epoch_itr:

            with DelayedKeyboardInterrupt():

                m.train_loss, m.train_score, m.train_lr = trainer(epoch)

                m.valid_loss, m.valid_score = evaluator()

                if checkpoint:
                    checkpoint.step(epoch)

                stopping_flag = stopping.step(epoch, m.valid_loss,
                                              m.valid_score)

                epoch_itr.log_values(m.train_loss, m.train_score, m.train_lr,
                                     m.valid_loss, m.valid_score,
                                     stopping.best_score_epoch,
                                     stopping.best_score)

                if stopping_flag:
                    logger.info(
                        "Early stopping at epoch: %d, score %f" %
                        (stopping.best_score_epoch, stopping.best_score))
                    break

                train_loader.batch_sampler.shuffle(epoch)

    except KeyboardInterrupt:
        logger.info("Training interrupted at: {}".format(epoch))

    checkpoint.create(epoch)

    model.load_state_dict(stopping.best_score_state)
    torch.save(model.state_dict(),
               os.path.join(H.EXPERIMENT, H.MODEL_NAME + '.tar'))

    logger.info(repr(tlogger))
    logger.info(repr(stopping))
    logger.info(repr(checkpoint))

    logger.info("Training end.")
Code Example #16
File: run_training.py Project: camjw/robot_pong
def main():
    memory_bank = Memory(MEMORY_SIZE)
    pong_game = Game(GAME_LENGTH, GAME_STEP_TIME)

    champion = Network(3,
                       7,
                       hidden_layer_size=HIDDEN_LAYER_SIZE,
                       no_hidden_layers=NO_HIDDEN_LAYERS,
                       learning_rate=LEARNING_RATE)
    competitor = Network(3,
                         7,
                         hidden_layer_size=HIDDEN_LAYER_SIZE,
                         no_hidden_layers=NO_HIDDEN_LAYERS)

    trainer = Trainer(pong_game,
                      memory_bank,
                      champion,
                      competitor,
                      MAX_EPSILON,
                      MIN_EPSILON,
                      EPSILON_DECAY,
                      GAMMA,
                      RETURNS_DECAY,
                      WINNERS_GROWTH,
                      batch_size=BATCH_SIZE)

    champion.save_network(DIRECTORY + '/version_' + str(STARTING_VERSION))

    for version in range(STARTING_VERSION,
                         STARTING_VERSION + NUMBER_OF_TRAINING_SESSIONS):

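        # Self-play phase: run a batch of games against the current competitor, then snapshot the champion.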
        start_time = time.time()
        for _ in range(GAMES_PER_TRAINING_SESSION):
            print('New game')
            trainer.run_game()
            trainer.game = Game(GAME_LENGTH, GAME_STEP_TIME)

        print("Time taken for training session: ", time.time() - start_time)
        champion.save_network(DIRECTORY + '/version_' + str(version + 1))

        current_epsilon = trainer.epsilon
        current_returns_parameter = trainer.returns_parameter
        current_winners_parameter = trainer.winners_parameter
        trainer = Trainer(Game(GAME_LENGTH, GAME_STEP_TIME),
                          memory_bank,
                          champion,
                          competitor,
                          current_epsilon,
                          MIN_EPSILON,
                          EPSILON_DECAY,
                          GAMMA,
                          RETURNS_DECAY,
                          WINNERS_GROWTH,
                          returns_parameter=current_returns_parameter,
                          winners_parameter=current_winners_parameter,
                          batch_size=BATCH_SIZE)
        test_score = trainer.test_game()

        if test_score < 0:
            print('Competitor wins, score was ' + str(test_score))
            competitor.save_network(DIRECTORY + '/competitor_save')
            champion.load_network(DIRECTORY + '/competitor_save')
        else:
            print('Champion continues, score was ' + str(test_score))

        new_competitor_version = random.randint(max(0, version - 5), version)
        print('New competitor version: ' + str(new_competitor_version))

        competitor.load_network(DIRECTORY + '/version_' +
                                str(new_competitor_version))

        current_epsilon = trainer.epsilon
        print('epsilon is ' + str(current_epsilon))
        current_returns_parameter = trainer.returns_parameter
        current_winners_parameter = trainer.winners_parameter
        trainer = Trainer(Game(GAME_LENGTH, GAME_STEP_TIME),
                          memory_bank,
                          champion,
                          competitor,
                          current_epsilon,
                          MIN_EPSILON,
                          EPSILON_DECAY,
                          GAMMA,
                          RETURNS_DECAY,
                          WINNERS_GROWTH,
                          returns_parameter=current_returns_parameter,
                          winners_parameter=current_winners_parameter,
                          batch_size=BATCH_SIZE)
Code Example #17
File: train_eval.py Project: tmaone/s3vdc
# the training data source cannot be empty
if ds_train.is_empty:
    raise RuntimeError("training dataset cannot be empty!")

# the evaluation dataset may be empty; if so, fall back to the training data and warn
ds_eval = DataSource(
    _tf_feature_cols=tf_feature_cols, _ds_class="eval", _dsutil=data_utils
)
if ds_eval.is_empty:
    ds_eval = DataSource(
        _tf_feature_cols=tf_feature_cols,
        _ds_class="train",
        _dsutil=data_utils,
        force_test=True,
    )
    tf.logging.warn(
        "evaluation dataset is empty; will use training dataset for evaluation instead!"
    )

_handle = Trainer(
    model_fn=resolve_model_type(conf_parser.model_type),
    train_data_source=ds_train,
    eval_data_source=ds_eval,
    hyper_params=conf_parser.model_params,
    model_dir=model_dir,
    date_time_str=date_time_str,
)
_result = _handle.run()

tf.logging.info(_result)
Code Example #18
network.add_layer(Pooling(3, 3))

network.add_layer(conv3)
network.add_layer(Relu())
network.add_layer(LRN())
network.add_layer(Pooling(3, 3))

network.add_layer(conv4)
network.add_layer(Relu())
network.add_layer(LRN())
network.add_layer(Pooling(3, 3))

network.add_layer(conv5)
network.add_layer(Relu())
network.add_layer(LRN())
network.add_layer(Pooling(3, 3))

network.add_layer(affine1)
network.add_layer(Relu())

network.add_layer(affine2)
network.add_layer(Softmax())

# Train
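# Plot only on non-Linux hosts (a Linux box is assumed to be headless).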
os_name = subprocess.check_output("uname").decode("utf-8").strip()
plot = os_name != "Linux"

trainer = Trainer(network, x_train, t_train, x_test, t_test)
trainer.train(lr=0.1, epoch_num=20, batch_size=100, plot=plot, for_cnn=True)
trainer.savefig("aleexnet", "../data/alexnet.png")
Code Example #19
File: train.py Project: xiekun2019/MLCReID
def main():
    args = parse_args()
    update_config(args.cfg_file)

    if args.gpus:
        config.GPUS = args.gpus
    else:
        config.CUDA = False
    if args.workers:
        config.WORKERS = args.workers
    print('Using config:')
    pprint.pprint(config)

    if args.manualSeed is None:
        args.manualSeed = random.randint(1, 10000)
    random.seed(args.manualSeed)
    np.random.seed(args.manualSeed)
    torch.manual_seed(args.manualSeed)
    if config.CUDA:
        torch.cuda.manual_seed_all(args.manualSeed)

    torch.backends.cudnn.benchmark = config.CUDNN.BENCHMARK
    torch.backends.cudnn.deterministic = config.CUDNN.DETERMINISTIC
    torch.backends.cudnn.enabled = config.CUDNN.ENABLED

    if config.CUDA:
        os.environ["CUDA_VISIBLE_DEVICES"] = config.GPUS
    device = torch.device('cuda' if config.CUDA else 'cpu')

    # Redirect print to both console and log file
    sys.stdout = Logger(osp.join(config.OUTPUT_DIR, 'log.txt'))

    # Create data loaders
    dataset = DataSet(config.DATASET.ROOT, config.DATASET.DATASET)
    normalizer = T.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])

    train_transformer = T.Compose([
        T.RandomSizedRectCrop(*config.MODEL.IMAGE_SIZE),
        T.RandomHorizontalFlip(),
        T.RandomRotation(10),
        T.ColorJitter(0.2, 0.2, 0.2),
        T.ToTensor(),
        normalizer,
        T.RandomErasing(EPSILON=config.DATASET.RE),
    ])
    test_transformer = T.Compose([
        T.Resize(config.MODEL.IMAGE_SIZE, interpolation=3),
        T.ToTensor(),
        normalizer,
    ])
    train_loader = DataLoader(UnsupervisedCamStylePreprocessor(
        dataset.train,
        root=osp.join(dataset.images_dir, dataset.train_path),
        camstyle_root=osp.join(dataset.images_dir,
                               dataset.train_camstyle_path),
        num_cam=dataset.num_cam,
        use_gan=True,
        transform=train_transformer),
                              batch_size=config.TRAIN.BATCH_SIZE,
                              num_workers=config.WORKERS,
                              shuffle=config.TRAIN.SHUFFLE,
                              pin_memory=True,
                              drop_last=False)

    query_loader = DataLoader(Preprocessor(dataset.query,
                                           root=osp.join(
                                               dataset.images_dir,
                                               dataset.query_path),
                                           transform=test_transformer),
                              batch_size=config.TEST.BATCH_SIZE,
                              num_workers=config.WORKERS,
                              shuffle=False,
                              pin_memory=True)

    gallery_loader = DataLoader(Preprocessor(dataset.gallery,
                                             root=osp.join(
                                                 dataset.images_dir,
                                                 dataset.gallery_path),
                                             transform=test_transformer),
                                batch_size=config.TEST.BATCH_SIZE,
                                num_workers=config.WORKERS,
                                shuffle=False,
                                pin_memory=True)

    # Create model
    model = models.create(config.MODEL.NAME,
                          pretrained=config.MODEL.PRETRAINED,
                          num_classes=dataset.num_train_ids)

    # Memory Network
    num_tgt = len(dataset.train)
    memory = models.create('memory', config.MODEL.FEATURES, num_tgt)

    # Load from checkpoint
    if config.TRAIN.RESUME:
        checkpoint = load_checkpoint(config.TRAIN.CHECKPOINT)
        model.load_state_dict(checkpoint['state_dict'], strict=False)
        memory.load_state_dict(checkpoint['state_dict_memory'], strict=False)
        print("=> Start epoch {} ".format(checkpoint['epoch']))

    # Set model
    model = nn.DataParallel(model).to(device)
    memory = memory.to(device)

    # Optimizer
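    # The pretrained backbone is tuned at 0.1x the learning rate of the newly added layers.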
    base_param_ids = set(map(id, model.module.base.parameters()))

    base_params_need_for_grad = filter(lambda p: p.requires_grad,
                                       model.module.base.parameters())

    new_params = [p for p in model.parameters() if id(p) not in base_param_ids]
    param_groups = [{
        'params': base_params_need_for_grad,
        'lr_mult': 0.1
    }, {
        'params': new_params,
        'lr_mult': 1.0
    }]

    optimizer = get_optimizer(config, param_groups)

    # Trainer
    trainer = Trainer(config, model, memory)

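    # Step decay: scale the base LR by LR_FACTOR every LR_STEP epochs.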
    def adjust_lr(epoch):
        step_size = config.TRAIN.LR_STEP
        lr = config.TRAIN.LR * (config.TRAIN.LR_FACTOR**(epoch // step_size))
        for g in optimizer.param_groups:
            g['lr'] = lr * g.get('lr_mult', 1)

    best_r1 = 0.0
    # Start training
    for epoch in range(config.TRAIN.BEGIN_EPOCH, config.TRAIN.END_EPOCH):
        # lr_scheduler.step()
        adjust_lr(epoch)
        trainer.train(epoch, train_loader, optimizer)

        print('Test with latest model:')
        evaluator = Evaluator(model)
        r1 = evaluator.evaluate(query_loader, gallery_loader, dataset.query,
                                dataset.gallery, config.TEST.OUTPUT_FEATURES)

        if r1 > best_r1:
            best_r1 = r1
            save_checkpoint(
                {
                    'state_dict': model.module.state_dict(),
                    'state_dict_memory': memory.state_dict(),
                    'epoch': epoch + 1,
                },
                fpath=osp.join(config.OUTPUT_DIR, 'checkpoint.pth.tar'))

        print('\n * Finished epoch {:3d} \n'.format(epoch))

    # Final test
    print('Test with best model:')
    evaluator = Evaluator(model)
    checkpoint = load_checkpoint(
        osp.join(config.OUTPUT_DIR, 'checkpoint.pth.tar'))
    print('best model at epoch: {}'.format(checkpoint['epoch']))
    model.module.load_state_dict(checkpoint['state_dict'])
    evaluator.evaluate(query_loader, gallery_loader, dataset.query,
                       dataset.gallery, config.TEST.OUTPUT_FEATURES)
Code Example #20
logger = Logger(opt)


model = TensorMask(backbone=opt.backbone, num_cls=opt.num_class,
                   base_window=opt.base_window,
                   freezeBN=opt.frezeBN, freezeLayers=opt.frezeLayer,
                   align_corners=opt.align_corners)

# Apply weight decay only to 4-D (conv) weight tensors; biases and BN params are left undecayed.
optimizer = optimer.SGD(
    [{'params': filter(lambda x: len(x.size()) == 4, model.parameters()),
      'weight_decay': 0.0001},
     {'params': filter(lambda x: len(x.size()) < 4, model.parameters())}],
    lr=opt.lr, warm_up=1000, momentum=0.9, nesterov=True)
start_epoch = 0
if opt.weights != '':
    model, optimizer, start_epoch = load_model(
        model, opt.weights, optimizer, opt.resume, opt.lr, opt.lr_step)
trainer = Trainer(opt, model, optimizer)
trainer.set_device(opt.gpus, opt.device)

print('Setting up data...')
val_loader = torch.utils.data.DataLoader(
    COCO(cfg=opt, split='val', augment=False),
    batch_size=8,
    shuffle=False,
    num_workers=8,
    pin_memory=True
)
train_loader = torch.utils.data.DataLoader(
    COCO(cfg=opt, split='train', augment=True),
    batch_size=opt.batch_size,
    shuffle=True,
    num_workers=8,
Code Example #21
import glob
import os
import logging as log

from lib.trainer import Trainer
from lib.options import parse_options

# Set logger display format
log.basicConfig(format='[%(asctime)s] [INFO] %(message)s',
                datefmt='%d/%m %H:%M:%S',
                level=log.INFO)

if __name__ == "__main__":
    """Main program."""

    args, args_str = parse_options()
    log.info(f'Parameters: \n{args_str}')
    if args.analytic:
        for filename in glob.glob(os.path.join(args.glsl_path,
                                               '*/*.glsl'))[::-1]:
            name = os.path.basename(filename).split('.')[0]

            if args.exclude and name in args.exclude:
                continue
            if args.include and name not in args.include:
                continue

            log.info(f'Training {name}')
            model = Trainer(args, args_str, name)
            model.train()
    else:
        log.info(f'Training on {args.dataset_path}')
        model = Trainer(args, args_str, 'mesh')
        model.train()
Code Example #22
def train_one(model_folder):
    new_model_folder_name = model_folder.replace('_created', '_training')
    os_rename(model_folder, new_model_folder_name)
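    # Each 'k_*' subdirectory appears to hold one frequency-specific model configuration to train.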
    frequencies = glob(os_path_join(new_model_folder_name, 'k_*'))
    for frequency in frequencies:
        # Load model
        print('train.py: training {}'.format(frequency))
        model_params_path = os_path_join(frequency, model_params_fname)

        # create model
        model, model_params = get_which_model_from_params_fname(
            model_params_path, return_params=True)
        if 'cuda' in model_params:
            using_cuda = model_params['cuda'] and torch_cuda_is_available()
        else:
            using_cuda = torch_cuda_is_available()

        if using_cuda is True:
            model.cuda()

        # save initial weights
        if 'save_initial' in model_params and model_params[
                'save_initial'] and model_params['save_dir']:
            suffix = '_initial'
            path = add_suffix_to_path(model_params['save_dir'], suffix)
            ensure_dir(path)
            torch_save(model.state_dict(),
                       os_path_join(path, MODEL_DATA_FNAME))
            save_model_params(os_path_join(path, model_params_fname),
                              model_params)

        # loss
        if 'cost_function' in model_params:
            loss = model_params['cost_function']
        elif 'loss_function' in model_params:
            loss = model_params['loss_function']
        else:
            raise ValueError(
                'model_params missing key cost_function or loss_function')

        if loss not in ['MSE', 'L1', 'SmoothL1']:
            raise TypeError('loss must be MSE, L1, or SmoothL1; got ' +
                            str(loss))
        if loss == 'MSE':
            from torch.nn import MSELoss
            loss = MSELoss()
        elif loss == 'L1':
            from torch.nn import L1Loss
            loss = L1Loss()
        elif loss == 'SmoothL1':
            from torch.nn import SmoothL1Loss
            loss = SmoothL1Loss()

        # optimizer
        if model_params['optimizer'] == 'Adam':
            from torch.optim import Adam
            optimizer = Adam(model.parameters(),
                             lr=model_params['learning_rate'],
                             weight_decay=model_params['weight_decay'])
        elif model_params['optimizer'] == 'SGD':
            from torch.optim import SGD
            optimizer = SGD(model.parameters(),
                            lr=model_params['learning_rate'],
                            momentum=model_params['momentum'],
                            weight_decay=model_params['weight_decay'])
        else:
            raise ValueError(
                'model_params[\'optimizer\'] must be either Adam or SGD. Got '
                + model_params['optimizer'])

        logger = Logger()

        # Load training, validation, and test data
        # Load primary training data
        dat_train = ApertureDataset(
            model_params['data_train'],
            NUM_SAMPLES_TRAIN,
            k=model_params['k'],
            target_is_data=model_params['data_is_target'])
        loader_train = DataLoader(dat_train,
                                  batch_size=model_params['batch_size'],
                                  shuffle=True,
                                  num_workers=DATALOADER_NUM_WORKERS,
                                  pin_memory=using_cuda)

        # Load secondary training data - used to evaluate training loss after every epoch
        dat_train2 = ApertureDataset(
            model_params['data_train'],
            NUM_SAMPLES_TRAIN_EVAL,
            k=model_params['k'],
            target_is_data=model_params['data_is_target'])
        loader_train_eval = DataLoader(dat_train2,
                                       batch_size=model_params['batch_size'],
                                       shuffle=False,
                                       num_workers=DATALOADER_NUM_WORKERS,
                                       pin_memory=using_cuda)

        # Load validation data - used to evaluate validation loss after every epoch
        dat_val = ApertureDataset(
            model_params['data_val'],
            NUM_SAMPLES_VALID,
            k=model_params['k'],
            target_is_data=model_params['data_is_target'])
        loader_val = DataLoader(dat_val,
                                batch_size=model_params['batch_size'],
                                shuffle=False,
                                num_workers=DATALOADER_NUM_WORKERS,
                                pin_memory=using_cuda)

        trainer = Trainer(
            model=model,
            loss=loss,
            optimizer=optimizer,
            patience=model_params['patience'],
            loader_train=loader_train,
            loader_train_eval=loader_train_eval,
            loader_val=loader_val,
            cuda=using_cuda,
            logger=logger,
            data_noise_gaussian=model_params['data_noise_gaussian'],
            save_dir=frequency)

        # run training
        trainer.train()

    os_rename(new_model_folder_name,
              new_model_folder_name.replace('_training', '_trained'))
Code Example #23
                embeddings_weights[
                    train_dataset.vocabulary[word]] = np.random.uniform(
                        -0.5, 0.5)

        with open(params.processed_embeddings_path, 'w') as outfile:
            json.dump(embeddings_weights.tolist(), outfile)
        print("Embedding weights saved!")
        print("Out of %d total words, %d are found in the embedding" %
              (len(train_dataset.vocabulary), words_missing))
    else:
        with open(params.processed_embeddings_path) as json_file:
            data = json.load(json_file)
            embeddings_weights = torch.Tensor(data)
        print("Embedding weights loaded!")

train_loader = DataLoader(train_dataset, batch_size=256)
valid_loader = DataLoader(valid_dataset, batch_size=256)
test_loader = DataLoader(test_dataset, batch_size=256)

nermodel = NERModel(len(train_dataset.vocabulary),
                    len(train_dataset.label_vocabulary),
                    params).to(torch.device(params.device))
if params.embeddings_path is not None:
    nermodel.word_embedder.weight.data.copy_(torch.Tensor(embeddings_weights))

trainer = Trainer(model=nermodel,
                  loss_function=nn.CrossEntropyLoss(
                      ignore_index=train_dataset.label_vocabulary["<pad>"]),
                  optimizer=optim.Adam(params=nermodel.parameters()))

trainer.train(train_loader, valid_loader, epochs=1000)
Code Example #24
    parser.add_argument('--use-cuda', type=strtobool, default='1')
    parser.add_argument('--max-sent-len', type=int, default=30)
    parser.add_argument('--data-dir', type=str, default='./')
    parser.add_argument('--vocab-size', type=int, default=50000)
    parser.add_argument('--embedding-dim', type=int, default=300)
    parser.add_argument('--class-n', type=int, default=4)
    parser.add_argument('--model', type=str, default='CNN')
    parser.add_argument('--epoch', type=int, default=100)
    parser.add_argument('--gpu-id', type=int, default=0)
    return parser.parse_args()


if __name__ == '__main__':
    # args = Args()
    args = get_args()
    os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu_id)
    print('using GPU id: ', os.environ['CUDA_VISIBLE_DEVICES'])

    if args.use_cuda and torch.cuda.is_available():
        args.use_cuda = True
    else:
        args.use_cuda = False

    trainer = Trainer(args)

    for i_epoch in range(1, args.epoch + 1):
        loss = trainer.train_one_epoch(1)
        print('epoch %d: training loss -> %f' % (i_epoch, loss))
        loss, acc = trainer.test()
        print('test loss: %f, test acc: %f' % (loss, acc))
Code Example #25
File: main.py Project: zgojcic/OverlapPredator
    benchmark_set = ThreeDMatchDownsampled(info_benchmark, config,
                                           data_augmentation=False)

    config.train_loader, neighborhood_limits = get_dataloader(
        dataset=train_set,
        batch_size=config.batch_size,
        shuffle=True,
        num_workers=config.num_workers,
    )
    config.val_loader, _ = get_dataloader(
        dataset=val_set,
        batch_size=config.batch_size,
        shuffle=False,
        num_workers=1,
        neighborhood_limits=neighborhood_limits,
    )
    config.test_loader, _ = get_dataloader(
        dataset=benchmark_set,
        batch_size=config.batch_size,
        shuffle=False,
        num_workers=1,
        neighborhood_limits=neighborhood_limits,
    )

    # create evaluation metrics
    config.desc_loss = MetricLoss(config)

    # start to train our model
    trainer = Trainer(config)
    if config.mode == 'train':
        trainer.train()
    elif config.mode == 'val':
        trainer.eval()
    else:
        trainer.test()
Code Example #26
from lib.trainer import Trainer

# Load the dataset
(x_train, t_train), (x_test, t_test) = load_mnist(normalize=True,
                                                  flatten=False,
                                                  one_hot_label=True)

# Define the network
network = MyNeuralNet(cost_function=cross_entropy_error)

# Add layers
conv1 = Conv((30, 1, 5, 5))
pool1 = Pooling(2, 2, stride=2)
affine1 = Affine(30 * 12 * 12, 100, "he")
affine2 = Affine(100, 10, "he")

network.add_layer(conv1)
network.add_layer(Relu())
network.add_layer(pool1)

network.add_layer(affine1)
network.add_layer(Relu())

network.add_layer(affine2)
network.add_layer(Softmax())

# Train
trainer = Trainer(network, x_train, t_train, x_test, t_test)
trainer.train(lr=0.1, epoch_num=20, batch_size=100, for_cnn=True)
trainer.savefig("simple_cnn", "../data/simple_cnn.png")