import torch  # needed for MSELoss and state_dict saving


def main():
    args = get_args()

    criterion = torch.nn.MSELoss().cuda()
    train_loader, test_loader = load_data(args.dataset,
                                          training_size=args.n_samples,
                                          batch_size=args.load_size)
    net = load_net(args.dataset)
    optimizer = get_optimizer(net, args)
    print(optimizer)

    print('===> Architecture:')
    print(net)

    print('===> Start training')
    train(net,
          criterion,
          optimizer,
          train_loader,
          args.batch_size,
          args.n_iters,
          verbose=True)

    train_loss, train_accuracy = eval_accuracy(net, criterion, train_loader)
    test_loss, test_accuracy = eval_accuracy(net, criterion, test_loader)
    print('===> Solution: ')
    print('\t train loss: %.2e, acc: %.2f' % (train_loss, train_accuracy))
    print('\t test loss: %.2e, acc: %.2f' % (test_loss, test_accuracy))

    torch.save(net.state_dict(), args.model_file)
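This example relies on project-local helpers (`get_args`, `load_data`, `load_net`, `get_optimizer`, `train`, `eval_accuracy`) that are not shown. As a rough sketch only, a `get_optimizer` of the following shape would satisfy the call above; the `args.optimizer`, `args.lr`, and `args.momentum` attribute names are assumptions, not the original code:

import torch

def get_optimizer(net, args):
    # Hypothetical dispatch on an assumed `args.optimizer` string flag.
    if args.optimizer == 'sgd':
        return torch.optim.SGD(net.parameters(), lr=args.lr, momentum=args.momentum)
    if args.optimizer == 'adam':
        return torch.optim.Adam(net.parameters(), lr=args.lr)
    raise ValueError('unknown optimizer: %s' % args.optimizer)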
Example #2
def train_wrapper(model):
    """Wrapping function to train the model."""
    if FLAGS.pretrained_model:
        model.load(FLAGS.pretrained_model)
    # load data
    train_input_handle, test_input_handle = datasets_factory.data_provider(
        FLAGS.dataset_name,
        FLAGS.train_data_paths,
        FLAGS.valid_data_paths,
        FLAGS.batch_size * FLAGS.n_gpu,
        FLAGS.img_width,
        seq_length=FLAGS.total_length,
        is_training=True)

    eta = FLAGS.sampling_start_value

    for itr in range(1, FLAGS.max_iterations + 1):
        if train_input_handle.no_batch_left():
            train_input_handle.begin(do_shuffle=True)
        ims = train_input_handle.get_batch()
        if FLAGS.dataset_name == 'penn':
            ims = ims['frame']
        ims = preprocess.reshape_patch(ims, FLAGS.patch_size)

        eta, real_input_flag = schedule_sampling(eta, itr)

        trainer.train(model, ims, real_input_flag, FLAGS, itr)

        if itr % FLAGS.snapshot_interval == 0:
            model.save(itr)

        if itr % FLAGS.test_interval == 0:
            trainer.test(model, test_input_handle, FLAGS, itr)

        train_input_handle.next()
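Examples #2 and #4 call a `schedule_sampling` helper that is not shown. In PredRNN-style training it decays the probability `eta` of feeding ground-truth frames and samples a per-frame mask. The sketch below is an assumption about its shape; the decay rate, iteration cutoff, and mask dimensions are placeholders:

import numpy as np

def schedule_sampling(eta, itr, batch_size=8, seq_length=20,
                      img_width=64, patch_size=4, channels=1):
    # Linearly decay eta (placeholder rate) until a cutoff,
    # then feed only generated frames.
    eta = max(eta - 0.00002, 0.0) if itr < 50000 else 0.0
    # One flag per input step: 1.0 -> use the real frame, 0.0 -> use the prediction.
    flags = (np.random.random_sample((batch_size, seq_length - 1)) < eta)
    real_input_flag = flags.astype(np.float32).reshape(
        batch_size, seq_length - 1, 1, 1, 1) * np.ones(
        (1, 1, img_width // patch_size, img_width // patch_size,
         patch_size ** 2 * channels), dtype=np.float32)
    return eta, real_input_flag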
Example #3
def train_function():
    fix_annotations()
    annot = load_annotations(preprocessed=True)
    annot_df = pd.DataFrame(annot["annotations"])
    images_df = pd.DataFrame(annot["images"])
    annot_df, annot = choose_category(annot_df, annot)
    annot_df = kfold_split(annot_df)
    train(annot_df, images_df, annot)
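The `kfold_split` helper is not shown; a common shape for it, sketched here under the assumption that it tags each annotation row with a fold index (the 'fold' column name is a placeholder), is:

import pandas as pd
from sklearn.model_selection import KFold

def kfold_split(df, n_splits=5, seed=42):
    # Assign a fold index to every row; column name 'fold' is an assumption.
    df = df.copy()
    df['fold'] = -1
    kf = KFold(n_splits=n_splits, shuffle=True, random_state=seed)
    for fold, (_, val_idx) in enumerate(kf.split(df)):
        df.iloc[val_idx, df.columns.get_loc('fold')] = fold
    return df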
Example #4
def train_wrapper(model):
	"""Wrapping function to train the model."""
	if FLAGS.pretrained_model:
		model.load(FLAGS.pretrained_model)
	# load data
	train_input_handle, test_input_handle = datasets_factory.data_provider(
		FLAGS.dataset_name,
		FLAGS.train_data_paths,
		FLAGS.valid_data_paths,
		FLAGS.batch_size * FLAGS.n_gpu,
		FLAGS.img_width,
		FLAGS.input_seq_length,
		FLAGS.output_seq_length,
		FLAGS.dimension_3D,
		is_training=True)
	print('Data loaded.')
	eta = FLAGS.sampling_start_value

	tra_cost = 0.0
	batch_id = 0
	stopping = [float('inf')]  # history of validation costs for early stopping
	for itr in range(2351, FLAGS.max_iterations + 1):  # resume from iteration 2351
		if train_input_handle.no_batch_left() or itr % 50 == 0:
			model.save(itr)
			print(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'), 'itr: ' + str(itr))
			print('training loss: ' + str(tra_cost / max(batch_id, 1)))  # guard: batch_id may be 0
			val_cost = trainer.test(model, test_input_handle, FLAGS, itr)
			# reset the history on improvement; stop once 10 checks pass without one
			if val_cost < min(stopping):
				stopping = [val_cost]
			elif len(stopping) < 10:
				stopping.append(val_cost)
			if len(stopping) == 10:
				break
			train_input_handle.begin(do_shuffle=True)
			tra_cost = 0
			batch_id = 0

		ims = train_input_handle.get_batch()
		batch_id += 1

		eta, real_input_flag = schedule_sampling(eta, itr)

		tra_cost += trainer.train(model, ims, real_input_flag, FLAGS, itr)

		train_input_handle.next_batch()
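The list-based `stopping` bookkeeping above is an early-stopping scheme: reset the history whenever validation cost improves on the best seen so far, otherwise accumulate, and break once the history fills up. Roughly the same idea written as a small helper (a sketch, not part of the original code):

class EarlyStopping:
    """Stop after `patience` consecutive validations without improvement."""
    def __init__(self, patience=10):
        self.patience = patience
        self.best = float('inf')
        self.bad_checks = 0

    def step(self, val_cost):
        if val_cost < self.best:
            self.best = val_cost
            self.bad_checks = 0
        else:
            self.bad_checks += 1
        return self.bad_checks >= self.patience  # True means stop training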
Example #5
def train_wrapper(model):
    """Wrapping function to train the model."""
    if FLAGS.pretrained_model:
        model.load(FLAGS.pretrained_model)

    # load data
    train_input_handle, test_input_handle = datasets_factory.data_provider(
        FLAGS.dataset_name,
        FLAGS.train_data_paths,
        FLAGS.valid_data_paths,
        FLAGS.batch_size * FLAGS.n_gpu,
        FLAGS.img_width,
        FLAGS.input_seq_length,
        FLAGS.output_seq_length,
        is_training=True)

    tra_cost = 0.0
    batch_id = 0
    stopping = [float('inf')]  # history of validation costs for early stopping
    for itr in range(4502, FLAGS.max_iterations + 1):  # resume from iteration 4502
        if itr % 10 == 0:
            print('training...')
        if train_input_handle.no_batch_left():
            model.save(itr)
            print(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
                  'itr: ' + str(itr))
            print('training loss: ' + str(tra_cost / max(batch_id, 1)))  # guard: batch_id may be 0
            val_cost = trainer.test(model, test_input_handle, FLAGS, itr)
            if val_cost < min(stopping):
                stopping = [val_cost]
            elif len(stopping) < 3:
                stopping.append(val_cost)
            if len(stopping) == 3:
                break
            train_input_handle.begin(do_shuffle=True)
            tra_cost = 0
            batch_id = 0
        if itr % 50 == 0:
            model.save(itr)
            print(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
                  'itr: ' + str(itr))
            print('training loss: ' + str(tra_cost / max(batch_id, 1)))  # guard: batch_id may be 0

        ims = train_input_handle.get_batch()
        batch_id += 1

        tra_cost += trainer.train(model, ims, FLAGS, itr)

        train_input_handle.next_batch()
Example #6
    def on_train(self):
        pipeline, search_space = get_pipeline(self.selected_preproc,
                                              self.preproc_config,
                                              self.model_config)

        try:
            logging.info(f'Shape: X {self.X.shape} - y {self.y.shape}')
            trained_model, cv_mean, cv_std, train_time = train(
                self.X,
                self.y,
                pipeline,
                search_space,
                self.train_mode,
                self.n_iters,
                n_jobs=train_config['n_jobs'],
                is_convnet=self.is_convnet)
        except Exception:
            logging.info(f'Training failed - {traceback.format_exc()}')
            self.button_train.button_type = 'danger'
            self.button_train.label = 'Failed'
            return

        model_to_save = trained_model if self.train_mode == 'validate' \
            else trained_model.best_estimator_

        if 'Save' in self.selected_settings:
            dataset_name = '_'.join(self.train_ids)  # no-op comprehension removed; avoids shadowing id()
            filename = f'{self.model_name}_{dataset_name}'
            save_pipeline(model_to_save, self.save_path, filename)

            model_info = {
                "Model name": self.model_name,
                "Model file": filename,
                "Train ids": self.train_ids,
                "fs": self.fs,
                "Shape": self.X.shape,
                "Preprocessing": self.selected_preproc,
                "Model pipeline": {k: str(v)
                                   for k, v in model_to_save.steps},
                "CV RMSE": f'{cv_mean:.3f}+-{cv_std:.3f}',
                "Train time": train_time
            }
            save_json(model_info, self.save_path, filename)

        logging.info(f'{model_to_save} \n'
                     f'Trained successfully in {train_time:.0f}s \n'
                     f'Accuracy: {cv_mean:.2f}+-{cv_std:.2f}')

        # Update info
        self.button_train.button_type = 'success'
        self.button_train.label = 'Trained'
        self.div_info.text += f'<b>Accuracy:</b> {cv_mean:.2f}+-{cv_std:.2f} <br>'
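`save_pipeline` and `save_json` are project helpers that are not shown here. A plausible minimal form, assuming joblib serialization and the call signatures used above, is:

import json
import os
import joblib

def save_pipeline(model, save_path, filename):
    # Persist the fitted sklearn pipeline with joblib.
    os.makedirs(save_path, exist_ok=True)
    joblib.dump(model, os.path.join(save_path, filename + '.pkl'))

def save_json(info, save_path, filename):
    # Store the accompanying metadata next to the model file.
    with open(os.path.join(save_path, filename + '.json'), 'w') as f:
        json.dump(info, f, indent=2, default=str)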
Example #7
def invoke_trainer(pars):

    log_total_individuals_faces(pars)
    debug.msg("\n============== Experiment {} ==================".format(pars["architecture"]))
    debug.msg("Train Steps:{}".format(pars["train_steps"]))
    debug.msg("Test Percent:{}".format(pars["testing_percentage"]))
    test_accuracy = trainer.train(pars)
    debug.msg("Test Accuracy: {}".format(test_accuracy))
    return test_accuracy
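`invoke_trainer` expects a `pars` dict; the keys read above are `architecture`, `train_steps`, and `testing_percentage`. A hypothetical sweep (the architecture names and values below are placeholders) could look like:

results = {}
for arch in ['mobilenet', 'inception_v3']:  # placeholder architecture names
    pars = {'architecture': arch,
            'train_steps': 4000,
            'testing_percentage': 10}
    results[arch] = invoke_trainer(pars)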
            logging.info("Loaded {} test samples".format(test_data.size))
        except:
            logging.ERROR("Failed to load test dataset. Skipping testing!!!")

    if restore_mode != RestoreMode.ONLY_GIVEN_VARS:
        config.restore_layers = None
    net = tf_convnet.ConvNetModel(convnet_config=config,
                                  mode=train_mode,
                                  create_summaries=create_summaries)
    opt_args = None
    if config.optimizer == Optimizer.MOMENTUM:
        opt_args = config.momentum_args
    elif config.optimizer == Optimizer.ADAGRAD:
        opt_args = config.adagrad_args
    elif config.optimizer == Optimizer.ADAM:
        opt_args = config.adam_args

    # renamed from `trainer` to avoid shadowing the trainer module,
    # which would raise UnboundLocalError inside this function
    model_trainer = trainer.Trainer(net=net,
                              data_provider_train=training_data,
                              data_provider_val=validation_data,
                              out_path=data_paths.tf_out_path,
                              train_config=config,
                              restore_path=restore_path,
                              caffemodel_path=caffemodel_path,
                              restore_mode=restore_mode,
                              mode=train_mode,
                              data_provider_test=test_data,
                              fold_nr=fold_nr)

    path = model_trainer.train()
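The if/elif chain above just maps the configured optimizer to its argument bundle; a table-driven equivalent (a sketch reusing the enum and config fields shown above) is:

# Sketch: same mapping as the if/elif chain, written as a lookup table.
OPTIMIZER_ARGS = {
    Optimizer.MOMENTUM: config.momentum_args,
    Optimizer.ADAGRAD: config.adagrad_args,
    Optimizer.ADAM: config.adam_args,
}
opt_args = OPTIMIZER_ARGS.get(config.optimizer)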
Example #9
                pyplot.xlabel('Sample')
                pyplot.show()

        if os.path.isfile(
                os.path.join(tsf_model_filepath,
                             tsf_model_filename + '_fext' + tsf_model_ext)):
            tsf_model_feature_extractor = load_model(
                os.path.join(tsf_model_filepath,
                             tsf_model_filename + '_fext' + tsf_model_ext))
            print('Begin training feature extractor model!')
            (tsf_model_feature_extractor,
             history) = train(tsf_model_feature_extractor,
                              ts_normed_dataset_x,
                              ts_normed_dataset_x,
                              epochs=epochs,
                              batch_size=batch_size,
                              validation_split=validation_split,
                              initial_lrate=initial_lrate,
                              lrate_drop=lrate_drop,
                              verbose=verbose)

            get_feature_extractor_output = K.function([
                tsf_model_feature_extractor.get_layer(
                    'feature_extractor').get_layer('autoencoder').get_layer(
                        'autoencoder_input_layer').input
            ], [
                tsf_model_feature_extractor.get_layer(
                    'feature_extractor').get_layer('autoencoder').get_layer(
                        'autoencoder_latent_output_layer').output
            ])
            feature_extractor_output = get_feature_extractor_output(
Example #10
def run(data_dir,
        dl_info_file,
        model_type=1,
        epochs=8,
        lr=0.002,
        lr_decay_step=7,
        batch_size=8,
        dilation=[[1, 1, 1]],
        validate=True,
        use_adam=False,
        log_dir='log',
        checkpoint_dir='checkpoints',
        checkpoint_file=None,
        avg_fps=[0, 0.125, 0.25, 0.5, 1, 2, 4, 5, 8],
        output_dir='output'):
    """
    Train one or multiple models and generate a report afterwards.
    :param data_dir: String. The path to the data directory.
    :param dl_info_file: String. The path of the DL_info.csv file.
    :param model_type: Integer or array[Integer]. The type of the model referred to as a number:
                        BASIC = 1
                        DEFORMABLE_CONV = 2
                        DEFORMABLE_ROI = 3
                        DEFORMABLE_CONV_ROI = 4
    :param epochs: Integer or array[Integer]. The number of epochs to train.
    :param lr: Float or array[Float]. The learning rate to use.
    :param lr_decay_step: Integer or array[Integer]. The step interval at which the learning rate is reduced by a factor of 0.1.
    :param batch_size: Integer or array[Integer]. The batch size to train with.
    :param dilation: array[array[Integer]]. See https://pytorch.org/docs/master/generated/torch.nn.Conv2d.html for further information.
    :param validate: Bool. Run a validation step after each epoch.
    :param use_adam: Bool. Use the Adam optimizer.
    :param log_dir: String. The path to the log directory.
    :param checkpoint_dir: String. The path to the checkpoint directory.
    :param checkpoint_file: String. The path to the checkpoint file from which to resume.
    :param avg_fps: array[Float]. The x-axis false-positive values at which results are interpolated.
    :param output_dir: String. The directory that will contain the results and the report.
    """
    print('##### TRAINING #####')
    train(model_type=model_type,
          lr=lr,
          lr_decay_step=lr_decay_step,
          epochs=epochs,
          batch_size=batch_size,
          dilation=dilation,
          validate=bool(validate),
          log_dir=log_dir,
          data_dir=data_dir,
          csv_file=dl_info_file,
          use_adam=bool(use_adam),
          checkpoint_dir=checkpoint_dir,
          resume_checkpoint=checkpoint_file)
    print('##### REPORT #####')
    generate_report(data_dir=data_dir,
                    csv_file=dl_info_file,
                    checkpoint_dir=checkpoint_dir,
                    output_dir=output_dir,
                    avg_fps=avg_fps)
    print('##### PREVIEW #####')
    generate_preview(data_dir=data_dir,
                     csv_file=dl_info_file,
                     checkpoint_dir=checkpoint_dir,
                     output_dir=output_dir)
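A hypothetical invocation (the paths below are placeholders) that trains the basic model with the defaults above and then produces the report and preview:

run(data_dir='data/DeepLesion',       # placeholder path
    dl_info_file='data/DL_info.csv',  # placeholder path
    model_type=1)                     # BASIC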
Example #11
# -*- coding: utf-8 -*-

import gym
from src.dqn import DQNAgent, dqn_params
from src.trainer import train, train_params

agent_params = dqn_params(buffer_size=int(1e6),
                          gamma=0.99,
                          epsilon=0.95,
                          epsilon_decay=5e-5,
                          epsilon_min=0.1,
                          batch_size=64,
                          lr=2e-4,
                          update_freq=5,
                          tau=0.01)
training_params = train_params(max_episodes=1200,
                               max_steps_per_episode=1000,
                               stop_value=475,
                               avg_window=100)

# create Gym environment
env = gym.make('CartPole-v1')

# create a DQN agent
obs_dim = env.observation_space.shape
actions = range(env.action_space.n)
agent = DQNAgent(obs_dim, actions, seed=0, params=agent_params)

# train the agent
train(env, agent, training_params)
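After training, a quick greedy rollout is a common sanity check. The `agent.act(state, epsilon)` interface below is an assumption (the DQNAgent API is not shown), and the loop uses the classic 4-tuple Gym step API matching the import above; treat it as a sketch:

# Sketch only: `agent.act` is an assumed method name.
state = env.reset()
total_reward, done = 0.0, False
while not done:
    action = agent.act(state, epsilon=0.0)  # greedy, no exploration
    state, reward, done, _ = env.step(action)
    total_reward += reward
print('greedy episode return:', total_reward)
env.close()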