Code example #1 (score: 0)
def start_prediction(output_directory,
                     data_directory,
                     dataset_name,
                     model_dir,
                     network_name,
                     batch_size,
                     batch_threads,
                     num_classes=None):
    """Restore an Estimator from *model_dir* and run prediction plus
    evaluation, writing results to *output_directory*.

    When *num_classes* is None the class count is taken from the
    training split of the dataset.
    """
    factory = DatasetFactory(dataset_name=dataset_name,
                             data_directory=data_directory,
                             augment=False)

    # Derive the class count from the training split if not given.
    if num_classes is None:
        num_classes = factory.get_dataset('train').num_classes()

    model_fn = get_model_function(model_dir, network_name, num_classes)
    estimator = tf.estimator.Estimator(
        model_fn=model_fn,
        model_dir=model_dir,
        config=RunConfig(keep_checkpoint_max=10, save_checkpoints_steps=None),
        params={})

    run_prediction_and_evaluation(output_directory, batch_size, batch_threads,
                                  factory, estimator,
                                  nets_factory.get_input_size(network_name))
Code example #2 (score: 0)
def start_training(data_directory, dataset_name, output_directory,
                   network_name, batch_size, learning_rate, batch_threads,
                   num_epochs, initial_checkpoint, checkpoint_exclude_scopes,
                   ignore_missing_variables, trainable_scopes,
                   not_trainable_scopes, fixed_learning_rate,
                   learning_rate_decay_rate, do_evaluation,
                   learning_rate_decay_steps):
    """Train *network_name* for *num_epochs* epochs with an Estimator,
    optionally evaluating after every epoch and keeping the best
    checkpoint."""
    factory = DatasetFactory(dataset_name=dataset_name,
                             data_directory=data_directory)

    # Default the decay interval to one epoch's worth of optimizer steps
    # (the sample count is only fetched when no explicit value is given).
    decay_base = (factory.get_dataset('train').get_number_of_samples()
                  if learning_rate_decay_steps is None
                  else learning_rate_decay_steps)
    model_params = {
        'learning_rate': learning_rate,
        'fixed_learning_rate': fixed_learning_rate,
        'learning_rate_decay_rate': learning_rate_decay_rate,
        'learning_rate_decay_steps': decay_base // batch_size,
    }

    run_config = RunConfig(keep_checkpoint_max=10, save_checkpoints_steps=None)
    # Build the model function, then the Estimator around it.
    model_fn = get_model_function(
        output_directory, network_name,
        factory.get_dataset('train').num_classes(), initial_checkpoint,
        checkpoint_exclude_scopes, ignore_missing_variables, trainable_scopes,
        not_trainable_scopes)
    estimator = tf.estimator.Estimator(model_fn=model_fn,
                                       params=model_params,
                                       model_dir=output_directory,
                                       config=run_config)
    image_size = nets_factory.get_input_size(network_name)

    dataset = factory.get_dataset('train')
    summary_writer = get_evaluation_summary_writer(do_evaluation,
                                                   output_directory)

    for epoch in range(num_epochs):
        run_training(dataset=dataset,
                     batch_size=batch_size,
                     batch_threads=batch_threads,
                     epoch=epoch,
                     estimator=estimator,
                     num_epochs=num_epochs,
                     image_size=image_size)

        # Evaluate with a doubled batch size; best checkpoint is retained.
        if do_evaluation:
            run_evaluation_conserving_best(
                estimator=estimator,
                batch_size=2 * batch_size,
                batch_threads=batch_threads,
                dataset_factory=factory,
                image_size=image_size,
                evaluation_summary_writer=summary_writer)

    print('Finished training')
Code example #3 (score: 0)
File: trainer.py — Project: splinter21/SPL
def start_training(data_directory, dataset_name, mean, output_directory,
                   network_name, batch_size, learning_rate, learning_rate_gen,
                   beta1_gen, separable_conv, batch_threads, num_epochs,
                   initial_checkpoint, checkpoint_exclude_scopes,
                   ignore_missing_variables, trainable_scopes,
                   not_trainable_scopes, fixed_learning_rate,
                   learning_rate_decay_rate, do_evaluation,
                   learning_rate_decay_steps, img_size):
    """Train a model that carries separate generator hyper-parameters
    (learning rate, beta1) for *num_epochs* epochs via an Estimator."""
    factory = DatasetFactory(dataset_name=dataset_name,
                             data_directory=data_directory,
                             mean=mean)

    # Default the decay interval to one epoch's worth of optimizer steps
    # (the sample count is only fetched when no explicit value is given).
    decay_base = (factory.get_dataset('train').get_number_of_samples()
                  if learning_rate_decay_steps is None
                  else learning_rate_decay_steps)
    model_params = {
        'learning_rate': learning_rate,
        'learning_rate_gen': learning_rate_gen,
        'beta1_gen': beta1_gen,
        'fixed_learning_rate': fixed_learning_rate,
        'learning_rate_decay_rate': learning_rate_decay_rate,
        'separable_conv': separable_conv,
        'learning_rate_decay_steps': decay_base // batch_size,
    }

    run_config = RunConfig(keep_checkpoint_max=10, save_checkpoints_steps=None)
    # Build the model function, then the Estimator around it.
    model_fn = get_model_function(
        output_directory, network_name,
        factory.get_dataset('train').num_classes(), initial_checkpoint,
        checkpoint_exclude_scopes, ignore_missing_variables, trainable_scopes,
        not_trainable_scopes)
    estimator = tf.estimator.Estimator(model_fn=model_fn,
                                       params=model_params,
                                       model_dir=output_directory,
                                       config=run_config)

    # Input resolution is supplied explicitly rather than derived
    # from the network definition.
    image_size = img_size

    for epoch in range(num_epochs):
        run_training(factory,
                     batch_size=batch_size,
                     batch_threads=batch_threads,
                     epoch=epoch,
                     estimator=estimator,
                     num_epochs=num_epochs,
                     image_size=image_size)
    print('Finished training')
Code example #4 (score: 0)
File: trainer_views.py — Project: saiftumrani/CVTC
def start_training(data_directory, dataset_name, output_directory,
                   network_name, batch_size, learning_rate, batch_threads,
                   num_epochs, initial_checkpoint, checkpoint_exclude_scopes,
                   ignore_missing_variables, trainable_scopes,
                   fixed_learning_rate, learning_rate_decay_rate, num_classes):
    """Train for *num_epochs* epochs, then run a final evaluation.

    If *num_classes* is None the class count is read from the training
    split of the dataset.
    """
    factory = DatasetFactory(dataset_name=dataset_name,
                             data_directory=data_directory)

    # Decay once per epoch's worth of optimizer steps.
    samples = factory.get_dataset('train').get_number_of_samples()
    model_params = {
        'learning_rate': learning_rate,
        'fixed_learning_rate': fixed_learning_rate,
        'learning_rate_decay_rate': learning_rate_decay_rate,
        'learning_rate_decay_steps': samples // batch_size,
    }

    run_config = RunConfig(keep_checkpoint_max=10, save_checkpoints_steps=None)
    class_count = (factory.get_dataset('train').num_classes()
                   if num_classes is None else num_classes)
    # Build the model function, then the Estimator around it.
    model_fn = get_model_function(output_directory, network_name, class_count,
                                  initial_checkpoint,
                                  checkpoint_exclude_scopes,
                                  ignore_missing_variables, trainable_scopes)
    estimator = tf.estimator.Estimator(model_fn=model_fn,
                                       params=model_params,
                                       model_dir=output_directory,
                                       config=run_config)
    image_size = nets_factory.get_input_size(network_name)

    for epoch in range(num_epochs):
        run_training(factory, batch_size, batch_threads, epoch, estimator,
                     num_epochs, image_size)

    run_evaluation(batch_size, batch_threads, factory, estimator, image_size)
Code example #5 (score: 0)
def start_prediction(data_directory, dataset_name, mean, model_dir,
                     network_name, batch_size, batch_threads, num_classes,
                     result_dir, img_size, model, mode):
    """Restore a trained model from *model_dir* and run prediction plus
    evaluation, writing outputs under *result_dir*."""
    factory = DatasetFactory(dataset_name=dataset_name,
                             data_directory=data_directory,
                             mean=mean,
                             augment=False,
                             num_classes=num_classes)

    config = RunConfig(keep_checkpoint_max=10, save_checkpoints_steps=None)
    # Class count is taken from the training split of the dataset.
    model_fn = get_model_function(
        model_dir, network_name,
        factory.get_dataset('train').num_classes())
    estimator = tf.estimator.Estimator(model_fn=model_fn,
                                       model_dir=model_dir,
                                       config=config,
                                       params={})

    # Input resolution is supplied explicitly via *img_size*.
    image_size = img_size
    run_prediction_and_evaluation(batch_size, batch_threads, factory,
                                  estimator, image_size, result_dir, mode)
Code example #6 (score: 0)
File: main.py — Project: mmlab-cv/ICIP-2021-2346
    input, heatmap = voxelization_val({'points': points, 'keypoints': keypoints, 'refpoint': refpoint})
    return (torch.from_numpy(input), torch.from_numpy(heatmap))

def transform_test(sample):
    """Convert one test-set sample into network input tensors.

    *sample* is a mapping with 'points' and 'refpoint' entries
    (numpy arrays — presumably point-cloud data; confirm with caller).
    Returns a tuple ``(voxel_input, refpoint_row)`` of torch tensors,
    where the reference point is reshaped to a single row.
    """
    points, refpoint = sample['points'], sample['refpoint']
    # Renamed from `input`, which shadowed the builtin of the same name.
    voxels = voxelize_input(points, refpoint)
    return torch.from_numpy(voxels), torch.from_numpy(
        refpoint.reshape((1, -1)))


# TODO: Why so small?
#batch_size = 12
# Mini-batch size shared by the train / validation / test loaders below.
batch_size = 10


# Select the dataset triple (train, validation, test).  The sentinel string
# "NONE" means "no separate validation set was requested": fall back to the
# hard-coded "ITOP" dataset for the validation and test splits.
# NOTE(review): `training_phase` is set but not used in this view — presumably
# toggles train-only behavior elsewhere; confirm against the rest of main.py.
if args.validation_dataset == "NONE":
    dataset_factory = DatasetFactory(args.train_dataset, "ITOP", "ITOP", transform_train, transform_val, transform_test)
    training_phase = True
else:
    dataset_factory = DatasetFactory(args.train_dataset, args.validation_dataset, args.test_dataset, transform_train, transform_val, transform_test)
    training_phase = False

# Only the training loader shuffles; validation and test preserve order.
train_set = dataset_factory.get_train()
train_loader = torch.utils.data.DataLoader(train_set, batch_size=batch_size, shuffle=True, num_workers=6)

val_set = dataset_factory.get_validation()
val_loader = torch.utils.data.DataLoader(val_set, batch_size=batch_size, shuffle=False, num_workers=6)

test_set = dataset_factory.get_test()
test_loader = torch.utils.data.DataLoader(test_set, batch_size=batch_size, shuffle=False, num_workers=6)