Code example #1
# Assumed module-level context from the original costar_plan script
# (cornell_grasp_train_regression.py, per the print message below): `FLAGS` is
# the parsed command line flags object, and `hypertree_train` /
# `hypertree_utilities` are repo-local modules.
def main(_):
    problem_type = 'grasp_regression'
    feature_combo = 'image_preprocessed'
    # Override some default flags for this configuration
    # see other configuration in hypertree_train.py choose_features_and_metrics()
    FLAGS.problem_type = problem_type
    FLAGS.feature_combo = feature_combo
    FLAGS.crop_to = 'image_contains_grasp_box_center'
    if FLAGS.load_hyperparams is None:
        # Results from classification hyperparameter run
        # FLAGS.load_hyperparams = ('~/datasets/logs/hyperopt_logs_cornell/'
        #                           '2018-02-23-09-35-21_-vgg_dense_model-dataset_cornell_grasping-grasp_success/'
        #                           '2018-02-23-09-35-21_-vgg_dense_model-dataset_cornell_grasping-grasp_success_hyperparams.json')

        # Results from first regression hyperparameter run
        # FLAGS.load_hyperparams = ('~/datasets/logs/hyperopt_logs_cornell_regression/'
        #                           '2018-03-01-15-12-20_-vgg_regression_model-dataset_cornell_grasping-norm_sin2_cos2_hw_yx_6/'
        #                           '2018-03-01-15-12-20_-vgg_regression_model-dataset_cornell_grasping-norm_sin2_cos2_hw_yx_6_hyperparams.json')

        # Just try out NasNet directly without hyperopt (it didn't work well on 2017-03-04)
        # FLAGS.load_hyperparams = ('~/src/costar_ws/src/costar_plan/costar_google_brainrobotdata/nasnet_large.json')

        # decent, but didn't run kfold 2018-03-05, + 2018-03-07 trying with mae
        # FLAGS.load_hyperparams = ('~/.keras/datasets/logs/hyperopt_logs_cornell_regression/'
        #                           '2018-03-03-16-33-06_-vgg_regression_model-dataset_cornell_grasping-norm_sin2_cos2_hw_yx_6/'
        #                           '2018-03-03-16-33-06_-vgg_regression_model-dataset_cornell_grasping-norm_sin2_cos2_hw_yx_6_hyperparams.json')

        # Old best first epoch on hyperopt run 2018-03-06:
        # FLAGS.load_hyperparams = ('~/.keras/datasets/logs/hyperopt_logs_cornell_regression/'
        #                           '2018-03-06-00-20-24_-vgg19_regression_model-dataset_cornell_grasping-norm_sin2_cos2_hw_yx_6/'
        #                           '2018-03-06-00-20-24_-vgg19_regression_model-dataset_cornell_grasping-norm_sin2_cos2_hw_yx_6_hyperparams.json')

        # Current best performance with mae on val + test 2018-03-07, haven't tried on kfold yet 2018-03-06
        # FLAGS.load_hyperparams = ('~/.keras/datasets/logs/hyperopt_logs_cornell_regression/'
        #                           '2018-03-05-23-05-07_-vgg_regression_model-dataset_cornell_grasping-norm_sin2_cos2_hw_yx_6/'
        #                           '2018-03-05-23-05-07_-vgg_regression_model-dataset_cornell_grasping-norm_sin2_cos2_hw_yx_6_hyperparams.json')

        # Best first and last epoch on hyperopt run 2018-03-08
        # FLAGS.load_hyperparams = ('~/.keras/datasets/logs/hyperopt_logs_cornell_regression/'
        #                           '2018-03-07-18-36-17_-vgg_regression_model-dataset_cornell_grasping-norm_sin2_cos2_hw_yx_6/'
        #                           '2018-03-07-18-36-17_-vgg_regression_model-dataset_cornell_grasping-norm_sin2_cos2_hw_yx_6_hyperparams.json')

        FLAGS.load_hyperparams = ('hyperparams/regression/'
                                  '2018-03-05-23-05-07_-vgg_regression_model-dataset_cornell_grasping-norm_sin2_cos2_hw_yx_6_hyperparams.json')
    FLAGS.epochs = 60
    FLAGS.fine_tuning_epochs = 0
    print('Regression Training on Jaccard Distance is about to begin. '
          'It overrides some command line parameters, including '
          'training with mae loss, so to change them '
          'you will need to modify cornell_grasp_train_regression.py directly.')

    dataset_name = 'costar_block_stacking'

    hyperparams = hypertree_utilities.load_hyperparams_json(
        FLAGS.load_hyperparams, FLAGS.fine_tuning, FLAGS.learning_rate,
        feature_combo_name=feature_combo)
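    # For illustration only: a minimal hyperparams JSON might contain entries
    # like the ones read and overridden in these scripts (the values here are
    # hypothetical, not from a real run):
    #   {"loss": "mse", "batch_size": 32, "learning_rate": 1.0,
    #    "checkpoint": true, "feature_combo_name": "image_preprocessed"}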

    # TODO: remove loss if it doesn't work or make me the default in the other files if it works really well
    hyperparams['loss'] = 'mae'

    if 'k_fold' in FLAGS.pipeline_stage:
        hypertree_train.train_k_fold(
            problem_name=problem_type,
            feature_combo_name=feature_combo,
            hyperparams=hyperparams,
            split_type='objectwise',
            **hyperparams)
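        # The objectwise split holds out whole objects, so validation measures
        # generalization to unseen objects; the imagewise split below holds
        # out images, so the same objects may appear in training and validation.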
        hypertree_train.train_k_fold(
            problem_name=problem_type,
            feature_combo_name=feature_combo,
            hyperparams=hyperparams,
            split_type='imagewise',
            **hyperparams)
    else:
        hypertree_train.run_training(
            problem_name=problem_type,
            feature_combo_name=feature_combo,
            hyperparams=hyperparams,
            **hyperparams)
Code example #2
import os
import sys
import json
import traceback

import pandas
import tensorflow as tf
import keras
from tqdm import tqdm

# As in code example #1, `FLAGS`, `hypertree_train`, and `hypertree_utilities`
# are module-level names from the original costar_plan script
# (costar_block_stacking_train_ranked_regression.py, per the print message below).
def main(_):
    use_best_model = True
    # whether to filter results to the epoch in FLAGS.epoch, or take the best performing value ever
    filter_epoch = FLAGS.filter_epoch

    # CONFIGURE: Select problem type
    problem_type = 'semantic_translation_regression'
    # problem_type = 'semantic_rotation_regression'
    # problem_type = 'semantic_grasp_regression'

    feature_combo = 'image_preprocessed'
    # Override some default flags for this configuration
    # see other configuration in hypertree_train.py choose_features_and_metrics()
    FLAGS.problem_type = problem_type
    FLAGS.feature_combo = feature_combo
    FLAGS.crop_to = 'image_contains_grasp_box_center'
    load_weights = None
    FLAGS.batch_size = 32
    optimizer_name = 'sgd'
    # FLAGS.crop_height = 480
    # FLAGS.crop_width = 640
    # FLAGS.resize_height = 480
    # FLAGS.resize_width = 640
    # print('Note: special overrides have been applied '
    #       'for an experiment. '
    #       'crop + resize width/height have been set to 640x480.')
    # FLAGS.log_dir = r'C:/Users/Varun/JHU/LAB/Projects/costar_plan/costar_google_brainrobotdata/hyperparams/'
    # FLAGS.data_dir = r'C:/Users/Varun/JHU/LAB/Projects/costar_block_stacking_dataset_v0.4/*success.h5f'
    skip_models = FLAGS.skip_models

    # We generate a summary of the best algorithms as the program runs,
    # so here we configure the summary metrics for the problem type.
    if problem_type == 'semantic_grasp_regression':
        histories_metrics = [
            'val_cart_error', 'val_angle_error', 'val_grasp_acc'
        ]
        histories_summary_metrics = ['min', 'min', 'max']
    elif problem_type == 'semantic_rotation_regression':
        histories_metrics = ['val_angle_error', 'val_grasp_acc']
        histories_summary_metrics = ['min', 'max']
    elif problem_type == 'semantic_translation_regression':
        histories_metrics = ['val_cart_error', 'val_grasp_acc']
        histories_summary_metrics = ['min', 'max']
    else:
        raise ValueError(
            'costar_block_stacking_train_ranked_regression.py::main(): '
            'unsupported problem_type ' + str(problem_type))
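    # The 'min'/'max' entries tell multi_run_histories_summary (called in the
    # training loop below) how to aggregate each metric: take the minimum of
    # error metrics and the maximum of accuracy metrics.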

    # FLAGS.data_dir = os.path.expanduser('~/.keras/datasets/costar_block_stacking_dataset_v0.4/*success.h5f')
    FLAGS.data_dir = os.path.expanduser(
        '~/.keras/datasets/costar_block_stacking_dataset_v0.4/')
    FLAGS.fine_tuning_epochs = 0

    # CONFIGURE: Change the number of epochs here.
    # final training run:
    # FLAGS.epochs = 600
    FLAGS.epochs = 200
    # FLAGS.random_augmentation = 0.25
    # evaluating top few models run:
    # FLAGS.epochs = 10
    # FLAGS.epochs = 40
    FLAGS.random_augmentation = None
    print(
        'Regression Training on costar block stacking is about to begin. '
        'It overrides some command line parameters, including the loss, '
        'so to change them you will need to modify '
        'costar_block_stacking_train_ranked_regression.py directly.'
    )

    dataset_name = 'costar_block_stacking'

    json_histories_path = os.path.join(
        FLAGS.log_dir, FLAGS.run_name + '_' + problem_type + '_histories.json')
    json_histories_summary_path = os.path.join(
        FLAGS.log_dir,
        FLAGS.run_name + '_' + problem_type + '_histories_summary.json')
    run_histories = {}
    history_dicts = {}
    sort_by = None

    csv_file = os.path.join(os.path.expanduser(FLAGS.log_dir), FLAGS.rank_csv)

    # load the hyperparameter optimization ranking csv file created by hyperopt_rank.py
    dataframe = pandas.read_csv(csv_file, index_col=None, header=0)
    if problem_type == 'semantic_rotation_regression':
        # sort by val_angle_error from low to high
        dataframe = dataframe.sort_values('val_angle_error', ascending=True)
        dataframe = dataframe.sort_values('val_grasp_acc', ascending=False)
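        # Note: this second sort_values call reorders the whole frame again, so
        # the effective ranking is by val_grasp_acc; pandas sort_values is not
        # stable by default (kind='quicksort'), so the val_angle_error ordering
        # above is not preserved within ties.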
        sort_by = 'val_grasp_acc'
        # DISABLE RANDOM AUGMENTATION FOR ROTATION
        FLAGS.random_augmentation = None
    elif problem_type == 'semantic_translation_regression':
        # sort by cart_error from low to high
        # sort_by = 'cart_error'
        # dataframe = dataframe.sort_values(sort_by, ascending=True)
        # # sort by val_cart_error from low to high
        sort_by = 'val_cart_error'
        dataframe = dataframe.sort_values(sort_by, ascending=True)
        # # sort by grasp accuracy within 4 cm and 60 degrees
        # sort_by = 'val_grasp_acc_4cm_60deg'
        # dataframe = dataframe.sort_values(sort_by, ascending=False)
        # sort_by = 'val_grasp_acc'
        # dataframe = dataframe.sort_values(sort_by, ascending=False)
    elif problem_type == 'semantic_grasp_regression':
        dataframe = dataframe.sort_values('val_grasp_acc', ascending=False)
        sort_by = 'val_grasp_acc'
    else:
        raise ValueError('costar_block_stacking_train_ranked_regression.py: '
                         'unsupported problem type: ' + str(problem_type))

    # don't give really long runs an unfair advantage
    if FLAGS.max_epoch is not None:
        dataframe = dataframe.loc[dataframe['epoch'] <= FLAGS.max_epoch]
    # filter only the specified epoch so we don't redo longer runs
    if filter_epoch is True:
        dataframe = dataframe.loc[dataframe['epoch'] == FLAGS.epoch]
        # TODO(ahundt) we are really looking for "is this a hyperopt result?" not "checkpoint"
        # hyperopt search results don't have checkpoints, but full training runs do
        dataframe = dataframe.loc[dataframe['checkpoint'] == False]

    # loop over the ranked models
    row_progress = tqdm(dataframe.iterrows(), ncols=240)
    for i, (index, row) in enumerate(row_progress):
        if i < skip_models:
            # we designated this model as one to skip, so continue on
            continue
        history = None
        hyperparameters_filename = row['hyperparameters_filename']

        hyperparams = hypertree_utilities.load_hyperparams_json(
            hyperparameters_filename,
            FLAGS.fine_tuning,
            FLAGS.learning_rate,
            feature_combo_name=feature_combo)

        row_progress.write('-' * 80)
        row_progress.write('Training with hyperparams at index ' + str(index) +
                           ' from: ' + str(hyperparameters_filename) + '\n\n' +
                           str(hyperparams))
        if sort_by is not None:
            row_progress.write('Sorting by: ' + str(sort_by) +
                               ', the value in the rank_csv is: ' +
                               str(row[sort_by]))
        row_progress.write('-' * 80)
        hyperparams['loss'] = 'msle'
        # save weights at checkpoints as the model's performance improves
        hyperparams['checkpoint'] = True
        hyperparams['learning_rate'] = 1.0
        hyperparams['batch_size'] = FLAGS.batch_size

        if i > 0:
            # only load weights for the first entry
            # TODO(ahundt) allow automated loading of weights from past runs
            load_weights = None
            FLAGS.load_weights = None

        try:
            history = hypertree_train.run_training(
                problem_name=problem_type,
                # feature_combo_name=feature_combo,
                hyperparams=hyperparams,
                dataset_name=dataset_name,
                optimizer_name=optimizer_name,
                load_weights=load_weights,
                hyperparameters_filename=hyperparameters_filename,
                **hyperparams)

            run_histories[hyperparameters_filename] = history
            history_dicts[hyperparameters_filename] = history.history
            # save the histories so far, overwriting past updates
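            # NumpyEncoder is assumed to be a json.JSONEncoder subclass from
            # hypertree_utilities that converts numpy arrays and scalars into
            # plain Python types so the Keras history values serialize cleanly.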
            with open(json_histories_path, 'w') as fp:
                # save out all kfold params so they can be reloaded in the future
                json.dump(history_dicts,
                          fp,
                          cls=hypertree_utilities.NumpyEncoder)

            # generate the summary results
            results = hypertree_utilities.multi_run_histories_summary(
                run_histories,
                metrics=histories_metrics,
                multi_history_metrics=histories_summary_metrics,
                save_filename=json_histories_summary_path,
                description_prefix=problem_type,
                results_prefix='ranked_results',
            )
        except tf.errors.ResourceExhaustedError as exception:
            print(
                'Hyperparams caused the algorithm to run out of resources, '
                'will continue to the next stage and return infinity loss for now. '
                'To avoid this entirely you might set more memory sensitive hyperparam ranges, '
                'or add constraints to your hyperparam search so it does not choose '
                'huge values for all the parameters at once. '
                'Error: ', exception)
            loss = float('inf')
            ex_type, ex, tb = sys.exc_info()
            traceback.print_tb(tb)
            # deletion must be explicit to prevent leaks
            # https://stackoverflow.com/a/16946886/99379
            del tb
        except (ValueError, tf.errors.FailedPreconditionError,
                tf.errors.OpError) as exception:
            print(
                'Hyperparams encountered a model that failed with an invalid combination of values, '
                'we will continue to the next stage and return infinity loss for now. '
                'To avoid this entirely you will need to debug your model w.r.t. '
                'the current hyperparam choice. '
                'Error: ', exception)
            loss = float('inf')
            ex_type, ex, tb = sys.exc_info()
            traceback.print_tb(tb)
            # deletion must be explicit to prevent leaks
            # https://stackoverflow.com/a/16946886/99379
            del tb
        except KeyboardInterrupt as e:
            print(
                'Evaluation of this model canceled based on a user request. '
                'We will continue to next stage and return infinity loss for the canceled model.'
            )
            loss = float('inf')
            ex_type, ex, tb = sys.exc_info()
            traceback.print_tb(tb)
            # deletion must be explicit to prevent leaks
            # https://stackoverflow.com/a/16946886/99379
            del tb

        # TODO(ahundt) consider shutting down dataset generators and clearing the session when there is an exception
        # https://github.com/tensorflow/tensorflow/issues/4735#issuecomment-363748412
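        # clear_session() destroys the current TF graph and session so models
        # built in earlier loop iterations don't accumulate and leak memory.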
        keras.backend.clear_session()
Code example #3
# Same assumed module-level context as code example #1: `FLAGS`,
# `hypertree_train`, and `hypertree_utilities` come from the original script
# (cornell_grasp_train_classification.py, per the print message below).
def main(_):
    problem_type = 'grasp_classification'
    # recommended for
    # - pixelwise classification
    # - classification of images centered and rotated to grasp proposals
    feature_combo = 'image_preprocessed_norm_sin2_cos2_width_3'

    # Warning: this version will have some correlation with grasp_success due to the height parameter!
    # However, it will be OK to use to classify the output of regression
    # feature_combo = 'image_preprocessed_norm_sin2_cos2_height_width_4'

    # Recommended for single prediction if images are merely center cropped
    # and not rotated or translated to the proposed grasp positions:
    # feature_combo = 'image_preprocessed_norm_sin2_cos2_w_yx_5'

    # recommended for
    # - classification of images centered and rotated to grasp proposals
    # feature_combo = 'image_preprocessed_width_1'

    # NOT RECOMMENDED due to height being correlated with grasp_success:
    # feature_combo = 'image_preprocessed_sin2_cos2_height_width_4'
    # Override some default flags for this configuration
    # see other configuration in hypertree_train.py choose_features_and_metrics()
    FLAGS.crop_height = 224
    FLAGS.crop_width = 224
    FLAGS.problem_type = problem_type
    FLAGS.feature_combo = feature_combo
    FLAGS.crop_to = 'center_on_gripper_grasp_box_and_rotate_upright'
    if FLAGS.load_hyperparams is None:
        # FLAGS.load_hyperparams = ('~/datasets/logs/hyperopt_logs_cornell/'
        #                           '2018-02-23-09-35-21_-vgg_dense_model-dataset_cornell_grasping-grasp_success/'
        #                           '2018-02-23-09-35-21_-vgg_dense_model-dataset_cornell_grasping-grasp_success_hyperparams.json')

        # About 0.95 k-fold accuracy on preprocessed_norm_sin2_cos2_w_3:
        # FLAGS.load_hyperparams = ('hyperparams/classification/2018-03-03-07-14-59_'
        #                           '-vgg_dense_model-dataset_cornell_grasping-grasp_success_hyperparams.json')

        # (not very good) Best result from classification hyperopt run ending 2018-03-16:
        # FLAGS.load_hyperparams = ('~/src/costar_ws/src/costar_plan/costar_google_brainrobotdata/hyperparams/classification/'
        #                           '2018-03-14-00-40-09_-vgg19_dense_model-dataset_cornell_grasping-grasp_success_hyperparams.json')

        # (not very good) Best NON VGG result from classification hyperopt run ending 2018-03-16:
        # FLAGS.load_hyperparams = ('~/src/costar_ws/src/costar_plan/costar_google_brainrobotdata/hyperparams/classification/'
        #                           '2018-03-16-06-15-01_-densenet_dense_model-dataset_cornell_grasping-grasp_success_hyperparams.json')

        # Quite good kfold, best hyperparams from 2018-04 2000 model hyperopt run TODO(ahundt) add details from kfold run
        # FLAGS.load_hyperparams = ('hyperparams/classification/2018-04-08-21-04-19__optimized_hyperparams.json')
        # about to run: best hyperparams with trainable set to False from 2018-04 2000 model hyperopt run
        # FLAGS.load_hyperparams = ('hyperparams/classification/2018-04-07-15-31-34_vgg_classifier_model-_img_vgg_vec_dense_block_trunk_dense_block-dataset_cornell_grasping-grasp_success_hyperparams.json')

        # One of best results from hyperopt run ending 2018-05-08
        # FLAGS.load_hyperparams = ('hyperparams/classification/2018-05-03-17-02-01_inception_resnet_v2_classifier_model-'
        #                           '_img_inception_resnet_v2_vec_dense_trunk_vgg_conv_block-dataset_cornell_grasping-grasp_success_hyperparams.json')

        # Best hyperopt result without "height" (length along which gripper can slide, not gripper openness) parameter from hyperopt run ending 2018-06-02
        FLAGS.load_hyperparams = ('hyperparams/classification/2018-05-26-01-22-00_inception_resnet_v2_classifier_model-'
                                  '_img_inception_resnet_v2_vec_dense_trunk_vgg_conv_block-dataset_cornell_grasping-grasp_success_hyperparams.json')
    FLAGS.epochs = 80
    FLAGS.fine_tuning_epochs = 0
    # 8 training folds
    FLAGS.num_train = 8
    # 1 validation fold, this is automatically
    # changed below for the k_fold case
    FLAGS.num_validation = 1
    FLAGS.num_test = 1

    # This only means that the first part of the run is not fine tuning
    # The later fine_tuning_epochs will work correctly.
    FLAGS.fine_tuning = False
    print('Classification Training on grasp_success is about to begin. '
          'This mode overrides some command line parameters, so to change them '
          'you will need to modify cornell_grasp_train_classification.py directly.')

    hyperparams = hypertree_utilities.load_hyperparams_json(
        FLAGS.load_hyperparams, fine_tuning=FLAGS.fine_tuning,
        learning_rate=FLAGS.learning_rate,
        feature_combo_name=feature_combo)

    # make sure feature_combo_name is set whether or not it was already present
    if 'feature_combo_name' not in hyperparams:
        hyperparams['feature_combo_name'] = feature_combo

    if 'k_fold' in FLAGS.pipeline_stage:
        FLAGS.num_validation = 2
        FLAGS.num_test = 0
        hypertree_train.train_k_fold(
            problem_name=problem_type,
            # feature_combo_name=feature_combo,
            hyperparams=hyperparams,
            split_type='objectwise',
            **hyperparams)
        hypertree_train.train_k_fold(
            problem_name=problem_type,
            # feature_combo_name=feature_combo,
            hyperparams=hyperparams,
            split_type='imagewise',
            **hyperparams)
    else:
        hypertree_train.run_training(
            problem_name=problem_type,
            # feature_combo_name=feature_combo,
            hyperparams=hyperparams,
            **hyperparams)
Code example #4
# Same assumed module-level context as code example #1, plus `os`; the
# original script is costar_block_stacking_train_regression.py per the
# ValueError message below.
def main(_):
    use_best_model = True
    load_best_weights = True
    # a bit hacky pseudo-eval on training data
    eval_on_training_data = False
    # problem_type = 'semantic_translation_regression'
    problem_type = 'semantic_rotation_regression'
    # problem_type = 'semantic_grasp_regression'
    # problem_type2 = 'semantic_grasp_regression'
    problem_type2 = None
    feature_combo = 'image_preprocessed'
    # Override some default flags for this configuration
    # see other configuration in hypertree_train.py choose_features_and_metrics()
    FLAGS.problem_type = problem_type
    FLAGS.feature_combo = feature_combo
    FLAGS.crop_to = 'image_contains_grasp_box_center'
    # uncomment when running on combined block only + block and plush datasets
    # FLAGS.costar_filename_base = 'costar_combined_block_plush_stacking_v0.4_success_only'
    FLAGS.costar_filename_base = 'costar_block_stacking_v0.4_success_only'
    load_weights = None
    if FLAGS.load_hyperparams is None:
        # Results from classification hyperparameter run
        # FLAGS.load_hyperparams = ('~/datasets/logs/hyperopt_logs_cornell/'
        #                           '2018-02-23-09-35-21_-vgg_dense_model-dataset_cornell_grasping-grasp_success/'
        #                           '2018-02-23-09-35-21_-vgg_dense_model-dataset_cornell_grasping-grasp_success_hyperparams.json')

        # Results from first regression hyperparameter run
        # FLAGS.load_hyperparams = ('~/datasets/logs/hyperopt_logs_cornell_regression/'
        #                           '2018-03-01-15-12-20_-vgg_regression_model-dataset_cornell_grasping-norm_sin2_cos2_hw_yx_6/'
        #                           '2018-03-01-15-12-20_-vgg_regression_model-dataset_cornell_grasping-norm_sin2_cos2_hw_yx_6_hyperparams.json')

        # Just try out NasNet directly without hyperopt (it didn't work well on 2017-03-04)
        # FLAGS.load_hyperparams = ('~/src/costar_ws/src/costar_plan/costar_google_brainrobotdata/nasnet_large.json')

        # decent, but didn't run kfold 2018-03-05, + 2018-03-07 trying with mae
        # FLAGS.load_hyperparams = ('~/.keras/datasets/logs/hyperopt_logs_cornell_regression/'
        #                           '2018-03-03-16-33-06_-vgg_regression_model-dataset_cornell_grasping-norm_sin2_cos2_hw_yx_6/'
        #                           '2018-03-03-16-33-06_-vgg_regression_model-dataset_cornell_grasping-norm_sin2_cos2_hw_yx_6_hyperparams.json')

        # Old best first epoch on hyperopt run 2018-03-06:
        # FLAGS.load_hyperparams = ('~/.keras/datasets/logs/hyperopt_logs_cornell_regression/'
        #                           '2018-03-06-00-20-24_-vgg19_regression_model-dataset_cornell_grasping-norm_sin2_cos2_hw_yx_6/'
        #                           '2018-03-06-00-20-24_-vgg19_regression_model-dataset_cornell_grasping-norm_sin2_cos2_hw_yx_6_hyperparams.json')

        # Current best performance with mae on val + test 2018-03-07, haven't tried on kfold yet 2018-03-06
        # FLAGS.load_hyperparams = ('~/.keras/datasets/logs/hyperopt_logs_cornell_regression/'
        #                           '2018-03-05-23-05-07_-vgg_regression_model-dataset_cornell_grasping-norm_sin2_cos2_hw_yx_6/'
        #                           '2018-03-05-23-05-07_-vgg_regression_model-dataset_cornell_grasping-norm_sin2_cos2_hw_yx_6_hyperparams.json')

        # Best first and last epoch on hyperopt run 2018-03-08
        # FLAGS.load_hyperparams = ('~/.keras/datasets/logs/hyperopt_logs_cornell_regression/'
        #                           '2018-03-07-18-36-17_-vgg_regression_model-dataset_cornell_grasping-norm_sin2_cos2_hw_yx_6/'
        #                           '2018-03-07-18-36-17_-vgg_regression_model-dataset_cornell_grasping-norm_sin2_cos2_hw_yx_6_hyperparams.json')
        # FLAGS.load_hyperparams = (r'C:/Users/Varun/JHU/LAB/Projects/costar_plan/costar_google_brainrobotdata/hyperparams/regression/2018-03-01-15-12-20_-vgg_regression_model-dataset_cornell_grasping-norm_sin2_cos2_hw_yx_6_hyperparams.json')

        # FLAGS.load_hyperparams = ('hyperparams/regression/'
        #                           '2018-03-05-23-05-07_-vgg_regression_model-dataset_cornell_grasping-norm_sin2_cos2_hw_yx_6_hyperparams.json')
        # 2018-06-28 hyperopt 324 model results on actual stacking dataset, may not be a great run... .25 m error. Rerun?
        # FLAGS.load_hyperparams = ('hyperparams/semantic_grasp_regression/2018-06-25-03-45-46_vgg_semantic_grasp_regression_model-_img_vgg_vec_dense_trunk_vgg_conv_block-dataset_costar_block_stacking-grasp_goal_xyz_aaxyz_nsc_8_hyperparams.json')

        # 2018-06-29 hyperopt best of 65 models on val_cart_error in hyperopt, the vgg model above reliably does better.
        # 1 epoch results: val angle error (rad): 0.215976331639 val cart error (m): 0.126106130658
        # 120 epoch results: val cart error 0.25... much worse than the original 1 epoch.
        # FLAGS.load_hyperparams = ('hyperparams/semantic_grasp_regression/2018-06-28-21-16-47_vgg_semantic_grasp_regression_model-_img_vgg_vec_dense_trunk_vgg_conv_block-dataset_costar_block_stacking-grasp_goal_xyz_aaxyz_nsc_8_hyperparams.json')

        # 2018-06-30 hyperopt 2nd best of 120 models on val_cart_error in hyperopt
        # FLAGS.load_hyperparams = ('hyperopt_logs_costar_grasp_regression/2018-06-28-15-45-13_inception_resnet_v2_semantic_grasp_regression_model-_img_inception_resnet_v2_vec_dense_trunk_vgg_conv_block-dataset_costar_block_stacking-grasp_goal_xyz_aaxyz_nsc_8/2018-06-28-15-45-13_inception_resnet_v2_semantic_grasp_regression_model-_img_inception_resnet_v2_vec_dense_trunk_vgg_conv_block-dataset_costar_block_stacking-grasp_goal_xyz_aaxyz_nsc_8_hyperparams.json')

        # 2018-07-26 #1 COORDCONV applied to initial input image, 2nd best performing overall of about 700 hyperopt results (ABOUT TO BE RUN)
        # FLAGS.load_hyperparams = ('hyperopt_logs_costar_grasp_regression/2018-07-26-19-40-57_vgg_semantic_grasp_regression_model-_img_vgg_vec_dense_trunk_vgg_conv_block-dataset_costar_block_stacking-grasp_goal_xyz_aaxyz_nsc_8/2018-07-26-19-40-57_vgg_semantic_grasp_regression_model-_img_vgg_vec_dense_trunk_vgg_conv_block-dataset_costar_block_stacking-grasp_goal_xyz_aaxyz_nsc_8_hyperparams.json')

        # 2018-07-07 best performing non-vgg model with 15% val and test grasp accuracy for translation with the semantic_translation_regression case.
        # FLAGS.load_hyperparams = 'hyperparams/semantic_grasp_regression/2018-07-06-22-34-31_nasnet_mobile_semantic_grasp_regression_model-_img_nasnet_mobile_vec_dense_trunk_vgg_conv_block-dataset_costar_block_stacking-grasp_goal_xyz_aaxyz_nsc_8_hyperparams.json'

        if (problem_type == 'semantic_translation_regression' or
                problem_type == 'semantic_grasp_regression') and use_best_model:
            # 2018-08-12 CURRENT SECOND BEST MODEL FOR TRANSLATION, 22% translation only validation accuracy
            # FLAGS.load_hyperparams = 'hyperparams/semantic_grasp_regression/2018-07-06-22-34-31_nasnet_mobile_semantic_grasp_regression_model-_img_nasnet_mobile_vec_dense_trunk_vgg_conv_block-dataset_costar_block_stacking-grasp_goal_xyz_aaxyz_nsc_8_hyperparams.json'
            # 2018-07-08 BEST of ~400, or maybe the 2018-07-07 one? 30% translation-only validation accuracy, 17.5% combined translation + rotation validation accuracy.
            # BEST MODEL Results in: ./logs_cornell/2018-07-09-09-08-15_nasnet_mobile_semantic_translation_regression_model-_img_nasnet_mobile_vec_dense_trunk_vgg_conv_block-dataset_costar_block_stacking-grasp_goal_xyz_3
            FLAGS.load_hyperparams = 'hyperparams/semantic_grasp_regression/2018-07-07-15-05-32_nasnet_mobile_semantic_grasp_regression_model-_img_nasnet_mobile_vec_dense_trunk_vgg_conv_block-dataset_costar_block_stacking-grasp_goal_xyz_aaxyz_nsc_8_hyperparams.json'
            # weights for TRANSLATION PROBLEM ONLY:
            if problem_type == 'semantic_translation_regression' and load_best_weights:
                # load_weights = None
                # use these weights if xyz is input but not axis/angle data
                load_weights = 'logs_cornell/2018-09-26-21-31-00_train_0.4_gripper_center-nasnet_mobile_semantic_translation_regression_model--dataset_costar_block_stacking-grasp_goal_xyz_3/2018-09-26-21-31-00_train_0.4_gripper_center-nasnet_mobile_semantic_translation_regression_model--dataset_costar_block_stacking-grasp_goal_xyz_3-epoch-341-val_loss-0.000-val_cart_error-0.023.h5'
                FLAGS.initial_epoch = 245 + 146 + 341  # from continuing runs below
                # load_weights = 'logs_cornell/2018-09-26-21-31-00_train_0.4_gripper_center-nasnet_mobile_semantic_translation_regression_model--dataset_costar_block_stacking-grasp_goal_xyz_3/2018-09-26-21-31-00_train_0.4_gripper_center-nasnet_mobile_semantic_translation_regression_model--dataset_costar_block_stacking-grasp_goal_xyz_3-epoch-245-val_loss-0.000-val_cart_error-0.024.h5'

                # FLAGS.initial_epoch = 391  # 245 + 146 from the two continuing runs below
                # load_weights = './logs_cornell/2018-09-25-18-29-20_train_0.4_gripper_center-nasnet_mobile_semantic_translation_regression_model--dataset_costar_block_stacking-grasp_goal_xyz_3/2018-09-25-18-29-20_train_0.4_gripper_center-nasnet_mobile_semantic_translation_regression_model--dataset_costar_block_stacking-grasp_goal_xyz_3-epoch-146-val_loss-0.000-val_cart_error-0.026.h5'
                # load_weights = './logs_cornell/2018-09-15-21-10-39_train_no_augmentation-nasnet_mobile_semantic_translation_regression_model--dataset_costar_block_stacking-grasp_goal_xyz_3-epoch-003-val_loss-0.000-val_cart_error-0.031.h5'
                # load_weights = './logs_cornell/2018-08-29-22-47-40_train_v0.3-nasnet_mobile_semantic_translation_regression_model--dataset_costar_block_stacking-grasp_goal_xyz_3/2018-08-29-22-47-40_train_v0.3-nasnet_mobile_semantic_translation_regression_model--dataset_costar_block_stacking-grasp_goal_xyz_3-epoch-230-val_loss-0.000-val_grasp_acc-0.289.h5'
                # In the past, weights were disabled due to a change in the input vector to include angle data. See commits 785eaf4a4501f2e6532ed59b0972d7b1aaa5784e, b78c3e558567c4b3388a99786549d23a2a1e060c, and 19f274c0d77deff533175692046e424165b821df
                # The weights from 2018-07-31-21-40-50 are technically the best for val_grasp_acc (1cm), but newer weights do better with respect to other metrics like val_cart_error and other grasp acc distance metrics.
                # load_weights = './logs_cornell/2018-07-31-21-40-50_nasnet_mobile_semantic_translation_regression_model-_img_nasnet_mobile_vec_dense_trunk_vgg_conv_block-dataset_costar_block_stacking-grasp_goal_xyz_3/2018-07-31-21-40-50_nasnet_mobile_semantic_translation_regression_model-_img_nasnet_mobile_vec_dense_trunk_vgg_conv_block-dataset_costar_block_stacking-grasp_goal_xyz_3-epoch-018-val_loss-0.000-val_grasp_acc-0.300.h5'
                # load_weights = './logs_cornell/2018-07-30-21-47-16_nasnet_mobile_semantic_translation_regression_model-_img_nasnet_mobile_vec_dense_trunk_vgg_conv_block-dataset_costar_block_stacking-grasp_goal_xyz_3/2018-07-30-21-47-16_nasnet_mobile_semantic_translation_regression_model-_img_nasnet_mobile_vec_dense_trunk_vgg_conv_block-dataset_costar_block_stacking-grasp_goal_xyz_3-epoch-016-val_loss-0.000-val_grasp_acc-0.273.h5'
                # load_weights = './logs_cornell/2018-07-09-09-08-15_nasnet_mobile_semantic_translation_regression_model-_img_nasnet_mobile_vec_dense_trunk_vgg_conv_block-dataset_costar_block_stacking-grasp_goal_xyz_3/2018-07-09-09-08-15_nasnet_mobile_semantic_translation_regression_model-_img_nasnet_mobile_vec_dense_trunk_vgg_conv_block-dataset_costar_block_stacking-grasp_goal_xyz_3-epoch-115-val_loss-0.000-val_grasp_acc-0.258.h5'
                # Use these weights for both xyz and axis-angle input data.
                # Be careful if loading the weights below, the correct vector input data and backwards compatibility code must be in place to avoid:
                # "ValueError: You are trying to load a weight file containing 13 layers into a model with 11 layers."
                # load_weights = './logs_cornell/2018-08-09-11-26-03_nasnet_mobile_semantic_translation_regression_model-_img_nasnet_mobile_vec_dense_trunk_vgg_conv_block-dataset_costar_block_stacking-grasp_goal_xyz_3/2018-08-09-11-26-03_nasnet_mobile_semantic_translation_regression_model-_img_nasnet_mobile_vec_dense_trunk_vgg_conv_block-dataset_costar_block_stacking-grasp_goal_xyz_3-epoch-003-val_loss-0.000-val_grasp_acc-0.160.h5'
                # The weights below are trained with data augmentation; the 2018-07-31-21-40-50 weights above are the actual best so far for translation as of 2018-08-12.
                # load_weights = 'logs_cornell/2018-08-12-22-45-00_train_200_epochs-nasnet_mobile_semantic_translation_regression_model-_img_nasnet_mobile_vec_dense_trunk_vgg_conv_block-dataset_costar_block_stacking-grasp_goal_xyz_3/2018-08-12-22-45-00_train_200_epochs-nasnet_mobile_semantic_translation_regression_model-_img_nasnet_mobile_vec_dense_trunk_vgg_conv_block-dataset_costar_block_stacking-grasp_goal_xyz_3-epoch-198-val_loss-0.000-val_grasp_acc-0.158.h5'
            # weights for TRANSLATION + ROTATION PROBLEM:
            if problem_type == 'semantic_grasp_regression' and load_best_weights:
                # same hyperparams as above, just also the same folder as the weights below it
                # FLAGS.load_hyperparams = './logs_cornell/2018-07-11-14-01-56_nasnet_mobile_semantic_grasp_regression_model-_img_nasnet_mobile_vec_dense_trunk_vgg_conv_block-dataset_costar_block_stacking-grasp_goal_xyz_aaxyz_nsc_8/2018-07-11-14-01-56_nasnet_mobile_semantic_grasp_regression_model-_img_nasnet_mobile_vec_dense_trunk_vgg_conv_block-dataset_costar_block_stacking-grasp_goal_xyz_aaxyz_nsc_8_hyperparams.json'
                load_weights = './logs_cornell/2018-07-11-14-01-56_nasnet_mobile_semantic_grasp_regression_model-_img_nasnet_mobile_vec_dense_trunk_vgg_conv_block-dataset_costar_block_stacking-grasp_goal_xyz_aaxyz_nsc_8/2018-07-11-14-01-56_nasnet_mobile_semantic_grasp_regression_model-_img_nasnet_mobile_vec_dense_trunk_vgg_conv_block-dataset_costar_block_stacking-grasp_goal_xyz_aaxyz_nsc_8-epoch-018-val_loss-0.027-val_grasp_acc-0.175.h5'

        # 0.6 val_grasp_acc, hyperopt results placed this as #1 of 700 models for val_grasp_acc in 1 epoch
        # FLAGS.load_hyperparams = 'hyperopt_logs_costar_grasp_regression/2018-07-22-07-15-09_vgg_semantic_grasp_regression_model-_img_vgg_vec_dense_block_trunk_vgg_conv_block-dataset_costar_block_stacking-grasp_goal_xyz_aaxyz_nsc_8/2018-07-22-07-15-09_vgg_semantic_grasp_regression_model-_img_vgg_vec_dense_block_trunk_vgg_conv_block-dataset_costar_block_stacking-grasp_goal_xyz_aaxyz_nsc_8_hyperparams.json'

        # not so great, was the top nasnet_mobile model of 700 models (0.1 or 0.7 accuracy after 120 epochs, not that great, slow runtime)
        # FLAGS.load_hyperparams = 'hyperopt_logs_costar_grasp_regression/2018-07-22-16-59-27_nasnet_mobile_semantic_grasp_regression_model-_img_nasnet_mobile_vec_dense_trunk_vgg_conv_block-dataset_costar_block_stacking-grasp_goal_xyz_aaxyz_nsc_8/2018-07-22-16-59-27_nasnet_mobile_semantic_grasp_regression_model-_img_nasnet_mobile_vec_dense_trunk_vgg_conv_block-dataset_costar_block_stacking-grasp_goal_xyz_aaxyz_nsc_8_hyperparams.json'

        # 2018-07-23 best nasnet image model hyperopt, #6 result of 730 models (ABOUT TO BE RUN)
        # FLAGS.load_hyperparams = 'hyperopt_logs_costar_grasp_regression/2018-07-21-18-59-02_nasnet_mobile_semantic_grasp_regression_model-_img_nasnet_mobile_vec_dense_trunk_vgg_conv_block-dataset_costar_block_stacking-grasp_goal_xyz_aaxyz_nsc_8/2018-07-21-18-59-02_nasnet_mobile_semantic_grasp_regression_model-_img_nasnet_mobile_vec_dense_trunk_vgg_conv_block-dataset_costar_block_stacking-grasp_goal_xyz_aaxyz_nsc_8_hyperparams.json'

        # 2018-07-30 maybe worth a try, vgg model which did very well on rotation, #4 of 729, and fairly good on both val_grasp_acc and val_cart_error
        # FLAGS.load_hyperparams = 'hyperopt_logs_costar_grasp_regression/2018-07-24-07-43-45_vgg_semantic_grasp_regression_model-_img_vgg_vec_dense_block_trunk_vgg_conv_block-dataset_costar_block_stacking-grasp_goal_xyz_aaxyz_nsc_8/2018-07-24-07-43-45_vgg_semantic_grasp_regression_model-_img_vgg_vec_dense_block_trunk_vgg_conv_block-dataset_costar_block_stacking-grasp_goal_xyz_aaxyz_nsc_8_hyperparams.json'

        if problem_type == 'semantic_rotation_regression' and use_best_model:
            # 2018-08-12 BEST ROTATION MODEL (#5 of 730 models in hyperopt, but #1 after long term training) for rotation, with 58% val accuracy for angles within 15 degrees.
            FLAGS.load_hyperparams = 'hyperparams/semantic_rotation_regression/2018-08-09-03-05-18_train_200_epochs-vgg_semantic_rotation_regression_model-_img_vgg_vec_dense_block_trunk_nasnet_normal_a_cell-dataset_costar_block_stacking-grasp_goal_aaxyz_nsc_5_hyperparams.json'
            if load_best_weights:
                # TODO(ahundt) 2018-09-25 the next line is not actually the current best weights, but we are training with a new configuration so that's what we will load for now
                load_weights = 'logs_cornell/2018-09-25-23-51-23_train_0.4_gripper_center_rot-vgg_semantic_rotation_regression_model--dataset_costar_block_stacking-grasp_goal_aaxyz_nsc_5/2018-09-25-23-51-23_train_0.4_gripper_center_rot-vgg_semantic_rotation_regression_model--dataset_costar_block_stacking-grasp_goal_aaxyz_nsc_5-epoch-080-val_loss-0.003-val_angle_error-0.550.h5'
                FLAGS.initial_epoch = 144
                # load_weights = 'hyperopt_logs_costar_rotation_regression/2018-09-04-20-17-25_train_v0.4_msle-vgg_semantic_rotation_regression_model--dataset_costar_block_stacking-grasp_goal_aaxyz_nsc_5/2018-09-04-20-17-25_train_v0.4_msle-vgg_semantic_rotation_regression_model--dataset_costar_block_stacking-grasp_goal_aaxyz_nsc_5-epoch-412-val_loss-0.002-val_angle_error-0.279.h5'
                # FLAGS.initial_epoch = 413
                # load_weights = 'hyperopt_logs_costar_rotation_regression/2018-08-31-20-35-15_train_v0.4_msle-vgg_semantic_rotation_regression_model--dataset_costar_block_stacking-grasp_goal_aaxyz_nsc_5/2018-08-31-20-35-15_train_v0.4_msle-vgg_semantic_rotation_regression_model--dataset_costar_block_stacking-grasp_goal_aaxyz_nsc_5-epoch-237-val_loss-0.002-val_angle_error-0.281.h5'
                # FLAGS.initial_epoch = 238
                # load_weights = 'hyperopt_logs_costar_block_stacking_train_ranked_regression/2018-08-10-06-55-09_train_200_epochs-vgg_semantic_rotation_regression_model-_img_vgg_vec_dense_block_trunk_nasnet_normal_a_cell-dataset_costar_block_stacking-grasp_goal_aaxyz_nsc_5/2018-08-10-06-55-09_train_200_epochs-vgg_semantic_rotation_regression_model-_img_vgg_vec_dense_block_trunk_nasnet_normal_a_cell-dataset_costar_block_stacking-grasp_goal_aaxyz_nsc_5-epoch-041-val_loss-0.007-val_grasp_acc-0.581.h5'
            FLAGS.random_augmentation = None

    FLAGS.epochs = 600
    FLAGS.batch_size = 32
    optimizer_name = 'sgd'
    # FLAGS.crop_height = 480
    # FLAGS.crop_width = 640
    # FLAGS.resize_height = 480
    # FLAGS.resize_width = 640
    # print('Note: special overrides have been applied '
    #       'for an experiment. '
    #       'crop + resize width/height have been set to 640x480.')
    # FLAGS.log_dir = r'C:/Users/Varun/JHU/LAB/Projects/costar_plan/costar_google_brainrobotdata/hyperparams/'
    # FLAGS.data_dir = r'C:/Users/Varun/JHU/LAB/Projects/costar_block_stacking_dataset_v0.4/*success.h5f'

    FLAGS.data_dir = os.path.expanduser(
        '~/.keras/datasets/costar_block_stacking_dataset_v0.4/blocks_only/')
    # FLAGS.data_dir = os.path.expanduser('~/.keras/datasets/costar_block_stacking_dataset_v0.3/')
    FLAGS.fine_tuning_epochs = 40
    print(
        'Regression Training on costar block stacking is about to begin. '
        'It overrides some command line parameters, including the loss, '
        'so to change them you will need to modify '
        'costar_block_stacking_train_regression.py directly.')

    dataset_name = 'costar_block_stacking'
    FLAGS.dataset_name = dataset_name

    print('-' * 80)
    print('Training with hyperparams from: ' + str(FLAGS.load_hyperparams))
    learning_rate = FLAGS.learning_rate

    hyperparams = hypertree_utilities.load_hyperparams_json(
        FLAGS.load_hyperparams,
        FLAGS.fine_tuning,
        learning_rate,
        feature_combo_name=feature_combo)

    print('\n' + str(hyperparams))
    print('-' * 80)

    # TODO: remove loss if it doesn't work or make me the default in the other files if it works really well
    if 'rotation' in problem_type:
        # rotation does much better with msle over mse
        hyperparams['loss'] = 'msle'
    elif 'translation' in problem_type:
        # translation does slightly better with mse over msle
        hyperparams['loss'] = 'mse'
    elif 'grasp' in problem_type:
        hyperparams['loss'] = 'mse'
    else:
        raise ValueError(
            'costar_block_stacking_train_regression.py update the train config file, '
            'unsupported problem type: ' + problem_type)
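    # For reference, Keras' 'msle' loss is mean((log(1 + y_pred) - log(1 + y_true)) ** 2),
    # which penalizes relative error, while 'mse' is mean((y_pred - y_true) ** 2),
    # which penalizes absolute error.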

    # save weights at checkpoints as the model's performance improves
    hyperparams['checkpoint'] = True
    hyperparams['batch_size'] = FLAGS.batch_size
    # ------------------------------------
    # temporary 0 learning rate for eval!
    if eval_on_training_data:
        print(
            'EVAL on training data (a slightly hacky version) with a ~0 '
            'learning rate, ~0 dropout, and no learning rate schedule'
        )
        learning_rate = 0.000000000001
        hyperparams['dropout_rate'] = 0.000000000001
        # TODO(ahundt) it seems set_trainable_layers in hypertree_model.py has a bug?
        # hyperparams['trainable'] = 0.00000000001
        FLAGS.learning_rate_schedule = 'none'
    else:
        print('manual initial 1.0 learning rate override applied')
        learning_rate = 1.0
    # ------------------------------------
    # learning_rate = 1.0
    if load_weights is not None:
        FLAGS.load_weights = load_weights
        # For training from scratch
        # learning_rate = 1.0
        # For resuming translation + rotation model training
        # learning_rate = 1e-2
        # For resuming translation model training
        # learning_rate = 1e-3
    # hyperparams['trainable'] = True

    # override all learning rate settings to make sure
    # they are consistent with any modifications made above
    hyperparams['learning_rate'] = learning_rate
    FLAGS.learning_rate = learning_rate
    FLAGS.fine_tuning_learning_rate = learning_rate

    if 'k_fold' in FLAGS.pipeline_stage:
        hypertree_train.train_k_fold(problem_name=problem_type,
                                     feature_combo_name=feature_combo,
                                     hyperparams=hyperparams,
                                     split_type='objectwise',
                                     dataset_name=dataset_name,
                                     **hyperparams)
        hypertree_train.train_k_fold(problem_name=problem_type,
                                     feature_combo_name=feature_combo,
                                     hyperparams=hyperparams,
                                     split_type='imagewise',
                                     dataset_name=dataset_name,
                                     **hyperparams)
        # only run the second problem type if one is configured
        if problem_type2 is not None:
            hypertree_train.train_k_fold(problem_name=problem_type2,
                                         feature_combo_name=feature_combo,
                                         hyperparams=hyperparams,
                                         split_type='objectwise',
                                         dataset_name=dataset_name,
                                         **hyperparams)
            hypertree_train.train_k_fold(problem_name=problem_type2,
                                         feature_combo_name=feature_combo,
                                         hyperparams=hyperparams,
                                         split_type='imagewise',
                                         dataset_name=dataset_name,
                                         **hyperparams)
    else:
        print('\n---------------------\ntraining problem type: ' +
              str(problem_type) + '\n---------------------')
        hypertree_train.run_training(
            problem_name=problem_type,
            # feature_combo_name=feature_combo,
            hyperparams=hyperparams,
            dataset_name=dataset_name,
            optimizer_name=optimizer_name,
            load_weights=load_weights,
            hyperparameters_filename=FLAGS.load_hyperparams,
            **hyperparams)
        if problem_type2 is not None:
            print('\n---------------------\ntraining problem type2: ' +
                  str(problem_type2) + '\n---------------------')
            hypertree_train.run_training(
                problem_name=problem_type2,
                # feature_combo_name=feature_combo,
                hyperparams=hyperparams,
                dataset_name=dataset_name,
                optimizer_name=optimizer_name,
                hyperparameters_filename=FLAGS.load_hyperparams,
                **hyperparams)
Code example #5
import os

import pandas
import six
from tensorflow.python.platform import gfile
from tqdm import tqdm

# `FLAGS` and `hypertree_utilities` come from the original costar_plan
# ranking script (hyperopt_rank.py, per the comment in code example #2);
# gfile may equivalently be used as tf.gfile.
def main(_):
    csv_files = gfile.Glob(
        os.path.join(os.path.expanduser(FLAGS.log_dir), FLAGS.glob_csv))
    dataframe_list = []
    progress = tqdm(csv_files)
    for csv_file in progress:
        # progress.write('reading: ' + str(csv_file))
        try:
            dataframe = pandas.read_csv(csv_file, index_col=None, header=0)
            # add a filename column for this csv file's name
            dataframe['basename'] = os.path.basename(csv_file)
            dataframe['csv_filename'] = csv_file
            csv_dir = os.path.dirname(csv_file)
            hyperparam_filename = gfile.Glob(
                os.path.join(csv_dir, FLAGS.glob_hyperparams))

            # filter specific epochs
            if FLAGS.filter_epoch:
                dataframe = dataframe.loc[dataframe['epoch'] == FLAGS.epoch]

            if FLAGS.max_epoch is not None:
                dataframe = dataframe.loc[
                    dataframe['epoch'] <= FLAGS.max_epoch]

            if FLAGS.min_epoch is not None:
                dataframe = dataframe.loc[
                    dataframe['epoch'] >= FLAGS.min_epoch]

            # manage hyperparams
            if len(hyperparam_filename) > 1:
                progress.write(
                    'Unexpectedly got more than one hyperparam file match, '
                    'only keeping the first one: ' + str(hyperparam_filename))
            if hyperparam_filename:
                hyperparam_filename = hyperparam_filename[0]
            else:
                progress.write('No hyperparameters in directory, skipping: ' +
                               str(csv_dir))
                continue
            dataframe['hyperparameters_filename'] = hyperparam_filename
            if FLAGS.load_hyperparams and len(hyperparam_filename) > 0:
                hyperparams = hypertree_utilities.load_hyperparams_json(
                    hyperparam_filename)
                for key, val in six.iteritems(hyperparams):
                    dataframe[key] = val

            # accumulate the data
            dataframe_list.append(dataframe)
        except pandas.io.common.EmptyDataError:
            # Ignore empty files, it just means hyperopt got killed early.
            # (In newer pandas this exception lives at pandas.errors.EmptyDataError.)
            pass

    results_df = pandas.concat(dataframe_list, ignore_index=True)
    results_df = results_df.sort_values(FLAGS.sort_by,
                                        ascending=FLAGS.ascending,
                                        kind='mergesort')
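    # kind='mergesort' is a stable sort, so rows with equal sort keys keep
    # their previous relative order (the default quicksort is not stable).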
    if FLAGS.basename_contains is not None:
        # match rows where the basename contains the string specified in basename_contains
        results_df = results_df[results_df['basename'].str.contains(
            FLAGS.basename_contains)]
    # re-number the row indices according to the sorted order
    results_df = results_df.reset_index(drop=True)

    if FLAGS.filter_unique:
        results_df = results_df.drop_duplicates(subset='csv_filename')

    if FLAGS.print_results:
        with pandas.option_context('display.max_rows', None,
                                   'display.max_columns', None):
            print(results_df)

    if FLAGS.save_dir is None:
        FLAGS.save_dir = FLAGS.log_dir
    output_filename = os.path.join(FLAGS.save_dir, FLAGS.save_csv)
    results_df.to_csv(output_filename)
    print('Processing complete. Results saved to file: ' +
          str(output_filename))
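A hypothetical invocation of this ranking script (hyperopt_rank.py, per the comment in code example #2; the flag names are the ones read above, and the values are made up):

    python hyperopt_rank.py --log_dir ~/.keras/datasets/logs/hyperopt_logs_costar_grasp_regression \
        --glob_csv '*/*.csv' --sort_by val_grasp_acc --ascending=false \
        --save_csv hyperopt_rank.csv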