Example #1
def define_cnn_ops():
    global inc_noncol_gstep, inc_col_gstep
    global tf_optimize, tf_bump_optimize, tf_test_predictions, tf_bump_test_predictions
    global tf_loss, tf_bump_loss
    global tf_test_labels, tf_bump_test_labels

    noncol_global_step = tf.Variable(0, trainable=False)
    col_global_step = tf.Variable(0, trainable=False)

    inc_noncol_gstep = cnn_learner_naive.inc_gstep(noncol_global_step)
    inc_col_gstep = cnn_learner_naive.inc_gstep(col_global_step)

    tf_img_ids, tf_images, tf_labels = models_utils.build_input_pipeline(
        dataset_filenames['train_dataset'],
        config.BATCH_SIZE,
        shuffle=True,
        training_data=True,
        use_opposite_label=False,
        inputs_for_sdae=False)

    tf_bump_img_ids, tf_bump_images, tf_bump_labels = models_utils.build_input_pipeline(
        dataset_filenames['train_bump_dataset'],
        config.BATCH_SIZE,
        shuffle=True,
        training_data=True,
        use_opposite_label=True,
        inputs_for_sdae=False)

    tf_logits = cnn_learner_naive.logits(tf_images)
    tf_loss = cnn_learner_naive.calculate_loss(tf_logits, tf_labels)

    tf_bump_logits = cnn_learner_naive.logits(tf_bump_images)
    tf_bump_loss = cnn_learner_naive.calculate_loss(tf_bump_logits,
                                                    tf_bump_labels)

    tf_optimize, tf_grads_and_vars = cnn_learner_naive.cnn_optimizer.optimize_model_naive_no_momentum(
        tf_loss, noncol_global_step, tf.global_variables())
    tf_bump_optimize, _ = cnn_learner_naive.cnn_optimizer.optimize_model_naive_no_momentum(
        tf_bump_loss, col_global_step, tf.global_variables())

    tf_test_img_ids, tf_test_images, tf_test_labels = models_utils.build_input_pipeline(
        dataset_filenames['test_dataset'],
        config.BATCH_SIZE,
        shuffle=False,
        training_data=False,
        use_opposite_label=False,
        inputs_for_sdae=False)
    tf_bump_test_img_ids, tf_bump_test_images, tf_bump_test_labels = models_utils.build_input_pipeline(
        dataset_filenames['test_bump_dataset'],
        config.BATCH_SIZE,
        shuffle=False,
        training_data=False,
        use_opposite_label=True,
        inputs_for_sdae=False)

    tf_test_predictions = cnn_learner_naive.predictions_with_inputs(
        tf_test_images)
    tf_bump_test_predictions = cnn_learner_naive.predictions_with_inputs(
        tf_bump_test_images)
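A minimal sketch of driving the ops defined above from a training loop (hypothetical: it assumes an active tf.Session named sess, initialized variables, and started queue runners, as in the later examples):

def run_one_training_step(sess):
    # One optimization step on non-collision data, then one on bump (collision) data.
    l_noncol, _ = sess.run([tf_loss, tf_optimize])
    l_col, _ = sess.run([tf_bump_loss, tf_bump_optimize])
    return (l_noncol + l_col) / 2.0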
Example #2
def test_if_input_pipeline_images_makes_sense_in_terms_of_direction_to_turn():
    # with random_flipping on

    session = tf.InteractiveSession()

    dir_to_save = 'test_if_inputs_correct'
    if dir_to_save and not os.path.exists(dir_to_save):
        os.mkdir(dir_to_save)

    tf_images, tf_labels = {},{}
    tf_bump_images, tf_bump_labels = {},{}

    for di,direct in enumerate(['left','straight','right']):
        _, tf_images[direct], tf_labels[direct] = models_utils.build_input_pipeline(
            ['..' + os.sep + 'data_indoor_1_1000' + os.sep + 'image-direction-0-%d.tfrecords'%di], 5, shuffle=True,
            training_data=True, use_opposite_label=False, inputs_for_sdae=False)

        _, tf_bump_images[direct], tf_bump_labels[direct] = models_utils.build_input_pipeline(
            ['..' + os.sep + 'data_indoor_1_bump_200' + os.sep + 'image-direction-0-%d.tfrecords' % di], 5, shuffle=True,
            training_data=True, use_opposite_label=True, inputs_for_sdae=False)

    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(coord=coord, sess=session)

    img_indices = [0 for _ in range(3)]
    loggers = []
    for di, direct in enumerate(['left', 'straight', 'right']):
        dir_to_save_direct = 'test_if_inputs_correct' + os.sep + direct
        if dir_to_save_direct and not os.path.exists(dir_to_save_direct):
            os.mkdir(dir_to_save_direct)

        # Use a distinct logger name per direction; a single shared name would
        # attach all three file handlers to the same logger object.
        testLogger = logging.getLogger('TestLogger-' + direct)
        testLogger.setLevel(logging.INFO)
        testFH = logging.FileHandler(dir_to_save_direct + os.sep + 'test_labels.log', mode='w')
        testFH.setFormatter(logging.Formatter('%(message)s'))
        testFH.setLevel(logging.INFO)
        testLogger.addHandler(testFH)
        loggers.append(testLogger)

        for _ in range(10):
            imgs, labels = session.run([tf_images[direct], tf_labels[direct]])
            bump_imgs, bump_labels = session.run([tf_bump_images[direct], tf_bump_labels[direct]])

            for img, lbl in zip(imgs,labels):
                filename = dir_to_save_direct + os.sep + 'test_img_%d.jpg'%img_indices[di]
                img = (img - np.min(img))
                img /= np.max(img)
                imsave(filename,img)

                #loggers[di].info('%d , %s'%(img_indices[di],lbl.tolist()))
                img_indices[di] += 1

    coord.request_stop()
    coord.join(threads)
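imsave here is presumably scipy.misc.imsave, which was removed from newer SciPy releases; a drop-in fallback using imageio (an assumption, not part of the original module) would be:

import imageio
import numpy as np

def imsave(filename, img):
    # Mirrors scipy.misc.imsave for float input: clip to [0, 1], write 8-bit.
    imageio.imwrite(filename, (np.clip(img, 0.0, 1.0) * 255).astype(np.uint8))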
Example #3
def test_if_tensorflow_use_two_batches_to_calculate_two_things_in_same_session_dot_run():
    session = tf.InteractiveSession()

    # build_input_pipeline returns (img_ids, images, labels) when inputs_for_sdae=False
    _, tf_images, tf_labels = models_utils.build_input_pipeline(
        ['..' + os.sep + 'data_indoor_1_1000' + os.sep + 'image-direction-0-0.tfrecords',
         '..' + os.sep + 'data_indoor_1_1000' + os.sep + 'image-direction-0-2.tfrecords'],
        10, shuffle=True, training_data=True, use_opposite_label=False, inputs_for_sdae=False)

    def get_reduce_mean(tf_labels):
        return tf.reduce_mean(tf_labels,axis=[0])

    def get_identity(tf_labels):
        return tf_labels

    tf_red_mean = get_reduce_mean(tf_labels)
    tf_identity = get_identity(tf_labels)

    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(coord=coord, sess=session)

    for _ in range(10):
        red_mean, labels = session.run([tf_red_mean,tf_identity])

    coord.request_stop()
    coord.join(threads)

    print(red_mean)
    print(labels)
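The property this test relies on is that every fetch in a single session.run() call shares one dequeue from the input queue, while separate run() calls each pull a fresh batch. A self-contained TensorFlow 1.x sketch of the same behaviour:

import tensorflow as tf

q = tf.FIFOQueue(capacity=3, dtypes=tf.float32)
enqueue_op = q.enqueue_many(([1.0, 2.0, 3.0],))
x = q.dequeue()
doubled = x * 2.0  # reuses the same dequeued value within one run() call

with tf.Session() as s:
    s.run(enqueue_op)
    print(s.run([x, doubled]))  # one dequeue: [1.0, 2.0]
    print(s.run(x))             # a new dequeue: 2.0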
Example #4
def define_input_pipeline(dataset_filenames, shuffle):

    tf_img_ids, tf_images, tf_labels = models_utils.build_input_pipeline(
        dataset_filenames, config.BATCH_SIZE, shuffle=shuffle,
        training_data=True, use_opposite_label=False, inputs_for_sdae=False)

    return tf_img_ids, tf_images, tf_labels
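A hypothetical call site for the helper above (it assumes dataset_filenames is the same dict of TFRecord path lists used throughout these examples):

tf_img_ids, tf_images, tf_labels = define_input_pipeline(
    dataset_filenames['train_dataset'], shuffle=True)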
Example #5
def train_cnn_multiple_epochs(sess,
                              n_epochs,
                              test_interval,
                              dataset_filenames_dict,
                              dataset_size_dict,
                              train_fraction=1.0,
                              valid_fraction=1.0):

    n_print_prediction_steps = 10

    noncol_global_step = tf.Variable(0, trainable=False)
    inc_noncol_gstep = inc_gstep(noncol_global_step)

    tf_img_ids, tf_images, tf_labels = {}, {}, {}
    tf_loss, tf_logits = {}, {}
    tf_bump_loss, tf_bump_logits = {}, {}
    tf_optimize, tf_mom_update_ops, tf_grads = {}, {}, {}
    tf_bump_optimize, tf_bump_mom_update_ops, tf_bump_grads = {}, {}, {}
    tf_mock_labels = tf.placeholder(shape=[batch_size, 1], dtype=tf.float32)
    tf_grads_and_vars = {}
    tf_train_predictions = {}

    for direction in config.TF_DIRECTION_LABELS:

        tf_img_ids[direction], tf_images[direction], tf_labels[
            direction] = models_utils.build_input_pipeline(
                dataset_filenames_dict['train_dataset'][direction],
                batch_size,
                shuffle=True,
                training_data=False,
                use_opposite_label=False,
                inputs_for_sdae=False,
                rand_valid_direction_for_bump=False)

        tf_logits[direction] = logits(tf_images[direction], direction)
        temp = list(config.TF_DIRECTION_LABELS)
        temp.remove(direction)

        tf_bump_logits[direction], tf_bump_loss[direction] = {}, {}
        tf_bump_optimize[direction], tf_bump_mom_update_ops[direction] = {}, {}

        # =================================================
        # Defining Optimization for Opposite Direction
        # =================================================
        for opp_direction in temp:
            bump_var_list = []
            for v in tf.global_variables():

                if opp_direction in v.name and config.TF_MOMENTUM_STR not in v.name:
                    print(v.name)
                    bump_var_list.append(v)

            tf_bump_logits[direction][opp_direction] = logits(
                tf_images[direction], opp_direction)
            tf_bump_loss[direction][opp_direction] = calculate_loss(
                tf_bump_logits[direction][opp_direction], tf_mock_labels)
            tf_bump_optimize[direction][
                opp_direction], _ = cnn_optimizer.optimize_model_naive_no_momentum(
                    tf_bump_loss[direction][opp_direction],
                    noncol_global_step,
                    varlist=bump_var_list)

        tf_train_predictions[direction] = predictions_with_inputs(
            tf_images[direction])

        tf_loss[direction] = calculate_loss(tf_logits[direction],
                                            tf_mock_labels)

        var_list = []
        for v in tf.global_variables():

            if direction in v.name and config.TF_MOMENTUM_STR not in v.name:
                print(v.name)
                var_list.append(v)

        tf_optimize[direction], tf_grads_and_vars[
            direction] = cnn_optimizer.optimize_model_naive_no_momentum(
                tf_loss[direction], noncol_global_step, varlist=var_list)

    tf_valid_img_ids, tf_valid_images, tf_valid_labels = models_utils.build_input_pipeline(
        dataset_filenames_dict['valid_dataset'],
        batch_size,
        shuffle=True,
        training_data=False,
        use_opposite_label=False,
        inputs_for_sdae=False,
        rand_valid_direction_for_bump=False)
    tf_valid_predictions = predictions_with_inputs(tf_valid_images)

    tf_test_img_ids, tf_test_images, tf_test_labels = \
        models_utils.build_input_pipeline(dataset_filenames_dict['test_dataset'], batch_size,
                                          shuffle=False, training_data=False, use_opposite_label=False,
                                          inputs_for_sdae=False, rand_valid_direction_for_bump=False)

    tf_test_predictions = predictions_with_inputs(tf_test_images)

    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(coord=coord, sess=sess)

    tf.global_variables_initializer().run(session=sess)

    max_valid_accuracy = 0
    n_valid_saturated = 0
    valid_saturate_threshold = 3

    for epoch in range(n_epochs):

        print('=' * 80)
        print('Epoch ', epoch)
        print('=' * 80)

        avg_loss = []
        avg_train_accuracy = []

        # Training with Non-Bump Data
        for step in range(
                int(train_fraction * dataset_size_dict['train_dataset']) //
                batch_size):

            rand_direction = np.random.choice(config.TF_DIRECTION_LABELS)
            temp = list(config.TF_DIRECTION_LABELS)
            temp.remove(rand_direction)

            l1_noncol, _, pred, train_labels = \
                sess.run([tf_loss[rand_direction], tf_optimize[rand_direction],
                          tf_train_predictions[rand_direction], tf_labels[rand_direction]],
                         feed_dict={tf_mock_labels: np.ones(shape=(batch_size, 1), dtype=np.float32)})

            # Try negative suppression twice
            for _ in range(2):
                if 'hard-left' == rand_direction:
                    new_rand_direction = np.random.choice(
                        temp, p=[0.3, 0.3, 0.2, 0.2])
                elif 'soft-left' == rand_direction:
                    new_rand_direction = np.random.choice(
                        temp, p=[0.25, 0.25, 0.25, 0.25])
                elif 'soft-right' == rand_direction:
                    new_rand_direction = np.random.choice(
                        temp, p=[0.25, 0.25, 0.25, 0.25])
                elif 'hard-right' == rand_direction:
                    new_rand_direction = np.random.choice(
                        temp, p=[0.2, 0.2, 0.3, 0.3])
                else:
                    new_rand_direction = np.random.choice(temp)

                l1_col, _ = sess.run(
                    [
                        tf_bump_loss[rand_direction][new_rand_direction],
                        tf_bump_optimize[rand_direction][new_rand_direction],
                    ],
                    feed_dict={
                        tf_mock_labels:
                        np.zeros(shape=(batch_size, 1), dtype=np.float32)
                    })

            avg_loss.append((l1_col + l1_noncol) / 2.0)
            avg_train_accuracy.append(
                models_utils.accuracy(pred, train_labels, use_argmin=False))

            if step < n_print_prediction_steps:
                for p, lbl in zip(pred, train_labels):
                    is_correct = np.argmax(p) == np.argmax(lbl)
                    TrainLogger.info('\t%s;%s;%s', p, lbl, is_correct)

        logger.info('\tAverage Loss for Epoch %d: %.5f' %
                    (epoch, np.mean(avg_loss)))
        logger.info('\t\t Training accuracy: %.3f' %
                    np.mean(avg_train_accuracy))

        valid_accuracy = []
        for step in range(
                int(valid_fraction * dataset_size_dict['valid_dataset']) //
                batch_size):
            vpred, vlabels = sess.run([tf_valid_predictions, tf_valid_labels])
            valid_accuracy.append(
                models_utils.accuracy(vpred, vlabels, use_argmin=False))

            if step < n_print_prediction_steps:
                for p, lbl in zip(vpred, vlabels):
                    is_correct = np.argmax(p) == np.argmax(lbl)
                    ValidLogger.info('\t%s;%s;%s', p, lbl, is_correct)

        logger.info('\tValid Accuracy: %.3f', np.mean(valid_accuracy))

        if np.mean(valid_accuracy) > max_valid_accuracy:
            max_valid_accuracy = np.mean(valid_accuracy)
        else:
            n_valid_saturated += 1
            logger.info('Increase n_valid_saturated to %d', n_valid_saturated)

        if n_valid_saturated >= valid_saturate_threshold:
            logger.info('Stepping down collision learning rate')
            sess.run(inc_noncol_gstep)
            n_valid_saturated = 0

        if (epoch + 1) % test_interval == 0:

            test_results = test_the_model_5_way(sess, tf_test_labels,
                                                tf_test_predictions,
                                                dataset_size_dict)

            soft_test_accuracy = test_results['noncol-accuracy-soft']
            test_accuracy = test_results['noncol-accuracy-hard']
            test_noncol_precision = test_results['noncol-precision']
            test_noncol_recall = test_results['noncol-recall']

            noncol_precision_string = ''.join([
                '%.3f,' % test_noncol_precision[pi]
                for pi in range(config.TF_NUM_CLASSES)
            ])
            noncol_recall_string = ''.join([
                '%.3f,' % test_noncol_recall[ri]
                for ri in range(config.TF_NUM_CLASSES)
            ])

            SummaryLogger.info('%d;%.3f;%.3f;%.5f;%.5f;%s;%s', epoch,
                               np.mean(test_accuracy),
                               np.mean(soft_test_accuracy), np.mean(avg_loss),
                               -1, noncol_precision_string,
                               noncol_recall_string)

    coord.request_stop()
    coord.join(threads)
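inc_gstep is used above to step the learning rate down when validation accuracy saturates, but its definition is not shown; a minimal sketch consistent with that usage (the decay constants are assumptions):

def inc_gstep(global_step):
    # Running this op bumps the step, which lowers a decayed learning rate.
    return tf.assign_add(global_step, 1)

# e.g. each increment halves the rate an optimizer reads from this schedule:
gstep = tf.Variable(0, trainable=False)
learning_rate = tf.train.exponential_decay(
    0.001, gstep, decay_steps=1, decay_rate=0.5, staircase=True)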
Example #6
    accuracy_logger = logging.getLogger('AccuracyLogger')
    accuracy_logger.setLevel(logging.INFO)
    accuracyFH = logging.FileHandler(IMG_DIR + os.sep + 'accuracy.log', mode='w')
    accuracyFH.setFormatter(logging.Formatter('%(message)s'))
    accuracyFH.setLevel(logging.INFO)
    accuracy_logger.addHandler(accuracyFH)
    accuracy_logger.info('#Epoch:Accuracy:Accuracy(Soft):Bump Accuracy: Bump Accuracy (Soft)')

    batch_size = 25
    with sess.as_default(), graph.as_default():
        build_tensorflw_variables()
        models_utils.set_from_main(sess,graph,logger)

        global_step = tf.Variable(0,trainable=False)
        tf_images,tf_labels = models_utils.build_input_pipeline(dataset_filenames['train_dataset'], batch_size,shuffle=True,
                                                   training_data=True,use_opposite_label=False,inputs_for_sdae=True)
        tf_bump_images, tf_bump_labels = models_utils.build_input_pipeline(dataset_filenames['train_bump_dataset'], batch_size, shuffle=True,
                                                              training_data=True, use_opposite_label=True,inputs_for_sdae=True)
        tf_test_images,tf_test_labels = models_utils.build_input_pipeline(dataset_filenames['test_dataset'],batch_size,shuffle=False,
                                                             training_data=False,use_opposite_label=False,inputs_for_sdae=True)
        tf_bump_test_images, tf_bump_test_labels = models_utils.build_input_pipeline(dataset_filenames['test_bump_dataset'], batch_size, shuffle=False,
                                                              training_data=False,use_opposite_label=True,inputs_for_sdae=True)

        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord, sess=sess)

        tf_loss = calculate_loss(tf_images, tf_labels)
        tf_bump_loss = calculate_loss(tf_bump_images,tf_bump_labels)

        tf_optimize = optimize_model(tf_loss,global_step,increment_global_step=True)
        tf_bump_optimize = optimize_model(tf_bump_loss,global_step,increment_global_step=False)
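optimize_model is not shown either; given the shared global_step and the increment_global_step flag, a plausible sketch (an assumption, with a hypothetical learning rate) is:

def optimize_model(loss, global_step, increment_global_step=True):
    optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001)
    # Advance the step only for the non-bump update so the paired bump update
    # does not double-count against any step-based decay schedule.
    return optimizer.minimize(
        loss, global_step=global_step if increment_global_step else None)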
Example #7
def get_images_predictions_labels_3_way(main_dir, env):

    test_sub_dir = 'data-equal'
    test_dataset_filename = 'data-chunk-0.tfrecords'

    dataset_filenames_dict = {
        'test_dataset': ['..' + os.sep + env + os.sep + test_sub_dir + os.sep + test_dataset_filename],
        'test_bump_dataset': ['..' + os.sep + env + os.sep + test_sub_dir + os.sep + test_dataset_filename]}
    batch_size = 10

    configp = tf.ConfigProto(allow_soft_placement=True, log_device_placement=False)
    sess = tf.InteractiveSession(config=configp)

    tf_test_img_ids, tf_test_images, tf_test_labels = \
        models_utils.build_input_pipeline(dataset_filenames_dict['test_dataset'], batch_size,
                                          shuffle=False, training_data=False, use_opposite_label=False,
                                          inputs_for_sdae=False, rand_valid_direction_for_bump=False)
    tf_bump_test_img_ids, tf_bump_test_images, tf_bump_test_labels = models_utils.build_input_pipeline(
        dataset_filenames_dict['test_bump_dataset'], batch_size, shuffle=False,
        training_data=False, use_opposite_label=True, inputs_for_sdae=False, rand_valid_direction_for_bump=False)

    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(coord=coord, sess=sess)

    weights_filepath = main_dir + os.sep + config.WEIGHT_SAVE_DIR + os.sep + 'cnn-model-final.ckpt'
    hyperparam_filepath = main_dir + os.sep + config.WEIGHT_SAVE_DIR + os.sep + 'hyperparams-final.pickle'

    # saver = tf.train.import_meta_graph(weights_filepath+'.meta')
    if config.ACTIVATION_MAP_DIR and not os.path.exists(main_dir + os.sep + config.ACTIVATION_MAP_DIR):
        os.mkdir(main_dir + os.sep + config.ACTIVATION_MAP_DIR)

    with sess.as_default():
        cnn_model_visualizer.cnn_create_variables_multiple_with_scope_size_stride(hyperparam_filepath)
        saver = tf.train.Saver()
        print('Restoring weights from file: ', weights_filepath)
        saver.restore(sess, weights_filepath)

        v_list = sess.run(tf.global_variables())
        print('Restored following variables')
        print('=' * 80)
        print([v.name for v in tf.global_variables()])
        print('=' * 80)

        tf_predictions = cnn_learner_multiple.predictions_with_inputs(tf_test_images)

        all_test_images, all_predictions, all_labels = None, None, None
        for step in range(50):
            np_test_images, predictions, actuals = sess.run([tf_test_images, tf_predictions, tf_test_labels])
            if all_test_images is None and all_predictions is None:
                all_test_images = np.asarray(normalize_image_batch(np_test_images))
                all_predictions = np.asarray(predictions)
                all_labels = np.asarray(actuals)
            else:
                all_test_images = np.append(all_test_images, normalize_image_batch(np_test_images), axis=0)
                all_predictions = np.append(all_predictions, predictions, axis=0)
                all_labels = np.append(all_labels, actuals, axis=0)

    print('Got data of following sizes')
    print(all_test_images.shape)
    print(all_predictions.shape)
    print(all_labels.shape)
    return all_test_images, all_predictions, all_labels
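normalize_image_batch is referenced but not defined in this snippet; a minimal per-image min-max normalization matching its use here and the normalization in Example #2 (an assumption about the real helper):

def normalize_image_batch(images):
    normed = []
    for img in images:
        img = img - np.min(img)
        normed.append(img / np.max(img))
    return np.asarray(normed)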
Example #8
def train_using_all_data():

    dataset_filenames = {
        'train_dataset': ['..' + os.sep + 'data_indoor_1_1000' + os.sep + 'image-direction-shuffled.tfrecords'],
        'train_bump_dataset': [
            '..' + os.sep + 'data_indoor_1_bump_200' + os.sep + 'image-direction-shuffled.tfrecords'],
        'test_dataset': ['..' + os.sep + 'data_grande_salle_1000' + os.sep + 'image-direction-shuffled.tfrecords'],
        'test_bump_dataset': [
            '..' + os.sep + 'data_grande_salle_bump_200' + os.sep + 'image-direction-shuffled.tfrecords']
    }

    dataset_sizes = {'train_dataset': 1000 + 1000,
                     'train_bump_dataset': 400,
                     'test_dataset': 1000,
                     'test_bump_dataset': 200}

    with sess.as_default(), graph.as_default():
        cnn_variable_initializer.set_from_main(sess, graph)
        cnn_variable_initializer.build_tensorflw_variables_naive()
        models_utils.set_from_main(sess, graph, logger)

        all_train_files, all_bump_files = [], []
        all_test_files, all_bump_test_files = [], []

        all_train_files = dataset_filenames['train_dataset']
        all_bump_files = dataset_filenames['train_bump_dataset']
        all_test_files = dataset_filenames['test_dataset']
        all_bump_test_files = dataset_filenames['test_bump_dataset']

        tf_img_ids, tf_images, tf_labels = models_utils.build_input_pipeline(
            all_train_files, config.BATCH_SIZE, shuffle=True,
            training_data=True, use_opposite_label=False, inputs_for_sdae=False)

        tf_bump_img_ids, tf_bump_images, tf_bump_labels = models_utils.build_input_pipeline(
            all_bump_files, config.BATCH_SIZE, shuffle=True,
            training_data=True, use_opposite_label=True, inputs_for_sdae=False)

        tf_test_img_ids, tf_test_images, tf_test_labels = models_utils.build_input_pipeline(all_test_files,
                                                                                            config.BATCH_SIZE,
                                                                                            shuffle=False,
                                                                                            training_data=False,
                                                                                            use_opposite_label=False,
                                                                                            inputs_for_sdae=False)
        tf_bump_test_img_ids, tf_bump_test_images, tf_bump_test_labels = models_utils.build_input_pipeline(
            all_bump_test_files, config.BATCH_SIZE, shuffle=False,
            training_data=False, use_opposite_label=True, inputs_for_sdae=False)

        print('\t\tAverage test accuracy: %.5f ' % np.mean(test_accuracy))
        print('\t\tAverage test accuracy(soft): %.5f' % np.mean(soft_test_accuracy))
        print('\t\tAverage test precision: %s' % test_noncol_precision)
        print('\t\tAverage test recall: %s' % test_noncol_recall)

        print('\t\tAverage bump test accuracy: %.5f ' % np.mean(bump_test_accuracy))
        print('\t\tAverage bump test (soft) accuracy: %.5f ' % np.mean(bump_soft_accuracy))
        print('\t\tAverage test bump precision: %s' % test_col_precision)
        print('\t\tAverage test bump recall: %s' % test_col_recall)

        noncol_precision_string = ''.join(['%.3f;' % test_noncol_precision[pi] for pi in range(3)])
        noncol_recall_string = ''.join(['%.3f;' % test_noncol_recall[ri] for ri in range(3)])
        col_precision_string = ''.join(['%.3f;' % test_col_precision[pi] for pi in range(3)])
        col_recall_string = ''.join(['%.3f;' % test_col_recall[ri] for ri in range(3)])

        SummaryLogger.info('%d;%.3f;%.3f;%.3f;%.3f;%.5f;%.5f;%s;%s;%s;%s', epoch, np.mean(test_accuracy),
                             np.mean(soft_test_accuracy),
                             np.mean(bump_test_accuracy), np.mean(bump_soft_accuracy), np.mean(avg_loss),
                             np.mean(avg_bump_loss),
                             noncol_precision_string, noncol_recall_string, col_precision_string, col_recall_string)
Example #9
def loop_through_by_using_every_dataset_as_holdout_dataset(main_dir, n_epochs):
    global min_thresh,max_thresh

    hold_out_list = ['apartment-my1-2000', 'apartment-my2-2000', 'apartment-my3-2000',
                     'indoor-1-2000', 'indoor-1-my1-2000', 'grande_salle-my1-2000',
                     'grande_salle-my2-2000', 'sandbox-2000']

    for hold_index, hold_name in enumerate(hold_out_list):
        sub_dir = main_dir + os.sep + hold_name

        if not os.path.exists(sub_dir):
            os.mkdir(sub_dir)
        print_start_of_new_input_pipline_to_all_loggers('Using %s as holdout set' % hold_name)

        dataset_filenames, dataset_sizes = dataset_name_factory.new_get_train_test_data_with_holdout(hold_index)

        dsize = dataset_sizes['train_dataset']

        validdsize = dataset_sizes['valid_dataset']
        testdsize = dataset_sizes['test_dataset']
        bumptestdsize = dataset_sizes['test_bump_dataset']

        with open(sub_dir + os.sep + 'dataset_filenames_and_sizes.txt', 'w') as f:
            for k, v in dataset_filenames.items():
                f.write(str(k) + ":" + str(v))
                f.write('\n')

            for k, v in dataset_sizes.items():
                f.write(str(k) + ":" + str(v))
                f.write('\n')

        tf.reset_default_graph()
        configp = tf.ConfigProto(allow_soft_placement=True, log_device_placement=False)
        sess = tf.InteractiveSession(config=configp)

        tf_img_ids, tf_images, tf_labels = {},{},{}
        for direct in ['left','straight','right']:
            tf_img_ids[direct], tf_images[direct], tf_labels[direct] = models_utils.build_input_pipeline(
                dataset_filenames['train_dataset'][direct], config.BATCH_SIZE, shuffle=True,
                training_data=True, use_opposite_label=False, inputs_for_sdae=False, rand_valid_direction_for_bump=False)

        tf_valid_img_ids, tf_valid_images, tf_valid_labels = models_utils.build_input_pipeline(
            dataset_filenames['valid_dataset'], config.BATCH_SIZE, shuffle=True,
            training_data=True, use_opposite_label=False, inputs_for_sdae=False, rand_valid_direction_for_bump=False)

        tf_test_img_ids, tf_test_images, tf_test_labels = models_utils.build_input_pipeline(
            dataset_filenames['test_dataset'],
            config.BATCH_SIZE,
            shuffle=False,
            training_data=False,
            use_opposite_label=False,
            inputs_for_sdae=False, rand_valid_direction_for_bump=False)

        tf_bump_test_img_ids, tf_bump_test_images, tf_bump_test_labels = models_utils.build_input_pipeline(
            dataset_filenames['test_bump_dataset'], config.BATCH_SIZE, shuffle=False,
            training_data=False, use_opposite_label=True, inputs_for_sdae=False, rand_valid_direction_for_bump=False)

        with sess.as_default():
            cnn_variable_initializer.set_from_main(sess)
            cnn_variable_initializer.build_tensorflw_variables_naive()
            models_utils.set_from_main(sess, logger)

            output_activation = 'sigmoid'

            if output_activation == 'sigmoid':
                max_thresh = 0.6
                min_thresh = 0.4
            else:
                raise NotImplementedError

            train_results, test_results = train_with_non_collision(
                sess, tf_images, tf_labels, dsize,
                tf_valid_images, tf_valid_labels, validdsize,
                tf_test_images, tf_test_labels, tf_test_img_ids, testdsize,
                tf_bump_test_images, tf_bump_test_labels, tf_bump_test_img_ids, bumptestdsize,
                n_epochs, test_interval, sub_dir,
                include_bump_test_data=True, use_cross_entropy=True, activation=output_activation
            )

            sess.close()
            tf.reset_default_graph()
            graph = tf.Graph()
            configp = tf.ConfigProto(allow_soft_placement=True, log_device_placement=False)
            sess = tf.InteractiveSession(graph=graph, config=configp)

        ep_hyp_dict = make_dict_with_hyperparameters()
        pretty_print_hyperparameters(logger)

        with sess.as_default(), graph.as_default():
            cnn_variable_initializer.set_from_main(sess)
            cnn_variable_initializer.build_tensorflw_variables_detached()
            models_utils.set_from_main(sess, logger)

            tf_train_img_ids, tf_train_images, tf_train_labels = cnn_learner_detached.define_input_pipeline(dataset_filenames)
            tf_test_img_ids, tf_test_images, tf_test_labels = models_utils.build_input_pipeline(
                dataset_filenames['test_dataset'], config.BATCH_SIZE, shuffle=True,
                training_data=False, use_opposite_label=False, inputs_for_sdae=False)

            cnn_learner_detached.define_tf_ops(tf_train_images, tf_train_labels, tf_test_images, tf_test_labels)

            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(coord=coord, sess=sess)

            tf.global_variables_initializer().run(session=sess)

            dataset_filenames, dataset_sizes = dataset_name_factory.new_get_noncol_train_data_sorted_by_direction_noncol_test_data()

        for loc_ep in range(epochs_per_search):

            for step in range(dataset_sizes['train_dataset']//config.BATCH_SIZE):
                l1, _ = sess.run([cnn_learner_detached.tf_loss, cnn_learner_detached.tf_optimize],