def train_predictions_with_one_environment_data(env_idx, data_gen, dataset_sizes, sess):
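    """Run the prediction ops over one environment's training data and
    return the hard and soft accuracies averaged across its batches."""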
    global logger
    global tf_train_predictions, tf_train_labels

    avg_accuracy, avg_soft_accuracy = [], []

    for step in range(dataset_sizes['train_dataset'][env_idx] // config.BATCH_SIZE):

        tr_img_id, tr_images, tr_labels = data_gen.sample_a_batch_from_data(env_idx, shuffle=False)

        feed_dict = {data_gen.tf_image_ph: tr_images, data_gen.tf_label_ph: tr_labels}
        train_predictions, train_actuals = sess.run([tf_train_predictions, tf_train_labels], feed_dict=feed_dict)

        assert train_predictions.shape[0] == train_actuals.shape[0]
        avg_soft_accuracy.append(
            models_utils.soft_accuracy(
                train_predictions, train_actuals,
                use_argmin=False, max_thresh=max_thresh,
                min_thresh=min_thresh
            )
        )

        avg_accuracy.append(models_utils.accuracy(train_predictions, train_actuals, use_argmin=False))

        if step < 2:
            logger.debug('Predictions for Non-Collided data')
            for pred, lbl in zip(train_predictions, train_actuals):
                logger.debug('\t%s;%s', pred, lbl)

    return {'accuracy': np.mean(avg_accuracy), 'soft_accuracy': np.mean(avg_soft_accuracy)}


def test_predictions_with_one_environment_data(test_env_idx, test_data_gen, sess):
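    """Run the prediction ops over one environment's test data and return
    hard/soft accuracy and per-direction precision/recall.

    Reads several module-level names: dataset_sizes, max_thresh, min_thresh,
    epoch, logger, predictionlogger and testPredictionLogger.
    """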
    global tf_test_predictions, tf_test_labels

    test_accuracy = []  # accuracy by argmax
    soft_test_accuracy = []  # accuracy when the prediction entry at the label's non-zero entry exceeds max_thresh
    all_predictions, all_labels, all_img_ids, all_images = None, None, None, None
    test_image_index = 0

    for step in range(dataset_sizes['test_dataset'][test_env_idx] // config.BATCH_SIZE):

        ts_img_id, ts_images, ts_labels = test_data_gen.sample_a_batch_from_data(test_env_idx, shuffle=False)

        feed_dict = {test_data_gen.tf_image_ph: ts_images,
                     test_data_gen.tf_label_ph: ts_labels}

        predicted_labels, test_actuals = sess.run([tf_test_predictions, tf_test_labels],
                                                  feed_dict=feed_dict
                                                  )

        test_accuracy.append(models_utils.accuracy(predicted_labels, test_actuals, use_argmin=False))
        soft_test_accuracy.append(
            models_utils.soft_accuracy(predicted_labels, test_actuals, use_argmin=False, max_thresh=max_thresh,
                                       min_thresh=min_thresh))

        if all_predictions is None or all_labels is None:
            all_predictions = predicted_labels
            all_labels = test_actuals
            all_img_ids = ts_img_id
            all_images = ts_images

        else:
            all_predictions = np.append(all_predictions, predicted_labels, axis=0)
            all_labels = np.append(all_labels, test_actuals, axis=0)
            all_img_ids = np.append(all_img_ids, ts_img_id, axis=0)
            all_images = np.append(all_images, ts_images, axis=0)

        if step < 2:
            logger.debug('Test Predictions (Non-Collisions)')
            for pred, act in zip(predicted_labels, test_actuals):
                pred_string = ''.join(['%.3f,' % p for p in pred.tolist()])
                act_string = ''.join(['%.1f,' % a for a in act.tolist()])
                predictionlogger.info('%d:%s:%s', test_image_index, act_string, pred_string)
                logger.debug('%d:%s:%s', test_image_index, act_string, pred_string)
                test_image_index += 1

    # Write predictions to log file
    testPredictionLogger.info('Predictions for Non-Collisions (Epoch %d)', epoch)
    test_image_index = 0
    for pred, act in zip(all_predictions, all_labels):
        pred_string = ''.join(['%.3f' % p + ',' for p in pred.tolist()])
        act_string = ''.join(['%.3f' % a + ',' for a in act.tolist()])
        testPredictionLogger.info('%d:%s:%s', test_image_index, act_string, pred_string)
        test_image_index += 1
    testPredictionLogger.info('\n')

    # Calculating precision and recall
    test_noncol_precision = models_utils.precision_multiclass(
        all_predictions, all_labels, use_argmin=False,
        max_thresh=max_thresh, min_thresh=min_thresh
    )
    test_noncol_recall = models_utils.recall_multiclass(
        all_predictions, all_labels, use_argmin=False,
        max_thresh=max_thresh, min_thresh=min_thresh
    )

    return {'accuracy': np.mean(test_accuracy), 'soft_accuracy': np.mean(soft_test_accuracy),
            'precision': test_noncol_precision, 'recall': test_noncol_recall}
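
# Note: models_utils.soft_accuracy itself is not shown in these examples.
# Based on the inline comment above, a minimal sketch of its likely behaviour
# follows; the sketch's name, the use_argmin handling and the 0-1 return scale
# are all assumptions, not the library's actual implementation.
import numpy as np

def soft_accuracy_sketch(predictions, labels, use_argmin, max_thresh, min_thresh):
    # Non-collision data (use_argmin=False): a sample counts as correct when
    # the prediction at the label's hot (argmax) entry exceeds max_thresh.
    # Collision data (use_argmin=True): it counts when the prediction at the
    # label's argmin entry falls below min_thresh.
    correct = 0
    for pred, lbl in zip(predictions, labels):
        if use_argmin:
            correct += int(pred[np.argmin(lbl)] < min_thresh)
        else:
            correct += int(pred[np.argmax(lbl)] > max_thresh)
    return float(correct) / predictions.shape[0]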
Example #3
def test_the_model_5_way(sess, tf_test_labels, tf_test_predictions,
                         dataset_size_dict):
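    """Evaluate the model on the whole test input pipeline and return a dict
    of hard/soft accuracy plus per-direction precision and recall.

    Relies on module-level batch_size, max_thresh, min_thresh, TestLogger
    and logger.
    """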
    test_accuracy = []
    soft_test_accuracy = []
    all_predictions, all_labels = None, None
    test_image_index = 0
    for step in range(dataset_size_dict['test_dataset'] // batch_size):
        predicted_labels, actual_labels = sess.run(
            [tf_test_predictions, tf_test_labels])

        test_accuracy.append(
            models_utils.accuracy(predicted_labels,
                                  actual_labels,
                                  use_argmin=False))
        soft_test_accuracy.append(
            models_utils.soft_accuracy(predicted_labels,
                                       actual_labels,
                                       use_argmin=False,
                                       max_thresh=max_thresh,
                                       min_thresh=min_thresh))

        if all_predictions is None or all_labels is None:
            all_predictions = predicted_labels
            all_labels = actual_labels
        else:
            all_predictions = np.append(all_predictions,
                                        predicted_labels,
                                        axis=0)
            all_labels = np.append(all_labels, actual_labels, axis=0)

        if step < 10:
            for pred, act in zip(predicted_labels, actual_labels):
                pred_string = ''.join(
                    ['%.3f' % p + ',' for p in pred.tolist()])
                act_string = ''.join([str(int(a)) + ',' for a in act.tolist()])
                is_correct = np.argmax(pred) == np.argmax(act)
                TestLogger.info('%s:%s:%s', act_string, pred_string,
                                is_correct)
                if step < 5:
                    logger.debug('%s:%s:%s', act_string, pred_string,
                                 is_correct)

    print_start_of_new_input_pipline_to_some_logger(
        TestLogger, 'Accuracy for Above: %.3f (Hard) %.3f (Soft)' %
        (np.mean(test_accuracy), np.mean(soft_test_accuracy)))

    test_noncol_precision = models_utils.precision_multiclass(
        all_predictions,
        all_labels,
        use_argmin=False,
        max_thresh=max_thresh,
        min_thresh=min_thresh)
    test_noncol_recall = models_utils.recall_multiclass(all_predictions,
                                                        all_labels,
                                                        use_argmin=False,
                                                        max_thresh=max_thresh,
                                                        min_thresh=min_thresh)
    TestLogger.info('\n')
    print('\t\tAverage test accuracy: %.5f ' % np.mean(test_accuracy))
    print('\t\tAverage test accuracy(soft): %.5f' %
          np.mean(soft_test_accuracy))
    print('\t\tAverage test precision: %s' % test_noncol_precision)
    print('\t\tAverage test recall: %s' % test_noncol_recall)

    test_results = {}
    test_results['noncol-accuracy-hard'] = np.mean(test_accuracy)
    test_results['noncol-accuracy-soft'] = np.mean(soft_test_accuracy)
    test_results['noncol-precision'] = test_noncol_precision
    test_results['noncol-recall'] = test_noncol_recall

    return test_results
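
# Hypothetical usage (the dataset size below is an assumed placeholder); call
# once the test input pipeline and prediction ops exist in the default graph:
#   results = test_the_model_5_way(sess, tf_test_labels, tf_test_predictions,
#                                  {'test_dataset': 1000})
#   logger.info('hard=%.3f soft=%.3f', results['noncol-accuracy-hard'],
#               results['noncol-accuracy-soft'])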
Example #4
        tf_optimize = optimize_model(tf_loss, global_step, increment_global_step=True)
        tf_bump_optimize = optimize_model(tf_bump_loss, global_step, increment_global_step=False)
        tf_train_predictions = predictions_with_inputs(tf_images)

        tf_test_predictions = predictions_with_inputs(tf_test_images)
        tf_bump_test_predictions = predictions_with_inputs(tf_bump_test_images)

        tf.global_variables_initializer().run()

        for epoch in range(50):
            avg_loss = []
            avg_train_accuracy = []
            for step in range(dataset_sizes['train_dataset'] // batch_size):
                l1, _, pred, train_labels = sess.run([tf_loss, tf_optimize, tf_train_predictions, tf_labels])
                avg_loss.append(l1)
                avg_train_accuracy.append(models_utils.soft_accuracy(pred, train_labels, use_argmin=False))
            # Average over the epoch's recorded batch losses (not just the last batch).
            print('\tAverage Loss for Epoch %d: %.5f' % (epoch, np.mean(avg_loss)))
            print('\t\t Training accuracy: %.3f' % np.mean(avg_train_accuracy))
            if (epoch + 1) % 3 == 0:
                for step in range(dataset_sizes['train_bump_dataset'] // batch_size):
                    sess.run(tf_bump_optimize)
            if (epoch + 1) % 5 == 0:
                test_accuracy = []
                soft_test_accuracy = []
                test_image_index = 0
                for step in range(dataset_sizes['test_dataset'] // batch_size):
                    predicted_labels, actual_labels = sess.run([tf_test_predictions, tf_test_labels])
                    test_accuracy.append(models_utils.accuracy(predicted_labels, actual_labels, use_argmin=False))
                    soft_test_accuracy.append(models_utils.soft_accuracy(predicted_labels, actual_labels, use_argmin=False))

def test_the_model(sess, tf_test_img_ids, tf_test_images, tf_test_labels,
                   tf_test_predictions, test_dataset_size,
                   tf_bump_test_img_ids, tf_bump_test_images, tf_bump_test_labels,
                   tf_bump_test_predictions, test_bump_dataset_size, epoch, sub_folder, include_bump_data):
    '''
    Test the trained CNN by predicting on all test non-collision and collision data.
    Specifically, this function:
      - calculates accuracies (collision and non-collision, hard and soft)
      - calculates precision and recall (collision and non-collision, per direction)
      - saves images categorized by the predicted navigation direction

    :param tf_test_img_ids:
    :param tf_test_images:
    :param tf_test_labels:
    :param tf_test_predictions:
    :param test_dataset_size:
    :param tf_bump_test_img_ids:
    :param tf_bump_test_images:
    :param tf_bump_test_labels:
    :param tf_bump_test_predictions:
    :param test_bump_dataset_size:
    :param epoch:
    :return: dict with hard/soft accuracy (non-collision and collision) and
        per-direction precision and recall (non-collision and collision)
    '''

    test_results = {}
    test_accuracy = []
    soft_test_accuracy = []
    bump_test_accuracy = []
    bump_soft_accuracy = []

    all_predictions, all_labels, all_img_ids, all_images = None, None, None, None
    all_bump_predictions, all_bump_labels, all_bump_img_ids, all_bump_images = None, None, None, None

    test_image_index = 0
    for step in range(test_dataset_size // config.BATCH_SIZE):
        predicted_labels, actual_labels, test_ids, test_images = sess.run(
            [tf_test_predictions, tf_test_labels, tf_test_img_ids, tf_test_images])

        test_accuracy.append(models_utils.accuracy(predicted_labels, actual_labels, use_argmin=False))
        soft_test_accuracy.append(
            models_utils.soft_accuracy(predicted_labels, actual_labels, use_argmin=False, max_thresh=max_thresh,
                                       min_thresh=min_thresh))

        if all_predictions is None or all_labels is None:
            all_predictions = predicted_labels
            all_labels = actual_labels
            all_img_ids = test_ids
            all_images = test_images
        else:
            all_predictions = np.append(all_predictions, predicted_labels, axis=0)
            all_labels = np.append(all_labels, actual_labels, axis=0)
            all_img_ids = np.append(all_img_ids, test_ids, axis=0)
            all_images = np.append(all_images, test_images, axis=0)

        if step < 5:
            logger.debug('Test Predictions (Non-Collisions)')
        for pred, act in zip(predicted_labels, actual_labels):
            pred_string = ''.join(['%.3f,' % p for p in pred.tolist()])
            act_string = ''.join([str(int(a)) + ',' for a in act.tolist()])
            is_correct = np.argmax(pred) == np.argmax(act)
            TestPredictionLogger.info('%d:%s:%s:%s', test_image_index, act_string, pred_string, is_correct)

            if step < 5:
                logger.debug('%d:%s:%s', test_image_index, act_string, pred_string)
            test_image_index += 1

        TestPredictionLogger.info('\n')

    print('\t\tAverage test accuracy: %.5f ' % np.mean(test_accuracy))
    print('\t\tAverage test accuracy(soft): %.5f' % np.mean(soft_test_accuracy))

    if include_bump_data:
        for step in range(test_bump_dataset_size // config.BATCH_SIZE):
            bump_predicted_labels, bump_actual_labels, bump_test_ids, bump_test_images = sess.run(
                [tf_bump_test_predictions, tf_bump_test_labels, tf_bump_test_img_ids, tf_bump_test_images])
            bump_test_accuracy.append(
                models_utils.accuracy(bump_predicted_labels, bump_actual_labels, use_argmin=True))
            bump_soft_accuracy.append(
                models_utils.soft_accuracy(bump_predicted_labels, bump_actual_labels, use_argmin=True,
                                           max_thresh=max_thresh, min_thresh=min_thresh))

            if all_bump_predictions is None or all_bump_labels is None:
                all_bump_predictions = bump_predicted_labels
                all_bump_labels = bump_actual_labels
                all_bump_img_ids = bump_test_ids
                all_bump_images = bump_test_images
            else:
                all_bump_predictions = np.append(all_bump_predictions, bump_predicted_labels, axis=0)
                all_bump_labels = np.append(all_bump_labels, bump_actual_labels, axis=0)
                all_bump_img_ids = np.append(all_bump_img_ids, bump_test_ids, axis=0)
                all_bump_images = np.append(all_bump_images, bump_test_images, axis=0)

        print('\t\tAverage bump test accuracy: %.5f ' % np.mean(bump_test_accuracy))
        print('\t\tAverage bump test (soft) accuracy: %.5f ' % np.mean(bump_soft_accuracy))


        # NOTE: `step` here still holds its final value from the bump loop
        # above, so this guard only triggers for very small bump test sets.
        if step < 5:
            logger.debug('Test Predictions (Collisions)')
        test_image_index = 0
        for pred, act in zip(all_bump_predictions, all_bump_labels):
            bpred_string = ''.join(['%.3f,' % p for p in pred.tolist()])
            bact_string = ''.join([str(int(a)) + ',' for a in act.tolist()])
            is_correct = np.argmin(pred) == np.argmin(act)
            TestBumpPredictionLogger.info('%d:%s:%s:%s', test_image_index, bact_string, bpred_string, is_correct)
            if step < 5:
                logger.debug('%d:%s:%s', test_image_index, bact_string, bpred_string)
            test_image_index += 1

        TestBumpPredictionLogger.info('\n')

        test_col_precision = models_utils.precision_multiclass(all_bump_predictions, all_bump_labels,
                                                               use_argmin=True,
                                                               max_thresh=max_thresh, min_thresh=min_thresh)
        test_col_recall = models_utils.recall_multiclass(all_bump_predictions, all_bump_labels, use_argmin=True,
                                                         max_thresh=max_thresh, min_thresh=min_thresh)

        print('\t\tAverage test bump precision: %s' % test_col_precision)
        print('\t\tAverage test bump recall: %s' % test_col_recall)


    test_noncol_precision = models_utils.precision_multiclass(all_predictions, all_labels, use_argmin=False,
                                                              max_thresh=max_thresh, min_thresh=min_thresh)
    test_noncol_recall = models_utils.recall_multiclass(all_predictions, all_labels, use_argmin=False,
                                                        max_thresh=max_thresh, min_thresh=min_thresh)

    print('\t\tAverage test precision: %s' % test_noncol_precision)
    print('\t\tAverage test recall: %s' % test_noncol_recall)

    predicted_hard_ids_sorted, predicted_bump_hard_ids_sorted = {}, {}
    predicted_hard_ids_sorted_best, predicted_bump_hard_ids_sorted_best = {}, {}

    for di, direct in enumerate(['left', 'straight', 'right']):
        predicted_hard_ids_sorted_best[direct] = models_utils.get_id_vector_for_predicted_samples_best(
            all_img_ids, all_predictions, all_labels, di, True, False, max_thresh, min_thresh
        )
        predicted_hard_ids_sorted[direct] = models_utils.get_id_vector_for_predicted_samples(
            all_img_ids, all_predictions, all_labels, di, True, False
        )

        if include_bump_data:
            predicted_bump_hard_ids_sorted_best[direct] = models_utils.get_id_vector_for_predicted_samples_best(
                all_bump_img_ids, all_bump_predictions, all_bump_labels, di, True, True, max_thresh, min_thresh
            )
            predicted_bump_hard_ids_sorted[direct] = models_utils.get_id_vector_for_predicted_samples(
                all_bump_img_ids, all_bump_predictions, all_bump_labels, di, True, True
            )

    image_list = np.split(all_images, all_images.shape[0])
    id_list = all_img_ids.tolist()
    dict_id_image = dict(zip(id_list, image_list))

    if include_bump_data:
        bump_image_list = np.split(all_bump_images, all_bump_images.shape[0])
        bump_id_list = all_bump_img_ids.tolist()
        bump_dict_id_image = dict(zip(bump_id_list, bump_image_list))

    logger.info('Correctly predicted (hard) img ids per direction: %s', predicted_hard_ids_sorted_best)

    #visualizer.save_fig_with_predictions_for_direction(predicted_hard_ids_sorted_best, dict_id_image,
    #                                                   sub_folder + os.sep + 'predicted_best_hard_%d.png' % (
    #                                                       epoch))
    #visualizer.save_fig_with_predictions_for_direction(predicted_hard_ids_sorted, dict_id_image,
    #                                                   sub_folder + os.sep + 'predicted_hard_%d.png' % (
    #                                                       epoch))

    #if include_bump_data:
    #    visualizer.save_fig_with_predictions_for_direction(predicted_bump_hard_ids_sorted_best, bump_dict_id_image,
    #                                                       sub_folder + os.sep + 'predicted_best_bump_%d.png' % (
    #                                                           epoch))
    #    visualizer.save_fig_with_predictions_for_direction(predicted_bump_hard_ids_sorted, bump_dict_id_image,
    #                                                       sub_folder + os.sep + 'predicted_bump_%d.png' % (
    #                                                           epoch))

    test_results['noncol-accuracy-hard'] = np.mean(test_accuracy)
    test_results['noncol-accuracy-soft'] = np.mean(soft_test_accuracy)
    test_results['noncol-precision'] = test_noncol_precision
    test_results['noncol-recall'] = test_noncol_recall

    if include_bump_data:
        test_results['col-accuracy-hard'] = np.mean(bump_test_accuracy)
        test_results['col-accuracy-soft'] = np.mean(bump_soft_accuracy)
        test_results['col-precision'] = test_col_precision
        test_results['col-recall'] = test_col_recall

    return test_results
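
# Note: models_utils.precision_multiclass and recall_multiclass are also not
# shown in these examples. A minimal per-direction sketch under the same
# thresholding idea follows; the name and the exact positive-prediction rule
# are assumptions, not the library's actual implementation.
import numpy as np

def precision_recall_multiclass_sketch(predictions, labels, use_argmin,
                                       max_thresh, min_thresh):
    # Returns per-class (per-direction) precision and recall lists.
    n_classes = predictions.shape[1]
    precision, recall = [], []
    for c in range(n_classes):
        if use_argmin:
            # Collision data: a sample "predicts" direction c when c holds the
            # minimum entry and that entry is below min_thresh.
            pred_pos = (np.argmin(predictions, axis=1) == c) & (predictions[:, c] < min_thresh)
            actual_pos = np.argmin(labels, axis=1) == c
        else:
            pred_pos = (np.argmax(predictions, axis=1) == c) & (predictions[:, c] > max_thresh)
            actual_pos = np.argmax(labels, axis=1) == c
        tp = float(np.sum(pred_pos & actual_pos))
        precision.append(tp / max(np.sum(pred_pos), 1))
        recall.append(tp / max(np.sum(actual_pos), 1))
    return precision, recall
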
            tf.global_variables_initializer().run(session=sess)

            dataset_filenames, dataset_sizes = dataset_name_factory.new_get_noncol_train_data_sorted_by_direction_noncol_test_data()

        for loc_ep in range(epochs_per_search):

            for step in range(dataset_sizes['train_dataset']//config.BATCH_SIZE):
                l1, _ = sess.run([cnn_learner_detached.tf_loss, cnn_learner_detached.tf_optimize],
                                 feed_dict=cnn_learner_detached.get_dropout_placeholder_dict())

            avg_train_accuracy = []
            for step in range(dataset_sizes['train_dataset'] // config.BATCH_SIZE):
                train_predictions, train_actuals = sess.run(
                    [cnn_learner_detached.tf_train_predictions, tf_train_labels])
                avg_train_accuracy.append(models_utils.soft_accuracy(
                    train_predictions, train_actuals, use_argmin=False,
                    max_thresh=cnn_learner_detached.max_thresh,
                    min_thresh=cnn_learner_detached.min_thresh))

            logger.info('\t\t Training accuracy: %.3f' % np.mean(avg_train_accuracy))

        sess.close()

        all_hyps.append(ep_hyp_dict)
        all_tr_accuracies.append(np.mean(avg_train_accuracy))

    logger.info('='*100)
    logger.info('WINNER')
    logger.info(all_hyps[np.argmax(all_tr_accuracies)])
    logger.info(all_tr_accuracies[np.argmax(all_tr_accuracies)])
    logger.info('='*100)
Example #7
            logger.info('\tAverage Bump Loss (Train) for Epoch %d: %.5f' %
                        (epoch, np.mean(avg_bump_loss)))
            #logger.info('\t\t Learning rate (collision): %.5f', sess.run(tf_col_lr))

            avg_train_accuracy = []
            avg_bump_train_accuracy = []
            # Prediction Phase
            for step in range(dataset_sizes['train_dataset'] //
                              config.BATCH_SIZE):
                train_predictions, train_actuals = sess.run(
                    [tf_train_predictions, tf_train_actuals])
                avg_train_accuracy.append(
                    models_utils.soft_accuracy(train_predictions,
                                               train_actuals,
                                               use_argmin=False,
                                               max_thresh=max_thresh,
                                               min_thresh=min_thresh))

                if step < 2:
                    logger.debug('Predictions for Non-Collided data')
                    for pred, lbl in zip(train_predictions, train_actuals):
                        logger.debug('\t%s;%s', pred, lbl)

            for step in range(dataset_sizes['train_bump_dataset'] //
                              config.BATCH_SIZE):
                train_bump_predictions, train_bump_actuals = sess.run(
                    [tf_train_bump_predictions, tf_train_bump_actuals])
                avg_bump_train_accuracy.append(
                    models_utils.soft_accuracy(train_bump_predictions,
                                               train_bump_actuals,
                                               use_argmin=True,
                                               max_thresh=max_thresh,
                                               min_thresh=min_thresh))