def evaluate_model(model, batches, steps_per_epoch):
    """Run the model on up to steps_per_epoch batches and return the
    (loss, roc_auc, accuracy) computed by util.measure_performance."""
    probs = []
    labels = []
    eval_start_time = time.time()
    for step, batch in enumerate(batches):
        if step >= steps_per_epoch:
            break
        labels.extend(batch['label'])
        probs.extend(model.forward(batch['X']))
        util.over_print("Step: %d/%d \tTime: %f\r" % (step+1, steps_per_epoch, time.time()-eval_start_time))
    loss, roc_auc, accuracy = util.measure_performance(labels, probs)
    return loss, roc_auc, accuracy
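# Hedged usage sketch, not part of the original example. A validation pass
# might look like the following, where `dataloader.valid_batches` and
# `dataloader.valid_steps_per_epoch` are assumed names (only
# `train_steps_per_epoch` and `test_steps_per_epoch` appear in these examples):
#
#   valid_loss, valid_roc_auc, valid_accuracy = evaluate_model(
#       model, dataloader.valid_batches, dataloader.valid_steps_per_epoch)
#   print("Valid - Loss: %f\t ROC-AUC: %f\t Accuracy: %f"
#         % (valid_loss, valid_roc_auc, valid_accuracy))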
                        if best_test_roc_auc[i] < test_roc_auc and current_epoch > 0:
                            best_test_roc_auc[i] = test_roc_auc
                            model.save_model_with_name("best_test_{}".format(i), current_epoch, valid_roc_auc, test_roc_auc)

                    # Log training results
                    logger.log({
                        "epoch": current_epoch,
                        "train_loss": train_loss, "train_roc-auc": train_roc_auc,
                        "valid_loss": valid_loss, "valid_roc-auc": valid_roc_auc,
                        "test_loss": test_losses, "test_roc-auc": test_roc_aucs,
                        "best_test_roc-auc": list(best_test_roc_auc),
                        "mean_best_test_roc-auc": np.mean(best_test_roc_auc),
                    })
                    print("\nBest Valid ROC-AUC: {}\t Best Test ROC-AUC: {}\t Mean of Best Test ROC-AUC: {}\n".format(
                        best_valid_roc_auc, best_test_roc_auc, np.mean(best_test_roc_auc)))
                
                # Take a gradient descent step and report progress
                loss = model.optimize_parameters(batch, lr)
                util.over_print("Train Step: %d/%d \t Loss: %f \t LR: %f \t Time: %f \r"
                    % (step, dataloader.train_steps_per_epoch, loss, lr, time.time() - step_start_time))
                step_start_time = time.time()


            print('\nEnd of epoch %d / %d \t Time Taken: %d sec' % (epoch, opt.nepoch, time.time()-epoch_start_time))
            # print('saving the model at the end of epoch %d'%(epoch))
            # model.save(epoch)

    dataloader.close_threads(coord)

    print("Done Training!")
    sess.close()
Example #3
                print("Accident Probability: %f" % probs[-1][1])
                print("Label: %f" % labels[-1])
                print("Frame Label: %f" % batch['frame_label'][-1])
                frame = batch['X'][-1]
                image = frame[:, :, 0:3].astype(np.uint8)
                bbox2 = frame[:, :, 6].astype(np.uint8)
                masked2 = np.ma.masked_where(bbox2 == 0, bbox2)
                bbox1 = frame[:, :, 7].astype(np.uint8)
                masked1 = np.ma.masked_where(bbox1 == 0, bbox1)
                plt.imshow(image)
                plt.imshow(masked2, 'Reds_r', alpha=0.4)
                plt.imshow(masked1, 'jet', alpha=0.4)
                plt.show()

            util.over_print("Step: %d/%d \tTime: %f\r" %
                            (step + 1, dataset['test_steps_per_epoch'],
                             time.time() - eval_start_time))

        samples_grouped_by_frame = dataset['test_samples_grouped_by_frame']
        samples = list(itertools.chain.from_iterable(samples_grouped_by_frame))
        result_dict['split_{}_result'.format(split)] = {
            'labels': labels,
            'probs': probs,
            'samples': samples
        }

        test_loss, test_roc_auc, test_accuracy = util.measure_performance(
            labels, probs)
        test_roc_aucs.append(test_roc_auc)
        print("\nTest - Loss: %f\t ROC-AUC: %f\t Accuracy: %f\n" %
              (test_loss, test_roc_auc, test_accuracy))
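        # Hedged note (an assumption, not from the source): once every split has
        # been evaluated, the per-split scores collected in `test_roc_aucs` are
        # typically summarized by their mean, e.g.
        #
        #   print("Mean Test ROC-AUC over splits: %f" % np.mean(test_roc_aucs))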
Example #4
def build_sample_list(scenes,
                      frames_per_sample,
                      label_method,
                      motion_model,
                      ttc_threshold,
                      shuffle_by_which=None):
    n_samples = 0
    n_acc_samples = 0
    n_nonacc_samples = 0
    samples_grouped_by_frame = []

    for s, scene in enumerate(scenes):
        for f, frame in enumerate(scene):

            samples_per_frame = []
            if f < frames_per_sample - 1:
                continue

            # Load per-vehicle JSON info for the current frame and the five
            # preceding frames, closing each file after reading.
            frame_infos = []
            for k in range(6):
                with open(scene[f - k]['json_path']) as fp:
                    frame_infos.append(json.load(fp))
            (frame_t_minus_0_info, frame_t_minus_1_info, frame_t_minus_2_info,
             frame_t_minus_3_info, frame_t_minus_4_info,
             frame_t_minus_5_info) = frame_infos
            frame_label = scene[f]['frame_label']

            for bbox in frame_t_minus_0_info['vehicleInfo']:
                data_driven_label, rule_based_label, rule_based_prob = get_vehicle_label(
                    bbox, motion_model, ttc_threshold)
                if data_driven_label is not None and rule_based_label is not None:
                    sample = {
                        'hashcode': bbox['hashcode'],
                        'frame_label': frame_label,
                        'frame_t-0.0s': frame_per_vehicle(scene[f], bbox),
                        'frame_t-0.1s': frame_per_vehicle(
                            scene[f - 1],
                            get_bbox(frame_t_minus_1_info, bbox['hashcode']))
                        if frames_per_sample > 1 else None,
                        'frame_t-0.2s': frame_per_vehicle(
                            scene[f - 2],
                            get_bbox(frame_t_minus_2_info, bbox['hashcode']))
                        if frames_per_sample > 2 else None,
                        'frame_t-0.3s': frame_per_vehicle(
                            scene[f - 3],
                            get_bbox(frame_t_minus_3_info, bbox['hashcode']))
                        if frames_per_sample > 3 else None,
                        'frame_t-0.4s': frame_per_vehicle(
                            scene[f - 4],
                            get_bbox(frame_t_minus_4_info, bbox['hashcode']))
                        if frames_per_sample > 4 else None,
                        'frame_t-0.5s': frame_per_vehicle(
                            scene[f - 5],
                            get_bbox(frame_t_minus_5_info, bbox['hashcode']))
                        if frames_per_sample > 5 else None,
                    }
                    n_samples += 1
                    if label_method == "data_driven":
                        sample['label'] = data_driven_label
                        sample['label_prob'] = data_driven_label
                    elif label_method == "rule_based":
                        sample['label'] = rule_based_label
                        sample['label_prob'] = rule_based_label
                    elif label_method == "rule_based_prob":
                        sample['label'] = rule_based_label
                        sample['label_prob'] = rule_based_prob
                    # Count accident vs. non-accident samples for any known label method.
                    if 'label' in sample:
                        if sample['label']:
                            n_acc_samples += 1
                        else:
                            n_nonacc_samples += 1
                    samples_per_frame.append(sample)

            samples_grouped_by_frame.append(samples_per_frame)

        util.over_print("Building sample list: %d/%d \r" % (s, len(scenes)))
    print()

    return samples_grouped_by_frame, n_acc_samples, n_nonacc_samples, n_samples
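# Hedged usage sketch (the argument values below are illustrative assumptions,
# not taken from the source); `scenes` is the nested scene/frame structure
# consumed above:
#
#   samples_by_frame, n_acc, n_nonacc, n_total = build_sample_list(
#       scenes, frames_per_sample=6, label_method="rule_based",
#       motion_model=motion_model, ttc_threshold=ttc_threshold)
#   print("accident: %d  non-accident: %d  total: %d" % (n_acc, n_nonacc, n_total))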
Example #5
    model = CreateModel(opt, sess)
    data_loader = CreateDataLoader(opt, sess)
    coord = tf.train.Coordinator()
    data_loader.start_enqueue_threads_for_testing(coord)
    data_loader.init_test_generator()
    y_true = []
    y_pred = []
    step_count = 0

    for step, batch in enumerate(data_loader.test_datasets[0]['test_batch']):
        probs = model.forward(batch['X'])
        y_true.extend(batch['label'])
        y_pred.extend(probs)
        step_count += 1
        util.over_print("Step count: {} \r".format(str(step_count)))

    # Keep only the positive-class (accident) probability from each prediction.
    temp_y_pred = [p[1] for p in y_pred]
    data_loader.close_threads(coord)
    sess.close()
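    # Hedged sketch (scikit-learn is an assumption here; the other examples
    # compute metrics through util.measure_performance). With the positive-class
    # probabilities in `temp_y_pred`, a test ROC-AUC could be computed as:
    #
    #   from sklearn.metrics import roc_auc_score
    #   print("Test ROC-AUC: %f" % roc_auc_score(y_true, temp_y_pred))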

####################################################################################
#   Keep only the top-n vehicles per frame with the highest collision probability  #
####################################################################################
if opt.filter_top_n > 0:
    samples_grouped_by_frame = data_loader.test_datasets[0][
        'test_samples_grouped_by_frame']
    samples = list(itertools.chain.from_iterable(samples_grouped_by_frame))
    json_path = samples[0]['frame_t-0.0s']['json_path']
    new_y_pred = []
    preds_per_frame = []
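    # Hedged sketch of the top-n filtering idea described in the banner above.
    # This is NOT the original continuation of the (truncated) example; only
    # `opt.filter_top_n`, `samples_grouped_by_frame`, `temp_y_pred`, and
    # `new_y_pred` come from the code above, the rest is an assumption:
    #
    #   offset = 0
    #   for frame_samples in samples_grouped_by_frame:
    #       frame_preds = temp_y_pred[offset:offset + len(frame_samples)]
    #       offset += len(frame_samples)
    #       # Indices of the n highest-probability vehicles in this frame.
    #       keep = set(np.argsort(frame_preds)[-opt.filter_top_n:])
    #       new_y_pred.extend(p if i in keep else 0.0
    #                         for i, p in enumerate(frame_preds))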