def get_dataset_parking_cars(measure_collections, dataset=None):
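    """Build a binary PARKING_CAR / NO_PARKING_CAR dataset from measure collections."""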
    if dataset is None:
        dataset = DataSet(['PARKING_CAR', 'NO_PARKING_CAR'])

    for mc in measure_collections:
        features = [
            mc.avg_distance,
            mc.get_length(),
            mc.get_duration(),
            mc.get_nr_of_measures(),
            mc.get_distance_variance(),
            mc.avg_speed,
            mc.get_acceleration(),
            mc.first_measure().distance,
            mc.measures[len(mc.measures) // 2].distance,
            mc.last_measure().distance
        ]

        for interval, surrounding_mc in mc.time_surrounding_mcs.items():
            features.append(surrounding_mc.avg_distance)
            features.append(surrounding_mc.avg_speed)
            features.append(surrounding_mc.length)
            features.append(surrounding_mc.get_acceleration())

        ground_truth = 'NO_PARKING_CAR'
        gt = mc.get_probable_ground_truth()
        if GroundTruthClass.is_parking_car(gt):
            ground_truth = 'PARKING_CAR'

        dataset.append_sample(features, ground_truth)

    return dataset
def create_keras_model_dense():
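    """Build a dense softmax classifier sized to the raw sensor feature vector."""
    # A dummy single-measure collection is used only to derive the input
    # feature length and the number of output classes.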
    mc = MeasureCollection()
    m = Measurement(1, 1111111111, 48, 14, 4,
                    GroundTruth(1111, GroundTruthClass.FREE_SPACE))
    mc.add_measure(m)
    dataset = DataSet.get_raw_sensor_dataset([mc])
    model = Sequential()
    model.add(Dense(64, activation='relu', input_dim=len(dataset.x[0])))
    model.add(Dropout(0.1))
    model.add(Dense(64, activation='relu'))
    model.add(Dropout(0.1))
    model.add(Dense(64, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(64, activation='relu'))
    model.add(Dropout(0.1))
    model.add(Dense(64, activation='relu'))
    model.add(Dropout(0.1))
    model.add(Dense(len(dataset.class_labels), activation='softmax'))

    # Alternative optimizer, currently unused in favour of Adam:
    # sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])

    return model
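
# A minimal usage sketch (hypothetical; assumes a populated softmax DataSet
# `ds` and numpy imported as np):
#   model = create_keras_model_dense()
#   model.fit(np.array(ds.x), np.array(ds.y_true), epochs=10, batch_size=32)
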
def enhance_dataset2(dataset, predictions):
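    """Augment each sample with its predicted class index and the difference in
    the first feature (avg_distance) to the nearest PARKING_CAR or
    OVERTAKING_SITUATION prediction before and after it (length_to < 20.0)."""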
    dataset_normal_plus = DataSet(class_labels=dataset.class_labels,
                                  is_softmax_y=dataset.is_softmax_y)
    surrounding_mcs = 30
    for i in range(len(predictions)):
        features = list(dataset.x[i])
        features.append(dataset.class_to_index(predictions[i]))
        avg_distance_parking_before = 0.0
        avg_distance_parking_after = 0.0
        avg_distance_overtaken_before = 0.0
        avg_distance_overtaken_after = 0.0
        j = i - 1
        while j >= i - surrounding_mcs:
            if j >= 0 and dataset.mcs[j].length_to(dataset.mcs[i]) < 20.0:
                if predictions[j] == 'PARKING_CAR':
                    avg_distance_parking_before = dataset.x[j][0] - dataset.x[i][0]
                    break
                elif predictions[j] == 'OVERTAKING_SITUATION':
                    avg_distance_overtaken_before = dataset.x[j][0] - dataset.x[i][0]
                    break
            j -= 1
        j = i + 1
        while j <= i + surrounding_mcs:
            if j < len(predictions) and dataset.mcs[j].length_to(dataset.mcs[i]) < 20.0:
                if predictions[j] == 'PARKING_CAR':
                    avg_distance_parking_after = dataset.x[j][0] - dataset.x[i][0]
                    break
                elif predictions[j] == 'OVERTAKING_SITUATION':
                    avg_distance_overtaken_after = dataset.x[j][0] - dataset.x[i][0]
                    break
            j += 1
        features.append(avg_distance_parking_before)
        features.append(avg_distance_parking_after)
        features.append(avg_distance_overtaken_before)
        features.append(avg_distance_overtaken_after)
        dataset_normal_plus.x.append(features)
        dataset_normal_plus.mcs.append(dataset.mcs[i])
        dataset_normal_plus.y_true.append(dataset.y_true[i])

    return dataset_normal_plus
def enhance_dataset(dataset, predictions, predictions_are_softmax=False):
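    """Augment each sample with its predicted class index plus, for each of the
    `surrounding_mcs` neighbours on either side, the neighbour's class index
    and first feature when it was predicted PARKING_CAR within
    length_to < 20.0, or -1.0 / 0.0 placeholders otherwise."""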
    dataset_normal_plus = DataSet(class_labels=dataset.class_labels,
                                  is_softmax_y=dataset.is_softmax_y)
    surrounding_mcs = 20
    for i in range(len(predictions)):
        features = list(dataset.x[i])
        features.append(dataset.class_to_index(predictions[i]))
        j = i - 1
        while j >= i - surrounding_mcs:
            if (j >= 0 and predictions[j] == 'PARKING_CAR'
                    and dataset.mcs[j].length_to(dataset.mcs[i]) < 20.0):
                features.append(dataset.class_to_index(predictions[j]))
                features.append(dataset.x[j][0])
            else:
                features.append(-1.0)
                features.append(0.0)
            j -= 1
        j = i + 1
        while j <= i + surrounding_mcs:
            if (j < len(predictions) and predictions[j] == 'PARKING_CAR'
                    and dataset.mcs[j].length_to(dataset.mcs[i]) < 20.0):
                features.append(dataset.class_to_index(predictions[j]))
                features.append(dataset.x[j][0])
            else:
                features.append(-1.0)
                features.append(0.0)
            j += 1
        dataset_normal_plus.x.append(features)
        dataset_normal_plus.mcs.append(dataset.mcs[i])
        dataset_normal_plus.y_true.append(dataset.y_true[i])

    return dataset_normal_plus
def get_overtaking_situation_dataset(measure_collections, dataset=None):
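    """Build a binary OVERTAKING_SITUATION / NO_OVERTAKING_SITUATION dataset
    from measure collections with a length above 1.0, resampled for balance."""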
    if dataset is None:
        dataset = DataSet(['NO_OVERTAKING_SITUATION', 'OVERTAKING_SITUATION'])

    for mc in measure_collections:
        if mc.get_length() > 1.0:
            features = [
                mc.avg_distance,
                mc.get_length(),
                mc.get_duration(),
                mc.get_nr_of_measures(),
                mc.get_distance_variance(),
                mc.avg_speed,
                mc.get_acceleration(),
                mc.first_measure().distance,
                mc.measures[len(mc.measures) // 2].distance,
                mc.last_measure().distance
            ]

            for interval, surrounding_mc in mc.time_surrounding_mcs.items():
                features.append(surrounding_mc.avg_distance)
                features.append(surrounding_mc.avg_speed)
                features.append(surrounding_mc.length)
                features.append(surrounding_mc.get_acceleration())

            ground_truth = 'NO_OVERTAKING_SITUATION'
            gt = mc.get_probable_ground_truth()
            if GroundTruthClass.is_overtaking_situation(gt):
                ground_truth = 'OVERTAKING_SITUATION'

            # Resampling for class balance: keep each negative sample with
            # probability 10/11 and append each overtaking sample three times.
            if not GroundTruthClass.is_overtaking_situation(gt):
                if random.randint(0, 10) < 10:
                    dataset.append_sample(features, ground_truth)
            else:
                for _ in range(3):
                    dataset.append_sample(features, ground_truth)

    return dataset
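
# Usage sketch (hypothetical `mcs`, a list of MeasureCollection objects):
#   ds = get_overtaking_situation_dataset(mcs)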
options = {
    'mc_min_measure_count': 2,
    # 'mc_surrounding_times_s': [2.0, 5.0],
    'outlier_threshold_distance': 1.0,
    'outlier_threshold_diff': 0.5,
    # 'replacement_values': {0.01: 10.01},
    'min_measurement_value': 0.06,
}

dataset = None
measure_collections_files_dir = MeasureCollection.read_directory(
    base_path, options=options)
measure_collections_dir = {}
for file_name, measure_collections in measure_collections_files_dir.items():
    print(file_name)
    dataset = DataSet.get_raw_sensor_dataset_per_10cm(measure_collections,
                                                      dataset=dataset,
                                                      is_softmax_y=True)
    measure_collections_dir.update(
        MeasureCollection.mc_list_to_dict(measure_collections))

# Split samples into train (first 80%) and test (last 20%) sets
x_train = [x for i, x in enumerate(dataset.x) if i < len(dataset.x) * 0.8]
y_train = [y for i, y in enumerate(dataset.y_true) if i < len(dataset.x) * 0.8]
x_test = [x for i, x in enumerate(dataset.x) if i >= len(dataset.x) * 0.8]
y_test = [y for i, y in enumerate(dataset.y_true) if i >= len(dataset.x) * 0.8]

print('x_train[0]', x_train[0])
def create_staged_forest(model, dataset, x_train, y_train, x_test, y_test,
                         y_pred):
    derived_dataset = DataSet(dataset.class_labels, dataset.is_softmax_y)
options = {
    'mc_min_measure_count': 2,
    # 'mc_surrounding_times_s': [2.0, 5.0],
    'outlier_threshold_distance': 1.0,
    'outlier_threshold_diff': 0.5,
    # 'replacement_values': {0.01: 10.01},
    'min_measurement_value': 0.06,
}

dataset = None
measure_collections_files_dir = MeasureCollection.read_directory(
    base_path, options=options)
measure_collections_dir = {}
for file_name, measure_collections in measure_collections_files_dir.items():
    print(file_name)
    dataset = DataSet.get_dataset(measure_collections,
                                  dataset=dataset,
                                  use_floats=True)
    measure_collections_dir.update(
        MeasureCollection.mc_list_to_dict(measure_collections))

# Split into train (first 80%) and test (last 20%) sets; labels could be
# one-hot encoded with keras.utils.to_categorical(
#     y, num_classes=len(dataset.class_labels)) if a softmax head is used.
x_train = np.array(
    [x for i, x in enumerate(dataset.x) if i < len(dataset.x) * 0.8])
y_train = np.array(
    [y for i, y in enumerate(dataset.y_true) if i < len(dataset.x) * 0.8])
x_test = np.array(
    [x for i, x in enumerate(dataset.x) if i >= len(dataset.x) * 0.8])
y_test = np.array(
    [y for i, y in enumerate(dataset.y_true) if i >= len(dataset.x) * 0.8])
    dataset_softmax_10cm = None
    dataset_normal = None
    measure_collections_files_dir = MeasureCollection.read_directory(
        base_path, options=options)

    parking_space_map_clusters, _ = create_parking_space_map(
        measure_collections_files_dir)
    measure_collections_files_dir = filter_parking_space_map_mcs(
        measure_collections_files_dir, parking_space_map_clusters)

    measure_collections_dir = {}
    for file_name, measure_collections in measure_collections_files_dir.items():
        print(file_name)
        dataset_softmax_10cm = DataSet.get_raw_sensor_dataset_per_10cm(
            measure_collections,
            dataset=dataset_softmax_10cm,
            is_softmax_y=True)
        dataset_normal = DataSet.get_dataset(measure_collections,
                                             dataset=dataset_normal)
        measure_collections_dir.update(
            MeasureCollection.mc_list_to_dict(measure_collections))

    start = time.time()
    # confusion_m_simp = evaluate_model(simple_dense_model, dataset)

    evaluator = DriveByEvaluation()
    confusion_m_lstm, predictions = evaluator.evaluate(create_random_forest,
                                                       predict,
                                                       dataset_normal,
                                                       number_of_splits=10,
                                                       shuffle=True)
    options = {
        'max_measure_value': 10.0,
        # 'replacement_values': {0.01: 10.01},
        'min_measurement_value': 0.06
    }

    camera_folder = scenario_path + '_images_Camera\\'
    ground_truth_file = [
        os.path.join(camera_folder, f) for f in os.listdir(camera_folder) if
        os.path.isfile(os.path.join(camera_folder, f)) and f.startswith('00gt')
    ][0]
    measurements_scenario = Measurement.read(scenario_path,
                                             ground_truth_file,
                                             options=options)
    measure_collections_scenario = MeasureCollection.create_measure_collections(
        measurements_scenario, options=options)
    dataset_scenario = DataSet.get_dataset(measure_collections_scenario)

    options['exclude_mcs'] = measure_collections_scenario

    dataset = None
    measure_collections_files_dir = MeasureCollection.read_directory(
        base_path, options=options)
    for file_name, measure_collections in measure_collections_files_dir.items():
        dataset = DataSet.get_dataset(measure_collections, dataset=dataset)

    clf = RandomForestClassifier(n_estimators=1000, n_jobs=-1, random_state=42)
    clf.fit(dataset.x, dataset.y_true)

    predictions = clf.predict(np.array(dataset_scenario.x))
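
    # A possible evaluation sketch (assumes the scenario ground-truth labels
    # line up with the predictions; confusion_matrix is from sklearn.metrics):
    #   from sklearn.metrics import confusion_matrix
    #   print(confusion_matrix(dataset_scenario.y_true, predictions))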
    options = {
        'mc_min_measure_count': 2,
        # 'mc_surrounding_times_s': [2.0, 5.0],
        'outlier_threshold_distance': 1.0,
        'outlier_threshold_diff': 0.5,
        # 'replacement_values': {0.01: 10.01},
        'min_measurement_value': 0.06,
    }

    dataset = None
    measure_collections_files_dir = MeasureCollection.read_directory(
        base_path, options=options)
    measure_collections_dir = {}
    for file_name, measure_collections in measure_collections_files_dir.items():
        print(file_name)
        dataset = DataSet.get_dataset(measure_collections, dataset=dataset)
        measure_collections_dir.update(
            MeasureCollection.mc_list_to_dict(measure_collections))

    classifiers = {
        'DecisionTree_GINI': DecisionTreeClassifier(max_depth=3),
    }

    for name, clf in classifiers.items():
        clf.fit(dataset.x, dataset.y_true)

        # pydot and an in-memory StringIO dot_data buffer would only be needed
        # to render the graph programmatically; writing straight to a .dot
        # file needs neither.
        from sklearn import tree

        tree.export_graphviz(clf, out_file='tree_pruned_2.dot')
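
        # The exported file can be rendered with Graphviz, e.g.:
        #   dot -Tpng tree_pruned_2.dot -o tree_pruned_2.png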
    parking_space_map_clusters, _ = create_parking_space_map(
        measure_collections_files_dir)
    measure_collections_files_dir = filter_parking_space_map_mcs(
        measure_collections_files_dir, parking_space_map_clusters)
    print(MeasureCollection.get_size(measure_collections_files_dir))

    dataset_raw = None
    dataset_10cm = None
    dataset_10cm_surrounding = None
    dataset_parking = None
    measure_collections_dir = {}
    for file_name, measure_collections in measure_collections_files_dir.items():
        print(file_name)
        #print(len(measure_collection))
        #measure_collection = filter_acceleration_situations(measure_collection)
        #print('filtered', len(measure_collection))
        #MeasureCollection.write_arff_file(measure_collections1, ml_file_path)
        #measure_collection = [mc for mc in measure_collection if mc.length > 0.5]
        dataset_raw = DataSet.get_raw_sensor_dataset(measure_collections,
                                                     dataset=dataset_raw,
                                                     is_softmax_y=True)
        dataset_10cm = DataSet.get_raw_sensor_dataset_per_10cm(
            measure_collections, dataset=dataset_10cm, is_softmax_y=False)
        dataset_10cm_surrounding = DataSet.get_raw_sensor_dataset_per_10cm_p_surroundings(
            measure_collections,
            dataset=dataset_10cm_surrounding,
            is_softmax_y=False)
        dataset_parking = DataSet.get_raw_sensor_dataset_parking_space_detection(
            measure_collections, dataset=dataset_parking)
        measure_collections_dir.update(
            MeasureCollection.mc_list_to_dict(measure_collections))

    datasets = {
        #'dataset_raw': dataset_raw,
        'dataset_raw_10cm': dataset_10cm,