options = {  # earlier keys of this options dict are omitted in the source
    'mc_min_measure_count': 2,
    # 'mc_surrounding_times_s': [2.0, 5.0],
    'outlier_threshold_distance': 1.0,
    'outlier_threshold_diff': 0.5,
    # 'replacement_values': {0.01: 10.01},
    'min_measurement_value': 0.06,
}

dataset = None
measure_collections_files_dir = MeasureCollection.read_directory(
    base_path, options=options)
measure_collections_dir = {}
for file_name, measure_collections in measure_collections_files_dir.items():
    print(file_name)
    dataset = DataSet.get_dataset(measure_collections,
                                  dataset=dataset,
                                  use_floats=True)
    measure_collections_dir.update(
        MeasureCollection.mc_list_to_dict(measure_collections))

# Chronological 80/20 train/test split over the collected dataset
split_idx = int(np.ceil(len(dataset.x) * 0.8))
x_train = np.array(dataset.x[:split_idx])
y_train = np.array(dataset.y_true[:split_idx])
x_test = np.array(dataset.x[split_idx:])
y_test = np.array(dataset.y_true[split_idx:])
# Optional one-hot encoding of the labels (left disabled in the source):
# y_train = keras.utils.to_categorical(y_train, num_classes=len(dataset.class_labels))
# y_test = keras.utils.to_categorical(y_test, num_classes=len(dataset.class_labels))
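# A minimal sketch of feeding the split into a small Keras classifier,
# assuming y_train/y_test hold integer class indices and each row of x_train
# is a flat feature vector; the layer sizes are illustrative, not taken from
# the project:
import keras
from keras.models import Sequential
from keras.layers import Dense

num_classes = len(dataset.class_labels)
y_train_oh = keras.utils.to_categorical(y_train, num_classes=num_classes)
y_test_oh = keras.utils.to_categorical(y_test, num_classes=num_classes)

model = Sequential([
    Dense(64, activation='relu', input_shape=(x_train.shape[1],)),
    Dense(num_classes, activation='softmax'),
])
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
model.fit(x_train, y_train_oh,
          epochs=10,
          batch_size=32,
          validation_data=(x_test, y_test_oh))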

Example #2

    options = {  # earlier keys of this options dict are omitted in the source
        'max_measure_value': 10.0,
        # 'replacement_values': {0.01: 10.01},
        'min_measurement_value': 0.06
    }

    camera_folder = scenario_path + '_images_Camera\\'
    ground_truth_file = [
        os.path.join(camera_folder, f) for f in os.listdir(camera_folder) if
        os.path.isfile(os.path.join(camera_folder, f)) and f.startswith('00gt')
    ][0]
    measurements_scenario = Measurement.read(scenario_path,
                                             ground_truth_file,
                                             options=options)
    measure_collections_scenario = MeasureCollection.create_measure_collections(
        measurements_scenario, options=options)
    dataset_scenario = DataSet.get_dataset(measure_collections_scenario)

    # exclude the scenario's measure collections from the training data below
    options['exclude_mcs'] = measure_collections_scenario

    dataset = None
    measure_collections_files_dir = MeasureCollection.read_directory(
        base_path, options=options)
    for file_name, measure_collections in measure_collections_files_dir.items():
        dataset = DataSet.get_dataset(measure_collections, dataset=dataset)

    clf = RandomForestClassifier(n_estimators=1000, n_jobs=-1, random_state=42)
    clf.fit(dataset.x, dataset.y_true)

    # predict() already returns a 1-D array of labels, so no reshape is needed
    predictions = clf.predict(np.array(dataset_scenario.x))
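    # Scoring the scenario predictions against the ground truth; a minimal
    # sketch, assuming dataset_scenario.y_true holds the ground-truth labels
    # (as it does for the training DataSet objects above):
    from sklearn.metrics import accuracy_score, confusion_matrix

    print(accuracy_score(dataset_scenario.y_true, predictions))
    print(confusion_matrix(dataset_scenario.y_true, predictions))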
Example #3

    # (the options dict and earlier setup lines of this example are omitted
    # in the source)
    dataset_softmax_10cm = None
    dataset_normal = None
    measure_collections_files_dir = MeasureCollection.read_directory(
        base_path, options=options)

    # Build a parking-space map from the recorded drives and filter the
    # measure collections against it
    parking_space_map_clusters, _ = create_parking_space_map(
        measure_collections_files_dir)
    measure_collections_files_dir = filter_parking_space_map_mcs(
        measure_collections_files_dir, parking_space_map_clusters)

    measure_collections_dir = {}
    for file_name, measure_collections in measure_collections_files_dir.items():
        print(file_name)
        dataset_softmax_10cm = DataSet.get_raw_sensor_dataset_per_10cm(
            measure_collections,
            dataset=dataset_softmax_10cm,
            is_softmax_y=True)
        dataset_normal = DataSet.get_dataset(measure_collections,
                                             dataset=dataset_normal)
        measure_collections_dir.update(
            MeasureCollection.mc_list_to_dict(measure_collections))

    start = time.time()
    # confusion_m_simp = evaluate_model(simple_dense_model, dataset)

    evaluator = DriveByEvaluation()
    confusion_m_lstm, predictions = evaluator.evaluate(create_random_forest,
                                                       predict,
                                                       dataset_normal,
                                                       number_of_splits=10,
                                                       shuffle=True)
    # confusion_m_lstm = evaluator.evaluate(create_random_forest, predict, dataset_normal)
    # confusion_m_lstm = evaluator.evaluate(simple_dense_model, predict_softmax, dataset_softmax_10cm)
    # confusion_m_conv = evaluate_model(create_conv_model, dataset)
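    # For reference, DriveByEvaluation.evaluate is handed a model factory and
    # a predict function. A hypothetical sketch of what those callables could
    # look like (the project's actual create_random_forest and predict are
    # defined elsewhere and may differ):
    #
    # def create_random_forest(dataset):
    #     clf = RandomForestClassifier(n_estimators=1000, n_jobs=-1,
    #                                  random_state=42)
    #     clf.fit(dataset.x, dataset.y_true)
    #     return clf
    #
    # def predict(clf, x):
    #     return clf.predict(np.array(x))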

Example #4

    options = {  # earlier keys of this options dict are omitted in the source
        'mc_min_measure_count': 2,
        # 'mc_surrounding_times_s': [2.0, 5.0],
        'outlier_threshold_distance': 1.0,
        'outlier_threshold_diff': 0.5,
        # 'replacement_values': {0.01: 10.01},
        'min_measurement_value': 0.06,
    }

    dataset = None
    measure_collections_files_dir = MeasureCollection.read_directory(
        base_path, options=options)
    measure_collections_dir = {}
    for file_name, measure_collections in measure_collections_files_dir.items():
        print(file_name)
        dataset = DataSet.get_dataset(measure_collections, dataset=dataset)
        measure_collections_dir.update(
            MeasureCollection.mc_list_to_dict(measure_collections))

    classifiers = {
        'DecisionTree_GINI': DecisionTreeClassifier(max_depth=3),
    }

    for name, clf in classifiers.items():
        clf.fit(dataset.x, dataset.y_true)

        # export_graphviz lives in sklearn.tree; pydot can render the .dot file
        from sklearn import tree
        import pydot
        from io import StringIO

        # dot_data = StringIO()
        tree.export_graphviz(clf, out_file='tree_pruned_2.dot')
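
        # Rendering the exported tree to an image; a minimal sketch using
        # pydot's graph_from_dot_file (the PNG file name is an assumption):
        (graph,) = pydot.graph_from_dot_file('tree_pruned_2.dot')
        graph.write_png('tree_pruned_2.png')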