Example #1
0
    def __setSavePathFile(self, save=False, path_result=None):
        """Configures the dataset that stores shortest-path trees.

        Fields: origin, node, predecessor, connector.
        When saving to disk the dataset holds zones * nodes records;
        otherwise a minimal in-memory placeholder is created.
        """
        dataset = AequilibraeData()

        # Defaults: single-record, in-memory placeholder
        dim_origins = max(1, self.zones)
        dim_nodes = 1
        in_memory = True

        if save:
            if path_result is None:
                warnings.warn(
                    "Path file not set properly. Need to specify output file too"
                )
            elif self.nodes > 0 and self.zones > 0:
                # This is the only place where we keep 32bits, as going 64 bits would explode the file size
                dim_origins = self.zones
                dim_nodes = self.nodes
                in_memory = False

        dataset.create_empty(
            file_path=path_result,
            entries=dim_origins * dim_nodes,
            field_names=["origin", "node", "predecessor", "connector"],
            data_types=[np.uint32, np.uint32, np.uint32, np.uint32],
            memory_mode=in_memory,
        )

        self.path_file = {"save": save, "results": dataset}
 def __init__(self, parent_thread, layer, index_field, fields, file_name):
     """Worker that copies *fields* of *layer* into an AequilibraE dataset."""
     WorkerThread.__init__(self, parent_thread)

     # Inputs describing what to import
     self.layer = layer
     self.index_field = index_field
     self.fields = fields
     self.output_name = file_name

     # State and results
     self.error = None
     # 32 or 64, depending on the interpreter's pointer size
     self.python_version = 8 * struct.calcsize("P")
     self.output = AequilibraeData()
Example #3
0
 def load_from_aequilibrae_format(self):
     """Prompts the user for an *.aed file and loads it into ``self.dataset``.

     On failure a user-facing message is stored in ``self.error`` instead of
     raising; the dialog is closed either way.
     """
     out_name, _ = GetOutputFileName(self, "AequilibraE dataset",
                                     ["Aequilibrae dataset(*.aed)"], ".aed",
                                     self.path)
     try:
         self.dataset = AequilibraeData()
         self.dataset.load(out_name)
     # BUG FIX: was a bare except, which also swallowed SystemExit and
     # KeyboardInterrupt
     except Exception:
         self.error = "Could not load file. It might be corrupted or might not be a valid AequilibraE file"
     self.exit_procedure()
Example #4
0
    def test_load(self):
        """Re-imports the dataset from disk and validates its contents."""
        self.ad = AequilibraeData()
        self.ad.load(file_path)

        # Index and data values must round-trip unchanged
        if self.ad.index[70] != 170:
            self.fail("Value for data index test was not as expected")

        if int(self.ad.d[70]) != 28900:
            self.fail("Value for data field test was not as expected")

        # Every stored field must be one we originally asked for
        unknown = [f for f in self.ad.fields if f not in args["field_names"]]
        if unknown:
            self.fail("Could not retrieve all fields")
Example #5
0
    def get_load_results(self) -> AequilibraeData:
        """
        Translates the assignment results from the graph format into the network format

        Returns:
            dataset (:obj:`AequilibraeData`): AequilibraE data with the traffic class assignment results
        """
        # One AB/BA/total triplet per traffic class, keyed by class name
        fields = ['link_id']
        for n in self.classes['names']:
            fields.extend([f'{n}_ab', f'{n}_ba', f'{n}_tot'])
        types = [np.float64] * len(fields)

        # One output record per distinct network link
        entries = int(np.unique(self.lids).shape[0])
        res = AequilibraeData()
        res.create_empty(memory_mode=True,
                         entries=entries,
                         field_names=fields,
                         data_types=types)
        res.data.fill(np.nan)
        # Dataset is indexed on the sorted, unique link IDs
        res.index[:] = np.unique(self.lids)[:]
        res.link_id[:] = res.index[:]

        # Lookup table: link ID -> row position in the output dataset
        indexing = np.zeros(int(self.lids.max()) + 1, np.uint64)
        indexing[res.index[:]] = np.arange(entries)

        # Indices of links BA and AB
        ABs = self.direcs > 0
        BAs = self.direcs < 0
        ab_ids = indexing[self.lids[ABs]]
        ba_ids = indexing[self.lids[BAs]]

        # Link flows
        link_flows = self.link_loads[:, :]
        for i, n in enumerate(self.classes["names"]):
            # AB Flows
            res.data[n + "_ab"][ab_ids] = np.nan_to_num(link_flows[ABs, i])
            # BA Flows
            res.data[n + "_ba"][ba_ids] = np.nan_to_num(link_flows[BAs, i])

            # Tot Flow
            res.data[n + "_tot"] = np.nan_to_num(
                res.data[n + "_ab"]) + np.nan_to_num(res.data[n + "_ba"])
        return res
Example #6
0
class TestAequilibraEData(TestCase):
    """Exercises AequilibraeData creation, reload and CSV export."""

    def test___init__(self):
        """Creates a dataset from ``args`` and checks index/data round-trip."""
        ds = AequilibraeData()
        ds.create_empty(**args)

        # Index runs 100..(100 + entries - 1); the data field holds its square
        ds.index[:] = np.arange(ds.entries) + 100
        ds.d[:] = ds.index[:] ** 2
        if ds.index[70] != 170:
            self.fail()

        if int(ds.d[70]) != 28900:
            self.fail()

        # removes the dataset
        del ds

    def test_load(self):
        """Re-imports the dataset from disk and validates its contents."""
        self.ad = AequilibraeData()
        self.ad.load(file_path)

        # Index and data values must round-trip unchanged
        if self.ad.index[70] != 170:
            self.fail("Value for data index test was not as expected")

        if int(self.ad.d[70]) != 28900:
            self.fail("Value for data field test was not as expected")

        # Every stored field must be one we originally asked for
        for f in self.ad.fields:
            if f not in args["field_names"]:
                self.fail("Could not retrieve all fields")

    def test_export(self):
        """Exports the freshly loaded dataset to a CSV in the temp folder."""
        self.test_load()
        csv_name = os.path.join(tempfile.gettempdir(),
                                "aequilibrae_data_example.csv")
        self.ad.export(csv_name)
Example #7
0
    def test___init__(self):
        """Creates a dataset from ``args`` and checks index/data round-trip."""
        ds = AequilibraeData()
        ds.create_empty(**args)

        # Index runs 100..(100 + entries - 1); the data field holds its square
        ds.index[:] = np.arange(ds.entries) + 100
        ds.d[:] = ds.index[:] ** 2
        if ds.index[70] != 170:
            self.fail()

        if int(ds.d[70]) != 28900:
            self.fail()

        # removes the dataset
        del ds
Example #8
0
class LoadDatasetDialog(QtWidgets.QDialog, FORM_CLASS):
    """Dialog to import a vector layer's attribute table (or an existing
    *.aed file) as an AequilibraE dataset.

    The heavy lifting is delegated to a LoadDataset worker thread; this
    class only wires up the UI and collects the user's choices.
    """

    def __init__(self, iface, single_use=True):
        QtWidgets.QDialog.__init__(self)
        self.iface = iface
        self.setupUi(self)
        self.path = standard_path()

        # State collected from the UI / produced by the worker
        self.output_name = None
        self.layer = None
        self.zones = None
        self.cells = None
        self.error = None
        self.selected_fields = None
        self.worker_thread = None
        self.dataset = None
        self.ignore_fields = []
        self.single_use = single_use

        self.radio_layer_matrix.clicked.connect(
            partial(self.size_it_accordingly, False))
        self.radio_aequilibrae.clicked.connect(
            partial(self.size_it_accordingly, False))
        self.chb_all_fields.clicked.connect(self.set_tables_with_fields)
        self.but_adds_to_links.clicked.connect(self.append_to_list)

        # For changing the network layer
        self.cob_data_layer.currentIndexChanged.connect(
            self.load_fields_to_combo_boxes)
        self.but_removes_from_links.clicked.connect(self.removes_fields)
        # For adding skims
        self.but_load.clicked.connect(self.load_from_aequilibrae_format)
        self.but_save_and_use.clicked.connect(self.load_the_vector)
        self.but_import_and_use.clicked.connect(self.load_just_to_use)

        # THIRD, we load layers in the canvas to the combo-boxes
        for layer in all_layers_from_toc():  # We iterate through all layers
            if "wkbType" in dir(layer):
                if layer.wkbType() in [100] + point_types + poly_types:
                    self.cob_data_layer.addItem(layer.name())

        if not self.single_use:
            self.radio_layer_matrix.setChecked(True)
            self.radio_aequilibrae.setEnabled(False)
            self.but_import_and_use.setEnabled(False)
            self.but_load.setEnabled(False)
            self.but_save_and_use.setText("Import")

        # BUG FIX: this used to pass a (truthy) functools.partial object as
        # the ``final`` flag, which wrongly selected the final-size branch
        # during startup.  Initial sizing must use final=False.
        self.size_it_accordingly(False)

    def set_tables_with_fields(self):
        """Rebuilds the two field tables from the selected/ignored lists."""
        self.size_it_accordingly(False)

        # "Use all fields" resets any manual selection
        if self.chb_all_fields.isChecked() and self.layer is not None:
            self.ignore_fields = []
            self.selected_fields = [
                x.name() for x in self.layer.dataProvider().fields().toList()
            ]

        for table in [self.table_all_fields, self.table_fields_to_import]:
            table.setRowCount(0)
            table.clearContents()
        if self.layer is not None:
            comb = [(self.table_fields_to_import, self.selected_fields),
                    (self.table_all_fields, self.ignore_fields)]
            for table, fields in comb:
                for field in fields:
                    table.setRowCount(table.rowCount() + 1)
                    item1 = QTableWidgetItem(field)
                    item1.setFlags(Qt.ItemIsEnabled | Qt.ItemIsSelectable)
                    table.setItem(table.rowCount() - 1, 0, item1)

    def size_it_accordingly(self, final=False):
        """Resizes the dialog to fit the widgets relevant to the current mode."""
        def set_size(w, h):
            self.setMaximumSize(QtCore.QSize(w, h))
            self.resize(w, h)

        if self.radio_aequilibrae.isChecked():
            set_size(154, 100)
        else:
            if final:
                if self.radio_layer_matrix.isChecked():
                    if self.chb_all_fields.isChecked():
                        set_size(498, 120)
                    self.progressbar.setMinimumHeight(100)
                else:
                    set_size(498, 410)
                    self.progressbar.setMinimumHeight(390)
            else:
                if self.chb_all_fields.isChecked():
                    set_size(449, 120)
                else:
                    set_size(449, 410)

    def removes_fields(self):
        """Moves the selected rows from the import list to the ignore list."""
        for i in self.table_fields_to_import.selectedRanges():
            # BUG FIX: xrange does not exist in Python 3
            old_fields = [
                self.table_fields_to_import.item(row, 0).text()
                for row in range(i.topRow(),
                                 i.bottomRow() + 1)
            ]

            self.ignore_fields.extend(old_fields)
            self.selected_fields = [
                x for x in self.selected_fields if x not in old_fields
            ]

        self.set_tables_with_fields()

    def append_to_list(self):
        """Moves the selected rows from the ignore list to the import list."""
        for i in self.table_all_fields.selectedRanges():
            # BUG FIX: xrange does not exist in Python 3
            new_fields = [
                self.table_all_fields.item(row, 0).text()
                for row in range(i.topRow(),
                                 i.bottomRow() + 1)
            ]

            self.selected_fields.extend(new_fields)
            self.ignore_fields = [
                x for x in self.ignore_fields if x not in new_fields
            ]

        self.set_tables_with_fields()

    def load_fields_to_combo_boxes(self):
        """Refreshes the index-field combo when the layer selection changes."""
        self.cob_index_field.clear()

        all_fields = []
        if self.cob_data_layer.currentIndex() >= 0:
            self.ignore_fields = []
            self.layer = get_vector_layer_by_name(
                self.cob_data_layer.currentText())
            self.selected_fields = [
                x.name() for x in self.layer.dataProvider().fields().toList()
            ]
            for field in self.layer.dataProvider().fields().toList():
                # Only integer fields can serve as the dataset index
                if field.type() in integer_types:
                    self.cob_index_field.addItem(field.name())
                    all_fields.append(field.name())
                if field.type() in float_types:
                    all_fields.append(field.name())
        self.set_tables_with_fields()

    def run_thread(self):
        """Connects the worker's signals and runs the import modally."""
        self.worker_thread.ProgressValue.connect(
            self.progress_value_from_thread)
        self.worker_thread.ProgressMaxValue.connect(
            self.progress_range_from_thread)
        self.worker_thread.finished_threaded_procedure.connect(
            self.finished_threaded_procedure)

        # Lock the UI while the worker is running
        self.chb_all_fields.setEnabled(False)
        self.but_load.setEnabled(False)
        self.but_save_and_use.setEnabled(False)
        self.worker_thread.start()
        self.exec_()

    # VAL and VALUE have the following structure: (bar/text ID, value)
    def progress_range_from_thread(self, val):
        self.progressbar.setRange(0, val)

    def progress_value_from_thread(self, val):
        self.progressbar.setValue(val)

    def finished_threaded_procedure(self, param):
        """Collects the worker's output (or error) and closes the dialog."""
        self.but_load.setEnabled(True)
        self.but_save_and_use.setEnabled(True)
        self.chb_all_fields.setEnabled(True)
        if self.worker_thread.error is not None:
            qgis.utils.iface.messageBar().pushMessage(
                "Error while loading vector:",
                self.worker_thread.error,
                level=1)
        else:
            self.dataset = self.worker_thread.output
        self.exit_procedure()

    def load_from_aequilibrae_format(self):
        """Loads an existing *.aed dataset chosen by the user."""
        out_name, _ = GetOutputFileName(self, "AequilibraE dataset",
                                        ["Aequilibrae dataset(*.aed)"], ".aed",
                                        self.path)
        try:
            self.dataset = AequilibraeData()
            self.dataset.load(out_name)
        # BUG FIX: was a bare except, which also swallowed SystemExit and
        # KeyboardInterrupt
        except Exception:
            self.error = "Could not load file. It might be corrupted or might not be a valid AequilibraE file"
        self.exit_procedure()

    def load_the_vector(self):
        """Validates the UI choices and launches the import worker."""
        if self.single_use:
            self.output_name = None
        else:
            self.error = None
            self.output_name, _ = GetOutputFileName(
                self, "AequilibraE dataset", ["Aequilibrae dataset(*.aed)"],
                ".aed", self.path)
            if self.output_name is None:
                self.error = "No name provided for the output file"

        if self.radio_layer_matrix.isChecked() and self.error is None:
            if (self.cob_data_layer.currentIndex() < 0
                    or self.cob_index_field.currentIndex() < 0):
                self.error = "Invalid field chosen"

            # The index field itself is never imported as data
            index_field = self.cob_index_field.currentText()
            if index_field in self.selected_fields:
                self.selected_fields.remove(index_field)

            # BUG FIX: the worker used to be launched even after the
            # "Invalid field chosen" error above had been detected
            if self.error is None and len(self.selected_fields) > 0:
                self.worker_thread = LoadDataset(
                    qgis.utils.iface.mainWindow(),
                    layer=self.layer,
                    index_field=index_field,
                    fields=self.selected_fields,
                    file_name=self.output_name,
                )
                self.size_it_accordingly(True)
                self.run_thread()
            elif self.error is None:
                qgis.utils.iface.messageBar().pushMessage(
                    "Error:",
                    "One cannot load a dataset with indices only",
                    level=1)
        if self.error is not None:
            qgis.utils.iface.messageBar().pushMessage("Error:",
                                                      self.error,
                                                      level=1)

    def load_just_to_use(self):
        """Single-use import: keep the dataset in memory only (no file)."""
        self.single_use = True
        self.load_the_vector()

    def exit_procedure(self):
        self.close()
Example #9
0
    def results(self) -> pd.DataFrame:
        """Prepares the assignment results as a Pandas DataFrame

        Returns:
            *DataFrame* (:obj:`pd.DataFrame`): Pandas dataframe with all the assignment results indexed on link_id
        """

        # Positions of the supernetwork links, used to slice the global arrays
        idx = self.classes[0].graph.graph.__supernet_id__
        # One AequilibraeData of flows for each traffic class
        assig_results = [cls.results.get_load_results() for cls in self.classes]

        class1 = self.classes[0]
        res1 = assig_results[0]

        # Network-level quantities sliced into supernet link order
        tot_flow = self.assignment.fw_total_flow[idx]
        voc = tot_flow / self.capacity[idx]
        congested_time = self.congested_time[idx]
        free_flow_tt = self.free_flow_tt[idx]

        # Aggregate (class-independent) indicators: one record per link
        entries = res1.data.shape[0]
        fields = [
            "Congested_Time_AB",
            "Congested_Time_BA",
            "Congested_Time_Max",
            "Delay_factor_AB",
            "Delay_factor_BA",
            "Delay_factor_Max",
            "VOC_AB",
            "VOC_BA",
            "VOC_max",
            "PCE_AB",
            "PCE_BA",
            "PCE_tot",
        ]

        types = [np.float64] * len(fields)
        agg = AequilibraeData()
        agg.create_empty(memory_mode=True, entries=entries, field_names=fields, data_types=types)
        agg.data.fill(np.nan)
        # Align the aggregate dataset with the per-class results' link index
        agg.index[:] = res1.data.index[:]

        link_ids = class1.results.lids
        # Direction masks: positive = AB, negative = BA
        ABs = class1.results.direcs > 0
        BAs = class1.results.direcs < 0

        # Lookup table: link ID -> row position in the aggregate dataset
        indexing = np.zeros(int(link_ids.max()) + 1, np.uint64)
        indexing[agg.index[:]] = np.arange(entries)

        # Indices of links BA and AB
        ab_ids = indexing[link_ids[ABs]]
        ba_ids = indexing[link_ids[BAs]]

        agg.data["Congested_Time_AB"][ab_ids] = np.nan_to_num(congested_time[ABs])
        agg.data["Congested_Time_BA"][ba_ids] = np.nan_to_num(congested_time[BAs])
        agg.data["Congested_Time_Max"][:] = np.nanmax([agg.data.Congested_Time_AB, agg.data.Congested_Time_BA], axis=0)

        # Delay factor = congested travel time / free-flow travel time
        agg.data["Delay_factor_AB"][ab_ids] = np.nan_to_num(congested_time[ABs] / free_flow_tt[ABs])
        agg.data["Delay_factor_BA"][ba_ids] = np.nan_to_num(congested_time[BAs] / free_flow_tt[BAs])
        agg.data["Delay_factor_Max"][:] = np.nanmax([agg.data.Delay_factor_AB, agg.data.Delay_factor_BA], axis=0)

        agg.data["VOC_AB"][ab_ids] = np.nan_to_num(voc[ABs])
        agg.data["VOC_BA"][ba_ids] = np.nan_to_num(voc[BAs])
        agg.data["VOC_max"][:] = np.nanmax([agg.data.VOC_AB, agg.data.VOC_BA], axis=0)

        agg.data["PCE_AB"][ab_ids] = np.nan_to_num(tot_flow[ABs])
        agg.data["PCE_BA"][ba_ids] = np.nan_to_num(tot_flow[BAs])
        agg.data["PCE_tot"][:] = np.nansum([agg.data.PCE_AB, agg.data.PCE_BA], axis=0)

        # Concatenate the per-class blocks and the aggregate block on link_id
        assig_results.append(agg)

        dfs = [pd.DataFrame(aed.data) for aed in assig_results]
        dfs = [df.rename(columns={"index": "link_id"}).set_index("link_id") for df in dfs]
        df = pd.concat(dfs, axis=1)

        return df
Example #10
0
from unittest import TestCase
from aequilibrae.matrix import AequilibraeData, AequilibraeMatrix
from aequilibrae.distribution import SyntheticGravityModel, GravityApplication
import numpy as np
import tempfile
import os

zones = 10

# row vector
# Production (row) totals: index 100..109, values = index plus random noise
args = {"entries": zones, "field_names": [u"rows"], "data_types": [np.float64], "memory_mode": True}

row_vector = AequilibraeData()
row_vector.create_empty(**args)
row_vector.index[:] = np.arange(row_vector.entries) + 100
row_vector.rows[:] = row_vector.index[:] + np.random.rand(zones)[:]

# column vector
# Attraction (column) totals built the same way, reusing the same spec
args["field_names"] = ["columns"]
column_vector = AequilibraeData()
column_vector.create_empty(**args)
column_vector.index[:] = np.arange(column_vector.entries) + 100
column_vector.columns[:] = column_vector.index[:] + np.random.rand(zones)[:]

# balance vectors
# Scale columns so both vectors sum to the same total (required for gravity)
column_vector.columns[:] = column_vector.columns[:] * (row_vector.rows.sum() / column_vector.columns.sum())

# Impedance matrix_procedures
name_test = os.path.join(tempfile.gettempdir(), "aequilibrae_matrix_test.aem")

# Spec for the impedance matrix created by the tests below
args = {"file_name": name_test, "zones": zones, "matrix_names": ["impedance"]}
Example #11
0
# %%

# We compute the vectors from our matrix
# Row sums = trip origins per zone; column sums = destinations per zone
origins = np.sum(demand.matrix_view, axis=1)
destinations = np.sum(demand.matrix_view, axis=0)

# Disk-backed dataset holding the future-year origin/destination vectors
args = {
    "file_path": join(fldr, "synthetic_future_vector.aed"),
    "entries": demand.zones,
    "field_names": ["origins", "destinations"],
    "data_types": [np.float64, np.float64],
    "memory_mode": False,
}

vectors = AequilibraeData()
vectors.create_empty(**args)

# Keep the same zone index as the demand matrix
vectors.index[:] = demand.index[:]

# Then grow them with some random growth between 0 and 10% - Plus balance them
vectors.origins[:] = origins * (1 + np.random.rand(vectors.entries) / 10)
vectors.destinations[:] = destinations * (1 +
                                          np.random.rand(vectors.entries) / 10)
# Rescale destinations so both vectors sum to the same total
vectors.destinations *= vectors.origins.sum() / vectors.destinations.sum()

# %%

# Impedance
imped = proj_matrices.get_matrix("base_year_assignment_skims")
imped.computational_view(["final_time_with_intrazonals"])
Example #12
0
import os
import tempfile
from unittest import TestCase

import numpy as np

from aequilibrae.matrix import AequilibraeData

# Temporary file name for the disk-backed dataset used by the tests below
file_path = AequilibraeData().random_name()
# Shared creation spec: 100 records with one float64, one float32, one int8 field
args = {
    "file_path": file_path,
    "entries": 100,
    "field_names": ["d", "data2", "data3"],
    "data_types": [np.float64, np.float32, np.int8],
}


class TestAequilibraEData(TestCase):
    """Exercises AequilibraeData creation from the module-level ``args``."""

    def test___init__(self):
        """Creates a dataset and checks index/data round-trip."""
        ds = AequilibraeData()
        ds.create_empty(**args)

        # Index runs 100..(100 + entries - 1); the data field holds its square
        ds.index[:] = np.arange(ds.entries) + 100
        ds.d[:] = ds.index[:] ** 2
        if ds.index[70] != 170:
            self.fail()

        if int(ds.d[70]) != 28900:
            self.fail()
class LoadDataset(WorkerThread):
    """Worker thread that copies selected fields of a QGIS vector layer into
    an AequilibraE dataset (in memory, or on disk when a file name is given)."""

    def __init__(self, parent_thread, layer, index_field, fields, file_name):
        WorkerThread.__init__(self, parent_thread)
        self.layer = layer
        self.index_field = index_field
        self.fields = fields
        self.error = None
        # 32 or 64, depending on the interpreter's pointer size
        self.python_version = 8 * struct.calcsize("P")
        self.output = AequilibraeData()
        self.output_name = file_name

    def doWork(self):
        """Builds the dataset spec from the layer's fields, then copies every
        feature. Emits progress and a final 'Done' signal; stores a message in
        ``self.error`` instead of raising on unsupported field types."""
        feat_count = self.layer.featureCount()
        self.ProgressMaxValue.emit(feat_count)

        # Create specification for the output file; without a file name the
        # dataset is kept in memory only
        datafile_spec = {
            "entries": feat_count,
            "memory_mode": self.output_name is None,
        }

        fields = []
        types = []
        idxs = []
        empties = []  # per-field placeholder used for NULL attributes
        for field in self.layer.dataProvider().fields().toList():
            if field.name() not in self.fields:
                continue
            if field.type() in integer_types:
                types.append("<i8")
                empties.append(np.iinfo(np.int64).min)
            elif field.type() in float_types:
                types.append("<f8")
                empties.append(np.nan)
            elif field.type() in string_types:
                types.append("S" + str(field.length()))
                empties.append("")
            else:
                # BUG FIX: message used to read "does has a type not supported"
                self.error = "Field {} has a type that is not supported.".format(
                    str(field.name()))
                break
            fields.append(str(field.name()))
            idxs.append(self.layer.dataProvider().fieldNameIndex(
                field.name()))

        index_idx = self.layer.dataProvider().fieldNameIndex(self.index_field)
        datafile_spec["field_names"] = fields
        datafile_spec["data_types"] = types
        datafile_spec["file_path"] = self.output_name

        if self.error is None:
            self.output.create_empty(**datafile_spec)

            # Copy every feature, substituting the placeholder for NULLs
            for p, feat in enumerate(self.layer.getFeatures()):
                for idx, field, empty in zip(idxs, fields, empties):
                    if feat.attributes()[idx] == QVariant():
                        self.output.data[field][p] = empty
                    else:
                        self.output.data[field][p] = feat.attributes()[idx]
                self.output.index[p] = feat.attributes()[index_idx]
                self.ProgressValue.emit(p)

            self.ProgressValue.emit(feat_count)
        self.finished_threaded_procedure.emit("Done")
Example #14
0
    def test_fit(self):
        """Runs IPF on a random seed matrix and checks convergence,
        argument validation, and saving the result to the project."""
        proj = Project()
        proj.open(self.proj_dir)
        mats = proj.matrices
        mats.update_database()
        seed = mats.get_matrix('SiouxFalls_omx')
        seed.computational_view('matrix')
        # row vector
        args = {
            "entries": seed.zones,
            "field_names": ["rows"],
            "data_types": [np.float64],
            "memory_mode": True
        }
        row_vector = AequilibraeData()
        row_vector.create_empty(**args)
        row_vector.rows[:] = np.random.rand(seed.zones)[:] * 1000
        row_vector.index[:] = seed.index[:]
        # column vector
        args["field_names"] = ["columns"]
        column_vector = AequilibraeData()
        column_vector.create_empty(**args)
        column_vector.columns[:] = np.random.rand(seed.zones)[:] * 1000
        column_vector.index[:] = seed.index[:]
        # balance vectors
        # Rescale columns so both marginals sum to the same total
        column_vector.columns[:] = column_vector.columns[:] * (
            row_vector.rows.sum() / column_vector.columns.sum())

        # The IPF per se
        args = {
            "matrix": seed,
            "rows": row_vector,
            "row_field": "rows",
            "columns": column_vector,
            "column_field": "columns",
            "nan_as_zero": False,
        }

        # Invalid constructor arguments must raise TypeError
        with self.assertRaises(TypeError):
            fratar = Ipf(data='test', test='data')
            fratar.fit()

        # Corrupted parameters must raise ValueError
        with self.assertRaises(ValueError):
            fratar = Ipf(**args)
            fratar.parameters = ['test']
            fratar.fit()

        fratar = Ipf(**args)
        fratar.fit()

        result = fratar.output

        # Fitted matrix total must match the row-marginal total
        self.assertAlmostEqual(np.nansum(result.matrix_view),
                               np.nansum(row_vector.data["rows"]), 4,
                               "Ipf did not converge")
        self.assertGreater(fratar.parameters["convergence level"], fratar.gap,
                           "Ipf did not converge")

        mr = fratar.save_to_project('my_matrix_ipf', 'my_matrix_ipf.aem')

        self.assertTrue(
            os.path.isfile(os.path.join(mats.fldr, 'my_matrix_ipf.aem')),
            'Did not save file to the appropriate place')

        self.assertEqual(mr.procedure_id, fratar.procedure_id,
                         'procedure ID saved wrong')
        proj.close()
Example #15
0
# %%

# Zone-level population and employment, ordered to match the zone index
zonal_data = pd.read_sql(
    "Select zone_id, population, employment from zones order by zone_id",
    project.conn)
# We compute the vectors from our matrix

# In-memory dataset holding the synthetic origin/destination vectors
args = {
    "file_path": join(fldr, "synthetic_future_vector.aed"),
    "entries": demand.zones,
    "field_names": ["origins", "destinations"],
    "data_types": [np.float64, np.float64],
    "memory_mode": True,
}

vectors = AequilibraeData()
vectors.create_empty(**args)

vectors.index[:] = zonal_data.zone_id[:]

# We apply a trivial regression-based model and balance the vectors
vectors.origins[:] = zonal_data.population[:] * 2.32
vectors.destinations[:] = zonal_data.employment[:] * 1.87
# Rescale destinations so both vectors sum to the same total
vectors.destinations *= vectors.origins.sum() / vectors.destinations.sum()

# %%

# We simply apply the models to the same impedance matrix now
for function in ["power", "expo"]:
    model = SyntheticGravityModel()
    model.load(join(fldr, f"{function}_model.mod"))