Ejemplo n.º 1
0
    def test_list_reduce(self):
        """Exercise list_reduce() on scalars, nested lists, and DataTables.

        list_reduce is expected to unwrap single-element / singly-nested
        lists down to the simplest equivalent structure.
        """
        # Create data to test
        testListOfLists = [[0, 0.0, "test1"], [-1, -1.0, "test2"],
                           [1, 1.0, "test3"], [-10, -10.0, "test4"],
                           [10, 10.0, "test5"]]
        testTable = DataTable(testListOfLists,
                              columns=['ints', 'floats', 'strings'])

        # List tests: assertEquals is a deprecated alias -- use assertEqual.
        self.assertEqual(list_reduce(4), 4)
        self.assertEqual(list_reduce([4]), 4)
        self.assertEqual(list_reduce([[4]]), 4)
        self.assertEqual(list_reduce([[4, 5]]), [4, 5])
        self.assertEqual(list_reduce([[4], [5]]), [4, 5])

        # Test DataTables: reducing a table should match reducing the raw
        # list-of-lists it was built from, for both columns and rows.
        self.assertEqual(list_reduce(testListOfLists), list_reduce(testTable))
        self.assertEqual(list_reduce(testTable['ints']),
                         list_reduce([value[0] for value in testListOfLists]))
        self.assertEqual(
            list_reduce(testTable['ints']),
            list_reduce([[value[0] for value in testListOfLists]]))
        self.assertEqual(list_reduce(testTable[1]),
                         list_reduce(testListOfLists[0]))
        self.assertEqual(list_reduce(testTable[1]),
                         list_reduce([testListOfLists[0]]))
Ejemplo n.º 2
0
 def writePnrDrivers(self, pnrFileName):
     """
     Writes PNR Auto Trips to DBF with fields:
         ZONE 
         PNR
         TO-DEMAND
         FR-DEMAND
     """
     # Field name -> numpy dtype for the PNR output table.
     self.pnrFields = {"ZONE": 'u4', "PNR": 'u4', "TO": 'f4', "FROM": 'f4'}
     # BUG FIX: numpyFieldTypes previously came from self.trnAsgnFields,
     # while fieldNames came from self.pnrFields -- the names and dtypes
     # were taken from two different dicts and would not line up.  Both
     # must come from pnrFields (same dict, so keys()/values() correspond).
     self.pnrTable = DataTable(fieldNames=self.pnrFields.keys(),
                               numpyFieldTypes=self.pnrFields.values())
Ejemplo n.º 3
0
    def test_getitem(self):
        """Verify DataTable indexing by row, by column, and chained."""
        # Build a small fixture table: three typed columns, five rows.
        rows = [[0, 0.0, "test1"], [-1, -1.0, "test2"], [1, 1.0, "test3"],
                [-10, -10.0, "test4"], [10, 10.0, "test5"]]
        table = DataTable(rows, columns=['ints', 'floats', 'strings'])

        # A table, a row slice, and a column slice each compare equal
        # to themselves; row-then-column equals column-then-row.
        self.assertTrue(table == table)
        self.assertTrue(table[1] == table[1])
        self.assertTrue(table['strings'] == table['strings'])
        self.assertTrue(table['strings'][1] == table[1]['strings'])
Ejemplo n.º 4
0
    def buildAggregateTable(self):
        """Collapse self.trnAsgnTable down to one row per link ("AB" key).

        Sums the additive fields, volumes and period capacity across all
        transit lines on a link, tracks the max LOAD, and combines line
        frequencies (sum of 1/FREQ, reciprocated at the end -- i.e. the
        combined headway of the lines sharing the link).  Result is stored
        in self.aggregateTable, indexed on "AB".
        """
        # first find how big it is
        ABSet = set()
        for row in self.trnAsgnTable:
            ABSet.add(row["AB"])

        self.aggregateTable = DataTable(
            numRecords=len(ABSet),
            fieldNames=self.aggregateFields.keys(),
            numpyFieldTypes=self.aggregateFields.values())
        # Map each "AB" key to its row index in aggregateTable.
        ABtoRowIndex = {}
        rowsUsed = 0
        for row in self.trnAsgnTable:
            if row["AB"] not in ABtoRowIndex:
                # First time this link is seen: allocate the next row and
                # initialize the accumulators to zero.
                rowIndex = rowsUsed
                self.aggregateTable[rowIndex]["AB"] = row["AB"]
                self.aggregateTable[rowIndex]["A"] = row["A"]
                self.aggregateTable[rowIndex]["B"] = row["B"]
                self.aggregateTable[rowIndex]["DIST"] = row["DIST"]
                self.aggregateTable[rowIndex]["FREQ"] = 0.0
                self.aggregateTable[rowIndex]["PERIODCAP"] = 0.0
                self.aggregateTable[rowIndex]["LOAD"] = 0.0
                self.aggregateTable[rowIndex]["MAXLOAD"] = 0.0
                for field in self.trnAsgnAdditiveFields:  # sum
                    self.aggregateTable[rowIndex][field] = 0.0
                ABtoRowIndex[row["AB"]] = rowsUsed
                rowsUsed += 1
            else:
                rowIndex = ABtoRowIndex[row["AB"]]

            # Accumulate this line's contribution onto the link's row.
            for field in self.trnAsgnAdditiveFields:  # sum
                self.aggregateTable[rowIndex][field] += row[field]
            self.aggregateTable[rowIndex]["AB_VOL"] += row["AB_VOL"]
            self.aggregateTable[rowIndex]["BA_VOL"] += row["BA_VOL"]
            self.aggregateTable[rowIndex]["PERIODCAP"] += row["PERIODCAP"]
            self.aggregateTable[rowIndex]["MAXLOAD"] = max(
                row["LOAD"], self.aggregateTable[rowIndex]["MAXLOAD"])
            self.aggregateTable[rowIndex]["FREQ"] += 1 / row[
                "FREQ"]  # combining -- will take reciprocal later

        self.aggregateTable.setIndex(fieldName="AB")

        # Second pass: finish FREQ (reciprocal of summed 1/FREQ) and
        # compute LOAD as total volume over total period capacity.
        count = 0
        for row in self.aggregateTable:
            count += 1
            if row["FREQ"] > 0:
                row["FREQ"] = 1 / row["FREQ"]
            if row["PERIODCAP"] > 0:
                row["LOAD"] = float(row["AB_VOL"]) / row["PERIODCAP"]
                # print row["LOAD"]
        WranglerLogger.debug("count " + str(count) +
                             " lines in aggregate table")
Ejemplo n.º 5
0
    def readTransitAssignmentCsvs(self):
        """
        Read the transit assignment dbfs, the direct output of Cube's transit assignment.

        The first mode's CSV is read twice: once to count the records to
        keep (sizing self.trnAsgnTable) and once to fill the table, pulling
        FREQ/SEQ from a parallel DBF.  Subsequent modes' CSVs are then added
        onto the same rows, matched by a unique "A B NAME [SEQ]" key.
        Finally LOAD is computed per row, and optionally the per-link
        aggregate table is built.
        """
        self.trnAsgnTable = False
        self.aggregateTable = False
        # Tracks line names already warned about (no default system).
        warnline = {}
        ABNameSeq_List = []  # (A,B,NAME,SEQ) from the dbf/csvs

        # open the input assignment files
        for mode in self.MODES:
            # File naming varies by mode: VIS*, NS* (transfer modes whose
            # second char is "T"), or SF* for everything else.
            if mode == "WMWVIS":
                filename = os.path.join(self.assigndir,
                                        "VISWMW" + self.timeperiod + ".csv")
            elif mode[1] == "T":
                filename = os.path.join(self.assigndir,
                                        "NS" + mode + self.timeperiod + ".csv")
            else:
                filename = os.path.join(self.assigndir,
                                        "SF" + mode + self.timeperiod + ".csv")

            # Read the DBF file into datatable
            WranglerLogger.info("Reading " + filename)

            # Create our table data structure once
            if mode == self.MODES[0]:
                # figure out how many records
                numrecs = 0
                totalrows = 0

                filereader = csv.reader(open(filename, 'rb'),
                                        delimiter=',',
                                        quoting=csv.QUOTE_NONE)
                for row in filereader:

                    # header row?
                    if row[0] == "A":
                        self.initializeFields(row)
                        continue
                    elif totalrows == 0 and not self.csvColnames:
                        self.initializeFields()

                    totalrows += 1
                    # Profiling a single node: keep only rows touching it.
                    if self.profileNode and \
                       (int(row[self.colnameToCsvIndex["A"]]) != self.profileNode and
                        int(row[self.colnameToCsvIndex["B"]]) != self.profileNode):
                        continue

                    if int(row[self.colnameToCsvIndex["MODE"]]
                           ) in self.ignoreModes:
                        continue

                    linename = row[self.colnameToCsvIndex["NAME"]].strip()

                    # exclude this system?
                    (system,
                     vehicletype) = self.capacity.getSystemAndVehicleType(
                         linename, self.timeperiod)

                    if len(self.system) > 0 and system not in self.system:
                        continue

                    numrecs += 1
                WranglerLogger.info("Keeping %d records out of %d" %
                                    (numrecs, totalrows))

                self.trnAsgnTable = DataTable(
                    numRecords=numrecs,
                    fieldNames=self.trnAsgnFields.keys(),
                    numpyFieldTypes=self.trnAsgnFields.values())
                ABNameSeqSet = set()

            # Go through the records
            newrownum = 0  # row number in the trnAsgnTable,ABNameSeq_List -- rows we're keeping
            oldrownum = 0  # row number in the csv,dbf -- all input rows

            filereader = csv.reader(open(filename, 'rb'),
                                    delimiter=',',
                                    quoting=csv.QUOTE_NONE)

            # for the first csv only, also read the dbf for the freq and seq fields
            if mode == self.MODES[0]:
                indbf = dbfTableReader(
                    os.path.join(self.assigndir,
                                 "SFWBW" + self.timeperiod + ".dbf"))
            else:
                indbf = None

            for row in filereader:
                # header row?
                if row[0] == "A": continue

                if self.profileNode:
                    if (int(row[self.colnameToCsvIndex["A"]]) !=
                            self.profileNode
                            and int(row[self.colnameToCsvIndex["B"]]) !=
                            self.profileNode):
                        continue
                    elif int(row[self.colnameToCsvIndex["AB_VOL"]]) > 0:
                        WranglerLogger.info(
                            "Link %s %s for mode %s has AB_VOL %s" %
                            (row[self.colnameToCsvIndex["A"]],
                             row[self.colnameToCsvIndex["B"]], mode,
                             row[self.colnameToCsvIndex["AB_VOL"]]))

                if int(row[
                        self.colnameToCsvIndex["MODE"]]) in self.ignoreModes:
                    oldrownum += 1
                    continue

                linename = row[self.colnameToCsvIndex["NAME"]].strip()

                # exclude this system?
                (system, vehicletype) = self.capacity.getSystemAndVehicleType(
                    linename, self.timeperiod)
                # NOTE(review): this continue does not advance oldrownum,
                # unlike the ignoreModes skip above -- if system-excluded rows
                # occur in the first mode's file, the dbf lookup below could
                # desynchronize.  Confirm whether that is possible in practice.
                if len(self.system) > 0 and system not in self.system: continue

                # Initial table fill: Special stuff for the first time through
                if mode == self.MODES[0]:

                    # ------------ these fields just get used directly
                    for field in self.trnAsgnCopyFields:

                        try:

                            # integer fields
                            if self.trnAsgnFields[field][0] in ['u', 'b']:
                                if row[self.colnameToCsvIndex[field]] == "":
                                    self.trnAsgnTable[newrownum][field] = 0
                                elif field in ['TIME', 'DIST']:
                                    # backwards compatibility - dbfs were 100ths of a mile/min
                                    self.trnAsgnTable[newrownum][
                                        field] = float(
                                            row[self.colnameToCsvIndex[field]]
                                        ) * 100.0
                                else:
                                    self.trnAsgnTable[newrownum][field] = int(
                                        row[self.colnameToCsvIndex[field]])
                            # float fields
                            elif self.trnAsgnFields[field][0] == 'f':
                                if row[self.colnameToCsvIndex[field]] == "":
                                    self.trnAsgnTable[newrownum][field] = 0.0
                                else:
                                    self.trnAsgnTable[newrownum][
                                        field] = float(
                                            row[self.colnameToCsvIndex[field]])
                            # text fields
                            else:
                                self.trnAsgnTable[newrownum][field] = row[
                                    self.colnameToCsvIndex[field]]

                        except:
                            WranglerLogger.fatal(
                                "Error intepreting field %s: [%s]" %
                                (field, str(self.colnameToCsvIndex[field])))
                            WranglerLogger.fatal("row=%s" % str(row))
                            WranglerLogger.fatal(sys.exc_info()[0])
                            WranglerLogger.fatal(sys.exc_info()[1])
                            sys.exit(2)
                    # ------------ these fields come from the dbf because they're missing in the csv (sigh)
                    dbfRow = indbf.__getitem__(oldrownum)
                    # Sanity-check dbf/csv alignment on A and B node numbers
                    # (only for nodes < 100000; larger ones presumably don't
                    # fit the dbf field -- TODO confirm).
                    if int(row[self.colnameToCsvIndex["A"]]) < 100000:
                        if dbfRow["A"] != int(
                                row[self.colnameToCsvIndex["A"]]):
                            raise NetworkException(
                                "Assertion error for A on row %d: %s != %s" %
                                (oldrownum, str(dbfRow["A"]),
                                 str(row[self.colnameToCsvIndex["A"]])))
                    if int(row[self.colnameToCsvIndex["B"]]) < 100000:
                        if dbfRow["B"] != int(
                                row[self.colnameToCsvIndex["B"]]):
                            raise NetworkException(
                                "Assertion error for B on row %d: %s != %s" %
                                (oldrownum, str(dbfRow["B"]),
                                 str(row[self.colnameToCsvIndex["B"]])))
                    self.trnAsgnTable[newrownum]["FREQ"] = dbfRow["FREQ"]
                    self.trnAsgnTable[newrownum]["SEQ"] = dbfRow["SEQ"]

                    trySeq = dbfRow["SEQ"]
                    # ------------ special one-time computed fields

                    # ABNameSeq is more complicated because we want it to be unique
                    AB = row[self.colnameToCsvIndex["A"]] + " " + row[
                        self.colnameToCsvIndex["B"]]
                    self.trnAsgnTable[newrownum]["AB"] = AB

                    ABNameSeq = AB + " " + linename
                    if trySeq > 0:
                        tryABNameSeq = ABNameSeq + " " + str(trySeq)

                        # This line seems to be a problem... A/B/NAME/SEQ are not unique
                        if tryABNameSeq in ABNameSeqSet:
                            WranglerLogger.warn("Non-Unique A/B/Name/Seq: " +
                                                tryABNameSeq + "; faking SEQ!")
                        # Find one that works
                        while tryABNameSeq in ABNameSeqSet:
                            trySeq += 1
                            tryABNameSeq = ABNameSeq + " " + str(trySeq)
                        ABNameSeq = tryABNameSeq
                    self.trnAsgnTable[newrownum]["ABNAMESEQ"] = ABNameSeq
                    ABNameSeqSet.add(ABNameSeq)

                    # Remember the key parts so later modes can rebuild the
                    # same (possibly faked) key by row position.
                    ABNameSeq_List.append(
                        (int(row[self.colnameToCsvIndex["A"]]),
                         int(row[self.colnameToCsvIndex["B"]]),
                         row[self.colnameToCsvIndex["NAME"]], trySeq))

                    # ------------ straight lookup FULLNAME, VEHTYPE, VEHCAP; easy calc for PERIODCAP
                    self.trnAsgnTable[newrownum]["SYSTEM"] = system
                    self.trnAsgnTable[newrownum]["VEHTYPE"] = vehicletype

                    self.trnAsgnTable[newrownum][
                        "FULLNAME"] = self.capacity.getFullname(
                            linename, self.timeperiod)

                    try:
                        (vtype,
                         vehcap) = self.capacity.getVehicleTypeAndCapacity(
                             linename, self.timeperiod)

                        # vehicles/period = hours * 60 / headway; capacity is
                        # that times vehicle capacity.
                        self.trnAsgnTable[newrownum]["VEHCAP"] = vehcap
                        self.trnAsgnTable[newrownum][
                            "PERIODCAP"] = TransitLine.HOURS_PER_TIMEPERIOD[
                                self.
                                timeperiod] * 60.0 * vehcap / self.trnAsgnTable[
                                    newrownum]["FREQ"]
                    except:
                        # No capacity info for this line: leave zeros.
                        self.trnAsgnTable[newrownum]["VEHCAP"] = 0
                        self.trnAsgnTable[newrownum]["PERIODCAP"] = 0

                    # if we still don't have a system, warn
                    if self.trnAsgnTable[newrownum][
                            "SYSTEM"] == "" and not warnline.has_key(linename):
                        WranglerLogger.warning("No default system: " +
                                               linename)
                        warnline[linename] = 1

                    #---------add in any grouping that may want to use
                    if self.lineToGroup.has_key(linename):
                        self.trnAsgnTable[newrownum][
                            "GROUP"] = self.lineToGroup[linename]
                    else:
                        self.trnAsgnTable[newrownum]["GROUP"] = ""

                    # initialize additive fields
                    for field in self.trnAsgnAdditiveFields:
                        if row[self.colnameToCsvIndex[field]] == "":
                            self.trnAsgnTable[newrownum][field] = 0.0
                        else:
                            self.trnAsgnTable[newrownum][field] = float(
                                row[self.colnameToCsvIndex[field]])

                # end initial table fill

                # Add in the subsequent assignment files
                else:

                    # print oldrownum, newrownum, ABNameSeq_List[newrownum]
                    # print row[self.colnameToCsvIndex["NAME"]], ABNameSeq_List[oldrownum][2]

                    # Later modes must line up row-for-row with the kept rows
                    # from the first mode.
                    assert (int(row[self.colnameToCsvIndex["A"]]) ==
                            ABNameSeq_List[newrownum][0])
                    assert (int(row[self.colnameToCsvIndex["B"]]) ==
                            ABNameSeq_List[newrownum][1])
                    # these don't nec match, can be *32 in ferry skim rather than the bart vehicle name, for example
                    # assert(  row[self.colnameToCsvIndex["NAME"]] == ABNameSeq_List[newrownum][2])

                    # Rebuild the unique key and add this mode's additive
                    # fields onto the existing row (table indexed by key).
                    ABNameSeq = row[self.colnameToCsvIndex["A"]] + " " + \
                                row[self.colnameToCsvIndex["B"]] + " " + \
                                row[self.colnameToCsvIndex["NAME"]].rstrip()
                    if ABNameSeq_List[newrownum][3] > 0:
                        ABNameSeq += " " + str(ABNameSeq_List[newrownum][3])
                    for field in self.trnAsgnAdditiveFields:
                        if row[self.colnameToCsvIndex[field]] != "":
                            self.trnAsgnTable[ABNameSeq][field] += float(
                                row[self.colnameToCsvIndex[field]])

                newrownum += 1
                oldrownum += 1

            # we're done with this; free it up
            del filereader
            if indbf:
                del indbf

            # Table is created and filled -- set the index
            if mode == self.MODES[0]:
                try:
                    self.trnAsgnTable.setIndex(fieldName="ABNAMESEQ")
                except:
                    # failure - try to figure out why
                    ABNameSeqList = []
                    for row in self.trnAsgnTable:
                        ABNameSeqList.append(row["ABNAMESEQ"])
                    ABNameSeqList.sort()
                    for idx in range(len(ABNameSeqList) - 1):
                        if ABNameSeqList[idx] == ABNameSeqList[idx + 1]:
                            WranglerLogger.warn(
                                "Duplicate ABNAMESEQ at idx %d : [%s]" %
                                (idx, ABNameSeqList[idx]))
                    exit(1)

        # ok the table is all filled in -- fill in the LOAD
        for row in self.trnAsgnTable:
            if row["VEHCAP"] == 0: continue
            tpfactor = self.TIMEPERIOD_FACTOR[self.timeperiod]

            # mode-specific peaking factor will over-ride
            if row["MODE"] in self.TIMEPERIOD_FACTOR:
                tpfactor = self.TIMEPERIOD_FACTOR[row["MODE"]][self.timeperiod]

            # LOAD = peak-factored volume / (vehicles-per-period * capacity)
            row["LOAD"] = row["AB_VOL"] * tpfactor * row["FREQ"] / (
                60.0 * row["VEHCAP"])

        # build the aggregate table for key="A B"
        if self.aggregateAll:
            self.buildAggregateTable()
Ejemplo n.º 6
0
      
            # Additionally outputs transitVehicleVolsOnLink[timeperiod].dbf for assignment to the roadway network,
            # with A,B,TRNVEHVOL attributes. 
            # Accumulate vehicle volumes per directed (A, B) stop pair.
            AB_to_trnvehvol = defaultdict(float)
            for line in net:              
                trnvehvol = line.vehiclesPerPeriod(timeperiod, Network.MODEL_TYPE_TM1)
                if trnvehvol == 0: continue
                
                # Walk consecutive stop pairs along the line; abs() is
                # applied to stop numbers (presumably negative means a
                # non-stop node -- TODO confirm against Network docs).
                prevStop = None
                for stop in line:
                    if prevStop: AB_to_trnvehvol[(prevStop, abs(stop))] += trnvehvol
                    prevStop = abs(stop)
            
            # Write one DBF row per link with its total vehicle volume.
            outTable = DataTable(numRecords=len(AB_to_trnvehvol),
                                 header=(FieldType("A", "N", 7, 0),
                                         FieldType("B", "N", 7, 0),
                                         FieldType("AB","C", 15, 0),
                                         FieldType("TRNVEHVOL", "F", 9, 2)))
            rownum = 0
            for key,val in AB_to_trnvehvol.iteritems():
                outTable[rownum]["A"] = key[0]
                outTable[rownum]["B"] = key[1]
                outTable[rownum]["AB"] = "%d %d" % (key[0], key[1])
                outTable[rownum]["TRNVEHVOL"] = val
                rownum += 1
            outfile = "transitVehicleVolsOnLink%s.dbf" % timeperiod
            outTable.writeAsDbf(outfile)
            WranglerLogger.info("Wrote %s with %d rows" % (outfile, len(AB_to_trnvehvol)))
                    
        exit(0)
Ejemplo n.º 7
0
 def test_nrow(self):
     """nrow() returns the number of rows in a DataTable."""
     testTable = DataTable(
         [[0, 0.0, "test1"], [-1, -1.0, "test2"], [1, 1.0, "test3"],
          [-10, -10.0, "test4"], [10, 10.0, "test5"]],
         columns=['ints', 'floats', 'strings'])
     # assertEquals is a deprecated alias of assertEqual.
     self.assertEqual(nrow(testTable), 5)