Example #1
    def test_nullables(self):
        schema = test_schema + "nullables"
        pdf = PanDatFactory(table_with_stuffs=[["field one"], ["field two"]])
        pdf.set_data_type("table_with_stuffs", "field one")
        pdf.set_data_type("table_with_stuffs",
                          "field two",
                          number_allowed=False,
                          strings_allowed='*',
                          nullable=True)
        tdf = TicDatFactory.create_from_full_schema(
            pdf.schema(include_ancillary_info=True))
        tic_dat = tdf.TicDat(
            table_with_stuffs=[[101, "022"], [202, None], [303, "111"]])
        dat = tdf.copy_to_pandas(tic_dat, drop_pk_columns=False)
        self.assertFalse(tdf.find_data_type_failures(tic_dat))
        self.assertFalse(pdf.find_data_type_failures(dat))

        pdf.pgsql.write_schema(self.engine, schema)
        pdf.pgsql.write_data(dat, self.engine, schema)
        dat_1 = pdf.pgsql.create_pan_dat(self.engine, schema)
        self.assertTrue(
            pdf._same_data(dat, dat_1, nans_are_same_for_data_rows=True))
        tic_dat_1 = tdf.pgsql.create_tic_dat(self.engine, schema)
        self.assertTrue(
            tdf._same_data(tic_dat,
                           tic_dat_1,
                           nans_are_same_for_data_rows=True))
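
A standalone sketch of the nullable behavior the test exercises (the factory names and strict/nullable contrast here are illustrative, not part of the test): with nullable=True a None entry passes find_data_type_failures, while an otherwise identical schema without it is expected to flag that row.

from ticdat import PanDatFactory

nullable_pdf = PanDatFactory(table_with_stuffs=[["field one"], ["field two"]])
nullable_pdf.set_data_type("table_with_stuffs", "field two",
                           number_allowed=False, strings_allowed='*', nullable=True)
strict_pdf = PanDatFactory(table_with_stuffs=[["field one"], ["field two"]])
strict_pdf.set_data_type("table_with_stuffs", "field two",
                         number_allowed=False, strings_allowed='*', nullable=False)

dat = nullable_pdf.PanDat(table_with_stuffs={"field one": [101, 202, 303],
                                             "field two": ["022", None, "111"]})
assert not nullable_pdf.find_data_type_failures(dat)  # None passes when nullable=True
print(strict_pdf.find_data_type_failures(dat))        # the None row is expected to be flagged here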
Example #2
    def testIssue45(self):
        schema = test_schema + "issue45"
        pdf = PanDatFactory(data=[["a"], ["b"]])
        pdf.set_data_type("data",
                          "b",
                          number_allowed=False,
                          strings_allowed='*')
        tdf = TicDatFactory.create_from_full_schema(
            pdf.schema(include_ancillary_info=True))
        tic_dat = tdf.TicDat(data=[[2, "1"], [4, "3"], [44, "022"]])
        dat = tdf.copy_to_pandas(tic_dat, drop_pk_columns=False)
        self.assertFalse(tdf.find_data_type_failures(tic_dat))
        self.assertFalse(pdf.find_data_type_failures(dat))
        pdf.pgsql.write_schema(self.engine,
                               schema,
                               forced_field_types={("data", "a"): "integer"})
        pdf.pgsql.write_data(dat, self.engine, schema)

        def two_checks():
            dat_1 = pdf.pgsql.create_pan_dat(self.engine, schema)
            self.assertTrue(pdf._same_data(dat, dat_1))
            tic_dat_1 = tdf.pgsql.create_tic_dat(self.engine, schema)
            self.assertTrue(tdf._same_data(tic_dat, tic_dat_1))

        two_checks()
        tdf.pgsql.write_data(tic_dat, self.engine, schema)
        two_checks()
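
A short sketch of the string-only typing used above, with made-up rows: because number_allowed=False on field "b", a numeric entry in that column is expected to show up in find_data_type_failures.

from ticdat import PanDatFactory

pdf = PanDatFactory(data=[["a"], ["b"]])
pdf.set_data_type("data", "b", number_allowed=False, strings_allowed='*')

good = pdf.PanDat(data={"a": [2, 4, 44], "b": ["1", "3", "022"]})
bad = pdf.PanDat(data={"a": [2, 4], "b": ["1", 3]})  # 3 is a number, and "b" only allows strings

assert not pdf.find_data_type_failures(good)
print(pdf.find_data_type_failures(bad))  # expected to report a failure keyed by ("data", "b")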
Example #3
    def testNullsPd(self):
        pdf = PanDatFactory(table=[[], ["field one", "field two"]])
        for f in ["field one", "field two"]:
            pdf.set_data_type("table", f, nullable=True)
        dat = pdf.PanDat(
            table={
                "field one": [None, 200, 0, 300, 400],
                "field two": [100, 109, 300, None, 0]
            })
        schema = test_schema + "_bool_defaults_pd"
        pdf.pgsql.write_schema(self.engine,
                               schema,
                               include_ancillary_info=False)
        pdf.pgsql.write_data(dat, self.engine, schema)

        dat_1 = pdf.pgsql.create_pan_dat(self.engine, schema)
        self.assertTrue(
            pdf._same_data(dat, dat_1, nans_are_same_for_data_rows=True))

        pdf = PanDatFactory(table=[["field one"], ["field two"]])
        for f in ["field one", "field two"]:
            pdf.set_data_type("table", f, max=float("inf"), inclusive_max=True)
        pdf.set_infinity_io_flag(None)
        dat_inf = pdf.PanDat(
            table={
                "field one": [float("inf"), 200, 0, 300, 400],
                "field two": [100, 109, 300, float("inf"), 0]
            })
        # re-read the data written above: with infinity_io_flag None and an inclusive
        # max of inf, the NULLs stored earlier come back as +inf
        dat_1 = pdf.pgsql.create_pan_dat(self.engine, schema)

        self.assertTrue(pdf._same_data(dat_inf, dat_1))
        pdf.pgsql.write_data(dat_inf, self.engine, schema)
        dat_1 = pdf.pgsql.create_pan_dat(self.engine, schema)
        self.assertTrue(pdf._same_data(dat_inf, dat_1))

        pdf = PanDatFactory(table=[["field one"], ["field two"]])
        for f in ["field one", "field two"]:
            pdf.set_data_type("table",
                              f,
                              min=-float("inf"),
                              inclusive_min=True)
        pdf.set_infinity_io_flag(None)
        # same stored data, but with an inclusive min of -inf the infinity flag now
        # reads NULLs as -inf, so the +inf version no longer matches
        dat_1 = pdf.pgsql.create_pan_dat(self.engine, schema)
        self.assertFalse(pdf._same_data(dat_inf, dat_1))
        dat_inf = pdf.PanDat(
            table={
                "field one": [-float("inf"), 200, 0, 300, 400],
                "field two": [100, 109, 300, -float("inf"), 0]
            })
        self.assertTrue(pdf._same_data(dat_inf, dat_1))
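
A small in-memory sketch (illustrative field names) of the inclusive_max distinction this test relies on: float("inf") passes the data type check only when the infinite upper bound is marked inclusive.

from ticdat import PanDatFactory

inclusive = PanDatFactory(table=[["field one"], ["field two"]])
inclusive.set_data_type("table", "field two", max=float("inf"), inclusive_max=True)
exclusive = PanDatFactory(table=[["field one"], ["field two"]])
exclusive.set_data_type("table", "field two", max=float("inf"), inclusive_max=False)

dat = inclusive.PanDat(table={"field one": [1, 2], "field two": [100, float("inf")]})
assert not inclusive.find_data_type_failures(dat)  # inf is allowed when the bound is inclusive
print(exclusive.find_data_type_failures(dat))      # the inf row is expected to be flagged here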
Example #4
# Define the foreign key relationships
input_schema.add_foreign_key("arcs", "nodes", ['Source', 'Name'])
input_schema.add_foreign_key("arcs", "nodes", ['Destination', 'Name'])
input_schema.add_foreign_key(
    "cost", "arcs", [['Source', 'Source'], ['Destination', 'Destination']])
input_schema.add_foreign_key("cost", "commodities", ['Commodity', 'Name'])
input_schema.add_foreign_key("demand", "commodities", ['Commodity', 'Name'])
input_schema.add_foreign_key("demand", "nodes", ['Node', 'Name'])
input_schema.add_foreign_key("supply", "commodities", ['Commodity', 'Name'])
input_schema.add_foreign_key("supply", "nodes", ['Node', 'Name'])

# Define the data types
input_schema.set_data_type("commodities",
                           "Volume",
                           min=0,
                           max=float("inf"),
                           inclusive_min=False,
                           inclusive_max=False)
input_schema.set_data_type("arcs",
                           "Capacity",
                           min=0,
                           max=float("inf"),
                           inclusive_min=True,
                           inclusive_max=True)
input_schema.set_data_type("cost",
                           "Cost",
                           min=0,
                           max=float("inf"),
                           inclusive_min=True,
                           inclusive_max=False)
input_schema.set_data_type("demand",
Example #5
File: iris.py Project: nandi6uc/ticdat
from ticdat import PanDatFactory, standard_main
from sklearn.preprocessing import scale
from sklearn.cluster import KMeans

# ------------------------ define the input schema --------------------------------
_core_numeric_fields = [
    'Sepal Length', 'Sepal Width', 'Petal Length', 'Petal Width'
]
input_schema = PanDatFactory(parameters=[['Name'], ['Value']],
                             iris=[[], _core_numeric_fields + ['Species']])

# the core data fields should be positive, non-infinite numbers
for fld in _core_numeric_fields:
    input_schema.set_data_type("iris",
                               fld,
                               inclusive_min=False,
                               inclusive_max=False,
                               min=0,
                               max=float("inf"))
input_schema.set_data_type("iris",
                           'Species',
                           number_allowed=False,
                           strings_allowed='*')

# the number of clusters is our only parameter, but using a parameters table makes it easy to add more as needed
input_schema.add_parameter("Number of Clusters",
                           default_value=4,
                           inclusive_min=False,
                           inclusive_max=False,
                           min=0,
                           max=float("inf"),
                           must_be_int=True)
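
A hedged sketch of how a solve routine for this schema might read the Number of Clusters parameter and run KMeans. It assumes create_full_parameters_dict (which merges the parameters table with the registered defaults, as in other ticdat examples) is available on the factory; that call and random_state=0 are choices of this sketch, not taken from the original iris.py.

def solve(dat):
    assert input_schema.good_pan_dat_object(dat)
    assert not input_schema.find_data_type_failures(dat)
    assert not input_schema.find_data_row_failures(dat)
    # merge dat.parameters with the registered defaults (defaults to 4 clusters)
    full_parameters = input_schema.create_full_parameters_dict(dat)
    fit_data = scale(dat.iris[_core_numeric_fields])
    model = KMeans(n_clusters=int(full_parameters["Number of Clusters"]), random_state=0)
    labels = model.fit_predict(fit_data)
    # the real iris.py presumably packages these labels into a solution-schema PanDat
    return labels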
Example #6
from ticdat import PanDatFactory

try:  # if you don't have amplpy installed, the code will still load and then fail on solve
    from amplpy import AMPL
except ImportError:
    AMPL = None
# ------------------------ define the input schema --------------------------------
input_schema = PanDatFactory(
    workers=[["Name"], ["Payment"]],
    shifts=[["Name"], ["Requirement"]],
    availability=[["Worker", "Shift"], []]
)
# Define the foreign key relationships
input_schema.add_foreign_key("availability", "workers", ['Worker', 'Name'])
input_schema.add_foreign_key("availability", "shifts", ['Shift', 'Name'])

# Define the data types
input_schema.set_data_type("workers", "Payment", min=0, max=float("inf"),
                           inclusive_min=True, inclusive_max=True)
input_schema.set_data_type("shifts", "Requirement", min=0, max=float("inf"),
                           inclusive_min=True, inclusive_max=True)
# ---------------------------------------------------------------------------------

# ------------------------ define the output schema -------------------------------
solution_schema = PanDatFactory(
    assignments=[["Worker", "Shift"], []],
    slacks=[["Shift"], ["Slack"]],
    total_shifts=[["Worker"], ["Total Number Of Shifts"]],
    parameters=[["Parameter"], ["Value"]]
)
# ---------------------------------------------------------------------------------

# ------------------------ solving section-----------------------------------------
def solve(dat):
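    # A hedged sketch of how the (elided) body of a ticdat solve function typically
    # begins; the assertion pattern mirrors other examples on this page, and the AMPL
    # guard matches the try/except import above. The model-building itself is omitted.
    assert AMPL is not None, "amplpy is not installed"
    assert input_schema.good_pan_dat_object(dat), "bad dat check"
    assert not input_schema.find_duplicates(dat), "duplicate row check"
    assert not input_schema.find_foreign_key_failures(dat), "foreign key check"
    assert not input_schema.find_data_type_failures(dat), "data type value check"
    # ... build the AMPL model from dat, solve, and return a solution_schema.PanDat ...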
Example #7
                           inclusive_min=True,
                           inclusive_max=True,
                           min=0,
                           max=float("inf"),
                           must_be_int=False)
input_schema.add_parameter("Objective",
                           "Minimize Average Service Distance",
                           strings_allowed=[
                               "Minimize Average Service Distance",
                               "Maximize Percent High Service Demand",
                               "Minimize Total Cost"
                           ],
                           number_allowed=False)
input_schema.set_data_type("warehouses",
                           "Fixed Cost",
                           min=0,
                           max=float("inf"),
                           inclusive_min=True,
                           inclusive_max=False)
input_schema.set_data_type("warehouses",
                           "Max Assignment Capacity",
                           min=0,
                           max=float("inf"),
                           inclusive_min=True,
                           inclusive_max=True)
input_schema.set_default_value("warehouses", "Max Assignment Capacity",
                               float("inf"))
input_schema.set_data_type("products",
                           "Warehouse Volume",
                           min=0,
                           max=float("inf"),
                           inclusive_min=False,
Example #8
# solution to metrorail_solution_data.json.

# this version of the file uses amplpy and Gurobi
from amplpy import AMPL
from ticdat import PanDatFactory, standard_main
from itertools import product
from pandas import DataFrame

# ------------------------ define the input schema --------------------------------
input_schema = PanDatFactory(
    parameters=[["Parameter"], ["Value"]],
    load_amounts=[["Amount"], []],
    number_of_one_way_trips=[["Number"], []],
    amount_leftover=[["Amount"], []])

input_schema.set_data_type("load_amounts", "Amount", min=0, max=float("inf"),
                           inclusive_min=False, inclusive_max=False)

input_schema.set_data_type("number_of_one_way_trips", "Number", min=0, max=float("inf"),
                           inclusive_min=False, inclusive_max=False, must_be_int=True)

input_schema.set_data_type("amount_leftover", "Amount", min=0, max=float("inf"),
                           inclusive_min=True, inclusive_max=False)


default_parameters = {"One Way Price": 2.25, "Amount Leftover Constraint": "Upper Bound"}
def _good_parameter_key_value(key, value):
    if key == "One Way Price":
        try:
            return 0 < value < float("inf")
        except:
            return False
Example #9
from ticdat import PanDatFactory
input_schema = PanDatFactory(cities=[["Name"],["Demand"]],
                             distances=[["Source", "Destination"], ["Distance"]],
                             parameters=[["Parameter"], ["Value"]])

input_schema.add_parameter("Number of Centroids", default_value=4, inclusive_min=False, inclusive_max=False, min=0,
                            max=float("inf"), must_be_int=True)
input_schema.set_data_type("cities", "Demand", min=0, max=float("inf"), inclusive_min=True, inclusive_max=False)
input_schema.set_data_type("distances", "Distance", min=0, max=float("inf"), inclusive_min=True, inclusive_max=False)
input_schema.add_foreign_key("distances", "cities", ['Source', 'Name'])
input_schema.add_foreign_key("distances", "cities", ['Destination', 'Name'])

# The distance matrix is bi-directionally safe. I.e. if the same source/dest and dest/source exist then the
# distances must match. If only one is present, it can fall back to the other in the code.
def _distance_matrix(dat):
    return {"distance_matrix": {tuple(row[:2]): row[2] for row in dat.distances.itertuples(index=False)}}
input_schema.add_data_row_predicate("distances", predicate_name="Check Bi-Directionally Safe",
    predicate=lambda row, distance_matrix: ((row["Destination"], row["Source"]) not in distance_matrix) or
                                            (row["Distance"] == distance_matrix[row["Destination"], row["Source"]]),
    predicate_kwargs_maker=_distance_matrix)
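
# An illustrative check of the predicate above (hypothetical city names, not from the
# original file): the A->B and B->A distances disagree, so find_data_row_failures is
# expected to flag both distance rows.
_example_dat = input_schema.PanDat(
    cities={"Name": ["A", "B"], "Demand": [10.0, 20.0]},
    distances={"Source": ["A", "B"], "Destination": ["B", "A"], "Distance": [5.0, 7.0]})
print(input_schema.find_data_row_failures(_example_dat))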

solution_schema = PanDatFactory(openings=[['City'],[]], assignments=[['City', 'Assigned To'],[]],
                                parameters=[["Parameter"], ["Value"]])

def solve(dat):
    assert input_schema.good_pan_dat_object(dat), "bad dat check"
    assert not input_schema.find_duplicates(dat), "duplicate row check"
    assert not input_schema.find_foreign_key_failures(dat), "foreign key check"
    assert not input_schema.find_data_type_failures(dat), "data type value check"
    assert not input_schema.find_data_row_failures(dat), "data row check"
Example #10
                                                      'Max Num Starters',
                                                      'Min Num Reserve',
                                                      'Max Num Reserve',
                                                      'Flex Status'
                                                  ]],
                             my_draft_positions=[['Draft Position'], []])

# add foreign key constraints (optional, but helps with preventing garbage-in, garbage-out)
input_schema.add_foreign_key("players", "roster_requirements",
                             ['Position', 'Position'])

# set data types (optional, but helps with preventing garbage-in, garbage-out)
input_schema.set_data_type("parameters",
                           "Parameter",
                           number_allowed=False,
                           strings_allowed=[
                               "Starter Weight", "Reserve Weight",
                               "Maximum Number of Flex Starters"
                           ])
input_schema.set_data_type("parameters",
                           "Value",
                           min=0,
                           max=float("inf"),
                           inclusive_min=True,
                           inclusive_max=False)
input_schema.set_data_type("players",
                           "Average Draft Position",
                           min=0,
                           max=float("inf"),
                           inclusive_min=False,
                           inclusive_max=False)
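
A brief sketch with made-up parameter rows showing how the strings_allowed list above guards the parameters table: a misspelled Parameter name is expected to appear in find_data_type_failures.

_example_dat = input_schema.PanDat(
    parameters={"Parameter": ["Starter Weight", "Resrve Weight"], "Value": [1.0, 0.5]})
print(input_schema.find_data_type_failures(_example_dat))  # the misspelled "Resrve Weight" should be flagged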
Example #11
from ticdat import PanDatFactory

# There are three input tables, with 4 primary key fields and 4 data fields.
input_schema = PanDatFactory(categories=[["Name"],
                                         ["Min Nutrition", "Max Nutrition"]],
                             foods=[["Name"], ["Cost"]],
                             nutrition_quantities=[["Food", "Category"],
                                                   ["Quantity"]])

# Define the foreign key relationships
input_schema.add_foreign_key("nutrition_quantities", "foods", ["Food", "Name"])
input_schema.add_foreign_key("nutrition_quantities", "categories",
                             ["Category", "Name"])

# Define the data types
input_schema.set_data_type("categories",
                           "Min Nutrition",
                           min=0,
                           max=float("inf"),
                           inclusive_min=True,
                           inclusive_max=False)
input_schema.set_data_type("categories",
                           "Max Nutrition",
                           min=0,
                           max=float("inf"),
                           inclusive_min=True,
                           inclusive_max=True)
input_schema.set_data_type("foods",
                           "Cost",
                           min=0,
                           max=float("inf"),
                           inclusive_min=True,
                           inclusive_max=False)
input_schema.set_data_type("nutrition_quantities",
Example #12
# Demonstrates reading/writing datetime (here, specifically pandas.Timestamp) data
# to and from .csv files.
#
# Command line interface works like this
#    python simple_datetime_solver.py -i sample_data -o solution_directory
#
# This is a very simple app that demos datetime functionality that might be useful for a routing application.
# A parameter defines the start of the model, and each order has a "Deliver By" time requirement. The solution
# (which is just diagnostic information) is the time elapsed (in days) between the start time of the model and the
# "Delvery By" time for each order

from ticdat import PanDatFactory, standard_main
# ------------------------ define the input schema --------------------------------
input_schema = PanDatFactory(parameters=[["Name"], ["Value"]],
                             orders=[["Name"], ["Deliver By"]])
input_schema.set_data_type("orders", "Deliver By", datetime=True)
input_schema.add_parameter("Start Of Model", "Jan 1 2019 8 AM", datetime=True)
# ---------------------------------------------------------------------------------
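
# A hedged sketch (not from the original file) of the day arithmetic the header comment
# describes: with datetime=True the "Deliver By" values are pandas Timestamps, and the
# elapsed time in days can be computed with a Timedelta division.
import pandas as pd
_start = pd.Timestamp("Jan 1 2019 8 AM")
_deliver_by = pd.Timestamp("Jan 3 2019 8 PM")
_days_elapsed = (_deliver_by - _start) / pd.Timedelta(days=1)  # 2.5 days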

# ------------------------ define the output schema -------------------------------
solution_schema = PanDatFactory(
    time_to_deliver=[["Name"], ["Maximum Time To Deliver"]])

# ---------------------------------------------------------------------------------


# ------------------------ create a solve function --------------------------------
def solve(dat):
    assert input_schema.good_pan_dat_object(dat)
    assert not input_schema.find_duplicates(dat)
    assert not input_schema.find_data_type_failures(
Example #13
# solution to metrorail_solution_data.json.

# this version of the file uses amplpy and Gurobi
from amplpy import AMPL
from ticdat import PanDatFactory, standard_main
from itertools import product
from pandas import DataFrame

# ------------------------ define the input schema --------------------------------
input_schema = PanDatFactory(
    parameters=[["Parameter"], ["Value"]],
    load_amounts=[["Amount"], []],
    number_of_one_way_trips=[["Number"], []],
    amount_leftover=[["Amount"], []])

input_schema.set_data_type("load_amounts", "Amount", min=0, max=float("inf"),
                           inclusive_min=False, inclusive_max=False)

input_schema.set_data_type("number_of_one_way_trips", "Number", min=0, max=float("inf"),
                           inclusive_min=False, inclusive_max=False, must_be_int=True)

input_schema.set_data_type("amount_leftover", "Amount", min=0, max=float("inf"),
                           inclusive_min=True, inclusive_max=False)

input_schema.add_parameter("One Way Price", default_value=2.25, min=0, max=float("inf"), inclusive_min=True,
                           inclusive_max=False)
input_schema.add_parameter("Amount Leftover Constraint", default_value="Upper Bound", number_allowed=False,
                           strings_allowed=["Equality", "Upper Bound", "Upper Bound With Leftover Multiple Rule"])
# ---------------------------------------------------------------------------------


# ------------------------ define the output schema -------------------------------