Example #1
0
    def test_time_stamp(self):
        """Round-trip datetime values through postgres for TicDat and PanDat.

        Forces the 'Timed Info' column to the postgres timestamp type (and the
        key column to integer) and checks that data values come back as
        datetime.datetime while integer primary keys stay non-datetime.
        """
        tdf = TicDatFactory(table=[["Blah"], ["Timed Info"]])
        tdf.set_data_type("table", "Timed Info", nullable=True)
        tdf.set_default_value("table", "Timed Info", None)
        dat = tdf.TicDat()
        dat.table[1] = dateutil.parser.parse("2014-05-01 18:47:05.069722")
        dat.table[2] = dateutil.parser.parse("2014-05-02 18:47:05.178768")
        pgtf = tdf.pgsql
        # Force column types rather than letting them be inferred.
        pgtf.write_schema(self.engine,
                          test_schema,
                          forced_field_types={
                              ('table', 'Blah'): "integer",
                              ('table', 'Timed Info'): "timestamp"
                          })
        pgtf.write_data(dat,
                        self.engine,
                        test_schema,
                        dsn=self.postgresql.dsn())
        dat_2 = pgtf.create_tic_dat(self.engine, test_schema)
        self.assertTrue(tdf._same_data(dat, dat_2))
        # Data values round-trip as datetimes; primary keys do not.
        self.assertTrue(
            all(
                isinstance(row["Timed Info"], datetime.datetime)
                for row in dat_2.table.values()))
        self.assertFalse(
            any(isinstance(k, datetime.datetime) for k in dat_2.table))

        pdf = PanDatFactory.create_from_full_schema(
            tdf.schema(include_ancillary_info=True))

        def same_data(pan_dat, pan_dat_2):
            # Compare the single-table PanDats, tolerating sub-microsecond
            # drift in the timestamp column.
            df1, df2 = pan_dat.table, pan_dat_2.table
            if list(df1["Blah"]) != list(df2["Blah"]):
                return False
            for dt1, dt2 in zip(df1["Timed Info"], df2["Timed Info"]):
                delta = dt1 - dt2
                if abs(delta.total_seconds()) > 1e-6:
                    return False
            return True

        pan_dat = pdf.pgsql.create_pan_dat(self.engine, test_schema)
        pan_dat_2 = pan_dat_maker(tdf.schema(), dat_2)
        self.assertTrue(same_data(pan_dat, pan_dat_2))
        for df in [_.table for _ in [pan_dat, pan_dat_2]]:
            for i in range(len(df)):
                self.assertFalse(
                    isinstance(df.loc[i, "Blah"], datetime.datetime))
                self.assertTrue(
                    isinstance(df.loc[i, "Timed Info"], datetime.datetime))

        # Perturb one timestamp, write the PanDat back, and re-read it.
        pan_dat.table.loc[1, "Timed Info"] = dateutil.parser.parse(
            "2014-05-02 18:48:05.178768")
        self.assertFalse(same_data(pan_dat, pan_dat_2))
        pdf.pgsql.write_data(pan_dat, self.engine, test_schema)
        pan_dat_2 = pdf.pgsql.create_pan_dat(self.engine, test_schema)
        self.assertTrue(same_data(pan_dat, pan_dat_2))

        # dat_2 still holds the originally-read contents, so it now differs.
        dat.table[2] = dateutil.parser.parse("2014-05-02 18:48:05.178768")
        self.assertFalse(tdf._same_data(dat, dat_2))
Example #2
0
    def write_schema(self,
                     engine,
                     schema,
                     forced_field_types=None,
                     include_ancillary_info=True):
        """
        Create the postgres schema (if needed) and the tables for this factory.

        :param engine: typically a sqlalchemy database engine with drivertype postgres (really just needs an .execute)

        :param schema: a string naming the postgres schema to populate (will create if needed)

        :param forced_field_types: A dictionary mapping (table, field) to a field type.
                                   Absent forcing, types are inferred from tic_dat_factory.data_types if possible,
                                   and set via the assumption that PK fields are text and data fields are floats if
                                   not.
        :param include_ancillary_info: boolean. If False, no primary key or foreign key info will be written
        :return:
        """
        self._check_good_pgtd_compatible_table_field_names()
        forced_field_types = forced_field_types or {}
        # All fields for table t: primary key fields followed by data fields.
        all_fields = lambda t: self.tdf.primary_key_fields.get(t, (
        )) + self.tdf.data_fields.get(t, ())
        good_forced_field_type_entry = lambda k, v: isinstance(k, tuple) and len(k) == 2 \
                        and k[1] in all_fields(k[0]) and v in \
                        ["text", "integer", "float", "bool", "boolean", "timestamp"]
        verify(
            dictish(forced_field_types) and all(
                good_forced_field_type_entry(k, v)
                for k, v in forced_field_types.items()),
            "bad forced_field_types argument")
        if not include_ancillary_info:
            # Re-express the schema with no primary keys (every field becomes
            # a data field) and delegate. Former PK fields that carry no
            # explicit data type are forced to "text" so their postgres type
            # matches what the PK-inference rule would have produced.
            from ticdat import TicDatFactory
            tdf = TicDatFactory(**{
                t: [[], pks + dfs]
                for t, (pks, dfs) in self.tdf.schema().items()
            })
            for t, dts in self.tdf.data_types.items():
                for f, dt in dts.items():
                    tdf.set_data_type(t, f, *dt)
            forced_field_types_ = {
                (t, f): "text"
                for t, (pks, dfs) in self.tdf.schema().items() for f in pks
                if f not in tdf.data_types.get(t, {})
            }
            forced_field_types_.update(forced_field_types)
            return PostgresTicFactory(tdf).write_schema(
                engine, schema, forced_field_types_)

        verify(not getattr(self.tdf, "generic_tables", None),
               "TicDat for postgres does not yet support generic tables")

        # Create the schema if it doesn't already exist, then run the DDL.
        if schema not in [
                row[0] for row in engine.execute(
                    "select schema_name from information_schema.schemata")
        ]:
            engine.execute(sa.schema.CreateSchema(schema))
        # BUGFIX: loop variable was named `str`, shadowing the builtin.
        for ddl_statement in self._get_schema_sql(self.tdf.all_tables, schema,
                                                  forced_field_types):
            engine.execute(ddl_statement)
Example #3
0
 def test_true_false(self):
     """Round-trip boolean values (plus -inf) with a forced bool column."""
     if not self.can_run:
         return
     factory = TicDatFactory(table=[["pkf"], ["df1", "df2"]])
     factory.set_data_type("table", "df2", min=-float("inf"))
     dat = factory.TicDat(
         table=[["d1", True, 100], ["d2", False, 200],
                ["d3", False, -float("inf")]])
     self.assertTrue(len(dat.table) == 3)
     self.assertFalse(factory.find_data_type_failures(dat))
     pg_factory = factory.pgsql
     # Writing something that isn't a TicDat must raise a TicDatError.
     error_text = None
     try:
         pg_factory.write_data(None, self.engine, test_schema)
     except utils.TicDatError as caught:
         error_text = str(caught)
     self.assertTrue(error_text and "Not a valid TicDat object" in error_text)
     # Force df1 to the postgres bool type, then round-trip the data.
     pg_factory.write_schema(self.engine,
                             test_schema,
                             forced_field_types={("table", "df1"): "bool"})
     pg_factory.write_data(dat, self.engine, test_schema)
     self.assertFalse(pg_factory.find_duplicates(self.engine, test_schema))
     round_tripped = pg_factory.create_tic_dat(self.engine, test_schema)
     self.assertTrue(factory._same_data(dat, round_tripped))
Example #4
0
 def test_ints_and_strings_and_lists(self):
     """Round-trip constrained string and integer fields through postgres."""
     if not self.can_run:
         return
     factory = TicDatFactory(t_one=[[], ["str_field", "int_field"]],
                             t_two=[["str_field", "int_field"], []])
     # Same constraints on both tables: closed string set, integral ints.
     for table in factory.all_tables:
         factory.set_data_type(table,
                               "str_field",
                               strings_allowed=['This', 'That'],
                               number_allowed=False)
         factory.set_data_type(table, "int_field", must_be_int=True)
     dat = factory.TicDat(
         t_one=[["This", 1], ["That", 2], ["This", 111], ["That", 211]],
         t_two=[["This", 10], ["That", 9]])
     self.assertFalse(factory.find_data_type_failures(dat))
     self.assertTrue(len(dat.t_one) == 4)
     self.assertTrue(len(dat.t_two) == 2)
     pg_factory = factory.pgsql
     pg_factory.write_schema(self.engine, test_schema)
     pg_factory.write_data(dat, self.engine, test_schema)
     self.assertFalse(pg_factory.find_duplicates(self.engine, test_schema))
     round_tripped = pg_factory.create_tic_dat(self.engine, test_schema)
     self.assertTrue(factory._same_data(dat, round_tripped))
Example #5
0
    def testNullsAndInf(self):
        """Exercise null/infinity round-trips under different io flags.

        Writes None values via nullable fields, then re-reads the same
        postgres table with infinity_io_flag=None so the stored nulls come
        back as +inf or -inf depending on each field's data-type bounds.
        """
        tdf = TicDatFactory(table=[["field one"], ["field two"]])
        for f in ["field one", "field two"]:
            tdf.set_data_type("table", f, nullable=True)
        dat = tdf.TicDat(
            table=[[None, 100], [200, 109], [0, 300], [300, None], [400, 0]])
        schema = test_schema + "_bool_defaults"
        tdf.pgsql.write_schema(self.engine,
                               schema,
                               include_ancillary_info=False)
        tdf.pgsql.write_data(dat, self.engine, schema)

        dat_1 = tdf.pgsql.create_tic_dat(self.engine, schema)
        self.assertTrue(tdf._same_data(dat, dat_1))

        # Same table, new factory: +inf-bounded fields with the None io flag,
        # so the stored nulls read back as +inf.
        tdf = TicDatFactory(table=[["field one"], ["field two"]])
        for f in ["field one", "field two"]:
            tdf.set_data_type("table", f, max=float("inf"), inclusive_max=True)
        tdf.set_infinity_io_flag(None)
        dat_inf = tdf.TicDat(table=[[float("inf"), 100], [200, 109], [0, 300],
                                    [300, float("inf")], [400, 0]])
        dat_1 = tdf.pgsql.create_tic_dat(self.engine, schema)

        self.assertTrue(tdf._same_data(dat_inf, dat_1))
        tdf.pgsql.write_data(dat_inf, self.engine, schema)
        dat_1 = tdf.pgsql.create_tic_dat(self.engine, schema)
        self.assertTrue(tdf._same_data(dat_inf, dat_1))

        # With -inf-bounded fields instead, the nulls read back as -inf.
        tdf = TicDatFactory(table=[["field one"], ["field two"]])
        for f in ["field one", "field two"]:
            tdf.set_data_type("table",
                              f,
                              min=-float("inf"),
                              inclusive_min=True)
        tdf.set_infinity_io_flag(None)
        dat_1 = tdf.pgsql.create_tic_dat(self.engine, schema)
        self.assertFalse(tdf._same_data(dat_inf, dat_1))
        dat_inf = tdf.TicDat(table=[[float("-inf"), 100], [200, 109], [0, 300],
                                    [300, -float("inf")], [400, 0]])
        self.assertTrue(tdf._same_data(dat_inf, dat_1))
Example #6
0
import gurobipy as gu
from ticdat import TicDatFactory,  standard_main, gurobi_env

# ------------------------ define the input schema --------------------------------
# workers and shifts are reference tables; availability is a pure key table
# (no data fields) pairing a worker with a shift.
input_schema = TicDatFactory(
    workers=[["Name"], ["Payment"]],
    shifts=[["Name"], ["Requirement"]],
    availability=[["Worker", "Shift"], []]
)
# Define the foreign key relationships
input_schema.add_foreign_key("availability", "workers", ['Worker', 'Name'])
input_schema.add_foreign_key("availability", "shifts", ['Shift', 'Name'])

# Define the data types
# Both numeric fields are non-negative; infinity is an accepted value.
input_schema.set_data_type("workers", "Payment", min=0, max=float("inf"),
                           inclusive_min=True, inclusive_max=True)
input_schema.set_data_type("shifts", "Requirement", min=0, max=float("inf"),
                           inclusive_min=True, inclusive_max=True)
# ---------------------------------------------------------------------------------

# ------------------------ define the output schema -------------------------------
solution_schema = TicDatFactory(
    assignments=[["Worker", "Shift"], []],
    slacks = [["Shift"], ["Slack"]],
    total_shifts=[["Worker"], ["Total Number Of Shifts"]],
    parameters=[["Parameter"], ["Value"]]
)
# ---------------------------------------------------------------------------------

# ------------------------ solving section-----------------------------------------
def solve(dat):
Example #7
0
# should be 2360.0 obj value

from ticdat import TicDatFactory,  standard_main
from amplpy import AMPL
from ticdat.utils import verify


# Roster schema: players have grades and per-position-group ratings;
# position_constraints bounds player counts by (position group, inning
# group, grade).
input_schema = TicDatFactory (
    roster = [["Name"],["Grade"]],
    positions = [["Position"],["Position Importance", "Position Group"]],
    player_ratings = [["Name", "Position Group"], ["Rating"]],
    innings = [["Inning"],["Inning Group"]],
    position_constraints = [["Position Group", "Inning Group", "Grade"],["Min Players", "Max Players"]])

input_schema.set_data_type("positions", "Position Importance",min=0, max=float("inf"),
                           inclusive_min=True, inclusive_max=False)

# input_schema.add_foreign_key("player_ratings", "roster", ["Name", "Name"])
# input_schema.add_foreign_key("player_ratings", "positions", ["Position Group", "Position Group"])
input_schema.set_data_type("player_ratings", "Rating", min=0, max=float("inf"),
                           inclusive_min=True, inclusive_max=False)

# input_schema.add_foreign_key("position_constraints", "positions", ["Position Group", "Position Group"])
# input_schema.add_foreign_key("position_constraints", "innings", ["Inning Group", "Inning Group"])
# input_schema.add_foreign_key("position_constraints", "roster", ["Grade", "Grade"])
# Min Players may be zero; Max Players must be strictly positive.
input_schema.set_data_type("position_constraints", "Min Players", min=0, max=float("inf"),
                           inclusive_min=True, inclusive_max=True)
input_schema.set_data_type("position_constraints", "Max Players", min=0, max=float("inf"),
                           inclusive_min=False, inclusive_max=True)
input_schema.add_data_row_predicate(
    "position_constraints", predicate_name="Position Min Max Check",
Example #8
0
                           default_value=float("inf"),
                           inclusive_min=False,
                           inclusive_max=True,
                           min=0,
                           max=float("inf"),
                           must_be_int=False)
# The Objective parameter selects one of two goal strings; numeric values
# are rejected.
input_schema.add_parameter("Objective",
                           "Minimize Average Service Distance",
                           strings_allowed=[
                               "Minimize Average Service Distance",
                               "Maximize Percent High Service Demand"
                           ],
                           number_allowed=False)
# Demand and Distance are non-negative, finite numbers.
input_schema.set_data_type("cities",
                           "Demand",
                           min=0,
                           max=float("inf"),
                           inclusive_min=True,
                           inclusive_max=False)
input_schema.set_data_type("distances",
                           "Distance",
                           min=0,
                           max=float("inf"),
                           inclusive_min=True,
                           inclusive_max=False)
# Both distance endpoints must reference a known city.
input_schema.add_foreign_key("distances", "cities", ['Source', 'Name'])
input_schema.add_foreign_key("distances", "cities", ['Destination', 'Name'])


# The distance matrix is bi-directionally safe. I.e. if the same source/dest and dest/source exist then the
# distances must match. If only one is present, it can fall back to the other in the code.
def _distance_matrix(dat):
Example #9
0
# gurobipy is optional at import time: if it isn't installed the module still
# loads (gu stays None) and only an actual solve will fail later.
try:
    import gurobipy as gu
except ImportError:  # BUGFIX: was a bare `except:`, which would also swallow
    gu = None        # SystemExit/KeyboardInterrupt and unrelated errors.
from ticdat import TicDatFactory, standard_main
from itertools import product

# ------------------------ define the input schema --------------------------------
input_schema = TicDatFactory (
    parameters=[["Parameter"], ["Value"]],
    load_amounts=[["Amount"],[]],
    number_of_one_way_trips=[["Number"],[]],
    amount_leftover=[["Amount"], []])

# Load amounts and trip counts are strictly positive and finite (trips must
# be integral); leftover amounts may be zero.
input_schema.set_data_type("load_amounts", "Amount", min=0, max=float("inf"),
                           inclusive_min=False, inclusive_max=False)

input_schema.set_data_type("number_of_one_way_trips", "Number", min=0, max=float("inf"),
                           inclusive_min=False, inclusive_max=False, must_be_int=True)

input_schema.set_data_type("amount_leftover", "Amount", min=0, max=float("inf"),
                           inclusive_min=True, inclusive_max=False)

input_schema.add_parameter("One Way Price", default_value=2.25, min=0, max=float("inf"), inclusive_min=True,
                           inclusive_max=False)
input_schema.add_parameter("Amount Leftover Constraint", default_value="Upper Bound", number_allowed=False,
                           strings_allowed=["Equality", "Upper Bound", "Upper Bound With Leftover Multiple Rule"])
# ---------------------------------------------------------------------------------


# ------------------------ define the output schema -------------------------------
Example #10
0
#   python soda_promotion_optimizer.py -i input_data.xlsx -o solution_data.xlsx
# will read from a model stored in the file input_data.xlsx and write the solution
# to solution_data.xlsx.

from ticdat import TicDatFactory, standard_main, Slicer, gurobi_env
import gurobipy as gu

# Input schema: parameters, products keyed by name, forecast sales keyed by
# (product, cost per unit), and a per-family promotion limit.
input_schema = TicDatFactory(parameters=[["Key"], ["Value"]],
                             products=[["Name"], ["Family"]],
                             forecast_sales=[["Product", "Cost Per Unit"],
                                             ["Sales"]],
                             max_promotions=[["Product Family"],
                                             ["Max Promotions"]])

# The only recognized parameter key is "Maximum Total Investment".
input_schema.set_data_type("parameters",
                           "Key",
                           number_allowed=False,
                           strings_allowed=("Maximum Total Investment", ))
input_schema.set_data_type("parameters",
                           "Value",
                           min=0,
                           max=float("inf"),
                           inclusive_max=True)
input_schema.set_data_type("forecast_sales",
                           "Cost Per Unit",
                           min=0,
                           max=float("inf"))
# Product families form a closed, two-string set.
input_schema.set_data_type("max_promotions",
                           "Product Family",
                           number_allowed=False,
                           strings_allowed=("Dark", "Clear"))
input_schema.set_data_type("max_promotions",
Example #11
0
                                         ["Min Nutrition", "Max Nutrition"]],
                             foods=[["Name"], ["Cost"]],
                             nutrition_quantities=[["Food", "Category"],
                                                   ["Quantity"]])
# Attach the tooltips declared above to their (table, field) pairs.
for (tbl, fld), tip in input_schema_tooltips.items():
    input_schema.set_tooltip(tbl, fld, tip)

# Define the foreign key relationships
input_schema.add_foreign_key("nutrition_quantities", "foods", ["Food", "Name"])
input_schema.add_foreign_key("nutrition_quantities", "categories",
                             ["Category", "Name"])

# Define the data types
# Min Nutrition and Cost are non-negative and finite; Max Nutrition may be
# infinite (i.e. unconstrained from above).
input_schema.set_data_type("categories",
                           "Min Nutrition",
                           min=0,
                           max=float("inf"),
                           inclusive_min=True,
                           inclusive_max=False)
input_schema.set_data_type("categories",
                           "Max Nutrition",
                           min=0,
                           max=float("inf"),
                           inclusive_min=True,
                           inclusive_max=True)
input_schema.set_data_type("foods",
                           "Cost",
                           min=0,
                           max=float("inf"),
                           inclusive_min=True,
                           inclusive_max=False)
input_schema.set_data_type("nutrition_quantities",
Example #12
0
# Draft schema: players with draft metadata, per-position roster
# requirements, and the draft positions this user controls.
input_schema = TicDatFactory (
 parameters = [["Key"],["Value"]],
 players = [['Player Name'],
            ['Position', 'Average Draft Position', 'Expected Points', 'Draft Status']],
 roster_requirements = [['Position'],
                       ['Min Num Starters', 'Max Num Starters', 'Min Num Reserve', 'Max Num Reserve',
                        'Flex Status']],
 my_draft_positions = [['Draft Position'],[]]
)

# add foreign key constraints (optional, but helps with preventing garbage-in, garbage-out)
input_schema.add_foreign_key("players", "roster_requirements", ['Position', 'Position'])

# set data types (optional, but helps with preventing garbage-in, garbage-out)
input_schema.set_data_type("parameters", "Key", number_allowed = False,
                          strings_allowed = ["Starter Weight", "Reserve Weight",
                                             "Maximum Number of Flex Starters"])
input_schema.set_data_type("parameters", "Value", min=0, max=float("inf"),
                          inclusive_min = True, inclusive_max = False)
input_schema.set_data_type("players", "Average Draft Position", min=0, max=float("inf"),
                          inclusive_min = False, inclusive_max = False)
# Expected Points may be negative (unbounded in both directions).
input_schema.set_data_type("players", "Expected Points", min=-float("inf"), max=float("inf"),
                          inclusive_min = False, inclusive_max = False)
input_schema.set_data_type("players", "Draft Status",
                          strings_allowed = ["Un-drafted", "Drafted By Me", "Drafted By Someone Else"])
# These three counts are non-negative integers ...
for fld in ("Min Num Starters",  "Min Num Reserve", "Max Num Reserve"):
    input_schema.set_data_type("roster_requirements", fld, min=0, max=float("inf"),
                          inclusive_min = True, inclusive_max = False, must_be_int = True)
# ... while Max Num Starters must be a strictly positive integer.
input_schema.set_data_type("roster_requirements", "Max Num Starters", min=0, max=float("inf"),
                      inclusive_min = False, inclusive_max = True, must_be_int = True)
input_schema.set_data_type("roster_requirements", "Flex Status", number_allowed = False,
Example #13
0
    def testDateTime(self):
        """Round-trip datetime-typed fields and parameters through postgres.

        Covers both TicDat and PanDat paths, including a nullable datetime
        data field (one None value) and a string parameter value that the
        datetime data type converts on read-back.
        """
        schema = test_schema + "_datetime"
        tdf = TicDatFactory(table_with_stuffs=[["field one"], ["field two"]],
                            parameters=[["a"], ["b"]])
        tdf.add_parameter("p1", "Dec 15 1970", datetime=True)
        tdf.add_parameter("p2", None, datetime=True, nullable=True)
        tdf.set_data_type("table_with_stuffs", "field one", datetime=True)
        tdf.set_data_type("table_with_stuffs",
                          "field two",
                          datetime=True,
                          nullable=True)

        dat = tdf.TicDat(table_with_stuffs=[[
            dateutil.parser.parse("July 11 1972"), None
        ], [datetime.datetime.now(),
            dateutil.parser.parse("Sept 11 2011")]],
                         parameters=[["p1", "7/11/1911"], ["p2", None]])
        self.assertFalse(
            tdf.find_data_type_failures(dat)
            or tdf.find_data_row_failures(dat))

        tdf.pgsql.write_schema(self.engine, schema)
        tdf.pgsql.write_data(dat, self.engine, schema)
        dat_1 = tdf.pgsql.create_tic_dat(self.engine, schema)
        # Not byte-identical to dat (e.g. p1's "7/11/1911" string comes back
        # as a datetime, per the assert below), but row counts match and the
        # result is still type-clean.
        self.assertFalse(
            tdf._same_data(dat, dat_1, nans_are_same_for_data_rows=True))
        self.assertTrue(
            all(
                len(getattr(dat, t)) == len(getattr(dat_1, t))
                for t in tdf.all_tables))
        self.assertFalse(
            tdf.find_data_type_failures(dat_1)
            or tdf.find_data_row_failures(dat_1))
        self.assertTrue(
            isinstance(dat_1.parameters["p1"]["b"], datetime.datetime))
        # All primary keys are real datetimes with no nulls among them ...
        self.assertTrue(
            all(
                isinstance(_, datetime.datetime)
                for _ in dat_1.table_with_stuffs))
        self.assertTrue(
            len([_ for _ in dat_1.table_with_stuffs if pd.isnull(_)]) == 0)
        # ... and among the data fields, exactly one value is null.
        self.assertTrue(
            all(
                isinstance(_, datetime.datetime) or pd.isnull(_)
                for v in dat_1.table_with_stuffs.values() for _ in v.values()))
        self.assertTrue(
            len([
                _ for v in dat_1.table_with_stuffs.values()
                for _ in v.values() if pd.isnull(_)
            ]) == 1)
        pdf = PanDatFactory.create_from_full_schema(
            tdf.schema(include_ancillary_info=True))
        pan_dat = pdf.pgsql.create_pan_dat(self.engine, schema)
        dat_2 = pdf.copy_to_tic_dat(pan_dat)
        # pandas can be a real PIA sometimes, hacking around some weird downcasting
        for k in list(dat_2.table_with_stuffs):
            dat_2.table_with_stuffs[pd.Timestamp(
                k)] = dat_2.table_with_stuffs.pop(k)
        self.assertTrue(
            tdf._same_data(dat_1, dat_2, nans_are_same_for_data_rows=True))

        # Write the PanDat back and verify the same equivalence still holds.
        pdf.pgsql.write_data(pan_dat, self.engine, schema)
        dat_3 = pdf.copy_to_tic_dat(
            pdf.pgsql.create_pan_dat(self.engine, schema))
        for k in list(dat_3.table_with_stuffs):
            dat_3.table_with_stuffs[pd.Timestamp(
                k)] = dat_3.table_with_stuffs.pop(k)
        self.assertTrue(
            tdf._same_data(dat_1, dat_3, nans_are_same_for_data_rows=True))
Example #14
0
     arcs = [["source", "destination"],["capacity"]],
     cost = [["commodity", "source", "destination"], ["cost"]],
     inflow = [["commodity", "node"],["quantity"]]
)

# add foreign key constraints
# Every arc/cost/inflow reference must resolve to a known node or commodity.
dataFactory.add_foreign_key("arcs", "nodes", ['source', 'name'])
dataFactory.add_foreign_key("arcs", "nodes", ['destination', 'name'])
dataFactory.add_foreign_key("cost", "nodes", ['source', 'name'])
dataFactory.add_foreign_key("cost", "nodes", ['destination', 'name'])
dataFactory.add_foreign_key("cost", "commodities", ['commodity', 'name'])
dataFactory.add_foreign_key("inflow", "commodities", ['commodity', 'name'])
dataFactory.add_foreign_key("inflow", "nodes", ['node', 'name'])

# the whole schema has only three data fields to type
dataFactory.set_data_type("arcs", "capacity")
dataFactory.set_data_type("cost", "cost")
# except quantity which allows negatives
dataFactory.set_data_type("inflow", "quantity", min=-float("inf"), inclusive_min=False)

# Solution schema: one flow quantity per (commodity, source, destination).
solutionFactory = TicDatFactory(
        flow = [["commodity", "source", "destination"], ["quantity"]])



def create_model(dat):
    '''
    :param dat: a good ticdat for the dataFactory
    :return: a gurobi model and dictionary of gurboi flow variables
    '''
    assert dataFactory.good_tic_dat_object(dat)
Example #15
0
# ------------------------ define the input schema --------------------------------
# There are three input tables, with 4 primary key fields  and 4 data fields.
input_schema = TicDatFactory(sites=[['Name'], ['Demand', 'Center Status']],
                             distance=[['Source', 'Destination'],
                                       ['Distance']],
                             parameters=[["Parameter"], ["Value"]])

# add foreign key constraints
# Both distance endpoints must reference a known site.
input_schema.add_foreign_key("distance", "sites", ['Source', 'Name'])
input_schema.add_foreign_key("distance", "sites", ['Destination', 'Name'])

# center_status is a flag field which can take one of two string values.
input_schema.set_data_type(
    "sites",
    "Center Status",
    number_allowed=False,
    strings_allowed=["Can Be Center", "Pure Demand Point"])
# The default type of non infinite, non negative works for distance and demand
input_schema.set_data_type("sites", "Demand")
input_schema.set_data_type("distance", "Distance")

# Number of Centroids must be a strictly positive, finite integer.
input_schema.add_parameter("Number of Centroids",
                           default_value=1,
                           inclusive_min=False,
                           inclusive_max=False,
                           min=0,
                           max=float("inf"),
                           must_be_int=True)
input_schema.add_parameter("MIP Gap",
                           default_value=0.001,
Example #16
0
#
# Note that file requires jobs.mod to be in the same directory

from ticdat import TicDatFactory, standard_main, opl_run
import time
import datetime

# ------------------------ define the input schema --------------------------------
# There are three input tables, with 4 primary key fields and 4 data fields.
input_schema = TicDatFactory(
    parameters=[["Parameter"], ["Value"]],
    machines=[["Name"], []],
    jobs=[["Name"], ["Machine1", "Durations1", "Machine2", "Durations2"]])

# "Load Duration" is the only recognized parameter; its value must be a
# finite, non-negative number.
input_schema.set_data_type("parameters",
                           "Parameter",
                           number_allowed=False,
                           strings_allowed=("Load Duration", ))
input_schema.set_data_type("parameters",
                           "Value",
                           min=0,
                           max=float("inf"),
                           inclusive_max=False)

# Each job references two machines, both of which must exist.
input_schema.add_foreign_key("jobs", "machines", ["Machine1", "Name"])
input_schema.add_foreign_key("jobs", "machines", ["Machine2", "Name"])

input_schema.set_data_type("jobs",
                           "Durations1",
                           min=0,
                           max=float("inf"),
                           inclusive_max=False,
Example #17
0
# ------------------------ define the input schema --------------------------------
# There are three input tables, with 4 primary key fields  and 4 data fields.
input_schema = TicDatFactory(sites=[['Name'], ['Demand', 'Center Status']],
                             distance=[['Source', 'Destination'],
                                       ['Distance']],
                             parameters=[["Parameter"], ["Value"]])

# add foreign key constraints
# Both distance endpoints must reference a known site.
input_schema.add_foreign_key("distance", "sites", ['Source', 'Name'])
input_schema.add_foreign_key("distance", "sites", ['Destination', 'Name'])

# center_status is a flag field which can take one of two string values.
input_schema.set_data_type(
    "sites",
    "Center Status",
    number_allowed=False,
    strings_allowed=["Can Be Center", "Pure Demand Point"])
# The default type of non infinite, non negative works for distance
input_schema.set_data_type("distance", "Distance")

# There are three types of parameters
input_schema.set_data_type(
    "parameters",
    "Parameter",
    number_allowed=False,
    strings_allowed=["Number of Centroids", "MIP Gap", "Formulation"])
# Parameter values may be any number, or one of the Formulation strings.
input_schema.set_data_type("parameters",
                           "Value",
                           number_allowed=True,
                           strings_allowed=["Weak", "Strong"])
Example #18
0
# will read from a model stored in the file input_data.xlsx and write the solution
# to solution_data.xlsx.

# Note that this file requires oil_blend.mod to be in the same directory

from ticdat import TicDatFactory, standard_main, opl_run

# ------------------------ define the input schema --------------------------------
input_schema = TicDatFactory (
     parameters = [["Parameter"],["Value"]],
     gas = [["Name"],["Demand","Sales Price","Min Octane Rating", "Max Lead Contents"]],
     oil = [["Name"],["Supply","Purchase Price","Octane Rating", "Lead Contents"]])

# no foreign keys

# All numeric fields are non-negative; inclusive_max controls whether
# infinity itself is an accepted value for each field.
input_schema.set_data_type("parameters", "Parameter", number_allowed = False,
                           strings_allowed= ["Maximum Production", "Production Cost"])
input_schema.set_data_type("parameters", "Value", min=0, max=float("inf"),
                           inclusive_min=True, inclusive_max=True)
input_schema.set_data_type("gas", "Demand", min=0, max=float("inf"),
                           inclusive_min=True, inclusive_max=False)
input_schema.set_data_type("gas", "Sales Price", min=0, max=float("inf"),
                           inclusive_min=True, inclusive_max=False)
input_schema.set_data_type("gas", "Min Octane Rating", min=0, max=float("inf"),
                           inclusive_min=True, inclusive_max=False)
input_schema.set_data_type("gas", "Max Lead Contents", min=0, max=float("inf"),
                           inclusive_min=True, inclusive_max=True)
input_schema.set_data_type("oil", "Supply", min=0, max=float("inf"),
                           inclusive_min=True, inclusive_max=True)
input_schema.set_data_type("oil", "Purchase Price", min=0, max=float("inf"),
                           inclusive_min=True, inclusive_max=False)
input_schema.set_data_type("oil", "Octane Rating", min=0, max=float("inf"),
Example #19
0
# solution to metrorail_solution_data.json.

# this version of the file uses amplpy and Gurobi
from amplpy import AMPL
from ticdat import TicDatFactory, standard_main, ampl_format
from itertools import product

# ------------------------ define the input schema --------------------------------
input_schema = TicDatFactory(
    parameters=[["Key"], ["Value"]],
    load_amounts=[["Amount"], []],
    number_of_one_way_trips=[["Number"], []],
    amount_leftover=[["Amount"], []])

# load amounts are strictly positive, finite numbers
input_schema.set_data_type("load_amounts", "Amount", min=0, inclusive_min=False,
                           max=float("inf"), inclusive_max=False)

# trip counts are strictly positive, finite integers
input_schema.set_data_type("number_of_one_way_trips", "Number", min=0,
                           inclusive_min=False, max=float("inf"),
                           inclusive_max=False, must_be_int=True)

input_schema.set_data_type("amount_leftover",
                           "Amount",
                           min=0,
                           max=float("inf"),
Example #20
0
     arcs = [["Source", "Destination"],["Capacity"]],
     cost = [["Commodity", "Source", "Destination"], ["Cost"]],
     inflow = [["Commodity", "Node"],["Quantity"]]
)

# Define the foreign key relationships
input_schema.add_foreign_key("arcs", "nodes", ["Source", "Name"])
input_schema.add_foreign_key("arcs", "nodes", ["Destination", "Name"])
input_schema.add_foreign_key("cost", "commodities", ["Commodity", "Name"])
input_schema.add_foreign_key("cost", "nodes", ["Source", "Name"])
input_schema.add_foreign_key("cost", "nodes", ["Destination", "Name"])
input_schema.add_foreign_key("inflow", "commodities", ["Commodity", "Name"])
input_schema.add_foreign_key("inflow", "nodes", ["Node", "Name"])

# Define the data types
# Capacity: non-negative, may be infinite
input_schema.set_data_type("arcs", "Capacity", min=0, inclusive_min=True,
                           max=float("inf"), inclusive_max=True)
# Cost: non-negative and finite
input_schema.set_data_type("cost", "Cost", min=0, inclusive_min=True,
                           max=float("inf"), inclusive_max=False)
# Quantity: any finite number (negative inflow models demand)
input_schema.set_data_type("inflow", "Quantity", min=-float("inf"),
                           inclusive_min=False, max=float("inf"),
                           inclusive_max=False)

# The default-default of zero makes sense everywhere except for Capacity
input_schema.set_default_value("arcs", "Capacity", float("inf"))
# ---------------------------------------------------------------------------------

# ------------------------ define the output schema -------------------------------
solution_schema = TicDatFactory(
    flow=[["Commodity", "Source", "Destination"], ["Quantity"]],
    parameters=[["Key"], ["Value"]])
# ---------------------------------------------------------------------------------
Example #21
0
def _testFantop(modelType, sqlFile):
    """Build and solve the 'fantop' fantasy-football draft MIP.

    Loads input data from *sqlFile* (a SQLite file in the same directory as
    this code), builds a model that assigns players to my remaining draft
    positions as starters or reserves, and solves it with the engine
    selected by *modelType*.

    :param modelType: engine identifier forwarded to ``Model``
    :param sqlFile: file name of the SQLite input data, joined with _codeDir()
    :return: ``(sln, draft_yield)`` where ``sln`` is a ``solutionFactory``
             TicDat of my draft picks and ``draft_yield`` is their weighted
             expected-points total, or ``None`` if optimization fails.
    """
    dataFactory = TicDatFactory(parameters=[["Key"], ["Value"]],
                                players=[['Player Name'],
                                         [
                                             'Position',
                                             'Average Draft Position',
                                             'Expected Points', 'Draft Status'
                                         ]],
                                roster_requirements=[['Position'],
                                                     [
                                                         'Min Num Starters',
                                                         'Max Num Starters',
                                                         'Min Num Reserve',
                                                         'Max Num Reserve',
                                                         'Flex Status'
                                                     ]],
                                my_draft_positions=[['Draft Position'], []])

    # add foreign key constraints (optional, but helps with preventing garbage-in, garbage-out)
    dataFactory.add_foreign_key("players", "roster_requirements",
                                ['Position', 'Position'])

    # set data types (optional, but helps with preventing garbage-in, garbage-out)
    dataFactory.set_data_type("parameters",
                              "Key",
                              number_allowed=False,
                              strings_allowed=[
                                  "Starter Weight", "Reserve Weight",
                                  "Maximum Number of Flex Starters"
                              ])
    dataFactory.set_data_type("parameters",
                              "Value",
                              min=0,
                              max=float("inf"),
                              inclusive_min=True,
                              inclusive_max=False)
    dataFactory.set_data_type("players",
                              "Average Draft Position",
                              min=0,
                              max=float("inf"),
                              inclusive_min=False,
                              inclusive_max=False)
    dataFactory.set_data_type("players",
                              "Expected Points",
                              min=-float("inf"),
                              max=float("inf"),
                              inclusive_min=False,
                              inclusive_max=False)
    dataFactory.set_data_type("players",
                              "Draft Status",
                              strings_allowed=[
                                  "Un-drafted", "Drafted By Me",
                                  "Drafted By Someone Else"
                              ])
    for fld in ("Min Num Starters", "Min Num Reserve", "Max Num Reserve"):
        dataFactory.set_data_type("roster_requirements",
                                  fld,
                                  min=0,
                                  max=float("inf"),
                                  inclusive_min=True,
                                  inclusive_max=False,
                                  must_be_int=True)
    dataFactory.set_data_type("roster_requirements",
                              "Max Num Starters",
                              min=0,
                              max=float("inf"),
                              inclusive_min=False,
                              inclusive_max=True,
                              must_be_int=True)
    dataFactory.set_data_type(
        "roster_requirements",
        "Flex Status",
        number_allowed=False,
        strings_allowed=["Flex Eligible", "Flex Ineligible"])
    dataFactory.set_data_type("my_draft_positions",
                              "Draft Position",
                              min=0,
                              max=float("inf"),
                              inclusive_min=False,
                              inclusive_max=False,
                              must_be_int=True)

    solutionFactory = TicDatFactory(my_draft=[[
        'Player Name'
    ], [
        'Draft Position', 'Position', 'Planned Or Actual', 'Starter Or Reserve'
    ]])

    # load the input data from the SQLite file and sanity-check it
    dat = dataFactory.sql.create_tic_dat_from_sql(os.path.join(
        _codeDir(), sqlFile),
                                                  freeze_it=True)

    assert dataFactory.good_tic_dat_object(dat)
    assert not dataFactory.find_foreign_key_failures(dat)
    assert not dataFactory.find_data_type_failures(dat)

    expected_draft_position = {}
    # for our purposes, its fine to assume all those drafted by someone else are drafted
    # prior to any players drafted by me
    # sort key: already-drafted players first (someone else before me), then
    # un-drafted players ordered by their average draft position
    for player_name in sorted(
            dat.players,
            key=lambda _p: {
                "Un-drafted": dat.players[_p]["Average Draft Position"],
                "Drafted By Me": -1,
                "Drafted By Someone Else": -2
            }[dat.players[_p]["Draft Status"]]):
        expected_draft_position[player_name] = len(expected_draft_position) + 1
    # the result is a dense 1..n ranking covering every player exactly once
    assert max(expected_draft_position.values()) == len(
        set(expected_draft_position.values())) == len(dat.players)
    assert min(expected_draft_position.values()) == 1

    already_drafted_by_me = {
        player_name
        for player_name, row in dat.players.items()
        if row["Draft Status"] == "Drafted By Me"
    }
    can_be_drafted_by_me = {
        player_name
        for player_name, row in dat.players.items()
        if row["Draft Status"] != "Drafted By Someone Else"
    }

    # one binary variable per draftable player for each role (starter / reserve)
    m = Model(modelType, 'fantop')
    my_starters = {
        player_name: m.add_var(type="binary", name="starter_%s" % player_name)
        for player_name in can_be_drafted_by_me
    }
    my_reserves = {
        player_name: m.add_var(type="binary", name="reserve_%s" % player_name)
        for player_name in can_be_drafted_by_me
    }

    for player_name in can_be_drafted_by_me:
        if player_name in already_drafted_by_me:
            # players I already drafted must fill exactly one role
            m.add_constraint(my_starters[player_name] +
                             my_reserves[player_name] == 1,
                             name="already_drafted_%s" % player_name)
        else:
            # everyone else can be picked for at most one role
            m.add_constraint(
                my_starters[player_name] + my_reserves[player_name] <= 1,
                name="cant_draft_twice_%s" % player_name)

    # at my i-th pick, at most i of my selections can be players expected to
    # go earlier than that draft position
    for i, draft_position in enumerate(sorted(dat.my_draft_positions)):
        m.add_constraint(m.sum(
            my_starters[player_name] + my_reserves[player_name]
            for player_name in can_be_drafted_by_me
            if expected_draft_position[player_name] < draft_position) <= i,
                         name="at_most_%s_can_be_ahead_of_%s" %
                         (i, draft_position))

    # total picks must extend the current draft, but never exceed my slots
    my_draft_size = m.sum(my_starters[player_name] + my_reserves[player_name]
                          for player_name in can_be_drafted_by_me)
    m.add_constraint(my_draft_size >= len(already_drafted_by_me) + 1,
                     name="need_to_extend_by_at_least_one")
    m.add_constraint(my_draft_size <= len(dat.my_draft_positions),
                     name="cant_exceed_draft_total")

    # per-position bounds on starter and reserve counts
    for position, row in dat.roster_requirements.items():
        players = {
            player_name
            for player_name in can_be_drafted_by_me
            if dat.players[player_name]["Position"] == position
        }
        starters = m.sum(my_starters[player_name] for player_name in players)
        reserves = m.sum(my_reserves[player_name] for player_name in players)
        m.add_constraint(starters >= row["Min Num Starters"],
                         name="min_starters_%s" % position)
        m.add_constraint(starters <= row["Max Num Starters"],
                         name="max_starters_%s" % position)
        m.add_constraint(reserves >= row["Min Num Reserve"],
                         name="min_reserve_%s" % position)
        m.add_constraint(reserves <= row["Max Num Reserve"],
                         name="max_reserve_%s" % position)

    # optional cap on the number of flex-eligible starters
    if "Maximum Number of Flex Starters" in dat.parameters:
        players = {
            player_name
            for player_name in can_be_drafted_by_me
            if dat.roster_requirements[dat.players[player_name]["Position"]]
            ["Flex Status"] == "Flex Eligible"
        }
        m.add_constraint(
            m.sum(my_starters[player_name] for player_name in players) <=
            dat.parameters["Maximum Number of Flex Starters"]["Value"],
            name="max_flex")

    # maximize weighted expected points; weights default to 1 when absent
    starter_weight = dat.parameters["Starter Weight"][
        "Value"] if "Starter Weight" in dat.parameters else 1
    reserve_weight = dat.parameters["Reserve Weight"][
        "Value"] if "Reserve Weight" in dat.parameters else 1
    m.set_objective(m.sum(dat.players[player_name]["Expected Points"] *
                          (my_starters[player_name] * starter_weight +
                           my_reserves[player_name] * reserve_weight)
                          for player_name in can_be_drafted_by_me),
                    sense="maximize")

    if not m.optimize():
        return

    sln = solutionFactory.TicDat()

    def almostone(x):
        # treat a binary as "selected" when its solution value rounds to 1
        return abs(m.get_solution_value(x) - 1) < 0.0001

    # my picks, ordered by when each is expected to still be available
    picked = sorted([
        player_name for player_name in can_be_drafted_by_me
        if almostone(my_starters[player_name])
        or almostone(my_reserves[player_name])
    ],
                    key=lambda _p: expected_draft_position[_p])
    assert len(picked) <= len(dat.my_draft_positions)
    if len(picked) < len(dat.my_draft_positions):
        print(
            "Your model is over-constrained, and thus only a partial draft was possible"
        )

    # pair picks with my draft slots in order and tally the weighted yield
    draft_yield = 0
    for player_name, draft_position in zip(picked,
                                           sorted(dat.my_draft_positions)):
        draft_yield += dat.players[player_name]["Expected Points"] * \
                       (starter_weight if almostone(my_starters[player_name]) else reserve_weight)
        assert draft_position <= expected_draft_position[player_name]
        sln.my_draft[player_name]["Draft Position"] = draft_position
        sln.my_draft[player_name]["Position"] = dat.players[player_name][
            "Position"]
        sln.my_draft[player_name][
            "Planned Or Actual"] = "Actual" if player_name in already_drafted_by_me else "Planned"
        sln.my_draft[player_name]["Starter Or Reserve"] = \
            "Starter" if almostone(my_starters[player_name]) else "Reserve"
    return sln, draft_yield
Example #22
0
from ticdat import TicDatFactory, Progress, LogFile, Slicer, standard_main

# ------------------------ define the input schema --------------------------------
# There are three input tables, with 4 primary key fields  and 4 data fields.
dataFactory = TicDatFactory(
    sites=[["name"], ["demand", "center_status"]],
    distance=[["source", "destination"], ["distance"]],
    parameters=[["key"], ["value"]])

# every distance row must reference known sites at both ends
dataFactory.add_foreign_key("distance", "sites", ["source", "name"])
dataFactory.add_foreign_key("distance", "sites", ["destination", "name"])

# center_status is a two-valued string flag
dataFactory.set_data_type("sites", "center_status", number_allowed=False,
                          strings_allowed=["Can Be Center", "Pure Demand Point"])
# distance keeps the default type: non-negative and finite
dataFactory.set_data_type("distance", "distance")
# ---------------------------------------------------------------------------------

# ------------------------ define the output schema -------------------------------
# Three solution tables, with 2 primary key fields and 3 data fields amongst them.
solutionFactory = TicDatFactory(
    openings=[["site"], []],
    assignments=[["site", "assigned_to"], []],
    parameters=[["key"], ["value"]])

# ---------------------------------------------------------------------------------
Example #23
0
                "Min Innings Played", "Max Innings Played"
            ]],
    positions=[["Position"],
               [
                   "Position Importance", "Position Group",
                   "Consecutive Innings Only"
               ]],
    player_ratings=[["Name", "Position Group"], ["Rating"]],
    innings=[["Inning"], ["Inning Group"]],
    position_constraints=[["Position Group", "Inning Group", "Grade"],
                          ["Min Players", "Max Players"]],
    parameters=[["Key"], ["Value"]])

input_schema.set_data_type("roster",
                           "Min Innings Played",
                           min=0,
                           max=9,
                           inclusive_min=True,
                           inclusive_max=True)
input_schema.set_data_type("roster",
                           "Max Innings Played",
                           min=0,
                           max=9,
                           inclusive_min=False,
                           inclusive_max=True)
input_schema.add_data_row_predicate(
    "roster",
    predicate_name="Roster Min Max Check",
    predicate=lambda row: row["Max Innings Played"] >= row["Min Innings Played"
                                                           ])
input_schema.add_foreign_key("roster", "innings", ["Arrival Inning", "Inning"])
input_schema.add_foreign_key("roster", "innings",