Example #1
0
def copyDataDietWeirdCase2(dat):
    """Deep-copy a diet TicDat into the weird-case-2 schema.

    Reuses copyDataDietWeirdCase for the shared tables, then re-keys the
    nutrition rows into the snake_case table name used by schema 2.
    """
    factory = TicDatFactory(**dietSchemaWeirdCase2())
    intermediate = copyDataDietWeirdCase(dat)
    result = factory.TicDat(cateGories=intermediate.cateGories,
                            foodS=intermediate.foodS)
    for (food, category), row in intermediate.nutritionquantities.items():
        result.nutrition_quantities[food, category] = row
    return result
Example #2
0
 def test_dups(self):
     """find_duplicates should report duplicated primary-key rows per table."""
     if not self.can_run:
         return
     tdf = TicDatFactory(one=[["a"], ["b", "c"]],
                         two=[["a", "b"], ["c"]],
                         three=[["a", "b", "c"], []])
     # A pk-free twin of the schema so duplicate rows can actually be written.
     pk_free = TicDatFactory(**{tbl: [[], ["a", "b", "c"]]
                                for tbl in tdf.all_tables})
     rows = [[1, 2, 1], [1, 2, 2], [2, 1, 3], [2, 2, 3], [1, 2, 2], [5, 1, 2]]
     td = pk_free.TicDat(**{tbl: rows for tbl in tdf.all_tables})
     pk_free.pgsql.write_schema(self.engine, test_schema)
     pk_free.pgsql.write_data(td, self.engine, test_schema)
     dups = tdf.pgsql.find_duplicates(self.engine, test_schema)
     expected = {
         'one': {1: 3, 2: 2},
         'two': {(1, 2): 3},
         'three': {(1, 2, 2): 2},
     }
     self.assertTrue(dups == expected)
Example #3
0
    def write_schema(self,
                     engine,
                     schema,
                     forced_field_types=None,
                     include_ancillary_info=True):
        """
        Creates the postgres schema (if needed) and the tables for this factory within it.

        :param engine: typically a sqlalchemy database engine with drivertype postgres (really just needs an .execute)

        :param schema: a string naming the postgres schema to populate (will create if needed)

        :param forced_field_types: A dictionary mapping (table, field) to a field type.
                                   Absent forcing, types are inferred from tic_dat_factory.data_types if possible,
                                   and set via the assumption that PK fields are text and data fields are floats if
                                   not.
        :param include_ancillary_info: boolean. If False, no primary key or foreign key info will be written
        :return:
        """
        self._check_good_pgtd_compatible_table_field_names()
        forced_field_types = forced_field_types or {}
        all_fields = lambda t: self.tdf.primary_key_fields.get(t, (
        )) + self.tdf.data_fields.get(t, ())
        good_forced_field_type_entry = lambda k, v: isinstance(k, tuple) and len(k) == 2 \
                        and k[1] in all_fields(k[0]) and v in \
                        ["text", "integer", "float", "bool", "boolean", "timestamp"]
        verify(
            dictish(forced_field_types) and all(
                good_forced_field_type_entry(k, v)
                for k, v in forced_field_types.items()),
            "bad forced_field_types argument")
        if not include_ancillary_info:
            from ticdat import TicDatFactory
            # Rebuild the schema with every field demoted to a data field so that
            # no primary-key (and hence no foreign-key) DDL gets emitted.
            tdf = TicDatFactory(**{
                t: [[], pks + dfs]
                for t, (pks, dfs) in self.tdf.schema().items()
            })
            for t, dts in self.tdf.data_types.items():
                for f, dt in dts.items():
                    tdf.set_data_type(t, f, *dt)
            # Former PK fields with no explicit data type default to text.
            forced_field_types_ = {
                (t, f): "text"
                for t, (pks, dfs) in self.tdf.schema().items() for f in pks
                if f not in tdf.data_types.get(t, {})
            }
            forced_field_types_.update(forced_field_types)
            return PostgresTicFactory(tdf).write_schema(
                engine, schema, forced_field_types_)

        verify(not getattr(self.tdf, "generic_tables", None),
               "TicDat for postgres does not yet support generic tables")

        if schema not in [
                row[0] for row in engine.execute(
                    "select schema_name from information_schema.schemata")
        ]:
            engine.execute(sa.schema.CreateSchema(schema))
        # Renamed loop variable from `str`, which shadowed the builtin.
        for sql_str in self._get_schema_sql(self.tdf.all_tables, schema,
                                            forced_field_types):
            engine.execute(sql_str)
 def _same_data(self, obj1, obj2, epsilon = 0):
     """Return True if two PanDat objects carry the same data (to within epsilon)."""
     from ticdat import TicDatFactory
     schema_copy = self.schema()
     for tbl in self.generic_tables:
         first_cols = set(getattr(obj1, tbl).columns)
         second_cols = set(getattr(obj2, tbl).columns)
         # Generic tables with different column sets can never match.
         if first_cols != second_cols:
             return False
         schema_copy[tbl] = [[], list(getattr(obj1, tbl).columns)]
     tic_factory = TicDatFactory(**schema_copy)
     first = self._copy_to_tic_dat(obj1, keep_generics_as_df=False)
     second = self._copy_to_tic_dat(obj2, keep_generics_as_df=False)
     return tic_factory._same_data(first, second, epsilon=epsilon)
Example #5
0
def copyDataDietWeirdCase(dat):
    """Deep-copy a standard diet TicDat into the weird-case (mixed-caps) schema."""
    factory = TicDatFactory(**dietSchemaWeirdCase())
    rtn = factory.TicDat()
    for name, row in dat.categories.items():
        rtn.cateGories[name]["miNnutrition"] = row["minNutrition"]
        rtn.cateGories[name]["maXnutrition"] = row["maxNutrition"]
    for food, row in dat.foods.items():
        rtn.foodS[food] = row["cost"]
    for (food, category), row in dat.nutritionQuantities.items():
        rtn.nutritionquantities[food, category] = row["qty"]
    return rtn
Example #6
0
 def test_missing_tables(self):
     """Factories that declare extra tables can still read a schema missing them."""
     schema = test_schema + "_missing_tables"
     tdf_1 = TicDatFactory(this=[["Something"], ["Another"]])
     pdf_1 = PanDatFactory(**tdf_1.schema())
     # tdf_2/pdf_2 add a "that" table which is never written to postgres.
     tdf_2 = TicDatFactory(
         **dict(tdf_1.schema(), that=[["What", "Ever"], []]))
     pdf_2 = PanDatFactory(**tdf_2.schema())
     dat = tdf_1.TicDat(this=[["a", 2], ["b", 3], ["c", 5]])
     pan_dat = tdf_1.copy_to_pandas(dat, drop_pk_columns=False)
     tdf_1.pgsql.write_schema(self.engine, schema)
     tdf_1.pgsql.write_data(dat, self.engine, schema)
     pg_dat = tdf_2.pgsql.create_tic_dat(self.engine, schema)
     self.assertTrue(tdf_1._same_data(dat, pg_dat))
     pg_pan_dat = pdf_2.pgsql.create_pan_dat(self.engine, schema)
     self.assertTrue(pdf_1._same_data(pan_dat, pg_pan_dat))
Example #7
0
 def test_wtf(self):
     """Tables with punctuation-laden field names round-trip through postgres."""
     schema = "wtf"
     tdf = TicDatFactory(
         table_one=[["Cost per Distance", "Cost per Hr. (in-transit)"],
                    ["Stuff"]],
         table_two=[["This", "That"], ["Tho"]])
     tdf.pgsql.write_schema(self.engine, schema)
     shared_rows = [["a", "b", 1], ["dd", "ee", 10], ["023", "210", 102.1]]
     tic_dat = tdf.TicDat(table_one=shared_rows, table_two=shared_rows)
     # write_data here is handed a dsn in addition to the engine
     tdf.pgsql.write_data(tic_dat,
                          self.engine,
                          schema,
                          dsn=self.postgresql.dsn())
     round_tripped = tdf.pgsql.create_tic_dat(self.engine, schema)
     self.assertTrue(tdf._same_data(tic_dat, round_tripped))
 def _copy_to_tic_dat(self, pan_dat, keep_generics_as_df=True):
     """Build a TicDat from pan_dat; generic tables stay DataFrames unless told otherwise."""
     sch = self.schema()
     if not keep_generics_as_df:
         # Generic tables get their schema inferred from the actual columns.
         for tbl in self.generic_tables:
             sch[tbl] = [[], list(getattr(pan_dat, tbl).columns)]
     from ticdat import TicDatFactory
     tdf = TicDatFactory(**sch)
     def table_arg(tbl):
         frame = getattr(pan_dat, tbl)
         pks = self.primary_key_fields.get(tbl, ())
         if pks:
             # Index by the primary key(s), keeping the columns in place too.
             return frame.set_index(list(pks), drop=False)
         if tbl in self.generic_tables and not keep_generics_as_df:
             return [list(row) for row in frame.itertuples(index=False)]
         return frame
     return tdf.TicDat(**{tbl: table_arg(tbl) for tbl in self.all_tables})
Example #9
0
    def test_pgtd_active(self):
        """Round-trip data through postgres with an 'active_fld' boolean column.

        Rows written through write_data are flagged active; rows flagged False
        directly in the database are filtered out by subsequent reads (checked
        for both the TicDat and PanDat readers).
        """
        if not self.can_run:
            return
        schema = test_schema + "_active"
        # Same as diet_schema except categories gains a leading active_fld data field.
        tdf = TicDatFactory(
            **{
                k: [pks, (["active_fld"] if k == "categories" else []) + dfs]
                for k, (pks, dfs) in diet_schema.schema().items()
            })
        tdf.pgsql.write_schema(self.engine,
                               schema,
                               include_ancillary_info=False,
                               forced_field_types={
                                   ('categories', 'active_fld'): 'boolean'
                               })
        tdf = diet_schema.clone()
        dat = tdf.copy_tic_dat(diet_dat)
        # Extra row that will be deactivated directly in the database below.
        dat.categories["junk"] = {}
        tdf.pgsql.write_data(dat, self.engine, schema, active_fld="active_fld")
        # Everything just written should be flagged active.
        self.assertTrue(
            set(_[0] for _ in self.engine.execute(
                f"Select active_fld from {schema}.categories")) == {True})
        self.engine.execute(
            f"Update {schema}.categories set active_fld = False where name = 'junk'"
        )
        # Reading with active_fld drops the deactivated "junk" row.
        dat_2 = tdf.pgsql.create_tic_dat(self.engine,
                                         schema,
                                         active_fld="active_fld")
        self.assertTrue(tdf._same_data(dat_2, diet_dat, epsilon=1e-10))

        pdf = PanDatFactory.create_from_full_schema(
            diet_schema.schema(include_ancillary_info=True))
        pan_dat = tdf.copy_to_pandas(diet_dat, drop_pk_columns=False)
        pan_dat_2 = pdf.pgsql.create_pan_dat(self.engine,
                                             schema,
                                             active_fld="active_fld")
        self.assertTrue(pdf._same_data(pan_dat, pan_dat_2, epsilon=1e-10))
        # The inactive row is still physically present in the table.
        self.assertTrue(
            set(_[0] for _ in self.engine.execute(
                f"Select active_fld from {schema}.categories")) ==
            {True, False})
        pdf.pgsql.write_data(pan_dat,
                             self.engine,
                             schema,
                             active_fld="active_fld")
        # After the PanDat write, only active rows remain in the table.
        self.assertTrue(
            set(_[0] for _ in self.engine.execute(
                f"Select active_fld from {schema}.categories")) == {True})
Example #10
0
    def test_nullables(self):
        """Nullable string fields (None values) round-trip through postgres.

        Exercises both the PanDat and TicDat readers against the same data,
        with nans_are_same_for_data_rows so NULL/nan/None compare equal.
        """
        schema = test_schema + "nullables"
        pdf = PanDatFactory(table_with_stuffs=[["field one"], ["field two"]])
        pdf.set_data_type("table_with_stuffs", "field one")
        # "field two" is a nullable, strings-only field.
        pdf.set_data_type("table_with_stuffs",
                          "field two",
                          number_allowed=False,
                          strings_allowed='*',
                          nullable=True)
        tdf = TicDatFactory.create_from_full_schema(
            pdf.schema(include_ancillary_info=True))
        # Middle row carries the None that must survive the round trip.
        tic_dat = tdf.TicDat(
            table_with_stuffs=[[101, "022"], [202, None], [303, "111"]])
        dat = tdf.copy_to_pandas(tic_dat, drop_pk_columns=False)
        self.assertFalse(tdf.find_data_type_failures(tic_dat))
        self.assertFalse(pdf.find_data_type_failures(dat))

        pdf.pgsql.write_schema(self.engine, schema)
        pdf.pgsql.write_data(dat, self.engine, schema)
        dat_1 = pdf.pgsql.create_pan_dat(self.engine, schema)
        self.assertTrue(
            pdf._same_data(dat, dat_1, nans_are_same_for_data_rows=True))
        tic_dat_1 = tdf.pgsql.create_tic_dat(self.engine, schema)
        self.assertTrue(
            tdf._same_data(tic_dat,
                           tic_dat_1,
                           nans_are_same_for_data_rows=True))
Example #11
0
 def test_parameters_pd(self):
     """A PanDat parameters table (with infinities) round-trips through postgres."""
     schema = test_schema + "_parameters_pd"
     pdf = PanDatFactory(parameters=[["Key"], ["Value"]])
     pdf.add_parameter("Something", 100)
     pdf.add_parameter("Different",
                       'boo',
                       strings_allowed='*',
                       number_allowed=False)
     # Build the data as a TicDat first, then convert to pandas form.
     tic_version = TicDatFactory(**pdf.schema()).TicDat(
         parameters=[["Something", float("inf")], ["Different", "inf"]])
     dat = TicDatFactory(**pdf.schema()).copy_to_pandas(
         tic_version, drop_pk_columns=False)
     pdf.pgsql.write_schema(self.engine, schema)
     pdf.pgsql.write_data(dat, self.engine, schema)
     round_tripped = pdf.pgsql.create_pan_dat(self.engine, schema)
     self.assertTrue(pdf._same_data(dat, round_tripped))
Example #12
0
    def copy_from_ampl_variables(self, ampl_variables):
        """
        copies the solution results from ampl_variables into a new PanDat object

        :param ampl_variables: a dict mapping from (table_name, field_name) -> amplpy.variable.Variable
                               (amplpy.variable.Variable is the type object returned by
                                AMPL.getVariable)
                                table_name should refer to a table in the schema that has
                                primary key fields.
                                field_name can refer to a data field for table_name, or it
                                can be falsey. If the latter, then AMPL variables that
                                pass the filter (see below) will simply populate the primary key
                                of the table_name.
                                Note that by default, only non-zero data is copied over.
                                If you want to override this filter, then instead of mapping to
                                amplpy.variable.Variable you should map to a
                                (amplpy.variable.Variable, filter) where filter accepts a data value
                                and returns a boolean.

        :return: a deep copy of the ampl_variables into a PanDat object
        """
        # Multiple table_names with different field_names would make for a messy
        # join problem here; delegating to TicDatFactory sidesteps that entirely.
        from ticdat import TicDatFactory
        tic_factory = TicDatFactory.create_from_full_schema(
            self.schema(include_ancillary_info=True))
        tic_solution = tic_factory.copy_from_ampl_variables(ampl_variables)
        as_pandas = tic_factory.copy_to_pandas(tic_solution, drop_pk_columns=False)
        # Flatten the pk-based indices back to plain positional ones.
        for tbl in self.all_tables:
            getattr(as_pandas, tbl).reset_index(drop=True, inplace=True)
        return self.PanDat(**{tbl: getattr(as_pandas, tbl)
                              for tbl in self.all_tables})
Example #13
0
def solve(dat):
    """
    core solving routine
    :param dat: a good ticdat for the input_schema
    :return: a good ticdat for the solution_schema, or None
    """
    assert input_schema.good_tic_dat_object(dat)
    assert not input_schema.find_foreign_key_failures(dat)
    assert not input_schema.find_data_type_failures(dat)
    assert not input_schema.find_data_row_failures(dat)

    # These variables are populated by the diet.lng file.
    solution_variables = TicDatFactory(buy_food=[["Food"], ["Quantity"]])

    sln = lingo_run("diet.lng", input_schema, dat, solution_variables)
    if sln:
        # Only foods actually purchased make it into the solution.
        purchased = {food: row
                     for food, row in sln.buy_food.items()
                     if row["Quantity"] > 0}
        rtn = solution_schema.TicDat(buy_food=purchased)
        # Accumulate consumed nutrition per category from the purchased foods.
        for (food, category), row in dat.nutrition_quantities.items():
            if food in rtn.buy_food:
                rtn.consume_nutrition[category]["Quantity"] += \
                    row["Quantity"] * rtn.buy_food[food]["Quantity"]
        rtn.parameters['Total Cost'] = sum(
            dat.foods[food]["Cost"] * row["Quantity"]
            for food, row in rtn.buy_food.items())
        return rtn
Example #14
0
    def testIssue45(self):
        """An integer-forced PK column round-trips for both writers (issue 45)."""
        schema = test_schema + "issue45"
        pdf = PanDatFactory(data=[["a"], ["b"]])
        pdf.set_data_type("data",
                          "b",
                          number_allowed=False,
                          strings_allowed='*')
        tdf = TicDatFactory.create_from_full_schema(
            pdf.schema(include_ancillary_info=True))
        # String data values that look numeric (incl. a leading-zero one).
        tic_dat = tdf.TicDat(data=[[2, "1"], [4, "3"], [44, "022"]])
        dat = tdf.copy_to_pandas(tic_dat, drop_pk_columns=False)
        self.assertFalse(tdf.find_data_type_failures(tic_dat))
        self.assertFalse(pdf.find_data_type_failures(dat))
        pdf.pgsql.write_schema(self.engine,
                               schema,
                               forced_field_types={("data", "a"): "integer"})
        pdf.pgsql.write_data(dat, self.engine, schema)

        def two_checks():
            # Both readers must see the same data that was written.
            dat_1 = pdf.pgsql.create_pan_dat(self.engine, schema)
            self.assertTrue(pdf._same_data(dat, dat_1))
            tic_dat_1 = tdf.pgsql.create_tic_dat(self.engine, schema)
            self.assertTrue(tdf._same_data(tic_dat, tic_dat_1))

        two_checks()
        # Rewrite through the TicDat writer and verify again.
        tdf.pgsql.write_data(tic_dat, self.engine, schema)
        two_checks()
Example #15
0
    def copy_to_tic_dat(self, pan_dat, freeze_it=False):
        """
        copies the pan_dat object into a new tic_dat object
        performs a deep copy

        :param pan_dat: a pandat object

        :param freeze_it: boolean. should the returned object be frozen?

        :return: a deep copy of the pan_dat argument in tic_dat format
        """
        msg = []
        is_good = self.good_pan_dat_object(pan_dat, msg.append)
        verify(is_good,
               "pan_dat not a good object for this factory : %s" % "\n".join(msg))
        from ticdat import TicDatFactory
        tic_factory = TicDatFactory(**self.schema())
        copied = self._copy_to_tic_dat(pan_dat)
        return tic_factory.freeze_me(copied) if freeze_it else copied
Example #16
0
 def testDietWithInfFlagging(self):
     """Infinity values are stored as the flag value 999999999 and restored on read.

     A factory with the same infinity flag (via clone) round-trips exactly;
     a plain factory sees the raw flag value instead of float('inf').
     """
     tdf = diet_schema.clone()
     dat = tdf.copy_tic_dat(diet_dat)
     # All +/- infinities will be written to postgres as 999999999.
     tdf.set_infinity_io_flag(999999999)
     schema = test_schema + "_diet_inf_flagging"
     tdf.pgsql.write_schema(self.engine, schema)
     tdf.pgsql.write_data(dat, self.engine, schema)
     dat_1 = tdf.pgsql.create_tic_dat(self.engine, schema)
     self.assertTrue(tdf._same_data(dat, dat_1))
     # clone preserves the infinity flag, so the round trip still matches.
     tdf = tdf.clone()
     dat_1 = tdf.pgsql.create_tic_dat(self.engine, schema)
     self.assertTrue(tdf._same_data(dat, dat_1))
     # A fresh factory without the flag reads the literal 999999999 back.
     tdf = TicDatFactory(**diet_schema.schema())
     dat_1 = tdf.pgsql.create_tic_dat(self.engine, schema)
     self.assertFalse(tdf._same_data(dat, dat_1))
     self.assertTrue(
         dat_1.categories["protein"]["Max Nutrition"] == 999999999)
     # Restoring the infinity by hand makes the data sets match again.
     dat_1.categories["protein"]["Max Nutrition"] = float("inf")
     self.assertTrue(tdf._same_data(dat, dat_1))
Example #17
0
 def test_pgtd_active_dups(self):
     """find_duplicates honors an active_fld column when counting duplicate rows."""
     if not self.can_run:
         return
     schema = test_schema + "_act_dups"
     # pk-free schema so duplicate rows can be written; "Da Active" columns
     # act as the active flags.
     tdf_1 = TicDatFactory(t_one=[[],
                                  ["Field One", "Field Two", "Da Active"]],
                           t_two=[[], ["Field One", "Da Active"]])
     dat = tdf_1.TicDat(t_one=[["a", "b", True], ["a", "c", True],
                               ["a", "b", False], ["a", "d", True]],
                        t_two=[["a", True], ["b", False], ["a", False],
                               ["b", False], ["a", False]])
     self.assertTrue(len(dat.t_one) == 4 and len(dat.t_two) == 5)
     tdf_1.pgsql.write_schema(
         self.engine,
         schema,
         include_ancillary_info=False,
         forced_field_types={(t, f): "boolean" if "Active" in f else "text"
                             for t, (pks, dfs) in tdf_1.schema().items()
                             for f in pks + dfs})
     tdf_1.pgsql.write_data(dat, self.engine, schema)
     self.assertTrue(
         tdf_1._same_data(dat,
                          tdf_1.pgsql.create_tic_dat(self.engine, schema),
                          epsilon=1e-8))
     # Re-interpret the same tables with primary keys so duplicates are defined.
     tdf = TicDatFactory(t_one=[["Field One", "Field Two"], []],
                         t_two=[["Field One"], []])
     # Without the active filter there are duplicates; with it there are none.
     self.assertTrue(tdf.pgsql.find_duplicates(self.engine, schema))
     self.assertFalse(
         tdf.pgsql.find_duplicates(self.engine,
                                   schema,
                                   active_fld="da_active"))
Example #18
0
 def test_parameters(self):
     """A TicDat parameters table with an infinite value round-trips through postgres."""
     schema = test_schema + "_parameters"
     tdf = TicDatFactory(parameters=[["Key"], ["Value"]])
     tdf.add_parameter("Something", 100)
     tdf.add_parameter("Different",
                       'boo',
                       strings_allowed='*',
                       number_allowed=False)
     # One numeric infinity and one literal "inf" string.
     dat = tdf.TicDat(parameters=[["Something", float("inf")],
                                  ["Different", "inf"]])
     tdf.pgsql.write_schema(self.engine, schema)
     tdf.pgsql.write_data(dat, self.engine, schema)
     round_tripped = tdf.pgsql.create_tic_dat(self.engine, schema)
     self.assertTrue(tdf._same_data(dat, round_tripped))
Example #19
0
def netflowSolver(modelType):
    """
    Solve the multi-commodity network flow problem.

    :param modelType: the engine type handed to ticdat.Model
    :return: (solution TicDat with a flow table, total cost) if an optimal
             solution is found, else None
    """
    tdf = TicDatFactory(**netflowSchema())
    addNetflowForeignKeys(tdf)
    addNetflowDataTypes(tdf)

    dat = tdf.copy_tic_dat(netflowData())
    assert not tdf.find_data_type_failures(
        dat) and not tdf.find_foreign_key_failures(dat)

    mdl = Model(modelType, "netflow")

    # One variable per (commodity, source, destination) cost entry that is a real arc.
    flow = {}
    for h, i, j in dat.cost:
        if (i, j) in dat.arcs:
            flow[h, i, j] = mdl.add_var(name='flow_%s_%s_%s' % (h, i, j))

    flowslice = Slicer(flow)

    # Arc capacity: total flow over all commodities is capped per arc.
    for i_, j_ in dat.arcs:
        mdl.add_constraint(mdl.sum(flow[h, i, j]
                                   for h, i, j in flowslice.slice('*', i_, j_))
                           <= dat.arcs[i_, j_]["capacity"],
                           name='cap_%s_%s' % (i_, j_))

    # Flow conservation: incoming flow plus external inflow equals outgoing
    # flow, for every (commodity, node) with nonzero inflow or incident arcs.
    for h_, j_ in set(k for k, v in dat.inflow.items()
                      if abs(v["quantity"]) > 0).union(
                          {(h, i)
                           for h, i, j in flow}, {(h, j)
                                                  for h, i, j in flow}):
        mdl.add_constraint(
            mdl.sum(flow[h, i, j]
                    for h, i, j in flowslice.slice(h_, '*', j_)) +
            dat.inflow.get((h_, j_), {"quantity": 0})["quantity"] == mdl.sum(
                flow[h, i, j] for h, i, j in flowslice.slice(h_, j_, '*')),
            name='node_%s_%s' % (h_, j_))

    # Renamed the comprehension variable (was `flow`) so it no longer shadows
    # the flow variable dict.
    mdl.set_objective(
        mdl.sum(var * dat.cost[h, i, j]["cost"]
                for (h, i, j), var in flow.items()))
    # Bug fix: optimize() was previously called twice in nested ifs, which
    # solved the model a second time for no reason.
    if mdl.optimize():
        solutionFactory = TicDatFactory(
            flow=[["commodity", "source", "destination"], ["quantity"]])
        rtn = solutionFactory.TicDat()
        for (h, i, j), var in flow.items():
            if mdl.get_solution_value(var) > 0:
                rtn.flow[h, i, j] = mdl.get_solution_value(var)
        return rtn, sum(dat.cost[h, i, j]["cost"] * r["quantity"]
                        for (h, i, j), r in rtn.flow.items())
Example #20
0
 def testCircularFks(self):
     """Circular foreign keys can still be written when ancillary info is skipped."""
     schema = test_schema + "circular_fks"
     tdf = TicDatFactory(table_one=[["A Field"], []],
                         table_two=[["B Field"], []],
                         table_three=[["C Field"], []])
     # one -> two -> three -> one forms a cycle of foreign keys.
     for child, parent, mapping in (
             ("table_one", "table_two", ["A Field", "B Field"]),
             ("table_two", "table_three", ["B Field", "C Field"]),
             ("table_three", "table_one", ["C Field", "A Field"])):
         tdf.add_foreign_key(child, parent, mapping)
     # include_ancillary_info=False means no FK constraints get emitted.
     tdf.pgsql.write_schema(self.engine,
                            schema,
                            include_ancillary_info=False)
     shared_rows = [["a"], ["b"], ["c"]]
     dat = tdf.TicDat(table_one=shared_rows, table_two=shared_rows,
                      table_three=shared_rows)
     tdf.pgsql.write_data(dat, self.engine, schema)
Example #21
0
def dietSolver(modelType):
    """Solve the diet problem.

    :param modelType: the engine type handed to ticdat.Model
    :return: (solution TicDat, total cost) if an optimal solution is found,
             else None
    """
    tdf = TicDatFactory(**dietSchema())
    addDietForeignKeys(tdf)
    addDietDataTypes(tdf)

    dat = tdf.copy_tic_dat(dietData())
    assert not tdf.find_data_type_failures(
        dat) and not tdf.find_foreign_key_failures(dat)

    mdl = Model(modelType, "diet")

    # One bounded variable per nutrition category.
    nutrition = {}
    for c, n in dat.categories.items():
        nutrition[c] = mdl.add_var(lb=n["minNutrition"],
                                   ub=n["maxNutrition"],
                                   name=c)

    # Create decision variables for the foods to buy
    buy = {}
    for f in dat.foods:
        buy[f] = mdl.add_var(name=f)

    # Nutrition constraints
    for c in dat.categories:
        mdl.add_constraint(mdl.sum(dat.nutritionQuantities[f, c]["qty"] *
                                   buy[f] for f in dat.foods) == nutrition[c],
                           name=c)

    # Minimize the total purchase cost.
    mdl.set_objective(mdl.sum(buy[f] * c["cost"]
                              for f, c in dat.foods.items()))

    if mdl.optimize():
        solutionFactory = TicDatFactory(parameters=[[], ["totalCost"]],
                                        buyFood=[["food"], ["qty"]],
                                        consumeNutrition=[["category"],
                                                          ["qty"]])
        sln = solutionFactory.TicDat()
        # Only report foods bought above a small tolerance.
        for f, x in buy.items():
            if mdl.get_solution_value(x) > 0.0001:
                sln.buyFood[f] = mdl.get_solution_value(x)
        for c, x in nutrition.items():
            sln.consumeNutrition[c] = mdl.get_solution_value(x)
        return sln, sum(dat.foods[f]["cost"] * r["qty"]
                        for f, r in sln.buyFood.items())
Example #22
0
 def test_ints_and_strings_and_lists(self):
     """Constrained string and integer fields survive a postgres round trip."""
     if not self.can_run:
         return
     tdf = TicDatFactory(t_one=[[], ["str_field", "int_field"]],
                         t_two=[["str_field", "int_field"], []])
     # Restrict str_field to two literals and force int_field to integers.
     for tbl in tdf.all_tables:
         tdf.set_data_type(tbl,
                           "str_field",
                           strings_allowed=['This', 'That'],
                           number_allowed=False)
         tdf.set_data_type(tbl, "int_field", must_be_int=True)
     dat = tdf.TicDat(t_one=[["This", 1], ["That", 2], ["This", 111],
                             ["That", 211]],
                      t_two=[["This", 10], ["That", 9]])
     self.assertFalse(tdf.find_data_type_failures(dat))
     self.assertTrue(len(dat.t_one) == 4)
     self.assertTrue(len(dat.t_two) == 2)
     pg = tdf.pgsql
     pg.write_schema(self.engine, test_schema)
     pg.write_data(dat, self.engine, test_schema)
     self.assertFalse(pg.find_duplicates(self.engine, test_schema))
     round_tripped = pg.create_tic_dat(self.engine, test_schema)
     self.assertTrue(tdf._same_data(dat, round_tripped))
Example #23
0
def solve(dat):
    """
    core solving routine
    :param dat: a good ticdat for the input_schema
    :return: a good ticdat for the solution_schema, or None
    """

    # These are the variables populated by the netflow.lng file.
    solution_variables = TicDatFactory(
        flow=[["Commodity", "Source", "Destination"], ["Quantity"]])

    sln = lingo_run("netflow.lng", input_schema, dat, solution_variables)
    if sln:
        # Only keep flows that are actually used.
        rtn = solution_schema.TicDat(
            flow={k: r
                  for k, r in sln.flow.items() if r["Quantity"] > 0})
        # Total cost is priced from the input cost table.
        rtn.parameters["Total Cost"] = sum(
            dat.cost[h, i, j]["Cost"] * r["Quantity"]
            for (h, i, j), r in rtn.flow.items())
        return rtn
Example #24
0
 def test_true_false(self):
     """Boolean data fields (forced to 'bool') and -inf values round-trip.

     Also verifies that writing a non-TicDat object raises TicDatError.
     """
     if not self.can_run:
         return
     tdf = TicDatFactory(table=[["pkf"], ["df1", "df2"]])
     # Allow -inf in df2 (default data type would reject negatives).
     tdf.set_data_type("table", "df2", min=-float("inf"))
     dat = tdf.TicDat(table=[["d1", True, 100], ["d2", False, 200],
                             ["d3", False, -float("inf")]])
     self.assertTrue(len(dat.table) == 3)
     self.assertFalse(tdf.find_data_type_failures(dat))
     pgtf = tdf.pgsql
     # Writing None instead of a TicDat must fail loudly.
     ex = None
     try:
         pgtf.write_data(None, self.engine, test_schema)
     except utils.TicDatError as te:
         ex = str(te)
     self.assertTrue(ex and "Not a valid TicDat object" in ex)
     pgtf.write_schema(self.engine,
                       test_schema,
                       forced_field_types={("table", "df1"): "bool"})
     pgtf.write_data(dat, self.engine, test_schema)
     self.assertFalse(pgtf.find_duplicates(self.engine, test_schema))
     pg_tic_dat = pgtf.create_tic_dat(self.engine, test_schema)
     self.assertTrue(tdf._same_data(dat, pg_tic_dat))
Example #25
0
from ticdat import TicDatFactory
# A single one-table schema doubles as both the input and the solution schema.
solution_schema = input_schema = TicDatFactory(table=[['field'], []])


def solve(dat):
    """Identity solve: the solution is simply the input data set."""
    return dat


def an_action(dat):
    """Insert an empty row keyed 'a' into dat.table, then return dat."""
    dat.table["a"] = {}
    return dat


def another_action(dat, sln):
    """Insert a shared empty row keyed 'e' into both data sets; return both."""
    blank = {}
    # Both tables deliberately reference the SAME dict object, as the
    # original chained assignment did.
    dat.table["e"] = blank
    sln.table["e"] = blank
    return {"dat": dat, "sln": sln}
Example #26
0
# 3. Create a solve function that accepts a data set consistent with the input
#    schema and (if possible) returns a data set consistent with the output schema.
#
# Provides command line interface via ticdat.standard_main
# For example, typing
#   python netflow.py -i csv_data -o solution_csv_data
# will read from a model stored in .csv files in the csv_data directory
# and write the solution to .csv files in the solution_csv_data directory

from ticdat import TicDatFactory, standard_main, Model, Slicer

# ------------------------ define the input schema --------------------------------
# Five tables: two entity lists plus arcs, per-arc commodity costs, and
# per-(commodity, node) external inflow.
input_schema = TicDatFactory (
     commodities = [["Name"],[]],
     nodes  = [["Name"],[]],
     arcs = [["Source", "Destination"],["Capacity"]],
     cost = [["Commodity", "Source", "Destination"], ["Cost"]],
     inflow = [["Commodity", "Node"],["Quantity"]]
)

# Define the foreign key relationships
# (every Source/Destination/Node must be a known node; every Commodity a known commodity)
input_schema.add_foreign_key("arcs", "nodes", ['Source', 'Name'])
input_schema.add_foreign_key("arcs", "nodes", ['Destination', 'Name'])
input_schema.add_foreign_key("cost", "nodes", ['Source', 'Name'])
input_schema.add_foreign_key("cost", "nodes", ['Destination', 'Name'])
input_schema.add_foreign_key("cost", "commodities", ['Commodity', 'Name'])
input_schema.add_foreign_key("inflow", "commodities", ['Commodity', 'Name'])
input_schema.add_foreign_key("inflow", "nodes", ['Node', 'Name'])

# Define the data types
input_schema.set_data_type("arcs", "Capacity", min=0, max=float("inf"),
Example #27
0
# Gurobi required for actual solving

try:  # if you don't have gurobipy installed, the code will still load and then fail on solve
    import gurobipy as gp
except ImportError:  # narrowed from a bare except: only a missing package should be tolerated
    gp = None

from ticdat import TicDatFactory
from tts_diet.tooltips import input_schema_tooltips, solution_schema_tooltips

# ------------------------ define the input schema --------------------------------
# There are three input tables, with 4 primary key fields and 4 data fields.
input_schema = TicDatFactory(categories=[["Name"],
                                         ["Min Nutrition", "Max Nutrition"]],
                             foods=[["Name"], ["Cost"]],
                             nutrition_quantities=[["Food", "Category"],
                                                   ["Quantity"]])
# Attach UI tooltips to the (table, field) pairs declared in tooltips.py.
for (tbl, fld), tip in input_schema_tooltips.items():
    input_schema.set_tooltip(tbl, fld, tip)

# Define the foreign key relationships
# (every nutrition_quantities row must reference a known food and category)
input_schema.add_foreign_key("nutrition_quantities", "foods", ["Food", "Name"])
input_schema.add_foreign_key("nutrition_quantities", "categories",
                             ["Category", "Name"])

# Define the data types
input_schema.set_data_type("categories",
                           "Min Nutrition",
                           min=0,
                           max=float("inf"),
Example #28
0
def pan_dat_maker(schema, tic_dat):
    """Convert tic_dat into an equivalent PanDat for the same schema."""
    tic_factory = TicDatFactory(**schema)
    pan_factory = PanDatFactory(**schema)
    as_pandas = copy_to_pandas_with_reset(tic_factory, tic_dat)
    return pan_factory.copy_pan_dat(as_pandas)
Example #29
0
# 2. Define the output data schema
# 3. Create a solve function that accepts a data set consistent with the input
#    schema and (if possible) returns a data set consistent with the output schema.
#
# Provides command line interface via ticdat.standard_main
# For example, typing
#   python soda_promotion_optimizer.py -i input_data.xlsx -o solution_data.xlsx
# will read from a model stored in the file input_data.xlsx and write the solution
# to solution_data.xlsx.

from ticdat import TicDatFactory, standard_main, Slicer, gurobi_env
import gurobipy as gu

# Four tables: a key/value parameters table, products grouped into families,
# per-(product, cost) sales forecasts, and per-family promotion limits.
input_schema = TicDatFactory(parameters=[["Key"], ["Value"]],
                             products=[["Name"], ["Family"]],
                             forecast_sales=[["Product", "Cost Per Unit"],
                                             ["Sales"]],
                             max_promotions=[["Product Family"],
                                             ["Max Promotions"]])

# The only recognized parameter key is "Maximum Total Investment".
input_schema.set_data_type("parameters",
                           "Key",
                           number_allowed=False,
                           strings_allowed=("Maximum Total Investment", ))
# Parameter values are non-negative numbers (infinity allowed).
input_schema.set_data_type("parameters",
                           "Value",
                           min=0,
                           max=float("inf"),
                           inclusive_max=True)
input_schema.set_data_type("forecast_sales",
                           "Cost Per Unit",
                           min=0,
Example #30
0
#   python cogmodel.py -i csv_data -o solution_csv_data
# will read from a model stored in .csv files in the csv_data directory and
# write the solution to the solution_csv_data directory.

# this version of the file uses CPLEX

import time
import datetime
import os
from docplex.mp.model import Model
from ticdat import TicDatFactory, Progress, LogFile, Slicer, standard_main

# ------------------------ define the input schema --------------------------------
# There are three input tables, with 4 primary key fields  and 4 data fields.
dataFactory = TicDatFactory(sites=[['name'], ['demand', 'center_status']],
                            distance=[['source', 'destination'], ['distance']],
                            parameters=[["key"], ["value"]])

# add foreign key constraints
# (both endpoints of every distance row must be known sites)
dataFactory.add_foreign_key("distance", "sites", ['source', 'name'])
dataFactory.add_foreign_key("distance", "sites", ['destination', 'name'])

# center_status is a flag field which can take one of two string values.
dataFactory.set_data_type(
    "sites",
    "center_status",
    number_allowed=False,
    strings_allowed=["Can Be Center", "Pure Demand Point"])
# The default type of non infinite, non negative works for distance
dataFactory.set_data_type("distance", "distance")
# ---------------------------------------------------------------------------------
Example #31
0
#!/usr/bin/python

# Copyright 2015, Opalytics, Inc.
#
# edited with permission from Gurobi Optimization, Inc.

# Solve a multi-commodity flow problem.

from gurobipy import *
from ticdat import TicDatFactory

# define the input schema.
# Five tables: commodity and node entity lists plus arcs, per-arc commodity
# costs, and per-(commodity, node) external inflow.
dataFactory = TicDatFactory (
     commodities = [["name"],[]],
     nodes  = [["name"],[]],
     arcs = [["source", "destination"],["capacity"]],
     cost = [["commodity", "source", "destination"], ["cost"]],
     inflow = [["commodity", "node"],["quantity"]]
)

# add foreign key constraints
# (every source/destination/node must be a known node; every commodity a known commodity)
dataFactory.add_foreign_key("arcs", "nodes", ['source', 'name'])
dataFactory.add_foreign_key("arcs", "nodes", ['destination', 'name'])
dataFactory.add_foreign_key("cost", "nodes", ['source', 'name'])
dataFactory.add_foreign_key("cost", "nodes", ['destination', 'name'])
dataFactory.add_foreign_key("cost", "commodities", ['commodity', 'name'])
dataFactory.add_foreign_key("inflow", "commodities", ['commodity', 'name'])
dataFactory.add_foreign_key("inflow", "nodes", ['node', 'name'])

# the whole schema has only three data fields to type
dataFactory.set_data_type("arcs", "capacity")