def test_time_stamp(self):
    """Round-trip datetime values through postgres for both TicDat and PanDat.

    Checks that a nullable 'timestamp' column survives write_schema /
    write_data / create_tic_dat unchanged, that timestamps come back as
    datetime.datetime in data fields (but never as primary keys), and that
    the PanDat round trip agrees with the TicDat one to microsecond-scale
    precision.
    """
    factory = TicDatFactory(table=[["Blah"], ["Timed Info"]])
    factory.set_data_type("table", "Timed Info", nullable=True)
    factory.set_default_value("table", "Timed Info", None)

    original = factory.TicDat()
    original.table[1] = dateutil.parser.parse("2014-05-01 18:47:05.069722")
    original.table[2] = dateutil.parser.parse("2014-05-02 18:47:05.178768")

    pg_factory = factory.pgsql
    # Force the key to integer and the data field to a genuine timestamp column.
    pg_factory.write_schema(self.engine, test_schema,
                            forced_field_types={('table', 'Blah'): "integer",
                                                ('table', 'Timed Info'): "timestamp"})
    pg_factory.write_data(original, self.engine, test_schema, dsn=self.postgresql.dsn())

    round_tripped = pg_factory.create_tic_dat(self.engine, test_schema)
    self.assertTrue(factory._same_data(original, round_tripped))
    self.assertTrue(all(isinstance(row["Timed Info"], datetime.datetime)
                        for row in round_tripped.table.values()))
    self.assertFalse(any(isinstance(key, datetime.datetime) for key in round_tripped.table))

    pan_factory = PanDatFactory.create_from_full_schema(
        factory.schema(include_ancillary_info=True))

    def nearly_same(lhs, rhs):
        # Keys must match exactly; timestamps are compared with a
        # sub-microsecond tolerance rather than by strict equality.
        left, right = lhs.table, rhs.table
        if list(left["Blah"]) != list(right["Blah"]):
            return False
        return not any(abs((t1 - t2).total_seconds()) > 1e-6
                       for t1, t2 in zip(left["Timed Info"], right["Timed Info"]))

    pan_round_trip = pan_factory.pgsql.create_pan_dat(self.engine, test_schema)
    pan_reference = pan_dat_maker(factory.schema(), round_tripped)
    self.assertTrue(nearly_same(pan_round_trip, pan_reference))
    for frame in [pan_round_trip.table, pan_reference.table]:
        for pos in range(len(frame)):
            self.assertFalse(isinstance(frame.loc[pos, "Blah"], datetime.datetime))
            self.assertTrue(isinstance(frame.loc[pos, "Timed Info"], datetime.datetime))

    # Perturb one timestamp: equality must break, then be restored once the
    # perturbed data is pushed through postgres and read back.
    pan_round_trip.table.loc[1, "Timed Info"] = dateutil.parser.parse("2014-05-02 18:48:05.178768")
    self.assertFalse(nearly_same(pan_round_trip, pan_reference))
    pan_factory.pgsql.write_data(pan_round_trip, self.engine, test_schema)
    pan_reference = pan_factory.pgsql.create_pan_dat(self.engine, test_schema)
    self.assertTrue(nearly_same(pan_round_trip, pan_reference))

    # Mutating the in-memory TicDat must make it differ from the DB snapshot.
    original.table[2] = dateutil.parser.parse("2014-05-02 18:48:05.178768")
    self.assertFalse(factory._same_data(original, round_tripped))
# Foreign keys: every cost row must reference existing source/destination
# nodes and an existing commodity; every inflow row must reference an
# existing commodity and node.
input_schema.add_foreign_key("cost", "nodes", ['Source', 'Name'])
input_schema.add_foreign_key("cost", "nodes", ['Destination', 'Name'])
input_schema.add_foreign_key("cost", "commodities", ['Commodity', 'Name'])
input_schema.add_foreign_key("inflow", "commodities", ['Commodity', 'Name'])
input_schema.add_foreign_key("inflow", "nodes", ['Node', 'Name'])

# Define the data types
# Capacity: [0, inf] (both ends allowed); Cost: [0, inf); Quantity: any
# finite number (negative inflow presumably models demand — confirm with model).
input_schema.set_data_type("arcs", "Capacity", min=0, max=float("inf"),
                           inclusive_min=True, inclusive_max=True)
input_schema.set_data_type("cost", "Cost", min=0, max=float("inf"),
                           inclusive_min=True, inclusive_max=False)
input_schema.set_data_type("inflow", "Quantity", min=-float("inf"), max=float("inf"),
                           inclusive_min=False, inclusive_max=False)

# The default-default of zero makes sense everywhere except for Capacity
input_schema.set_default_value("arcs", "Capacity", float("inf"))
# ---------------------------------------------------------------------------------

# ------------------------ define the output schema -------------------------------
solution_schema = TicDatFactory(
    flow = [["Commodity", "Source", "Destination"], ["Quantity"]],
    parameters = [["Key"], ["Value"]])
# ---------------------------------------------------------------------------------

# ------------------------ solving section-----------------------------------------
_model_type = "gurobi" # could also be 'cplex' or 'xpress'

def solve(dat):
    """
    core solving routine
    :param dat: a good ticdat for the input_schema
    :return: a good ticdat for the solution_schema, or None
inclusive_max=False)
# Quantity of each food/category pairing: [0, inf).
input_schema.set_data_type("nutrition_quantities", "Quantity", min=0, max=float("inf"),
                           inclusive_min=True, inclusive_max=False)

# We also want to insure that Max Nutrition doesn't fall below Min Nutrition
input_schema.add_data_row_predicate(
    "categories", predicate_name="Min Max Check",
    predicate=lambda row: row["Max Nutrition"] >= row["Min Nutrition"])

# The default-default of zero makes sense everywhere except for Max Nutrition
input_schema.set_default_value("categories", "Max Nutrition", float("inf"))
# ---------------------------------------------------------------------------------

# ------------------------ define the output schema -------------------------------
# There are three solution tables, with 3 primary key fields and 3 data fields.
solution_schema = TicDatFactory(
    parameters=[["Parameter"], ["Value"]],
    buy_food=[["Food"], ["Quantity"]],
    consume_nutrition=[["Category"], ["Quantity"]])
# Attach the user-facing tooltip text to each solution field that has one.
for (tbl, fld), tip in solution_schema_tooltips.items():
    solution_schema.set_tooltip(tbl, fld, tip)
# ---------------------------------------------------------------------------------

# ------------------------ create a solve function --------------------------------

def solve(dat):
    """
# Model parameters, each registered with its default and validation bounds.
# "Number of Centroids" must be a strictly positive integer.
input_schema.add_parameter("Number of Centroids", default_value=4, inclusive_min=False,
                           inclusive_max=False, min=0, max=float("inf"), must_be_int=True)
input_schema.add_parameter("High Service Distance", default_value=0, inclusive_min=True,
                           inclusive_max=True, min=0, max=float("inf"), must_be_int=False)
input_schema.add_parameter("Maximum Average Service Distance", default_value=float("inf"),
                           inclusive_min=True, inclusive_max=True, min=0, max=float("inf"),
                           must_be_int=False)
# Expressed as a percentage, hence the [0, 100] range.
input_schema.add_parameter("Minimum Percent High Service Demand", default_value=0,
                           inclusive_min=True, inclusive_max=True, min=0, max=100,
                           must_be_int=False)
input_schema.add_parameter("Maximum Individual Service Distance", default_value=float("inf"),
                           inclusive_min=False, inclusive_max=True, min=0, max=float("inf"),
                           must_be_int=False)
# "Objective" is a pure string choice between the two modeled objectives;
# numeric values are rejected.
input_schema.add_parameter("Objective", "Minimize Average Service Distance",
                           strings_allowed=["Minimize Average Service Distance",
                                            "Maximize Percent High Service Demand"],
                           number_allowed=False)

# Field-level data types and defaults for the cities and distances tables.
input_schema.set_data_type("cities", "Demand", min=0, max=float("inf"),
                           inclusive_min=True, inclusive_max=False)
input_schema.set_data_type("cities", "Max Assignment Capacity", min=0, max=float("inf"),
                           inclusive_min=True, inclusive_max=True)
# Unlimited assignment capacity unless the data says otherwise.
input_schema.set_default_value("cities", "Max Assignment Capacity", float("inf"))
input_schema.set_data_type("distances", "Distance", min=0, max=float("inf"),
                           inclusive_min=True, inclusive_max=False)

# Both end points of a distance record must be known cities.
input_schema.add_foreign_key("distances", "cities", ['Source', 'Name'])
input_schema.add_foreign_key("distances", "cities", ['Destination', 'Name'])
# The distance matrix is bi-directionally safe. I.e. if the same source/dest and dest/source exist then the
# distances must match. If only one is present, it can fall back to the other in the code.
def _distance_matrix(dat):
    """Build the predicate_kwargs_maker payload for the distances predicate.

    :param dat: input data object whose distances table maps
        (source, destination) keys to rows with a "Distance" field
    :return: {"distance_matrix": {(source, destination): distance, ...}}
    """
    lookup = {}
    for pair, fields in dat.distances.items():
        lookup[pair] = fields["Distance"]
    return {"distance_matrix": lookup}

def _bi_directionally_safe(row, distance_matrix):
    """Row predicate: a distances row passes unless its reversed
    (destination, source) pair also exists with a conflicting distance."""
    reverse_key = (row["Destination"], row["Source"])
    if reverse_key not in distance_matrix:
        return True
    return row["Distance"] == distance_matrix[reverse_key]

input_schema.add_data_row_predicate(
    "distances", predicate_name="Check Bi-Directionally Safe",
    predicate=_bi_directionally_safe,
    predicate_kwargs_maker=_distance_matrix)