def run(self):
    """Worker main loop: claim queued calculation tasks of the currently
    running optimization, run the flopy model for each, and write the
    resulting fitness back onto the calculation-task row.

    Runs forever; all input and output is exchanged through the database
    session, so there is no return value.
    """
    while True:
        # todo implement worktask based on hash id that checks if there's already a job calculating with the hashid
        # todo and instead waits for it to be finished and then reuses the same results
        # Only serve calculations belonging to an optimization in the RUN state.
        running_optimization_task = self.session.query(self.ot)\
            .filter(self.ot.optimization_state == OPTIMIZATION_RUN).first()
        if running_optimization_task:
            self.current_optimization_id = running_optimization_task.optimization_id
            # Calculation tasks live in a per-optimization table resolved by id.
            self.current_ct = get_table_for_optimization_id(self.ct, self.current_optimization_id)
            new_calculation_task = self.query_first_starting_calculation_task()
            if new_calculation_task:
                # Flag the task as RUN and commit immediately so concurrent
                # workers do not pick up the same task.
                new_calculation_task.calculation_state = CALCULATION_RUN
                self.session.commit()
                self.current_calculation_id = new_calculation_task.calculation_id
                calculation_data = load_json(new_calculation_task.calculation_data)
                # Build model
                flopy_data_model = FlopyDatamodel(version=calculation_data["version"],
                                                  data=calculation_data["data"],
                                                  uuid=calculation_data["optimization_id"])
                flopy_data_model.add_wells(objects=calculation_data["optimization"]["objects"])
                flopy_data_model.build_flopy_models()
                flopy_data_model.run_models()
                fitness = flopy_data_model.get_fitness(objectives=calculation_data["optimization"]["objectives"],
                                                       constraints=calculation_data["optimization"]["constraints"],
                                                       objects=calculation_data["optimization"]["objects"])
                # data_output = {"fitness": fitness}
                #
                # write_json(obj=data_output,
                #            filepath=new_calculation_task.calcoutput_filepath)
                # Fitness is stored directly on the row (file output above was retired).
                new_calculation_task.scalar_fitness = fitness
                new_calculation_task.calculation_state = CALCULATION_FINISH
                if running_optimization_task.optimization_type == OPTIMIZATION_TYPE_EVOLUTION:
                    # NOTE(review): presumably counts finished individuals of the
                    # current generation — confirm against the manager side.
                    running_optimization_task.current_population += 1
                self.session.commit()
                continue
def summarize_finished_calculation_tasks(
        self, generation: int) -> Tuple[List[dict], list]:
    """Collect the input data and fitness of every finished calculation
    task of one generation.

    :param generation: generation whose finished tasks are summarized
    :return: (parsed calculation input data per task, fitness per task)
    """
    finished_tasks = self.query_finished_calculation_tasks(
        generation=generation)
    summarized_data = [load_json(task.calcinput_filepath)
                       for task in finished_tasks]
    summarized_fitness = [task.fitness for task in finished_tasks]
    return summarized_data, summarized_fitness
def main():
    """Send a text message for any events configured for today's
    month/weekday combination; otherwise print a notice.

    Reads the event configuration from ``events.json``, keyed by month
    name then weekday name, with optional "First", "Last" and "Every"
    entries (first/last occurrence of this weekday in the month, or
    every occurrence).
    """
    month_name = current_date.strftime("%B")
    weekday_name = current_date.strftime("%A")
    # Every date in the current month that falls on today's weekday,
    # in chronological order.
    matching_days = [
        day
        for week in TextCalendar().monthdatescalendar(current_date.year,
                                                      current_date.month)
        for day in week
        if day.month == current_date.month
        and day.weekday() == current_date.weekday()
    ]
    events_dict = load_json("events.json")
    notifications = []
    if month_name in events_dict and weekday_name in events_dict[month_name]:
        events = events_dict[month_name][weekday_name]
        if events.get("First") and current_date.date() == matching_days[0]:
            notifications.append(events["First"])
        if events.get("Last") and current_date.date() == matching_days[-1]:
            notifications.append(events["Last"])
        if events.get("Every"):
            notifications.append(events["Every"])
    if notifications:
        send_text(', '.join(notifications))
    else:
        print("No notifications")
def get_data():
    """Load the region and letter ('brieven') fixtures for the view layer.

    :return: dict with the parsed contents of ``static/regio.json`` under
        ``'regios'`` and ``static/brieven.json`` under ``'brieven'``.
    """
    regio_json = load_json('static/regio.json')
    brieven_json = load_json('static/brieven.json')
    # NOTE(review): the original computed ``get_box_data(regio_json)`` but
    # never used the result; the dead call was removed. If it was meant to
    # be part of the response (e.g. under a 'boxes' key), re-add it
    # deliberately.
    return {'regios': regio_json, 'brieven': brieven_json}
def run(self):
    """ Function run is used to keep the manager working constantly. It will
    work on one optimization only and fulfill the job which includes
    constantly creating jobs for one generation, then after calculation
    summarizing the results and creating new generations with new jobs and
    finally put the solution back in the table and set the optimization to
    be finished.

    Returns:
        None - output is managed over databases
    """
    while True:
        new_optimization_task = self.query_first_starting_optimization_task(
        )
        if new_optimization_task:
            print(
                f"Working on task with id: {new_optimization_task.optimization_id}"
            )
            self.current_optimization_id = new_optimization_task.optimization_id
            # Claim the task: flip it to RUN and commit before doing any work.
            optimization_task = self.query_current_optimization_task()
            optimization_task.optimization_state = OPTIMIZATION_RUN
            self.session.commit()
            # Set temporary attributes
            self.current_data = load_json(
                new_optimization_task.data_filepath)
            variable_map, variable_bounds, _ = self.read_optimization_data(
            )
            self.current_variable_map = variable_map
            # Set temporary attributes2
            self.current_eat = self.ea_toolbox(
                bounds=variable_bounds,
                weights=self.get_weights(),  # from self.optimization_data
                parameters=self.current_data["optimization"]["parameters"])
            # History and calculation tables are per-optimization; resolve
            # them by id and create them if they do not exist yet.
            self.current_oh = get_table_for_optimization_id(
                self.oh, self.current_optimization_id)
            self.current_ct = get_table_for_optimization_id(
                self.ct, self.current_optimization_id)
            Base.metadata.create_all(bind=engine, tables=[
                self.current_ct.__table__, self.current_oh.__table__
            ], checkfirst=True)
            # Blocks until the whole optimization is finished.
            solution = self.manage_any_optimization()
            # Re-query the row: commits above may have expired the instance.
            optimization_task = self.query_current_optimization_task()
            optimization_task.solution = solution
            optimization_task.optimization_state = OPTIMIZATION_FINISH
            self.session.commit()
            # Remove single job properties
            self.remove_optimization_and_calculation_data()
            continue
        self.remove_old_optimization_tasks_and_tables()
def upload_file() -> jsonify:
    """Flask view: accept an optimization JSON upload (POST), validate it
    against the modflow optimization schema, persist it and register a new
    OptimizationTask; render the upload form on GET.
    """
    # This is our most important path
    if request.method == 'POST':
        # https://stackoverflow.com/questions/46136478/flask-upload-how-to-get-file-name
        # See if there's a file in our selection field
        if not request.files.get('file', None):
            return render_template('upload.html', error="No file selected!")
        file_upload = request.files["file"]
        request_data = json.load(file_upload)
        schema_upload = load_json(JSON_SCHEMA_MODFLOW_OPTIMIZATION)
        try:
            validate(instance=request_data, schema=schema_upload)
        except ValidationError as e:
            # Invalid document: report the failing validator and schema path.
            error = {
                "message": f"Validation failed. {e.message}",
                "code": e.validator,
                "schemapath": e.schema_path
            }
            return render_template('upload.html', error=error)
        except SchemaError as e:
            # The schema itself is broken, not the uploaded document.
            return render_template('upload.html', error=str(e))
        author = request_data.get("author", "unknown")
        project = request_data.get("project", "unknown")
        optimization_id = request_data["optimization_id"]
        optimization_state = request_data["type"]
        method = request_data["optimization"]["parameters"]["method"]
        population_size = request_data["optimization"]["parameters"]["pop_size"]
        total_generation = request_data["optimization"]["parameters"]["ngen"]
        # Create folder named after task_id in optimization_data folder
        data_filepath = create_input_and_output_filepath(folder=OPTIMIZATION_DATA,
                                                         task_id=optimization_id,
                                                         file_types=[DATA_FILE])[0]
        optimizationtask = OptimizationTask(
            author=author,
            project=project,
            optimization_id=optimization_id,
            optimization_type=method,
            optimization_state=optimization_state,  # Input: "optimization_start"
            total_population=population_size,
            total_generation=total_generation,
            solution=dict(),
            data_filepath=data_filepath
        )
        try:
            write_json(obj=request_data,
                       filepath=data_filepath)
            Session.add(optimizationtask)
            Session.commit()
        except (UnicodeDecodeError, IOError):
            # Roll back both the filesystem artifact and the DB transaction
            # so a failed upload leaves no partial state behind.
            rmtree(Path(OPTIMIZATION_DATA, optimization_id))
            # Path(opt_filepath).unlink()
            # Path(data_filepath).unlink()
            Session.rollback()
            return abort(400, "Error: task couldn't be created!")
        return redirect(f"/optimization")  # /{optimization_id}
    if request.method == 'GET':
        if request.content_type == "application/json":
            return json.dumps({
                'message': "test"
            })
        return render_template('upload.html')