def get_dynamic_state():
    """Return the algorithm's current dynamic state as JSON."""
    logger.debug("called")
    resp = jsonify(algo.dynamic_state())
    resp.status_code = 200
    return resp
def delete_compute_environment(self, **kwargs):
    '''
    Delete a compute environment.
    https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/batch.html#Batch.Client.delete_compute_environment
    '''
    logger.debug("Deleting compute environment.")
    return self.client.delete_compute_environment(**kwargs)
def run_100():
    logger.debug("called")
    dynamic_state = algo.run_100(1)
    resp = jsonify(dynamic_state)
    resp.status_code = 200
    return resp
async def fetch(self, request, url=None):
    if url is None:
        url = self.origin_url
    logger.debug('{}.fetch URL: {}'.format(self.__class__.__name__, url))
    return await fetch(__new__(Request(url, request)))
def create_function(self, **kwargs):
    '''
    Creates a new Lambda function.
    http://boto3.readthedocs.io/en/latest/reference/services/lambda.html#Lambda.Client.create_function
    '''
    logger.debug("Creating lambda function.")
    return self.client.create_function(**kwargs)
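# Usage sketch (illustrative only): assuming `lambda_client` is an instance of this
# wrapper class, a minimal call could look like the following; every argument value
# below is a hypothetical placeholder, not taken from this code base.
response = lambda_client.create_function(
    FunctionName='my-function',
    Runtime='python3.9',
    Role='arn:aws:iam::123456789012:role/lambda-execution-role',
    Handler='handler.lambda_handler',
    Code={'ZipFile': open('deployment.zip', 'rb').read()},
)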
def describe_job_definitions(self, **kwargs):
    '''
    Describes a list of job definitions.
    https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/batch.html#Batch.Client.describe_job_definitions
    '''
    logger.debug("Describing job definitions.")
    return self.client.describe_job_definitions(**kwargs)
def deregister_job_definition(self, **kwargs):
    '''
    Deregisters an AWS Batch job definition.
    https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/batch.html#Batch.Client.deregister_job_definition
    '''
    logger.debug("Deregistering job definition.")
    return self.client.deregister_job_definition(**kwargs)
def aggregate(self, collection_name, query_list):
    """
    Run an aggregation pipeline on the given collection and return the results as a list.

    For example:
    db.rooms.aggregate([
        {
            $project: {
                tasks: {
                    $filter: {
                        input: "$tasks",
                        as: "task",
                        cond: {
                            $and: [
                                {$eq: ["$$task.meta.is_archived", false]},
                                {$eq: ["$$task.meta.is_deleted", false]}
                            ]
                        }
                    }
                }
            }
        }
    ])

    :param collection_name: name of the MongoDB collection to aggregate over
    :param query_list: aggregation pipeline (list of stage documents)
    :return: list of result documents, or None if an exception occurred
    """
    try:
        cursor = self.mongo_db[collection_name].aggregate(query_list)
        return list(cursor)
    except Exception as e:
        print('Exception while aggregating MongoDB', e)
        logger.debug(
            'Exception while aggregating in MongoDB as exception: {}'.format(e))
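# Usage sketch (illustrative only): the pipeline below re-expresses the docstring's
# shell example in PyMongo dict syntax; `mongo_wrapper` is a hypothetical instance
# of this class.
pipeline = [
    {"$project": {"tasks": {"$filter": {
        "input": "$tasks",
        "as": "task",
        "cond": {"$and": [
            {"$eq": ["$$task.meta.is_archived", False]},
            {"$eq": ["$$task.meta.is_deleted", False]},
        ]},
    }}}}
]
rooms = mongo_wrapper.aggregate("rooms", pipeline)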
def put(self, id):
    """
    Handle a PUT request whose JSON body contains the fields 'department_name',
    'name', 'salary' and 'birthday'; every field must be filled. If the department
    does not exist yet, it is created in the database. Returns the employee as JSON
    (fields: 'dep', 'department_id', 'name', 'salary', 'id', 'birthday').
    """
    employee = EmployeeService.fetch_employee_by_id(db.session, id)
    if not employee:
        return make_response(jsonify({"message": "Employee not found"}), 404)
    rq = request.json
    department_name = rq['department_name']
    department = DepartmentService.fetch_department_by_name(
        db.session, name=department_name)
    if not department:
        department = self.department_schema_without_id.load(
            dict(name=department_name), session=db.session)
        DepartmentService.create(department, db.session)
    rq['department_id'] = department.id
    del rq['department_name']
    try:
        employee = self.employee_schema.load(request.json,
                                             instance=employee,
                                             session=db.session)
    except ValidationError as e:
        logger.debug(f"Validation error in put Employee by Api is {e}")
        return make_response(jsonify({'message': str(e)}), 400)
    return self.employee_schema.dump(
        EmployeeService.create(employee, db.session)), 200
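# Example request for this endpoint (illustrative only; the URL path, the date
# format and all field values are assumptions):
# PUT /employees/1
# {
#     "name": "Jane Doe",
#     "department_name": "Engineering",
#     "salary": 50000,
#     "birthday": "1990-05-17"
# }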
def delete_job_queue(self, **kwargs):
    '''
    Delete a job queue.
    https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/batch.html#Batch.Client.delete_job_queue
    '''
    logger.debug("Deleting job queue.")
    return self.client.delete_job_queue(**kwargs)
def get(self, id=None):
    """
    Handle a GET request. Without an id, optional query parameters ('date1' and
    'date2', or 'department_id') filter the employee list; the response is a JSON
    list whose items contain the fields 'dep', 'department_id', 'name', 'salary',
    'id' and 'birthday'. With an id, the single matching employee is returned,
    or 404 if it does not exist.
    """
    if not id:
        rq = request.args
        if rq.get('date1') and rq.get('date2'):
            date1 = rq.get('date1')
            date2 = rq.get('date2')
            employees = EmployeeService.fetch_all_employees_with_dep_between_dates(
                db.session, date1=date1, date2=date2).all()
        elif rq.get('department_id'):
            employees = EmployeeService.fetch_all_employees_by_dep_with_names_dep(
                db.session, id=rq.get('department_id')).all()
        else:
            employees = EmployeeService.fetch_all_employees_with_dep(
                db.session).all()
        return self.employee_schema_with_dep.dump(employees, many=True), 200
    employee = EmployeeService.fetch_employee_by_id(db.session, id)
    logger.debug(employee)
    if not employee:
        return make_response(jsonify({"message": "Employee not found"}), 404)
    return self.employee_schema.dump(employee), 200
def describe_jobs(self, **kwargs):
    '''
    Describes a list of AWS Batch jobs.
    https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/batch.html#Batch.Client.describe_jobs
    '''
    logger.debug("Describing jobs.")
    return self.client.describe_jobs(**kwargs)
def create_job_queue(self, **kwargs):
    '''
    Creates a new job queue.
    http://boto3.readthedocs.io/en/latest/reference/services/batch.html#Batch.Client.create_job_queue
    '''
    logger.debug("Creating job queue.")
    return self.client.create_job_queue(**kwargs)
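# Usage sketch (illustrative only): assuming `batch` is an instance of this wrapper
# class; the queue and compute environment names are hypothetical placeholders.
response = batch.create_job_queue(
    jobQueueName='my-job-queue',
    state='ENABLED',
    priority=1,
    computeEnvironmentOrder=[
        {'order': 1, 'computeEnvironment': 'my-compute-env'},
    ],
)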
def describe_compute_environments(self, **kwargs):
    '''
    Describes one or more compute environments.
    http://boto3.readthedocs.io/en/latest/reference/services/batch.html#Batch.Client.describe_compute_environments
    '''
    logger.debug("Describing compute environments.")
    return self.client.describe_compute_environments(**kwargs)
def create_compute_environment(self, **kwargs):
    '''
    Creates a new compute environment.
    http://boto3.readthedocs.io/en/latest/reference/services/batch.html#Batch.Client.create_compute_environment
    '''
    logger.debug("Creating compute environment.")
    return self.client.create_compute_environment(**kwargs)
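# Usage sketch (illustrative only): assuming `batch` is an instance of this wrapper
# class; the environment name, subnet, security group and role ARN are hypothetical
# placeholders, not taken from this code base.
response = batch.create_compute_environment(
    computeEnvironmentName='my-compute-env',
    type='MANAGED',
    computeResources={
        'type': 'FARGATE',
        'maxvCpus': 4,
        'subnets': ['subnet-0123456789abcdef0'],
        'securityGroupIds': ['sg-0123456789abcdef0'],
    },
    serviceRole='arn:aws:iam::123456789012:role/AWSBatchServiceRole',
)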
def run(self, n=10):
    """Run n times (one run per year), stopping early once the run is won."""
    logger.debug("called")
    for _ in range(n):
        self._run()
        if self.is_won:
            break
def get_static_state():
    """Return the algorithm's static state as JSON."""
    logger.debug("called")
    resp = jsonify(algo.static_state())
    # logger.critical(resp)
    resp.status_code = 200
    return resp
def set_log_retention_policy(self, log_group_name, log_retention_policy_in_days):
    try:
        logger.debug("Setting log group policy.")
        self.get_client().put_retention_policy(
            logGroupName=log_group_name,
            retentionInDays=log_retention_policy_in_days)
    except ClientError as ce:
        logger.error("Error setting log retention policy: %s" % ce)
def _eval(self):
    """Evaluate the best element and update the learning curve."""
    logger.debug("called")
    self._sort_current_population()
    best = self.current_population[0]
    self.learning_curve.append(best)
    self._eval_is_won()
def policy_update(self, learning_rate):
    """Update the policy-value net."""
    # TODO: maybe change this to a single train step with a bigger batch size?
    start = time()
    assert len(self.states_buffer) == len(self.probs_buffer)
    assert len(self.states_buffer) == len(self.winners_buffer)
    for _ in range(self.train_steps):
        batch_ids = np.random.choice(len(self.states_buffer), self.batch_size, replace=False)
        state_batch = np.array(self.states_buffer)[batch_ids]
        mcts_probs_batch = np.array(self.probs_buffer)[batch_ids]
        winner_batch = np.array(self.winners_buffer)[batch_ids]
        old_probs, old_v = self.policy_value_net.policy_value(state_batch)
        loss, entropy = self.policy_value_net.train_step(state_batch=state_batch,
                                                         mcts_probs=mcts_probs_batch,
                                                         winner_batch=winner_batch,
                                                         lr=learning_rate)
        new_probs, new_v = self.policy_value_net.policy_value(state_batch)
        # KL divergence between the policy before and after the update step
        kl = np.mean(np.sum(old_probs * (np.log(old_probs + 1e-10) - np.log(new_probs + 1e-10)),
                            axis=1))
        # if kl > self.kl_targ * 4:  # early stopping if D_KL diverges badly
        #     break
        #
        # # adaptively adjust the learning rate
        # if kl > self.kl_targ * 2 and self.lr_multiplier > 0.1:
        #     self.lr_multiplier /= 1.5
        # elif kl < self.kl_targ / 2 and self.lr_multiplier < 10:
        #     self.lr_multiplier *= 1.5
    explained_var_old = (1 - np.var(np.array(winner_batch) - old_v.flatten()) /
                         np.var(np.array(winner_batch)))
    explained_var_new = (1 - np.var(np.array(winner_batch) - new_v.flatten()) /
                         np.var(np.array(winner_batch)))
    update_time = time() - start
    logger.debug(("kl:{:.3f}, "
                  "loss:{:.3f}, "
                  "entropy:{:.3f}, "
                  "explained_var_old:{:.5f}, "
                  "explained_var_new:{:.5f}, "
                  "time: {:.2f}").format(kl, np.mean(loss), entropy,
                                         explained_var_old, explained_var_new,
                                         update_time))
    return loss, entropy
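# Usage sketch (illustrative only): `trainer` and the learning rate are assumptions;
# the replay buffers (states_buffer, probs_buffer, winners_buffer) must already hold
# self-play data before policy_update is called.
loss, entropy = trainer.policy_update(learning_rate=2e-3)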
def _sort_current_population(self):
    """Sort the current population by objective value."""
    logger.debug("called")
    # sort regarding the objective function: ascending for "min", descending otherwise
    reverse = False if self.objective == "min" else True
    self.current_population = sorted(self.current_population,
                                     key=lambda i: i[1],
                                     reverse=reverse)
def initFromModel():
    """Load the Nathan model."""
    logger.debug("called")
    _id = secrets.token_hex(8)
    obj = algo.DummyAlgo(_id)
    session["algo_dict"].update({_id: obj.serialized})
    session["current_algo"] = _id
    return obj.static_state, 200
def get(self, collection_name, query):
    try:
        cursor = self.mongo_db[collection_name].find(query)
        # NOTE: Cursor.count() is deprecated and was removed in PyMongo 4;
        # count_documents() is the modern equivalent.
        count = cursor.count()
        return count, list(cursor)
    except Exception as e:
        print('Exception while getting from MongoDB', e)
        logger.debug(
            'Exception while getting from MongoDB as exception: {}'.format(e))
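# Usage sketch (illustrative only): `mongo_wrapper`, the collection name and the
# query are assumptions.
count, docs = mongo_wrapper.get("rooms", {"meta.is_deleted": False})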
def just_static():
    """just return html and css"""
    logger.debug("called")
    manage_session("/")
    initForm = InitForm(request.form)
    runForm = RunForm(request.form)
    return render_template("pages/home.html",
                           initForm=initForm,
                           runForm=runForm)
def update(self, collection_name, query, value):
    try:
        if isinstance(value, list):
            self.mongo_db[collection_name].update_many(query, value)
        else:
            self.mongo_db[collection_name].update_one(query, value)
    except Exception as e:
        print('Exception while updating MongoDB', e)
        logger.debug(
            'Exception while updating MongoDB as exception: {}'.format(e))
def delete(self, collection_name, query):
    try:
        # soft delete: mark matching documents as deleted instead of removing them
        self.mongo_db[collection_name].update_many(
            query, {"$set": {"meta.is_deleted": True}})
    except Exception as e:
        print('Exception while deleting records in MongoDB', e)
        logger.debug(
            'Exception while deleting records in MongoDB as exception: {}'.format(e))
def graph_xs(self):
    """Return the learning-curve x values as a table: a header row followed by [index, value] rows."""
    logger.debug("called")
    L = [
        ["years", "x"],
    ]
    xs = [i[0] for i in self.learning_curve]
    x = [i for i, _ in enumerate(xs)]
    LL = [[i, j] for i, j in zip(x, xs)]
    L.extend(LL)
    return L
def create_log_group(self, log_group_name, tags):
    try:
        logger.debug("Creating cloudwatch log group.")
        return self.get_client().create_log_group(logGroupName=log_group_name,
                                                  tags=tags)
    except ClientError as ce:
        if ce.response['Error']['Code'] == 'ResourceAlreadyExistsException':
            logger.warning("Using existing log group '%s'" % log_group_name)
        else:
            logger.error("Error creating log groups: %s" % ce)
            utils.finish_failed_execution()
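# Usage sketch (illustrative only): assuming `logs` is an instance of this wrapper
# class; the log group name, tags and retention period are hypothetical placeholders.
logs.create_log_group('my-log-group', tags={'project': 'demo'})
logs.set_log_retention_policy('my-log-group', 30)  # keep logs for 30 days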
def get_functs_data():
    """Return the entries of Functs.by_name as JSON, with every key containing "funct" dropped from each entry."""
    logger.debug("called")
    functs = {
        k: {kk: vv for kk, vv in v.items() if "funct" not in kk}
        for k, v in Functs.by_name.items()
    }
    resp = jsonify(functs)
    resp.status_code = 200
    return resp
def graph_years(self):
    """Return the learning-curve years as a table: a header row followed by [index, year] rows."""
    logger.debug("called")
    L = [
        ["years", "year"],
    ]
    years = [i[3] for i in self.learning_curve]
    x = [i for i, _ in enumerate(years)]
    LL = [[i, j] for i, j in zip(x, years)]
    L.extend(LL)
    return L