def set_experiment_complete(experiment_id):
    """
    Marks an experiment as completed

    Args:
        experiment_id: the db id of the experiment
    """
    sql = f'''
    UPDATE experiment
    SET "status"='COMPLETED'
    WHERE id='{experiment_id}'
    '''

    query(sql)
def set_experiment_failed(experiment_id):
    """
    Marks a failed experiment as stopped

    Args:
        experiment_id: the db id of the experiment
    """
    sql = f'''
    UPDATE experiment
    SET "status"='STOPPED'
    WHERE id='{experiment_id}'
    '''

    query(sql)
def remove_orphan_evals():
    """
    Removes evaluations that don't have an output
    """
    sql = '''
    DELETE FROM study_evaluation
    WHERE "modelOutput" is NULL
    '''

    query(sql)
    print('cleaned orphan evals')
def remove_orphan_studies():
    """
    Removes studies that don't have a type
    """
    sql = '''
    DELETE FROM study
    WHERE type is NULL
    '''

    query(sql)
    print('cleaned orphan studies')
def remove_study_by_id(orthanc_id: str):
    """
    Removes a study from the db by its orthanc ID

    Args:
        orthanc_id (str): the study id of the orthanc study
    """
    sql = f'''
    DELETE FROM study
    WHERE "orthancStudyId"='{orthanc_id}'
    '''

    query(sql)
def stop_all_models():
    """
    Marks every model as not running
    """
    sql = '''
    UPDATE model
    SET "running"=false
    '''

    query(sql)
def fail_classifier(study_id: int):
    """
    Marks a study as failed

    Args:
        study_id (int): the orthanc study id of the failed study
    """
    sql = f'''
    UPDATE study
    SET failed=true
    WHERE "orthancStudyId"='{study_id}'
    '''

    query(sql)
def mark_model_as_stopped(model_id):
    """
    Marks a model as stopped (not running)

    Args:
        model_id (int): the id of the model to stop
    """
    sql = f'''
    UPDATE model
    SET "running"=false
    WHERE id = {model_id}
    '''

    query(sql)
def mark_models_as_quickstarted(model_ids):
    """
    Marks models as quickstarted (running)

    Args:
        model_ids (List[int]): the models to mark as quickstarted
    """
    sql = f'''
    UPDATE model
    SET "running"=true
    WHERE "id" in ({join_for_in_clause(model_ids)})
    '''

    query(sql)
def fail_eval(eval_id: int):
    """
    Updates study evaluation status to failed

    Args:
        eval_id (int): the db id of the study evaluation
    """
    sql = f'''
    UPDATE study_evaluation
    SET status='FAILED'
    WHERE id={eval_id}
    '''

    query(sql)
def add_stdout_to_eval(eval_ids: List[int], lines: List[str]):
    """
    Appends captured stdout lines to the stored stdout of study evaluations

    Args:
        eval_ids (List[int]): the db ids of the evaluations to update
        lines (List[str]): the stdout lines to append
    """
    studies = get_study_evals(eval_ids)

    # start from the existing stdout of the first eval, if any
    stdout = []
    if studies[0]['stdout'] is not None:
        stdout = studies[0]['stdout']

    stdout = stdout + lines

    sql = f'''
    UPDATE study_evaluation
    SET stdout=(%s)
    WHERE id in ({join_for_in_clause(eval_ids)})
    '''

    query(sql, json.dumps(stdout))
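# join_for_in_clause is not defined in this section; a minimal sketch of what
# it might look like, mirroring the inline comma-join that restart_failed_evals
# below performs by hand (the implementation is an assumption):
def join_for_in_clause(ids):
    # render ids as a comma-separated string for a SQL "WHERE ... IN (...)" clause
    return ','.join(str(i) for i in ids)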
def test_query(self):
    print("Running query test")
    self.assertIsNotNone(self.table)
    self.assertEqual(self.table.table_status, "ACTIVE")

    response = db.query("email", "*****@*****.**", self.table)
    self.assertEqual(response.get("Count"), 1)
    self.assertEqual(
        response.get("Items")[0].get("email"), "*****@*****.**")
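# The db.query helper exercised by the test above is not shown here. A minimal
# sketch of a compatible implementation, assuming `table` is a boto3 DynamoDB
# Table resource and the queried attribute is its partition key (or the key of
# a suitable index); everything beyond the name and signature is an assumption:
from boto3.dynamodb.conditions import Key

def query(attribute, value, table):
    # returns the raw DynamoDB response, which carries "Count" and "Items"
    # as asserted in test_query
    return table.query(KeyConditionExpression=Key(attribute).eq(value))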
def save_patient_id(patient_id: str, orthanc_id: str, modality: str, study_uid: str, series_uid: str):
    """
    Saves a patient id to the database for a study

    Args:
        patient_id (str): the patient id from orthanc
        orthanc_id (str): the study id from orthanc
        modality (str): the modality of the study
        study_uid (str): the study uid from the dicom
        series_uid (str): the series uid from the dicom
    """
    sql = f'''
    UPDATE study
    SET "patientId"='{patient_id}', modality='{modality}',
        "studyUid"='{study_uid}', "seriesUid"='{series_uid}'
    WHERE "orthancStudyId"='{orthanc_id}'
    '''

    query(sql)
def save_study_type(orthanc_id: str, study_type: str):
    """
    Saves a study to the database with its accompanying type

    Args:
        orthanc_id (str): the study ID from orthanc
        study_type (str): the type of the study (e.g. Frontal_CXR)
    """
    sql = f'''
    UPDATE study
    SET type='{study_type}'
    WHERE "orthancStudyId"='{orthanc_id}'
    '''

    query(sql)
def restart_failed_evals(eval_ids: List[int]):
    """
    Sets a failed evaluation to status 'RUNNING' to restart it

    Args:
        eval_ids (List[int]): a list of the ids of evals to be restarted
    """
    if len(eval_ids) == 0:
        return

    # join ids by , so that it can be used in WHERE ... IN clause
    ids = ','.join([str(eval_id) for eval_id in eval_ids])

    sql = f'''
    UPDATE study_evaluation
    SET status='RUNNING'
    WHERE id in ({ids})
    '''

    query(sql)
def update_eval_status_and_save(output: ModelOutput, eval_id: int):
    """
    Updates study evaluation status to completed and saves the model output

    Args:
        output (ModelOutput): the output of the model
        eval_id (int): the id of the eval to be updated
    """
    # if the model also produced an image, add "imgOutputPath" to the SQL string
    update_sql_string = ''
    if output['image']:
        img_path = output['image']
        update_sql_string = f', "imgOutputPath"=\'{img_path}\''

    # set eval as completed and save model output as json
    sql = f'''
    UPDATE study_evaluation
    SET status='COMPLETED', "modelOutput"=('{json.dumps(output)}') {update_sql_string}
    WHERE id={eval_id}
    '''

    query(sql)
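# NOTE: the helpers above interpolate caller-supplied values into SQL with
# f-strings, which is open to SQL injection if any argument is attacker-
# controlled. query() already accepts a parameter argument (see
# add_stdout_to_eval), so a safer pattern is to pass values as driver
# parameters. A minimal sketch, assuming query(sql, params) forwards params
# psycopg2-style; the _parameterized name is hypothetical:
def save_study_type_parameterized(orthanc_id, study_type):
    # values travel as bound parameters instead of being spliced into the string
    sql = '''
    UPDATE study
    SET type=%s
    WHERE "orthancStudyId"=%s
    '''
    query(sql, (study_type, orthanc_id))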
axes.set_axis_bgcolor('white')
# axes.yaxis.label.set_size(22)
axes.xaxis.label.set_size(22)

city = "pisa"
dow = "wd"
hours = 5
# method = "krandomtraj"
# method = "fullrandtraj"
method = "distrandtraj"

query_tsd = queries.create_tsd_from_table(
    "(select * from " + method + "." + city + "_" + str(hours) + "h_" + dow + " ) a")
query_tsd_original = queries.create_tsd(city, dow, hours)

dist_tsd = dist.compute_probability_distribution(
    db_utils.query(query_tsd), is_cum_sum=False)
dist_tsd_original = dist.compute_probability_distribution(
    db_utils.query(query_tsd_original), is_cum_sum=False)

print "TSD ", dist_tsd[0], dist_tsd[1]
print "TSD ORIGINAL", dist_tsd_original[0], dist_tsd_original[1]

plt.plot(dist_tsd[0], dist_tsd[1], label="Random",
         marker='s', markersize=5, alpha=alpha)
plt.plot(dist_tsd_original[0], dist_tsd_original[1],
         marker='3', markersize=5, label="Original", alpha=alpha)

plt.tick_params(axis='both', which='major', labelsize=24, colors="#000000")
plt.xlabel('x')
plt.ylabel('P')

leg = plt.legend(loc=1, prop={'size': 18})
leg.get_frame().set_facecolor("white")
def generate_random_trajectories(city, dow, hours):
    print city, dow, hours
    trajectory_result = dict()
    place_list = db.query_places(city, dow, hours)

    ################################## QUERIES ##################################
    tsd = queries.create_tsd(city, dow, hours)
    tpu = queries.create_tpu(city, dow, hours)
    ################################## END QUERIES ##################################

    # LOAD DATA
    # Compute the distributions
    n_users = db.get_number_of_users(city, dow, hours)
    n_trajectories = db.get_number_of_trajectories(city, dow, hours)
    max_trajectories_per_user = np.max(db.query(tpu)['key'])
    max_trajectory_size = np.max(db.query(tsd)['key'])

    trajectories_per_user_distribution = dist.compute_probability_distribution(
        db.query(tpu))
    trajectory_size_distribution = dist.compute_probability_distribution(
        db.query(tsd))

    print n_users

    n_trajs_total = 0
    u = 0
    while u < n_users and n_trajs_total < n_trajectories:
        # number of trajectories
        print "%s/%s and %s/%s" % (u, n_users, n_trajs_total, n_trajectories)
        user_trajectories = dist.random_from_probability(
            trajectories_per_user_distribution)
        # print 'User '+str(u) + ' with ' + str(n_trajectories)

        trajectories_count = 0
        today = datetime.datetime.fromtimestamp(time.time())
        trajectory_result[u] = []

        # create n trajectories
        while trajectories_count < user_trajectories and n_trajs_total < n_trajectories:
            # number of points
            n_points = dist.random_from_probability(trajectory_size_distribution)
            print "Traj Size ", n_points

            places_count = 0
            traj = []
            # pick n_points places
            while places_count < n_points:
                tomorrow = today + TIME_DELTA
                place = get_next_place(place_list)
                places = get_close_places(place, place_list)
                traj.append((places, today, tomorrow))
                places_count += 1
                # go forward in time
                today = tomorrow + TIME_DELTA

            trajectories_count += 1
            n_trajs_total += 1
            trajectory_result[u].append(traj)

        # user increment
        u += 1

    print "Total trajectories: %s(%s) " % (str(n_trajs_total), n_trajectories)
    table = TABLE_SCHEMA + city + "_" + str(hours) + "h_" + dow
    db.store_trajectories(trajectory_result, table)
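if __name__ == "__main__":
    # example run; the pisa/wd/5h parameters are borrowed from the plotting
    # script above and are only an illustration
    generate_random_trajectories("pisa", "wd", 5)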
def generate_random_trajectories(city, dow, hours):
    print city, dow, hours
    trajectory_result = dict()
    place_list = db.query_places(city, dow, hours)

    ################################## QUERIES ##################################
    tsd = queries.create_tsd(city, dow, hours)
    tpu = queries.create_tpu(city, dow, hours)
    # ted = queries.create_ted(city, dow, hours)
    ################################## END QUERIES ##################################

    # LOAD DATA
    # Compute the distributions
    n_users = db.get_number_of_users(city, dow, hours)
    trajectories_per_user_distribution = dist.compute_probability_distribution(
        db.query(tpu))
    trajectory_size_distribution = dist.compute_probability_distribution(
        db.query(tsd))
    print "Trajectory Size Distri ", trajectory_size_distribution
    trajectory_extent_distribution = dist.compute_probability_density_function(
        db.query_trajectory_extent(city, dow, hours))
    print "Trajectory Extent Dist ", trajectory_extent_distribution
    print "N of users:", n_users

    n_trajs_total = 0
    for u in range(0, n_users):
        # number of trajectories
        n_trajectories = dist.random_from_probability(
            trajectories_per_user_distribution)
        print 'User ' + str(u) + ' with ' + str(n_trajectories)
        trajectories_count = 0
        trajectory_result[u] = []

        # create n trajectories
        # TODO: improve trajectory creation so it satisfies the extent distribution
        while trajectories_count < n_trajectories:
            today = datetime.datetime.fromtimestamp(time.time())
            # draw trajectory size and extent jointly
            aux = dist.random_from_probability_2(trajectory_size_distribution,
                                                 trajectory_extent_distribution)
            n_points = aux[0]
            extent = aux[1]
            # # number of points
            # n_points = dist.random_from_probability(trajectory_size_distribution)
            # # extent of the trajectory
            # extent = dist.random_from_probability(trajectory_extent_distribution)
            # print "Generating Traj ", trajectories_count+1, extent

            traj = generate_trajectory(n_points, extent, place_list, today)
            trajectories_count += 1
            n_trajs_total += 1
            # print "Generated Traj ", utils.compute_trajectory_extent(traj)
            trajectory_result[u].append(traj)

    # add one trajectory with the maximum size and extent
    max_extent = np.max(trajectory_extent_distribution[0])
    max_points = np.max(trajectory_size_distribution[0])
    print max_extent
    today = datetime.datetime.fromtimestamp(time.time())
    traj = generate_trajectory(max_points, max_extent, place_list, today)
    print "Generated Traj ", utils.compute_trajectory_extent(traj)
    trajectory_result[n_users] = [traj]
    n_trajs_total += 1

    print "Total trajectories: " + str(n_trajs_total)
    table = TABLE_SCHEMA + city + "_" + str(hours) + "h_" + dow
    db.store_trajectories(trajectory_result, table)
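# generate_trajectory is called above but not defined in this section. A
# minimal sketch modeled on the inline point-picking loop of the other
# variant of this function; how the extent argument is actually enforced is
# unknown, so here it is accepted but not applied (an assumption):
def generate_trajectory(n_points, extent, place_list, today):
    traj = []
    for _ in range(n_points):
        # each stop spans TIME_DELTA, then the clock jumps forward again
        tomorrow = today + TIME_DELTA
        place = get_next_place(place_list)
        places = get_close_places(place, place_list)
        traj.append((places, today, tomorrow))
        today = tomorrow + TIME_DELTA
    return traj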