import pickle
from typing import Dict, List

from bson import ObjectId
from bson.binary import Binary
from deap import creator


def get_benchmark_pareto(iteration_id: ObjectId) -> List[List[creator.Individual]]:
    """Load and unpickle every pareto front stored for one benchmark iteration."""
    db_iter = MongoDBConnection.get_benchmarks_iterations_db()
    iter_obj = db_iter.find_one({'_id': iteration_id})
    paretos_gridfs = MongoDBConnection.get_paretos_gridfs()
    paretos = []
    for pareto_file in iter_obj['paretos']:
        # Each entry in 'paretos' is a GridFS file id holding one pickled front.
        with paretos_gridfs.get(pareto_file) as f:
            paretos.append(pickle.loads(f.read()))
    return paretos

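# Hedged usage sketch: iteration ids are the ObjectId values returned by
# save_benchmark_iteration() below, so a typical round trip looks like:
#
#     run_id = save_benchmark_iteration(...)   # see below
#     fronts = get_benchmark_pareto(run_id)    # one unpickled front per snapshot
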
def clear_population_performance_iteration(dataset_id=None):
    """Delete population-performance iterations, plus their GridFS pareto files."""
    find_d = {}
    if dataset_id is not None:
        find_d['dataset_id'] = dataset_id
    db = MongoDBConnection.get_performance_populations_db()
    iterations = db.find(find_d)
    paretos_gridfs = MongoDBConnection.get_paretos_gridfs()
    for it in iterations:
        # Delete the pickled fronts first so no orphaned GridFS files remain.
        for pareto_file in it['paretos']:
            paretos_gridfs.delete(pareto_file)
    db.delete_many(find_d)  # Collection.remove() was removed in PyMongo 4

def clear_benchmark_iteration(dataset_id=None, benchmark_id=None):
    """Delete benchmark iterations, plus their GridFS pareto files."""
    find_d = {}
    if dataset_id is not None:
        find_d['dataset_id'] = dataset_id
    if benchmark_id is not None:
        find_d['benchmark_id'] = benchmark_id
    db = MongoDBConnection.get_benchmarks_iterations_db()
    iterations = db.find(find_d)
    paretos_gridfs = MongoDBConnection.get_paretos_gridfs()
    for it in iterations:
        # Delete the pickled fronts first so no orphaned GridFS files remain.
        for pareto_file in it['paretos']:
            paretos_gridfs.delete(pareto_file)
    db.delete_many(find_d)  # Collection.remove() was removed in PyMongo 4

def clear_benchmark_performance_iteration(population_iteration=None, bmk_settings=None):
    """Delete benchmark-performance iterations, plus their GridFS pareto files."""
    find_d = {}
    if population_iteration is not None:
        find_d['population_iteration'] = population_iteration
    if bmk_settings is not None:
        find_d['bmk_settings'] = bmk_settings
    db = MongoDBConnection.get_benchmark_performance_db()
    iterations = db.find(find_d)
    paretos_gridfs = MongoDBConnection.get_paretos_gridfs()
    for it in iterations:
        # Delete the pickled fronts first so no orphaned GridFS files remain.
        for pareto_file in it['paretos']:
            paretos_gridfs.delete(pareto_file)
    db.delete_many(find_d)  # Collection.remove() was removed in PyMongo 4

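# A minimal sketch of how the clear_* helpers filter; "demo-dataset" is a
# hypothetical id:
#
#     clear_benchmark_iteration(dataset_id="demo-dataset")  # one dataset's runs
#     clear_benchmark_iteration()                           # no filter: everything
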
def save_population_performance_iteration(
        dataset_id: str,
        duration: List[str],
        population: List[List[creator.Individual]]):
    """Pickle each snapshot's pareto front into GridFS and record the iteration."""
    paretos_gridfs = MongoDBConnection.get_paretos_gridfs()
    paretos_pickles = [0] * len(population)
    for n_snp, actual_pareto in enumerate(population):
        with paretos_gridfs.new_file() as f:
            paretos_pickles[n_snp] = f._id
            binary_data = Binary(pickle.dumps(actual_pareto, protocol=2), subtype=128)
            f.write(binary_data)
    db = MongoDBConnection.get_performance_populations_db()
    result = db.insert_one({
        "dataset_id": dataset_id,
        "duration": duration,
        "paretos": paretos_pickles,
        "version": "v1",
    })
    return result.inserted_id

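# Hedged usage sketch for the saver above; the dataset id, durations, and front
# names are placeholders:
#
#     run_id = save_population_performance_iteration(
#         dataset_id="demo-dataset",
#         duration=["0:01:03", "0:00:58"],        # stringified, one per snapshot
#         population=[front_gen_0, front_gen_1],  # one pareto front per snapshot
#     )
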
def save_benchmark_iteration(dataset_id: str, iteration_id: str, benchmark_id: str,
                             duration: List[str], paretos):
    paretos_gridfs = MongoDBConnection.get_paretos_gridfs()
    paretos_pickles = [0] * len(paretos)
    for n_snp, actual_pareto in enumerate(paretos):
        with paretos_gridfs.new_file() as f:
            paretos_pickles[n_snp] = f._id
            binary_data = Binary(pickle.dumps(actual_pareto, protocol=2), subtype=128)
            f.write(binary_data)
    db = MongoDBConnection.get_benchmarks_iterations_db()
    result = db.insert_one({
        "dataset_id": dataset_id,
        "iteration_id": iteration_id,
        "benchmark_id": benchmark_id,
        "duration": duration,
        "paretos": paretos_pickles,
        "version": "v1",
    })
    return result.inserted_id

def save_iteration(dataset_id: str, settings_id: str,
                   sp_generations: List[List[float]], paretos,
                   execution_info: Dict):
    """Pickle each pareto front into GridFS and record a v2 iteration document."""
    paretos_gridfs = MongoDBConnection.get_paretos_gridfs()
    paretos_pickles = [0] * len(paretos)
    for n_snp, actual_pareto in enumerate(paretos):
        with paretos_gridfs.new_file() as f:
            paretos_pickles[n_snp] = f._id
            binary_data = Binary(pickle.dumps(actual_pareto, protocol=2), subtype=128)
            f.write(binary_data)
    db = MongoDBConnection.get_iterations_db()
    result = db.insert_one({
        "dataset_id": dataset_id,
        "settings_id": settings_id,
        "execution_info": execution_info,
        "snapshots": sp_generations,
        "paretos": paretos_pickles,
        "version": "v2",
    })
    return result.inserted_id

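if __name__ == "__main__":
    # Hedged round-trip sketch: the ids and the one-individual front below are
    # placeholders, and creator.Individual is assumed to have been registered
    # earlier via creator.create("Individual", list, fitness=...).
    front = [creator.Individual([0.1, 0.9])]
    run_id = save_benchmark_iteration(
        dataset_id="demo-dataset",
        iteration_id="iter-0",
        benchmark_id="bmk-0",
        duration=["0:00:12"],
        paretos=[front],  # one pareto front per snapshot
    )
    fronts = get_benchmark_pareto(run_id)
    print(f"round-trip loaded {len(fronts)} pareto front(s)")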