def get_error_obj(error_code, error_log, path_obj):
    # Persist the full error log under a random ID; return only the ID to the caller
    error_id = util.get_random_string(6)
    log_file = path_obj["apierrorlogpath"] + "/" + error_code + "-" + error_id + ".log"
    with open(log_file, "w") as FW:
        FW.write("%s" % (error_log))
    return {"error_list": [{"error_code": "exception-error-" + error_id}]}
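# Nearly every function below leans on a get_random_string helper (often via
# util.) whose definition is not shown. A minimal sketch of what such a helper
# could look like; the alphabet and default length are assumptions, not the
# project's actual implementation. Security-sensitive callers (session hashes,
# API tokens) should draw from a CSPRNG, as the secrets module does here.
import secrets
import string

def get_random_string(length=12):
    # secrets.choice draws from the OS entropy pool, suitable for tokens
    alphabet = string.ascii_letters + string.digits
    return ''.join(secrets.choice(alphabet) for _ in range(length))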
def auth_userid(config_obj):
    db_obj = config_obj[config_obj["server"]]["dbinfo"]
    dbh, error_obj = util.connect_to_mongodb(db_obj)  # connect to mongodb
    if error_obj != {}:
        return error_obj

    # Collect errors
    error_list = errorlib.get_errors_in_query("auth_userid", {}, config_obj)
    if error_list != []:
        return {"error_list": error_list}

    collection = "c_userid"
    i = 0
    while True:
        # Draw random IDs until one is unused, with a retry cap as a safety valve
        user_id = util.get_random_string(32).lower()
        user_obj = {"userid": user_id}
        # count_documents replaces the deprecated find().count() (removed in pymongo 4.x)
        if dbh[collection].count_documents(user_obj) == 0:
            ts = datetime.datetime.now(
                pytz.timezone('US/Eastern')).strftime('%Y-%m-%d %H:%M:%S %Z%z')
            user_obj["created_ts"] = ts
            result = dbh[collection].insert_one(user_obj)
            return {"user": user_id}
        if i > 100000:
            return {"error_list": [{"error_code": "userid-generator-failed"}]}
        i += 1
def editor():
    source = request.args.get('source', default=None)
    file_type = request.args.get('format', default="csv")
    callback_url = request.args.get('callback', default="")
    error_msg = None
    warning_msg = None
    file_name = ""
    original_file_name = ""
    if source is None or source.strip() == '':
        headers = ["AAA", "BBB", "CCC"]
    # elif source.startswith('file:/'):
    #     original_file_name = source.split('/')[-1]
    #     headers = util.get_headers(source, file_type=file_type)
    #     file_name = source.split('/')[-1].split('.')[0] + "-" + util.get_random_string(4) + "." + source.split('.')[-1]
    #     if headers == []:
    #         warning_msg = "Can't parse the source file %s" % source
    else:
        # Split the source URL only after the None check; the original did this
        # before the check, which raised AttributeError when source was missing
        original_file_name = source.split('/')[-1]
        if DOWNLOAD:
            r = requests.get(source, allow_redirects=True)
            if r.status_code == 200:
                fname = source.split('/')[-1].split('.')[0] + "-" + \
                    util.get_random_string(4) + "." + source.split('.')[-1]
                file_name = fname
                uploaded_file_dir = os.path.join(UPLOAD_DIR, fname)
                with open(uploaded_file_dir, 'w') as f:
                    f.write(r.text)  # r.text (str), since the file is opened in text mode
                headers = util.get_headers(uploaded_file_dir, file_type=file_type)
                if headers == []:
                    warning_msg = "Can't parse the source file %s" % source
            else:
                error_msg = "the source %s can not be accessed" % source
                print(error_msg)
                headers = []
        else:
            headers = util.get_headers(source, file_type=file_type)
            if headers == []:
                warning_msg = "Can't parse the source file %s" % source
    # if callback_url:
    #     files = {'upload_file': open('file.txt', 'rb')}
    #     values = {'DB': 'photcat', 'OUT': 'csv', 'SHORT': 'short'}
    #     r = requests.post(url, files=files, data=values)
    #     return render_template('msg.html', msg="Your mappings has been sent", msg_title="Result")
    # else:
    f = open(os.path.join(DATA_DIR, "labels.txt"))
    return render_template('editor.html',
                           labels_txt=f.read(),
                           headers=headers,
                           callback=callback_url,
                           file_name=original_file_name,
                           error_msg=error_msg,
                           warning_msg=warning_msg)
def add(number):
    if number:
        client = Client(
            number=number,
            token=get_random_string(64),
        )
        session = get_session()
        session.add(client)
        session.commit()
        return client
def check_login():
    try:
        email = request.form['email']
    except KeyError:
        email = ''
    try:
        password = request.form['password']
    except KeyError:
        password = ''

    # Check if email is blank
    if not len(email) > 0:
        session['error'] = 'Email is required'
        return redirect('/login')

    # Check if password is blank
    if not len(password) > 0:
        session['error'] = 'Password is required'
        return redirect('/login')

    # Find email in database
    user_document = mongo.db.users.find_one({"email": email})
    if user_document is None:
        # If no user document is found, report the error
        session['error'] = 'No account exists with this email address'
        return redirect('/login')

    # Verify the submitted password against the stored hash
    password_hash = sha256(password.encode('utf-8')).hexdigest()
    if user_document['password'] != password_hash:
        session['error'] = 'Password is wrong'
        return redirect('/login')

    # TODO: generate a token containing userID, hash, and createdAt
    random_string = get_random_string()
    randomSessionHash = sha256(random_string.encode('utf-8')).hexdigest()
    token_object = mongo.db.user_tokens.insert_one({
        'userID': user_document['_id'],
        'sessionHash': randomSessionHash,
        'createdAt': datetime.utcnow(),
    })

    # Store the user token in the session and redirect to '/'
    session['userToken'] = randomSessionHash
    return redirect('/')
def add(username, password, access_level, restaurant):
    if username and password and is_int(access_level) and restaurant:
        user = User(
            username=username,
            password=password,
            access_level=access_level,
            token=get_random_string(64),
            restaurant_id=restaurant.id
        )
        session = get_session()
        session.add(user)
        session.commit()
        return user
def write_to_disk_numpy(self):
    batchsz = self.input_doc.shape[0]
    for i in range(batchsz):
        _pred_dist = [x[i] for x in self.pred_distributions]
        _pred_dist = np.stack(_pred_dist, axis=0)
        ent = entropy(np.exp(_pred_dist), axis=-1)
        if self.full_data:
            # _hidden_states = [[y[i][np.newaxis, ...] for y in x] for x in self.all_hidden_states]
            # _hidden_states = [np.stack(x, axis=1) for x in self.all_hidden_states]
            # _hidden_states = np.stack(_hidden_states, axis=0)
            _hidden_states = None
            _attn = [[y[i] for y in x] for x in self.attentions]
        else:
            _hidden_states = None
            _attn = None
            _pred_dist = None
        # _attn = np.stack(_attn, axis=0)
        _logit = [x[i] for x in self.logits]
        _logit = np.stack(_logit, axis=0)
        if self.meta:
            _meta = self.meta[i]
            if 'name' in _meta:
                fname = _meta['name']
            else:
                fname = _meta['id']
        else:
            _meta = {"name": "", "id": ""}
            fname = get_random_string(8)
        f = f"model_output_{fname}.pt"
        with open(os.path.join(self.cur_dir, f), 'wb') as fd:
            pickle.dump(
                {
                    'pred_distributions': _pred_dist,
                    'attentions': _attn,
                    'all_hidden_states': _hidden_states,
                    'logits': _logit,
                    'input_doc': self.input_doc[i],
                    'input_doc_mask': self.input_doc_mask[i],
                    'meta': _meta,
                    'ent': ent
                }, fd)
        logging.debug(f"writing {os.path.join(self.cur_dir, f)}")
        print(f"writing {os.path.join(self.cur_dir, f)}")
    # Reset the accumulated buffers once the whole batch has been flushed
    self.__init__(full_data=self.full_data, cur_dir=self.cur_dir)
async def create_sims_batch_inference_job(solutionVersionArn, roleArn):
    batchInferenceJobArn = personalize.create_batch_inference_job(
        jobName="manhwakyung-title-recommendation-batch-" + util.get_random_string(8),
        solutionVersionArn=solutionVersionArn,
        roleArn=roleArn,
        jobInput={
            's3DataSource': {
                'path': f's3://{BUCKET_NAME}/data/title/batch-input-sims.txt'
            }
        },
        jobOutput={
            's3DataDestination': {
                'path': f's3://{BUCKET_NAME}/results/by-title-id/'
            }
        })['batchInferenceJobArn']
    await util.wait_until_status_async(
        lambdaToGetStatus=lambda _="": personalize.describe_batch_inference_job(
            batchInferenceJobArn=batchInferenceJobArn)['batchInferenceJob']['status'],
        messagePrefix="Running sims batch inference job...",
        expectedStatus="ACTIVE")
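# util.wait_until_status_async is not shown in this file. A plausible sketch
# of the polling loop the call above implies; the poll interval and the
# failure handling are assumptions, not the project's actual helper.
import asyncio

async def wait_until_status_async(lambdaToGetStatus, messagePrefix, expectedStatus, poll_seconds=30):
    # Poll the status callback until it reports the expected value,
    # surfacing failed states instead of looping forever.
    while True:
        status = lambdaToGetStatus()
        print(f"{messagePrefix} current status: {status}")
        if status == expectedStatus:
            return
        if "FAILED" in status:
            raise RuntimeError(f"job ended in status {status}")
        await asyncio.sleep(poll_seconds)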
def globalsearch_search(query_obj, config_obj):
    db_obj = config_obj[config_obj["server"]]["dbinfo"]
    dbh, error_obj = util.connect_to_mongodb(db_obj)  # connect to mongodb
    if error_obj != {}:
        return error_obj

    # Collect errors
    error_list = errorlib.get_errors_in_query("globalsearch_search", query_obj, config_obj)
    if error_list != []:
        return {"error_list": error_list}

    # Load search config and plug in values
    search_obj = json.loads(open("./conf/global_search.json", "r").read())
    query_obj["term"] = query_obj["term"].replace("(", "\\(").replace(")", "\\)")
    for obj in search_obj:
        if "$text" in obj["mongoquery"]:
            obj["mongoquery"] = {'$text': {'$search': '\"' + query_obj["term"] + '\"'}}
        elif "$or" in obj["mongoquery"]:
            for o in obj["mongoquery"]["$or"]:
                for k in o:
                    if "$regex" in o[k]:
                        o[k]["$regex"] = query_obj["term"]
        elif "$and" in obj["mongoquery"]:
            for o in obj["mongoquery"]["$and"]:
                if "$or" in o:
                    for oo in o["$or"]:
                        for k in oo:
                            if "$regex" in oo[k]:
                                oo[k]["$regex"] = query_obj["term"]
                elif "$text" in o:
                    obj["mongoquery"]["$and"][0] = {'$text': {'$search': '\"' + query_obj["term"] + '\"'}}
                else:
                    for k in o:
                        if "$regex" in o[k]:
                            o[k]["$regex"] = query_obj["term"]
        else:
            for k in obj["mongoquery"]:
                o = obj["mongoquery"][k]
                if "$regex" in o:
                    o["$regex"] = query_obj["term"]
                elif "$eq" in o:
                    o["$eq"] = query_obj["term"]

    # Filter cached global search results
    res_obj = {"exact_match": [], "other_matches": {"total_match_count": 0}}
    seen_exact_match = {}
    results_dict = {}
    for obj in search_obj:
        key_one, key_two = obj["searchname"].split(".")
        if key_one not in res_obj["other_matches"]:
            res_obj["other_matches"][key_one] = {key_two: {}}
        if key_one not in results_dict:
            results_dict[key_one] = {"all": []}
        if key_two not in results_dict[key_one]:
            results_dict[key_one][key_two] = []
        target_collection = obj["targetcollection"]
        cache_collection = "c_cache"
        qry_obj = obj["mongoquery"]
        prj_obj = config_obj["projectedfields"][target_collection]
        doc_list = list(dbh[target_collection].find(qry_obj, prj_obj)) if key_two != "all" else []
        for doc in doc_list:
            doc.pop("_id")
            record_type, record_id, record_name = "", "", ""
            if target_collection == "c_protein":
                list_obj = protein_apilib.get_protein_list_object(doc)
                #results_dict[key_one][key_two].append(list_obj)
                #results_dict[key_one]["all"].append(list_obj)
                #results.append(list_obj)
                record_type, record_id = "protein", doc["uniprot_canonical_ac"]
                record_name = list_obj["protein_name"]
                canon, ac = record_id, record_id.split("-")[0]
                record_id_list = [canon]
                record_id_list_lower = [canon.lower(), ac.lower()]
                results_dict[key_one]["all"] += record_id_list
                results_dict[key_one][key_two] += record_id_list
                if query_obj["term"].lower() in record_id_list_lower:
                    if record_id not in seen_exact_match:
                        exact_obj = {"id": record_id, "type": record_type, "name": record_name}
                        res_obj["exact_match"].append(exact_obj)
                        seen_exact_match[record_id] = True
            elif target_collection == "c_glycan":
                list_obj = get_glycan_list_record(doc)
                #results_dict[key_one]["all"].append(list_obj)
                #results.append(list_obj)
                #results_dict[key_one][key_two].append(list_obj)
                record_type, record_id = "glycan", doc["glytoucan_ac"]
                record_name = record_id
                record_id_list = [record_id]
                record_id_list_lower = [record_id.lower()]
                results_dict[key_one]["all"].append(record_id)
                results_dict[key_one][key_two].append(record_id)
                if query_obj["term"].lower() in record_id_list_lower:
                    if record_id not in seen_exact_match:
                        exact_obj = {"id": record_id, "type": record_type, "name": record_name}
                        res_obj["exact_match"].append(exact_obj)
                        seen_exact_match[record_id] = True
        if len(results_dict[key_one][key_two]) > 0:
            record_type = key_one
            record_type = "protein" if record_type == "glycoprotein" else record_type
            ts = datetime.datetime.now(
                pytz.timezone("US/Eastern")).strftime("%Y-%m-%d %H:%M:%S %Z%z")
            random_string = util.get_random_string(128)
            hash_obj = hashlib.md5(random_string.encode("utf-8"))  # md5 requires bytes under Python 3
            list_id = hash_obj.hexdigest()
            res = dbh[cache_collection].delete_many({"list_id": list_id})
            result_count = len(results_dict[key_one][key_two])
            # floor division so the partition count stays an int under Python 3
            partition_count = result_count // config_obj["cache_batch_size"]
            for i in range(0, partition_count + 1):
                start = i * config_obj["cache_batch_size"]
                end = start + config_obj["cache_batch_size"]
                end = result_count if end > result_count else end
                if start < result_count:
                    results_part = results_dict[key_one][key_two][start:end]
                    cache_info = {
                        "query": query_obj,
                        "ts": ts,
                        "record_type": record_type,
                        "search_type": "search"
                    }
                    util.cache_record_list(dbh, list_id, results_part, cache_info,
                                           cache_collection, config_obj)
            #hit_count = len(results_dict[key_one][key_two])
            hit_count = len(set(results_dict[key_one][key_two]))
            res_obj["other_matches"][key_one][key_two] = {
                "list_id": list_id,
                "count": hit_count
            }
            res_obj["other_matches"]["total_match_count"] += len(
                results_dict[key_one][key_two])
        else:
            res_obj["other_matches"][key_one][key_two] = {"list_id": "", "count": 0}
    return res_obj
def editor():
    if 'format' in request.form:
        file_type = request.form['format']
    else:
        file_type = 'csv'
    if 'callback' in request.form:
        callback_url = request.form['callback']
    else:
        callback_url = ""
    kg = None
    if 'kg' in request.form:
        if request.form['kg'].strip() != "":
            kg = request.form['kg'].strip()
    ontologies = request.form.getlist('ontologies')
    if len(ontologies) == 0:
        return render_template('msg.html',
                               msg="You should select at least one ontology",
                               msg_title="Error")
    logger.debug("number of ontologies: " + str(len(ontologies)))
    logger.debug(str(ontologies))
    logger.debug(str(request.form))
    error_msg = None
    warning_msg = None
    uploaded = False
    if 'source' not in request.form or request.form['source'].strip() == "":
        if 'sourcefile' in request.files:
            sourcefile = request.files['sourcefile']
            if sourcefile.filename != "":
                original_file_name = sourcefile.filename
                filename = secure_filename(sourcefile.filename)
                fname = util.get_random_string(4) + "-" + filename
                uploaded_file_dir = os.path.join(UPLOAD_DIR, fname)
                if not os.path.exists(UPLOAD_DIR):
                    os.makedirs(UPLOAD_DIR)
                sourcefile.save(uploaded_file_dir)
                uploaded = True
            else:
                logger.debug("blank source file")
        else:
            logger.debug('not sourcefile')
        if not uploaded:
            return render_template('msg.html',
                                   msg="Expecting an input file",
                                   msg_title="Error")
    else:
        source = request.form['source']
        original_file_name = source.split('/')[-1]
        filename = secure_filename(original_file_name)
        r = requests.get(source, allow_redirects=True)
        if r.status_code == 200:
            fname = util.get_random_string(4) + "-" + filename
            uploaded_file_dir = os.path.join(UPLOAD_DIR, fname)
            if not os.path.exists(UPLOAD_DIR):
                os.makedirs(UPLOAD_DIR)
            f = open(uploaded_file_dir, 'w', encoding='utf-8')
            f.write(r.text)
            f.close()
        else:
            error_msg = "the source %s can not be accessed" % source
            logger.debug(error_msg)
            return render_template('msg.html', msg=error_msg, msg_title="Error")
    headers = util.get_headers(uploaded_file_dir, file_type=file_type)
    if headers == []:
        error_msg = "Can't parse the source file "
        return render_template('msg.html', msg=error_msg, msg_title="Error")
    logger.debug("headers: ")
    logger.debug(str(headers))
    labels = ""
    for o in ontologies:
        o_labels = None
        try:
            o_labels = util.get_classes_as_txt([o], data_dir=DATA_DIR)
        except Exception:
            # fall back to the ontology directory if the lookup under DATA_DIR fails
            o_labels = util.get_classes_as_txt([o], data_dir=ONT_DIR)
        if o_labels:
            labels += o_labels
    logger.debug("labels: ")
    logger.debug(str(labels))
    return render_template('editor.html',
                           labels_txt=labels,
                           ontologies_txt=",".join(ontologies),
                           headers=headers,
                           kg=kg,
                           callback=callback_url,
                           file_name=fname,
                           error_msg=error_msg,
                           warning_msg=warning_msg)
def editor():
    if 'format' in request.form:
        file_type = request.form['format']
    else:
        file_type = 'csv'
    if 'callback' in request.form:
        callback_url = request.form['callback']
    else:
        callback_url = ""
    ontologies = request.form.getlist('ontologies')
    if len(ontologies) == 0:
        return render_template('msg.html',
                               msg="You should select at least one ontology",
                               msg_title="Error")
    print("number of ontologies: " + str(len(ontologies)))
    print(ontologies)
    print(request.form)
    error_msg = None
    warning_msg = None
    uploaded = False
    if 'source' not in request.form or request.form['source'].strip() == "":
        if 'sourcefile' in request.files:
            sourcefile = request.files['sourcefile']
            if sourcefile.filename != "":
                original_file_name = sourcefile.filename
                filename = secure_filename(sourcefile.filename)
                fname = util.get_random_string(4) + "-" + filename
                uploaded_file_dir = os.path.join(UPLOAD_DIR, fname)
                if not os.path.exists(UPLOAD_DIR):
                    os.mkdir(UPLOAD_DIR)
                sourcefile.save(uploaded_file_dir)
                uploaded = True
            else:
                print("blank source file")
        else:
            print('not sourcefile')
        if not uploaded:
            return render_template('msg.html',
                                   msg="Expecting an input file",
                                   msg_title="Error")
    else:
        source = request.form['source']
        original_file_name = source.split('/')[-1]
        filename = secure_filename(original_file_name)
        r = requests.get(source, allow_redirects=True)
        if r.status_code == 200:
            fname = util.get_random_string(4) + "-" + filename
            uploaded_file_dir = os.path.join(UPLOAD_DIR, fname)
            f = open(uploaded_file_dir, 'w')
            f.write(r.text)  # r.text (str) rather than r.content (bytes), since the file is opened in text mode
            f.close()
        else:
            error_msg = "the source %s can not be accessed" % source
            print(error_msg)
            return render_template('msg.html', msg=error_msg, msg_title="Error")
    headers = util.get_headers(uploaded_file_dir, file_type=file_type)
    if headers == []:
        error_msg = "Can't parse the source file "
        return render_template('msg.html', msg=error_msg, msg_title="Error")
    labels = util.get_classes_as_txt(ontologies, data_dir=DATA_DIR)
    # f = open(os.path.join(DATA_DIR, "labels.txt"))
    return render_template('editor.html',
                           labels_txt=labels,
                           ontologies_txt=",".join(ontologies),
                           headers=headers,
                           callback=callback_url,
                           file_name=fname,
                           error_msg=error_msg,
                           warning_msg=warning_msg)