def fun_sel(self):
    """Prompt the user to pick one of their permitted operations and run it.

    ``self.operation`` holds the operations this user is allowed to run;
    an empty list means the user has no permissions at all.
    """
    # Idiomatic emptiness check (was `self.operation == []`).
    if not self.operation:
        print("您没有任何权限,请联系管理员加权限")
    else:
        play = input(f"{self.operation}\n请选择对应的操作:").strip()
        if play == "office":
            office.analysis().test_methon()
        elif play == "analysis":
            als.analysis().test_methon()
        else:
            # Previously an unrecognised choice was swallowed silently;
            # give the user explicit feedback instead.
            print(f"无效的操作: {play}")
def strategy_analysis_retrieval(query, faq_config: FaqConfig):
    """Run the analysis -> retrieval strategy pipeline for one query.

    Args:
        query: raw query string to analyse.
        faq_config: FaqConfig carrying the analysis and retrieval settings.

    Returns:
        JSON string with two keys: ``request`` (the analysed query item as
        a dict) and ``response`` (the retrieved FAQ items as a list).
    """
    start = time.process_time()
    logger = logging.getLogger('strategy')

    # Analyse the raw query, then retrieve candidate FAQ items for it.
    # (The former match/rank stages were long dead as commented-out code
    # and have been removed.)
    query_item = analysis(query, faq_config)
    faq_items = retrieval(query_item, faq_config)

    rsp = {
        'request': query_item_to_dict(query_item),
        'response': faq_items_to_list(faq_items),
    }
    # ensure_ascii=False keeps non-ASCII (e.g. Chinese) text readable.
    rsp_json = json.dumps(rsp, ensure_ascii=False)
    logger.info('do strategy SUCCESS !')
    logger.info(rsp_json)

    cost_time = (time.process_time() - start) * 1000
    # Lazy %-args: the message is only formatted if DEBUG is enabled.
    logger.debug('total cost time: %s ms.', cost_time)
    return rsp_json
def main():
    """Parse the CLI arguments, run both analysis pipelines, send results."""
    args = parse()
    follower_count = args.x
    stream_count = args.y
    print(follower_count)
    print(stream_count)

    # Primary pipeline: acquire -> filter -> analyse -> chart -> save.
    raw = an.acquire()
    subset = an.filters(raw, follower_count, stream_count)
    outcome = an.analysis(subset)
    figure = ch.visualize(outcome, follower_count, stream_count)
    ch.save_chart(figure)

    # Secondary pipeline: artist-level analysis, delivered by e-mail.
    subset_secondary = san.filters2(raw)
    outcome_secondary = san.analysis2(subset_secondary)
    artists = san.acquire2()
    mail.send_email(outcome_secondary, artists)
def upload_file():
    """Handle a multi-file upload POST: save the files, run the
    formatter/database/analysis pipeline, and render the report page."""
    # Uploading Multiple File
    if request.method == 'POST':
        # Reject requests that carry no multi-file field at all.
        if 'files[]' not in request.files:
            flash('No file part')
            return redirect(request.url)
        files = request.files.getlist('files[]')
        for i, f in enumerate(files):
            if f and allowed_file(f.filename):
                # fname = secure_filename(f.filename)
                # NOTE(review): the user-supplied filename is discarded in
                # favour of this hardcoded template (secure_filename call
                # commented out above) — presumably the formator step expects
                # exactly these names; confirm before changing.
                fname = "675676476587585856567v{}.json"
                f.save(os.path.join(app.config['UPLOAD_FOLDER'], fname.format(i)))
        # Format the Uploaded Files -> Combine, Store and Delete -src/formator.py
        start = time.time()
        data = formator()
        end = time.time() - start
        # debug
        print('Formator time:', end)
        # TODO: upload data to SQLite Database
        start = time.time()
        # create tables, and insert data into database
        db._init('messages.db', data)
        end = time.time() - start
        print("Database init(create table, insert):", end)
        # Run the analysis -src/analysis.py
        start = time.time()
        report = analysis(data)
        end = time.time() - start
        # delete database tables
        db.delete_database('messages.db')
        # debug
        print('Analysis time:', end)
        # NOTE(review): non-POST requests fall through without a return value;
        # verify another route serves GET for this endpoint.
        return render_template('index.html', data = report)
def example(path: str):
    """Render the report page for a bundled example file.

    The file named by *path* is copied from the static test-file folder
    into the uploads folder, then formatted, loaded into a scratch
    database, analysed, and finally the scratch database is removed.
    """
    # Locate the bundled test file and stage it in the uploads folder.
    sample = os.path.join('.', 'static', 'test_file', path)
    copy(sample, os.path.join('.', 'uploads'))

    # Format the uploaded files -> combine, store and delete (src/formator.py),
    # then load the result into the scratch database.
    data = formator()
    db._init('messages.db', data)

    # Run the analysis (src/analysis.py) and drop the scratch database.
    report = analysis(data)
    db.delete_database('messages.db')

    return render_template('index.html', data = report)
#!/usr/bin/env python
"""
This is the master script for recreating the results.
It imports each of the key other scripts and runs them one by one.
Run the whole thing from the root directory to replicate all of the
python analysis.
"""
import src.download_raw_data as dl_raw
import src.create_clean_data as cln_data
import src.analysis as analysis


def main():
    """Replicate the pipeline: download raw data, clean it, analyse it."""
    dl_raw.download_raw_data()
    cln_data.create_clean_data()
    analysis.analysis()


# Guarding the entry point keeps the pipeline from running as a side
# effect of a mere `import`; direct execution behaves exactly as before.
if __name__ == '__main__':
    main()
    # Tail of the retrieval pipeline (the enclosing def starts outside this
    # view): run term-based and semantic retrieval, then merge the results.
    t_rsp = term_retrieval(
        query_item, faq_config.term_retrieval, faq_config.elastic_search)
    s_rsp = semantic_retrieval(
        query_item, faq_config.semantic_retrieval, faq_config.annoy_search,
        faq_config.elastic_search)
    # Term hits are listed first; presumably remove_duplication keeps the
    # first occurrence, so term results win ties — confirm in src.
    rsp = remove_duplication(t_rsp + s_rsp)
    return rsp


if __name__ == '__main__':
    # Smoke test: initialise the TF-IDF and annoy components from
    # 'faq.config', analyse a sample query, retrieve and print FAQ items.
    from src.tfidf_transformer import init_tfidf_transformer
    from src.tfidf_transformer import TfidfTransformer
    from src.annoy_search import init_annoy_search
    from src.analysis import analysis
    from src.config import init_faq_config
    f_config = init_faq_config('faq.config')
    init_tfidf_transformer(f_config.tfidf_transformer)
    tt = TfidfTransformer()
    init_annoy_search(f_config.annoy_search)
    q = analysis('who are you ?', f_config)
    res = retrieval(q, f_config)
    from src.utils import faq_items_to_list
    print(faq_items_to_list(res))
def analysis(self):
    """Delegate to the module-level ``analysis`` with this object's
    configuration values (batch size, time steps, epochs, etc.)."""
    hyper_params = (
        self.BATCH_SIZE,
        self.TIME_STEPS,
        self.EPOCH,
        self.ITERATIONS,
        self.SUBJECT,
        self.FEATURES_COUNT,
        self.DROPOUT_SIZE,
        self.LSTM_UNITS,
        self.LEARNING_RATE,
    )
    return analysis(*hyper_params)