def post_json_prediction_accuracy(company_code, company_name, start_date, end_date):
    while start_date <= end_date:
        w1, w2, w3 = prediction.start(company_code, start_date)
        # Compute the predicted closing price with the updated method.
        predicted_value = closing_calculation.predict(company_code, start_date, w1, w2, w3)
        predicted_value = round(predicted_value / 10) * 10
        lstm_price, closing_price = get_lstm_prediction_data(
            company_code, start_date - datetime.timedelta(days=1))
        _, correct_closing_price = get_lstm_prediction_data(
            company_code, start_date)
        result = {
            "date": str(start_date),
            "lstm_closing_price": float(lstm_price),
            "correct_closing_price": float(correct_closing_price),
            "predicted_closing_price": float(predicted_value),
            "company_name": str(company_name),
            "company_code": str(company_code),
        }
        # print(result)
        # Send the record to Elasticsearch.
        json_data = json.dumps(result, ensure_ascii=False)
        index = f"prediction-accuracy-{start_date}"
        store_record(index, json_data)
        start_date += datetime.timedelta(days=1)
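# For reference: a minimal sketch of the store_record helper used above and
# below, assuming the elasticsearch-py 8.x client and a local node. The
# project's real implementation (host, auth, client version) may differ,
# so the sketch is kept commented out:
#
#   import json
#   from elasticsearch import Elasticsearch
#
#   es = Elasticsearch("http://localhost:9200")  # assumed local node
#
#   def store_record(index, json_data):
#       """Index a single JSON document into the given Elasticsearch index."""
#       es.index(index=index, document=json.loads(json_data))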
def post_json_weight(company_code, company_name, start_date, end_date):
    while start_date <= end_date:
        w1, w2, w3 = prediction.start(company_code, start_date)
        result = {
            "date": str(start_date),
            "lstm_weight": float(w1),
            "emotional_weight": float(w2),
            "per_weight": float(w3),
            "company_name": str(company_name),
            "company_code": str(company_code),
        }
        # print(result)
        # Send the record to Elasticsearch.
        json_data = json.dumps(result, ensure_ascii=False)
        index = f"prediction-weight-{start_date}"
        store_record(index, json_data)
        start_date += datetime.timedelta(days=1)
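# Hypothetical driver for the two posting helpers above; COMPANIES,
# START_DATE, and END_DATE are the same module-level values used in
# __main__ below:
#
#   for company_code, company_name in COMPANIES:
#       post_json_weight(company_code, company_name, START_DATE, END_DATE)
#       post_json_prediction_accuracy(company_code, company_name, START_DATE, END_DATE)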
if __name__ == '__main__':
    for company in COMPANIES:
        # 1. Crawl PER data.
        per_crawler.start(company[0])
        # 2. Crawl news articles.
        news_contents_crawler.start(company[0], START_DATE, END_DATE)
        # 3. Preprocess the news article data using KOSAC.
        kosac_preprocessor.start(company[0], START_DATE, END_DATE)
        # 4. Run sentiment analysis on the news articles.
        news_contents_sentimental_analysis.start(company[0], START_DATE, END_DATE)
        learning_date = START_DATE
        while learning_date <= END_DATE:
            # 5. Run the LSTM calculation.
            lstm_calculator.start(company[0], learning_date)
            learning_date += datetime.timedelta(days=1)
        # 6. Compute the weights w1, w2, w3.
        w1, w2, w3 = prediction.start(company[0], learning_date)
        # 7. Predict the next day's closing price using the computed weights.
        predicted_value = closing_calculation.predict(company[0], learning_date, w1, w2, w3)
        # 8. Send the data to Elasticsearch.
        # START_DATE and END_DATE must be set to the same date.
        # elasticsearch_client.post_data(company[0], company[1], END_DATE, END_DATE)
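# A sketch of the module-level setup the pipeline above depends on; the
# company tuple and date range are placeholder assumptions, not project data:
#
#   import datetime
#
#   COMPANIES = [("005930", "Samsung Electronics")]  # (company_code, company_name)
#   START_DATE = datetime.date(2021, 1, 4)
#   END_DATE = datetime.date(2021, 1, 8)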
help=f"which prediction method execute. {execution_help}", required=True, ) parser.add_argument( "-o", "--output", dest="output", type=str, nargs=None, help=f"name of the file that will have the final submission", required=True, ) parser.add_argument( "-e", "--experiment", dest="experiment", type=str, nargs=None, help="description of the experiment", required=True, ) args = parser.parse_args() method = args.method train_file = "./data/02_intermediate/opiniones_train_opiniones_1.csv" test_file = "./data/02_intermediate/opiniones_test_opiniones_2.csv" submission_file = f"./data/07_model_output/{args.output}.csv" logger.info(f"Experiment description: {args.experiment}") start(method, train_file, test_file, submission_file, logger)
def predict_number(sub_img):
    """Call the prediction class to predict the number."""
    prediction.start(sub_img)
random_state = cfg.getProperty("Challenge.LibroQueLeo.randomState")
if method == "ensamble":
    # Build ((predictor_identifier, weight), model_file) tuples for the ensemble.
    weights = cfg.getProperty("Challenge.LibroQueLeo.Ensamble.Weights")
    models = cfg.getProperty("Challenge.LibroQueLeo.Ensamble.Models")
    model_tuples = []
    for predictor, weight in weights.items():
        model_file = model_path + models[predictor]
        model_tuples += [((predictor, weight), model_file)]
    ensamble(
        model_tuples,
        train_file,
        test_file,
        submission_file,
        logger,
        random_state,
    )
else:
    start(
        method,
        train_file,
        test_file,
        submission_file,
        logger,
        model_file=model_file,
        random_state=random_state,
    )
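# For reference, the Ensamble section of the config is read as two parallel
# mappings keyed by predictor identifier. The concrete keys and values below
# are illustrative assumptions, not the project's real configuration:
#
#   weights = {"svm": 0.4, "nb": 0.6}
#   models = {"svm": "svm_model.pkl", "nb": "nb_model.pkl"}
#
# which the loop above turns into:
#
#   [(("svm", 0.4), model_path + "svm_model.pkl"),
#    (("nb", 0.6), model_path + "nb_model.pkl")]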