def select(id):
    """Fetch one prediction row by primary key and hydrate a Prediction.

    :param id: primary key of the ``predictions`` row.
    :return: the assembled ``Prediction``, or ``None`` when no row matches.
    """
    prediction = None
    sql = "SELECT * FROM predictions WHERE id = %s"
    rows = run_sql(sql, [id])
    # Guard against an empty result set: the original indexed rows[0]
    # unconditionally, which raised IndexError for an unknown id before
    # the `is not None` check could ever run.
    result = rows[0] if rows else None
    if result is not None:
        player = player_repository.select(result['player_id'])
        match = match_repository.select(result['match_id'])
        # A NULL team id means no team has been assigned to that side yet.
        if result['team_1_id'] is None:
            home_player_team = None
        else:
            home_player_team = player_team_repository.select(result['team_1_id'])
        if result['team_2_id'] is None:
            away_player_team = None
        else:
            away_player_team = player_team_repository.select(result['team_2_id'])
        prediction = Prediction(player, match, home_player_team,
                                away_player_team, result['id'])
        prediction.has_prediction = result['has_prediction']
        prediction.goals['home'] = result['home_goals']
        prediction.goals['away'] = result['away_goals']
    return prediction
def select_all():
    """Load every row of the ``predictions`` table as Prediction objects.

    :return: list of hydrated ``Prediction`` instances (possibly empty).
    """
    predictions = []
    sql = "SELECT * FROM predictions"
    results = run_sql(sql)
    for row in results:
        player = player_repository.select(row['player_id'])
        match = match_repository.select(row['match_id'])
        # `is None` rather than `== None` (PEP 8).  A NULL team id means
        # no team has been assigned to that side of the match yet.
        if row['team_1_id'] is None:
            home_player_team = None
        else:
            home_player_team = player_team_repository.select(row['team_1_id'])
        if row['team_2_id'] is None:
            away_player_team = None
        else:
            away_player_team = player_team_repository.select(row['team_2_id'])
        prediction = Prediction(player, match, home_player_team,
                                away_player_team, row['id'])
        prediction.has_prediction = row['has_prediction']
        prediction.goals['home'] = row['home_goals']
        prediction.goals['away'] = row['away_goals']
        predictions.append(prediction)
    return predictions
def __init__(self, task):
    """Replay historical quotations for the task's interval through the
    analyzer, periodically persisting progress and checking predictions.

    :param task: task object providing settings, params and status storage.
    """
    self.task = task
    self.instrument = task.setting.instrument
    start = self.task.get_param("start")
    end = self.task.get_param("end")
    quotations = Quotation.get_from_interval(start, end, self.instrument.id)
    self.task.update_status("checker_total_quotations", len(quotations))
    last_quotation = None
    # Optionally wipe previous run's data before replaying history.
    if Providers.config().flush_history:
        Prediction.empty_table(task)
        Pattern.empty_table(task)
        Signal.empty_table(task)
    if len(quotations) > 0:
        # Resume from a previously recorded position, if any.
        checked_quotations = self.task.get_param(
            "checker_checked_quotations")
        if not checked_quotations:
            checked_quotations = 0
        for row in quotations:
            analyzer = Analyzer(task)
            analyzer.quotation = row
            analyzer.do_analysis()
            last_quotation = analyzer.quotation
            Prediction.calculation_cost_for_topical(task, last_quotation)
            Controller.update_expired_signals(self.task, last_quotation)
            checked_quotations += 1
            if checked_quotations % 10 == 0:
                # Persist progress every 10 quotations.
                self.task.update_status("checker_checked_quotations",
                                        checked_quotations)
                # Check pending predictions against the trade results.
                self.checker_predictions(last_quotation)
            if checked_quotations % 100 == 0:
                # Periodic progress report with the running success rate.
                success_percent = Signal.get_success_percent(self.task)
                print(datetime.datetime.fromtimestamp(last_quotation.ts),
                      success_percent)
    # Final prediction-cost update for the last processed quotation.
    if last_quotation:
        self.checker_predictions(last_quotation)
def create_predictions_for_knockout_matches(first_knockout_match_id=37):
    """Create a prediction for every player for every knockout-stage match.

    Matches whose id is >= *first_knockout_match_id* are treated as
    knockout games (the original code hard-coded ``match.id > 36``).
    When a match's teams are not yet known — earlier rounds undecided —
    the prediction is stored without team records.

    :param first_knockout_match_id: id of the first knockout match;
        defaults to 37, preserving the original cut-off.
    """
    for player in player_repository.select_all():
        for match in match_repository.select_all():
            if match.id < first_knockout_match_id:
                continue  # group-stage match: handled elsewhere
            # `is not None` instead of `!= None` (PEP 8).
            if match.team_1 is not None and match.team_2 is not None:
                home_player_team = player_team_repository.select_by_player_and_team(
                    player, match.team_1)
                away_player_team = player_team_repository.select_by_player_and_team(
                    player, match.team_2)
                prediction_repository.save(
                    Prediction(player, match, home_player_team, away_player_team))
            else:
                prediction_repository.save(
                    Prediction(player, match, None, None))
def do_analysis(self): """Метод подготовки прогнозов""" # Получаем свечи разной длинны # candles = Candle.get_last(self.quotation.ts, self.task.setting.analyzer_deep, # self.task.setting.instrument_id, "parent") candles = Candle.get_last_with_nesting( self.quotation.ts, self.task.setting.analyzer_deep, self.task.setting.instrument_id, self.task.setting.candles_durations, "parent") # Получаем разные вариации последовательностей c глубиной вхождения sequences = Sequence.get_sequences_json(self.task, candles) sequences_models = [] for sequence in sequences: if len(sequence) >= self.task.setting.analyzer_min_deep: sequences_models.append(Sequence.make(sequence)) if len(sequences_models) > 0: patterns_models = [] predictions_models = [] for time_bid in self.task.setting.analyzer_bid_times: for seq_raw in sequences_models: prediction = Prediction.make(self.task, time_bid, self.quotation) # Проверка оставшегося времени до ставки if prediction.time_to_expiration >= ( time_bid['time'] - time_bid['admission']): pattern = Pattern.make(self.task, seq_raw, time_bid, self.quotation) predictions_models.append(prediction) patterns_models.append(pattern) if len(patterns_models) > 0: patterns = Pattern.save_many(patterns_models) i = 0 for pat_rec in patterns: predictions_models[i].pattern_id = pat_rec.id if Controller.check_on_make_prediction(self.task, pat_rec): self.task.storage.predictions.append( predictions_models[i]) if Controller.check_on_make_signal(self.task, pat_rec, predictions_models[i], self.quotation): # Проверка условий вероятности при создании сигнала direction = Signaler.check(self.task, pat_rec) if direction: Signaler.make_and_save(self.task, direction, pat_rec, predictions_models[i]) if self.task.get_param("history_num", 0) > 0: signals_count = self.task.get_status( "checker_signals_count", 0) self.task.update_status( "checker_signals_count", signals_count + 1) i += 1
def print_prediction(prediction: Prediction, table):
    """Append one formatted row for *prediction* to *table*.

    :param prediction: answer prediction with score attributes.
    :param table: row sink exposing ``add_row`` — presumably a
        PrettyTable; TODO confirm against the caller.
    """
    answer = prediction.answer
    # Truncate long answers so the rendered table stays narrow.
    answer = answer if len(answer) < 21 else answer[:20] + '...'

    # Local `def` instead of an assigned lambda (PEP 8 E731).
    def format_num(value):
        return '{0:.2f}'.format(value)

    table.add_row([answer,
                   prediction.doc_title,
                   prediction.passage_id,
                   format_num(prediction.elastic_score),
                   format_num(prediction.tfidf_score),
                   format_num(prediction.passage_score),
                   format_num(prediction.bidaf_score),
                   format_num(prediction.calc_final_score())])
def set_result(message):
    """Admin command: record a race podium and award points to predictors.

    Expects ``/result <GP> <Country> <P1> <P2> <P3>``.  Exact podium
    position gives 2 points, any podium appearance gives 1 more.
    """
    arguments = message.text.split()[1:]
    # Validate the argument count BEFORE indexing: the original built
    # race_choosed from arguments[0]/[1] first and crashed with an
    # IndexError when fewer than two arguments were supplied.
    if len(arguments) != 5:
        bot.send_message(message.chat.id,
                         "Error. Usage: /result GP Spain HAM BOT VER")
        return None
    race_choosed = arguments[0] + " " + arguments[1]
    # Only the configured admin may set results.
    if message.from_user.id == int(os.getenv('ADMIN_ID')):
        races = []  # all_races()
        if race_choosed not in races:
            bot.send_message(message.chat.id, "Error, race not found")
            return None
        podium = [arguments[2], arguments[3], arguments[4]]
        for driver in podium:
            if driver not in drivers:
                bot.send_message(message.chat.id, "Error, driver not found")
                return None
        cursor.execute("SELECT * FROM predictions WHERE race=?",
                       (race_choosed, ))
        predictions_fetched = cursor.fetchall()
        predictions = []
        for prediction in predictions_fetched:
            driver_prediction = [prediction[1], prediction[2], prediction[3]]
            cursor.execute("SELECT * FROM users WHERE user_id=?",
                           (prediction[0], ))
            username = cursor.fetchall()
            predictions.append(
                Prediction(username[0][0], driver_prediction, prediction[4]))
        for prediction in predictions:
            sum_points = 0
            # 2 points per driver predicted in the exact podium slot.
            if prediction.driver_prediction[0] == podium[0]:
                sum_points += 2
            if prediction.driver_prediction[1] == podium[1]:
                sum_points += 2
            if prediction.driver_prediction[2] == podium[2]:
                sum_points += 2
            # +1 point for each predicted driver on the podium at all.
            for driver in prediction.driver_prediction:
                if driver in podium:
                    sum_points += 1
            cursor.execute("SELECT * FROM users WHERE username=?",
                           (prediction.username, ))
            user = cursor.fetchall()
            updated_user = list(user[0])
            updated_user[2] += sum_points
            points: int = updated_user[2]
            user_id: int = user[0][1]
            cursor.execute("UPDATE users SET points=(?) WHERE user_id=(?)",
                           (points, user_id))
    conn.commit()
def predictions(player):
    """Return all of *player*'s predictions, ordered by ascending id.

    :param player: player whose ``id`` keys the predictions table.
    :return: list of hydrated ``Prediction`` instances (possibly empty).
    """
    predictions = []
    sql = "SELECT * FROM predictions WHERE player_id = %s ORDER BY id ASC"
    values = [player.id]
    results = run_sql(sql, values)
    for row in results:
        match = match_repository.select(row['match_id'])
        # `is None` rather than `== None` (PEP 8).  A NULL team id means
        # no team has been assigned to that side of the match yet.
        if row['team_1_id'] is None:
            home_player_team = None
        else:
            home_player_team = player_team_repository.select(row['team_1_id'])
        if row['team_2_id'] is None:
            away_player_team = None
        else:
            away_player_team = player_team_repository.select(row['team_2_id'])
        prediction = Prediction(player, match, home_player_team,
                                away_player_team, row['id'])
        prediction.set_goals(row['home_goals'], row['away_goals'])
        predictions.append(prediction)
    return predictions
def fetch_predictions_and_trips(self, route_ids, stop_id):
    """Fetch upcoming predictions for *stop_id* on the given routes.

    Returns a pair ``(predictions, trips_by_id)``; both are empty when
    the API reports no predictions.
    """
    params = {
        "filter[stop]": stop_id,
        "filter[direction_id]": 0,
        "filter[route]": ",".join(route_ids),
        "include": "trip",
    }
    predictions, included = self.fetch(path="predictions", params=params)
    if not predictions:
        return [], {}
    # Index the included trips by id for quick lookup by the caller.
    trips_by_id = {trip.id: trip for trip in (Trip(raw) for raw in included)}
    return [Prediction(p) for p in predictions], trips_by_id
def get_predictions(message):
    """Send the prediction table for the current race to the chat."""
    current_race = check_race()
    cursor.execute("SELECT * FROM predictions WHERE race=?", (current_race, ))
    rows = cursor.fetchall()
    predictions = []
    for row in rows:
        # Columns: 0 = user_id, 1-3 = predicted podium, 4 = race.
        picks = [row[1], row[2], row[3]]
        cursor.execute("SELECT * FROM users WHERE user_id=?", (row[0], ))
        user_rows = cursor.fetchall()
        predictions.append(Prediction(user_rows[0][0], picks, row[4]))
    table = create_predictions_table(predictions)
    bot.send_message(
        message.chat.id,
        f'<b>{current_race}</b>' + f'<pre>{table}</pre>',
        parse_mode=ParseMode.HTML)
    conn.commit()
def prediction_pipeline(passages: Passages, question: str,
                        nlp_toolkit: NLPToolkit) -> Predictions:
    """Run the answer predictor over the top passages and normalize scores.

    :param passages: ranked candidate passages.
    :param question: the question to answer.
    :param nlp_toolkit: toolkit handed to the predictor.
    :return: min-max-normalized ``Predictions``.
    """
    predictor = AnswerPredictor(nlp_toolkit)
    predictions = Predictions()
    for passage in passages.passages[:TOP_N_PASSAGES]:
        if not passage.text:
            continue  # skip empty passages
        Logger.debug('Send (passage,question) to bidaf: (' + passage.text +
                     ',' + question + ')')
        pred = predictor.predict(passage.text, question)
        predictions.add(Prediction(
            pred['answer'],
            pred['context'],
            passage.parent_doc.title,
            passage.get_id(),
            passage.elastic_score,
            passage.tfidf_score,
            passage.get_passage_score(),
            pred['confidence'],
        ))
    Scorer().min_max_norm(predictions)
    return predictions
def setup_new_player(player):
    """Bootstrap a new player: league entry, team records, groups and
    one (empty) prediction per scheduled match."""
    add_to_overall_league(player)
    # One PlayerTeam record per real team.
    for team in team_repository.select_all():
        player_team_repository.save(PlayerTeam(player, team))
    # Mirror each group using the player's own team records.
    for group in group_repository.select_all():
        player_teams = [
            player_team_repository.select_by_player_and_team(player, team)
            for team in group.teams
        ]
        player_group_repository.save(
            PlayerGroup(player, group.name, player_teams))
    # A prediction for every scheduled match.
    for match in match_repository.select_all():
        home = player_team_repository.select_by_player_and_team(
            player, match.team_1)
        away = player_team_repository.select_by_player_and_team(
            player, match.team_2)
        prediction_repository.save(Prediction(player, match, home, away))
def get(self):
    """
    Return statistical information aggregated across the feedback and
    prediction collections.

    :return: JSON response with prediction/feedback counters broken down
        by age bucket, gender and emotion, plus per-day photo counts for
        the last week.
    """
    from models.prediction import Prediction
    from models.feedback import Feedback
    prediction_collection = Prediction._get_collection()
    feedback_collection = Feedback._get_collection()
    statistical_data = copy.deepcopy(CONSTANTS['STATISTICAL_DATA'])
    # Initialise the age buckets ('0+', '5+', ... '100+') in ascending
    # order so they render sorted even when a bucket stays at zero.
    for i in range(21):
        bucket = str(i * 5) + '+'
        statistical_data['prediction_data']['age'][bucket] = 0
        statistical_data['feedback_data']['age']['wrong'][bucket] = 0
        statistical_data['feedback_data']['age']['correct'][bucket] = 0
    # Photos uploaded per day over the last week (oldest day first).
    # The original spelled out seven near-identical dict entries; this
    # loop produces the same mapping.
    today = datetime.datetime.today().replace(hour=0, minute=0, second=0,
                                              microsecond=0)
    total_number_of_photo_within_one_week = {}
    for days_ago in range(6, 0, -1):
        day_start = today - datetime.timedelta(days=days_ago)
        day_end = day_start + datetime.timedelta(days=1)
        total_number_of_photo_within_one_week[
            day_start.strftime('%d/%m/%Y')] = \
            prediction_collection.count_documents(
                {'date': {'$lt': day_end, '$gte': day_start}})
    # Today's bucket is open-ended.
    total_number_of_photo_within_one_week[today.strftime('%d/%m/%Y')] = \
        prediction_collection.count_documents({'date': {'$gte': today}})
    # Tally predictions by age bucket, gender and emotion.
    for each_prediction in prediction_collection.find({}):
        for each_result in each_prediction['predictionResults']:
            statistical_data['prediction_number'] = statistical_data.get(
                'prediction_number', 0) + 1
            # Ages are grouped into 5-year buckets: 23 -> '20+'.
            each_age = str(5 * math.floor(each_result['age'] / 5)) + '+'
            pdata = statistical_data['prediction_data']
            pdata['age'][each_age] = pdata['age'].get(each_age, 0) + 1
            pdata['gender'][each_result['gender']] = \
                pdata['gender'].get(each_result['gender'], 0) + 1
            pdata['emotion'][each_result['emotion']] = \
                pdata['emotion'].get(each_result['emotion'], 0) + 1
    # Tally feedback.  Each of age/gender/emotion carries a
    # '<aspect>Correctness' flag and a '<aspect>Feedback' value; the
    # original repeated this block three times verbatim.
    for each_feedback in feedback_collection.find({}):
        for each_content in each_feedback['content']:
            statistical_data['feedback_number'] = statistical_data.get(
                'feedback_number', 0) + 1
            fdata = statistical_data['feedback_data']
            for aspect in ('age', 'gender', 'emotion'):
                # Kept the original `== False` comparison: the stored
                # value may not be a strict bool and plain truthiness
                # would reclassify e.g. None — TODO confirm schema.
                verdict = ('wrong'
                           if each_content[aspect + 'Correctness'] == False
                           else 'correct')
                value = each_content[aspect + 'Feedback']
                fdata[aspect][verdict][value] = \
                    fdata[aspect][verdict].get(value, 0) + 1
    result = {
        'totalNumberOfPhotoUploaded':
            prediction_collection.count_documents({}),
        # .get with 0 default: with no predictions/feedback at all these
        # counters were never created and the original raised KeyError.
        'totalNumberOfPrediction':
            statistical_data.get('prediction_number', 0),
        'totalNumberOfFeedback':
            statistical_data.get('feedback_number', 0),
        'totalNumberOfAgePrediction':
            statistical_data['prediction_data']['age'],
        'totalNumberOfAgeCorrectFeedback':
            statistical_data['feedback_data']['age']['correct'],
        'totalNumberOfAgeWrongFeedback':
            statistical_data['feedback_data']['age']['wrong'],
        'totalNumberOfGenderPrediction':
            statistical_data['prediction_data']['gender'],
        'totalNumberOfGenderCorrectFeedback':
            statistical_data['feedback_data']['gender']['correct'],
        'totalNumberOfGenderWrongFeedback':
            statistical_data['feedback_data']['gender']['wrong'],
        'totalNumberOfEmotionPrediction':
            statistical_data['prediction_data']['emotion'],
        'totalNumberOfEmotionCorrectFeedback':
            statistical_data['feedback_data']['emotion']['correct'],
        'totalNumberOfEmotionWrongFeedback':
            statistical_data['feedback_data']['emotion']['wrong'],
        'numberOfPhotoByDate': total_number_of_photo_within_one_week
    }
    return send_json_response(result, 200)
def check_expired_predictions(task, quotation):
    """Close predictions that expired at *quotation*'s timestamp and update
    their patterns' trend/cost counters.

    :param task: task providing signaler settings.
    :param quotation: current market quotation (value/ask/bid/ts).
    """
    timestamp = quotation.ts
    if Providers.config().no_write:
        # Read-only mode: fetch but do not persist anything.
        Prediction.get_expired(task, timestamp)
    else:
        ended_predictions = Prediction.get_expired(task, timestamp)
        if ended_predictions:
            # Collect each pattern once to avoid duplicate updates.
            taken_patterns = {}
            for prediction in ended_predictions:
                # Close out the prediction at the current quotation.
                prediction.expiration_cost = quotation.value
                prediction.expiration_ask = quotation.ask
                prediction.expiration_bid = quotation.bid
                # Look the pattern up in the collected map (load once).
                if prediction.pattern_id not in taken_patterns:
                    taken_patterns[
                        prediction.pattern_id] = prediction.pattern
                pattern = taken_patterns[prediction.pattern_id]
                # Recompute the pattern's cost attributes.
                pattern.calculation_cost_from_prediction(prediction)
                if quotation.value < prediction.created_cost:
                    # Price fell: extend a put trend or start a new one.
                    pattern.puts_count += 1
                    if pattern.trend < 0:
                        pattern.trend -= 1
                    else:
                        pattern.trend = -1
                    if pattern.trend_max_put_count < abs(pattern.trend):
                        pattern.trend_max_put_count = abs(pattern.trend)
                if quotation.value > prediction.created_cost:
                    # Price rose: extend a call trend or start a new one.
                    pattern.calls_count += 1
                    if pattern.trend > 0:
                        pattern.trend += 1
                    else:
                        pattern.trend = 1
                    if pattern.trend_max_call_count < pattern.trend:
                        pattern.trend_max_call_count = pattern.trend
                if quotation.value == prediction.created_cost:
                    # No movement: trend resets.
                    pattern.same_count += 1
                    pattern.trend = 0
                # A long enough trend arms a cooldown delay; an armed
                # delay ticks down by one per closed prediction.
                if abs(
                        pattern.trend
                ) >= task.setting.signaler_min_repeats and pattern.delay == 0:
                    pattern.delay = task.setting.signaler_delay_on_trend
                if pattern.delay > 0:
                    pattern.delay -= 1
            Prediction.save_many(ended_predictions)
            # Persist the updated patterns and their counters.
            if len(taken_patterns) > 0:
                for item in taken_patterns:
                    taken_patterns[item].update()
def post(self):
    """
    Run face analysis on an uploaded image, save the results and return
    them to the client.

    :return: JSON response with the annotated image (base64) and the
        per-face detections.
    """
    from models.prediction import Prediction
    from config import deepface
    message = request.get_json(force=True)
    encoded = message['image']
    decoded = base64.b64decode(encoded)
    image = Image.open(io.BytesIO(decoded)).convert('RGB')
    img, detections = deepface.analyze(image)
    # Re-encode the annotated image for the JSON payload.
    buffered = io.BytesIO()
    img.save(buffered, format="JPEG")
    img_str = base64.b64encode(buffered.getvalue())
    base64_string = img_str.decode('utf-8')
    result = {
        'img_str': base64_string,
        'results': detections,
        'message': '',
        'status': 'success'
    }
    if len(detections) == 0:
        # BUG FIX: the original line ended with a trailing comma, which
        # made result['message'] a 1-tuple instead of a string.
        result['message'] = "We’re not very sure of what this may be, could you try with another image"
        result['status'] = 'failure'
    elif len(detections) == 1:
        result['isShowId'] = 'false'
    if len(detections) > 0:
        formatted_prediction_results = []
        for each in detections:
            formatted_prediction_results.append({
                'age': each['age'],
                'gender': each['gender'],
                'emotion': each['emotion']['dominant'],
                'emotionScore': each['emotion']['dominant_score']
            })
        # Persist both the formatted and raw results.
        new_prediction = Prediction(
            **{
                'predictionResults': formatted_prediction_results,
                'rawPredictionResults': detections,
                'date': datetime.datetime.now(),
            })
        new_prediction.save()
    return send_json_response(result, 200)
# Player groups: mirror each group with player_1's own team records.
for group in group_repository.select_all():
    player_teams = []
    for team in group.teams:
        player_teams.append(
            player_team_repository.select_by_player_and_team(player_1, team))
    player_group_repository.save(
        PlayerGroup(player_1, group.name, player_teams))

# Predictions: one (initially empty) prediction per scheduled match.
for match in match_repository.select_all():
    home_player_team = player_team_repository.select_by_player_and_team(
        player_1, match.team_1)
    away_player_team = player_team_repository.select_by_player_and_team(
        player_1, match.team_2)
    prediction_repository.save(
        Prediction(player_1, match, home_player_team, away_player_team))

# prediction_1 = Prediction(player_1, match_1, player_team_1, player_team_2)
# prediction_repository.save(prediction_1)

# Update
# player_1.first_name = "Guillaume"
# player_1.last_name = "Tell"
# player_repository.update(player_1)

# team_4.group_info["rank"] = 1
# team_repository.update(team_4)

# group_1.name = "F"
# group_repository.update(group_1)

# match_1.set_goals(4,3)
def set_score(self, prediction: Prediction, score: float):
    """Record *score* as the prediction's BiDAF score (mutates it in place)."""
    prediction.bidaf_score = score
def run(task):
    """Main live-trading loop: stream quotations, periodically persist
    data, and spawn analysis/expiration-check threads.

    Runs forever; ticks every 0.5 s.

    :param task: task object providing settings and thread context.
    """
    analyzer = Analyzer(task)
    save_handle = False
    analysis_handle = False
    value_repeats = 0
    max_value_repeats = 30
    # Last received quotation value (for staleness detection).
    last_quotation_value = 0
    # Last second that was already processed (re-trigger guard).
    last_fixed_ts = 0
    while True:
        # Pin the current iteration's wall-clock second.
        time_now = int(time.time())
        if not analyzer.thread_stream:
            analyzer.start_stream()
        if analyzer.quotation.value:
            # Stale-quotation counter: restart the stream after the same
            # value has been seen max_value_repeats times in a row.
            if last_quotation_value != analyzer.quotation.value:
                last_quotation_value = analyzer.quotation.value
                wait = False
                value_repeats = 1
            else:
                wait = True
                value_repeats += 1
                if max_value_repeats == value_repeats:
                    analyzer.terminate_stream()
                    value_repeats = 1
            # Is it time for the collector to run?
            save_surplus_time = time_now % analyzer.task.setting.analyzer_collect_interval_sec
            if save_surplus_time == 0:
                save_handle = True
            # Is it time for the analyzer to run?
            surplus_time = time_now % analyzer.task.setting.analyzer_working_interval_sec
            if surplus_time == 0:
                analysis_handle = True
            # Reload the task settings.
            task.flush_setting()
            # Skip work outside the instrument's trading hours.
            if not task.setting.instrument.is_works(analyzer.quotation.ts):
                print("Рынок не работает")
                save_handle = False
                analysis_handle = False
            # Guard against processing the same second twice, and skip
            # while the quotation is stale.
            if last_fixed_ts < time_now and not wait:
                last_fixed_ts = time_now
                check_expired_predictions_thread = ExThread(
                    target=Controller.check_expired_predictions,
                    args=(task, analyzer.quotation))
                check_expired_predictions_thread.task = task
                check_expired_predictions_thread.start()
                check_expired_signals = ExThread(
                    target=Controller.update_expired_signals,
                    args=(task, analyzer.quotation))
                check_expired_signals.task = task
                check_expired_signals.start()
                if save_handle:
                    # Stamp the quotation with the current time and save.
                    # Providers.telebot().send_quotation(task.setting.instrument.instrument + ": "
                    #                                    + str(analyzer.quotation.value))
                    analyzer.quotation.ts = time_now
                    analyzer.quotation.save()
                    print(analyzer.quotation.value)
                    # Persist the candles.
                    analyzer.save_candles()
                    # Update prediction cost parameters.
                    Prediction.calculation_cost_for_topical(
                        task, analyzer.quotation)
                    save_handle = False
                if analysis_handle:
                    # Spawn the analysis thread.
                    analysis_thread = ExThread(target=analyzer.do_analysis)
                    analysis_thread.daemon = True
                    analysis_thread.task = task
                    analysis_thread.start()
                    # Spawn the prediction-check thread.
                    analysis_handle = False
        time.sleep(0.5)