def main():
    start_time = datetime.now()
    log_func.info(f"Start time {start_time}")
    conn = psycopg2.connect(
        dbname=get_env("db_name"),
        user=get_env("db_user"),
        password=get_env("db_password"),
        host=get_env("db_url"),
    )
    cursor = conn.cursor()
    create_tables(conn, cursor)
    # Resume from the last processed (year, row) checkpoint if one exists.
    try:
        cursor.execute(f"SELECT * FROM {LAST_ROW_TABLE};")
        buf = cursor.fetchall()
        file_year = buf[0][0]
        row_number = buf[0][1]
    except Exception as e:
        log_func.warning(f"Cannot get data from {LAST_ROW_TABLE}: {e}")
        file_year = YEARS[0]
        row_number = 0
    conn.commit()
    log_func.info(
        f"Starting inserting from {row_number} row from file for {file_year} year"
    )
    if file_year:
        index = YEARS.index(file_year)
        for year in YEARS[index:]:
            insert_data(
                conn, cursor, f"Odata{year}File.csv", year, row_number, start_time
            )
            row_number = 0  # files after the resumed one start from the first row
    else:
        for year in YEARS:
            insert_data(
                conn, cursor, f"Odata{year}File.csv", year, row_number, start_time
            )
            row_number = 0
    get_user_query(cursor)
    inserting_time = get_previous_run_time(cursor)
    end_time = datetime.now()
    log_func.info(f"End time {end_time}")
    log_func.info(f"Inserting executing time {inserting_time}")
    cursor.close()
    conn.close()
    log_func.info("Program is finished")
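# Assumed entry point for running the loader above as a script (not shown in
# this snippet). The SELECT in main() implies that LAST_ROW_TABLE holds a
# single checkpoint row ordered as (file_year, row_number); the concrete
# column names live in create_tables() and are not visible here.
if __name__ == "__main__":
    main()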
def __init__(self):
    self.sender = SlackMessageSender()
    self.job = get_env('JOB_ID')
    self.run = get_env('GITHUB_RUN_ID')
    self.repository = get_env('GITHUB_REPOSITORY')
    self.sha = get_env('GITHUB_SHA')
    self.token = get_env('GITHUB_TOKEN')
    self.workflow_name = get_env('GITHUB_WORKFLOW')
    self.root_dir = os.path.join(os.path.dirname(__file__), '../')
def main():
    start_time = datetime.now()
    log_func.info(f"Start time {start_time}")
    client = MongoClient(port=int(get_env("PORT")))
    db = client["lab4"]
    col1, col2 = create_tables(db)
    # Resume from the last processed (year, row) checkpoint if one exists.
    try:
        last_row = col2.find_one()
        row_number = last_row["rows"]
        file_year = last_row["year"]
    except Exception as e:
        log_func.warning(f"Cannot get data from {LAST_ROW_TABLE}: {e}")
        file_year = YEARS[0]
        row_number = 0
    log_func.info(
        f"Starting inserting from {row_number} row from file for {file_year} year"
    )
    if file_year:
        index = YEARS.index(file_year)
        for year in YEARS[index:]:
            insert_data(
                col1, col2, f"Odata{year}File.csv", year, row_number, start_time
            )
            row_number = 0  # files after the resumed one start from the first row
    else:
        for year in YEARS:
            insert_data(
                col1, col2, f"Odata{year}File.csv", year, row_number, start_time
            )
            row_number = 0
    get_user_query(col1)
    inserting_time = col2.find_one()
    end_time = datetime.now()
    log_func.info(f"End time {end_time}")
    log_func.info(
        f"Inserting executing time {timedelta(microseconds=inserting_time['execution_time'])}"
    )
    log_func.info("Program is finished")
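# Illustration only: the shape of the single checkpoint document in col2 that
# the Mongo-backed main() above reads. The field names come from the lookups
# in the code (last_row["year"], last_row["rows"], inserting_time["execution_time"]);
# the values below are made-up examples.
example_checkpoint = {
    "year": 2020,               # year whose CSV was being processed
    "rows": 150000,             # last row inserted from that CSV
    "execution_time": 1234567,  # accumulated insert time, in microseconds
}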
def evaluate(self, env_agent, player_one_char, n_episodes):
    start_time = time.time()
    states = len(self.test_agent.board_states)
    env_states = len(env_agent.board_states)
    if self.verbose:
        print("\n\n --- Evaluating vs {}. First Player: {}. Episodes: {}.".format(
            env_agent.name, player_one_char, n_episodes))
    # Counters for the four possible game outcomes.
    win_count = 0
    loss_count = 0
    draw_count = 0
    invalid_count = 0
    # This function will only work for a single Environment
    env = get_env(self.test_agent.obs_format, env_agent, player_one_char,
                  self.test_agent.rewards)
    if isinstance(env_agent, RLAgent):
        # Unwrap the core environment and switch it to self-play mode.
        if type(env) is TicTacToeEnv:
            core_env = env
        else:
            core_env = env.env
        # print("env:", core_env)
        # print("env id:", hex(id(core_env)))
        # print("env.self_play = True")
        core_env.self_play = True
    all_episode_rewards = []
    for episode in range(1, n_episodes + 1):
        episode_rewards = []
        done = False
        obs = env.reset()
        if self.verbose:
            print("\n\nGame number {}".format(episode))
        while not done:
            if self.verbose:
                env.render()
            action = self.test_agent.play(obs)
            obs, reward, done, info = env.step(action)
            episode_rewards.append(reward)
            # if done and info['outcome'] == TicTacToeEnv.INVALID:
            if self.verbose:
                naught_action = -1
                if 'naught_action' in info:
                    naught_action = info['naught_action']
                info_str = "CROSS: {:1d} | NAUGHT: {:2d} | Reward: {:2.0f}".format(
                    action, naught_action, reward)
                if done:
                    info_str = "{} | Outcome: {} | First Player: {}".format(
                        info_str, info['outcome'], info['player_one'])
                    print(info_str)
                    env.render()
                    print()
                else:
                    print(info_str)
        all_episode_rewards.append(sum(episode_rewards))
        if self.verbose and episode == int(n_episodes / 2):
            mean_reward = np.mean(all_episode_rewards)
            print("Episode: {:6d} | Mean reward: {:5.2f}".format(
                episode, mean_reward))
        # Tally the outcome of the finished game.
        outcome = info['outcome']
        if outcome == TicTacToeEnv.CROSS:
            win_count += 1
        elif outcome == TicTacToeEnv.DRAW:
            draw_count += 1
        elif outcome == TicTacToeEnv.NAUGHT:
            loss_count += 1
        elif outcome == TicTacToeEnv.INVALID:
            invalid_count += 1
    mean_reward = np.mean(all_episode_rewards)
    if self.verbose:
        print("Episode: {:6d} | Mean reward: {:5.2f}".format(
            n_episodes, mean_reward))
    # Count how many new board states were discovered during evaluation.
    total_states = len(self.test_agent.board_states)
    new_states = total_states - states
    env_agent_states = len(env_agent.board_states)
    new_env_states = env_agent_states - env_states
    if not isinstance(env_agent, RLAgent):
        board_states = OrderedDict()
        board_states['env_agent'] = env_agent.name
        board_states['player_one'] = player_one_char
        board_states['current_idx'] = self.current_idx
        board_states['env_agent_states'] = env_agent_states
        board_states['env_states'] = env_agent.board_states.copy()
        self.all_board_states.append(board_states)
    win_perc = win_count * 100.0 / n_episodes
    draw_perc = draw_count * 100.0 / n_episodes
    loss_perc = loss_count * 100.0 / n_episodes
    invalid_perc = invalid_count * 100.0 / n_episodes
    partial_score = None
    if isinstance(env_agent, RandomAgent):
        partial_score = self.WIN_SCORE * win_perc \
            + self.DRAW_SCORE_RANDOM * draw_perc \
            + self.LOSS_SCORE * loss_perc \
            + self.INVALID_SCORE * invalid_perc
    elif isinstance(env_agent, MinMaxAgent):
        # Against MinMaxAgent, draws receive the win score, since MinMax cannot be beaten.
        partial_score = self.WIN_SCORE * draw_perc \
            + self.LOSS_SCORE * loss_perc \
            + self.INVALID_SCORE * invalid_perc
    _, test_time_h = get_elapsed_time(time.time(), start_time)
    return [
        win_perc, draw_perc, loss_perc, invalid_perc, float(mean_reward),
        partial_score, test_time_h, new_states, total_states, new_env_states,
        env_agent_states
    ]
def __init__(self):
    self.slack_webhook_url = get_env('SLACK_WEBHOOK_URL')
    self.force_send = os.environ.get('FORCE_SEND', 'false')
    self.ctx = SlackMessageSender._create_non_verifying_context()
    self.branch_or_tag = get_env('GITHUB_REF')
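# A minimal sketch of the _create_non_verifying_context() helper referenced
# above (declared as a @staticmethod on SlackMessageSender), assuming it
# builds an ssl.SSLContext with certificate verification disabled; the real
# implementation is not shown in this snippet.
import ssl

def _create_non_verifying_context():
    ctx = ssl.create_default_context()
    ctx.check_hostname = False       # skip hostname verification
    ctx.verify_mode = ssl.CERT_NONE  # skip certificate-chain verification
    return ctx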
def train(alg, obs_format, env_agent: Agent, self_play: bool,
          train_episodes=10000, eval_freq=1000, player_one_char='-',
          gamma=1.0, net_arch=[64, 128], filter_size=3, pad='SAME',
          rewards=TicTacToeEnv.DEFAULT_REWARDS, env_exploration_rate=0.0,
          n_envs=1, tensorboard_log=None):
    now = datetime.now()
    env_agent_name = env_agent.name
    if self_play:
        env_agent_name = "Self"
    if alg.__name__ == "DQN":
        # DQN is trained against a single environment.
        n_envs = 1
    if obs_format != OBS_FORMAT_2D and obs_format != OBS_FORMAT_2D_FLAT:
        # Filter size and padding only apply to the 2D (CNN) observation formats.
        filter_size = 0
        pad = 0
    # Record every hyperparameter so the run can be reproduced later.
    params = OrderedDict()
    params['alg'] = alg.__name__
    params['env_agent'] = env_agent_name
    params['train_episodes'] = train_episodes
    params['eval_freq'] = eval_freq
    params['obs_format'] = obs_format
    params['env_exploration_rate'] = env_exploration_rate
    params['n_envs'] = n_envs
    params['gamma'] = gamma
    params['net_arch'] = net_arch
    params['filter_size'] = filter_size
    params['pad'] = pad
    params['r_win'] = rewards[0]
    params['r_draw'] = rewards[1]
    params['r_still_playing'] = rewards[2]
    params['r_lose'] = rewards[3]
    params['r_invalid'] = rewards[4]
    params['player_one_char'] = player_one_char
    params['datetime'] = now.replace(microsecond=0).isoformat()
    net_arch_str = '-'.join([str(elem) for elem in net_arch])
    # rewards_str = '-'.join([str(elem) for elem in rewards])
    log_dir = "logs/{}_{}/{}_{}_{}_{}_{}_{}_{}_{}_{}".format(
        alg.__name__, env_agent_name, now.strftime('%y%m%d-%H%M%S'),
        train_episodes, obs_format, env_exploration_rate, n_envs, gamma,
        net_arch_str, filter_size, pad)
    os.makedirs(log_dir, exist_ok=True)
    print("\nLog dir:", log_dir)
    with open(log_dir + "/params.json", "w") as f:
        json.dump(params, f, indent=4)
    train_env = get_env(obs_format, env_agent, player_one_char, rewards,
                        env_exploration_rate, monitor=True, n_envs=n_envs)
    policy_network = "MlpPolicy"
    policy_kwargs = None
    if obs_format == OBS_FORMAT_2D or obs_format == OBS_FORMAT_2D_FLAT:
        policy_network = "CnnPolicy"
        policy_kwargs = {
            'cnn_extractor': tic_tac_toe_cnn,
            'cnn_arch': net_arch,
            'filter_size': filter_size,
            'pad': pad
        }
    if alg.__name__ == "PPO2":
        if not policy_kwargs:
            policy_kwargs = {'net_arch': net_arch}
        model = alg(policy_network, train_env, gamma=gamma,
                    policy_kwargs=policy_kwargs,
                    tensorboard_log=tensorboard_log, verbose=0)
    elif alg.__name__ == "DQN":
        if not policy_kwargs:
            policy_kwargs = {'layers': net_arch, 'dueling': True}
        else:
            policy_kwargs['dueling'] = True
        model = alg(policy_network, train_env, gamma=gamma,
                    policy_kwargs=policy_kwargs, prioritized_replay=True,
                    double_q=True, tensorboard_log=tensorboard_log, verbose=0)
    # A tic-tac-toe game lasts at most 9 moves.
    max_train_timesteps = train_episodes * 9
    with PlotTestSaveCallback(train_episodes, eval_freq, log_dir, alg.__name__,
                              self_play, train_env) as callback:
        model.learn(max_train_timesteps, callback=callback,
                    log_interval=eval_freq)
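# Example invocation of train() (illustrative only; assumes stable-baselines'
# PPO2 plus this project's RandomAgent and OBS_FORMAT_2D are importable in
# this module):
#
#     train(PPO2, OBS_FORMAT_2D, RandomAgent(), self_play=False,
#           train_episodes=20000, eval_freq=2000)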
from rest_framework.views import APIView
from django.http import JsonResponse
from utils.utils import get_env
from utils.test_selenium import test
import os

apiBaseKey = get_env("apiBaseKey", 'astrit123')
print(apiBaseKey)


class API(APIView):
    def post(self, request):
        apiKey = request.data['apiKey']
        if apiKey == apiBaseKey:
            test()
            return JsonResponse({'status': True, 'msg': 'Successfully submitted ticket'}, status=200)
        return JsonResponse({'status': False, 'msg': 'Cannot submit ticket', 'data': {}}, status=400)
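# A minimal sketch of the get_env() helper these snippets import from
# utils.utils, inferred only from its call sites (a required variable name
# plus an optional default, e.g. get_env("apiBaseKey", 'astrit123')); the
# real helper may differ.
import os

def get_env(name, default=None):
    value = os.environ.get(name, default)
    if value is None:
        # Fail fast when a required variable is missing and no default was given.
        raise RuntimeError(f"Required environment variable '{name}' is not set")
    return value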
def _test_module_message(self):
    python_version = get_env('PYTHON_VERSION')
    test_module = get_env('TEST_MODULE')
    return f"(Python *{python_version}*, Test Module *{test_module}*)"
def _reports_message(self):
    reports_url = get_env('REPORTS_URL')
    return f"Full reports can be found <{reports_url}|here> "