Beispiel #1
0
            def process_method_functions(path_to_methods):
                """Zip each method's lambda deployable and append its rendered
                Terraform function block to the template file.

                Each method folder is expected to contain exactly one function
                folder; that folder's name is the lambda function name.
                """
                if not os.path.isdir(path_to_methods):
                    return
                for method_name in utils.get_all_sub_directory_names(path_to_methods):
                    method_dir = path_to_methods + '/' + method_name

                    # Only one function per method: take the single sub-folder.
                    fn_name = utils.get_all_sub_directory_names(method_dir)[0]
                    print('processing function: ' + fn_name)
                    fn_dir = method_dir + '/' + fn_name

                    # 1. Zip the lambda source into the function's deployable.
                    fn_zip = fn_dir + '/lambda_function.zip'
                    utils.zip_lambda_deployable(
                        fn_dir + '/' + utils.LAMBDA_FUNCTION_FILENAME, fn_zip)

                    # 2. Render the lambda-function template into the Terraform
                    #    file, substituting the placeholder tokens per line.
                    utils.append_new_line(TERRAFORM_TEMPLATE_PATH, '\n')
                    with open(TEMPLATE_LAMBDA_FUNCTION, 'r') as template:
                        for raw_line in template:
                            rendered = (raw_line
                                        .replace('__FUNCTION_NAME__', fn_name)
                                        .replace('__FUNCTION_ZIP_PATH__', fn_zip))
                            utils.append_new_line(TERRAFORM_TEMPLATE_PATH,
                                                  rendered)
Beispiel #2
0
 def gateway_rest_api_workflow():
     """Append the API-gateway "rest api" template to the Terraform file,
     substituting the REST-API name token on every line."""
     print('starting api gateway "rest api" workflow')
     with open(TEMPLATE_API_GATEWAY_REST_API, 'r') as template:
         for raw_line in template:
             rendered = raw_line.replace('__API_GATEWAY_REST_API_NAME__',
                                         API_GATEWAY_REST_API_NAME)
             utils.append_new_line(TERRAFORM_TEMPLATE_PATH, rendered)
Beispiel #3
0
        def process_methods(path_to_resource):
            """Emit an API-gateway integration block for every method found
            under this resource folder, then recurse into its sub-resources.
            """
            base = path_to_resource.rstrip('/')
            path_to_methods = base + '/' + utils.METHODS_DIR_NAME
            path_to_resources = base + '/' + utils.RESOURCES_DIR_NAME

            # The resource name is the folder containing the methods dir;
            # strip path-parameter braces so it forms a valid identifier.
            # (replace is a no-op when no braces are present.)
            resource_name = path_to_methods.split('/')[-2]
            resource_name = resource_name.replace('{', '').replace('}', '')

            # Process every method folder for this resource.
            if os.path.isdir(path_to_methods):
                for http_method in utils.get_all_sub_directory_names(path_to_methods):

                    # 1. The single function folder inside the method folder
                    #    names the lambda to wire the integration to.
                    method_dir = path_to_methods + '/' + http_method
                    fn_name = utils.get_all_sub_directory_names(method_dir)[0]
                    print('processing api-gateway integration for function: ' +
                          fn_name)

                    # 2. Render the integration template, filling in the
                    #    placeholder tokens line by line.
                    utils.append_new_line(TERRAFORM_TEMPLATE_PATH, '\n')
                    with open(TEMPLATE_API_GATEWAY_INTEGRATION, 'r') as template:
                        for raw_line in template:
                            rendered = (raw_line
                                        .replace('__API_GATEWAY_RESOURCE_NAME__',
                                                 resource_name)
                                        .replace('__API_GATEWAY_REST_API_NAME__',
                                                 API_GATEWAY_REST_API_NAME)
                                        .replace('__API_GATEWAY_HTTP_METHOD__',
                                                 http_method.upper())
                                        .replace('__FUNCTION_NAME__', fn_name))
                            utils.append_new_line(TERRAFORM_TEMPLATE_PATH,
                                                  rendered)

            # Recursively walk every sub-resource folder.
            if os.path.isdir(path_to_resources):
                for sub_name in utils.get_all_sub_directory_names(path_to_resources):
                    process_methods(path_to_resources + '/' + sub_name)
Beispiel #4
0
        def process_resources(path_to_resources):
            """Emit an API-gateway resource block for every folder directly
            under ``path_to_resources`` and recurse into each one's
            sub-resources folder.
            """
            path_to_resources = path_to_resources.rstrip('/')

            # The folder two levels up names the parent resource; its
            # Terraform id is referenced by every child resource block.
            parent_resource = path_to_resources.split('/')[-2]
            if parent_resource == ROOT_RESOURCE_NAME:
                parent_resource_id_variable = 'aws_api_gateway_rest_api.' + API_GATEWAY_REST_API_NAME + '.root_resource_id'
            else:
                # Strip path-parameter braces so the name is a valid identifier.
                parent_resource = parent_resource.replace('{', '').replace('}', '')
                parent_resource_id_variable = 'aws_api_gateway_resource.' + parent_resource + '.id'

            if not os.path.isdir(path_to_resources):
                return
            for folder in utils.get_all_sub_directory_names(path_to_resources):

                # TODO: refactor/rename 'resources' to 'paths' to match actual meaning
                # The URL path part keeps any {braces}; the Terraform resource
                # name must not contain them.
                path_part = folder
                resource = folder.replace('{', '').replace('}', '')

                print('processing resource: ' + resource)

                # 1. Render the resource template, substituting the tokens.
                utils.append_new_line(TERRAFORM_TEMPLATE_PATH, '\n')
                with open(TEMPLATE_API_GATEWAY_RESOURCE, 'r') as template:
                    for raw_line in template:
                        rendered = (raw_line
                                    .replace('__API_GATEWAY_RESOURCE_NAME__',
                                             resource)
                                    .replace('__API_GATEWAY_REST_API_NAME__',
                                             API_GATEWAY_REST_API_NAME)
                                    .replace('__PARENT_RESOURCE_ID_VARIABLE__',
                                             parent_resource_id_variable)
                                    .replace('__API_GATEWAY_PATH_PART__',
                                             path_part))
                        utils.append_new_line(TERRAFORM_TEMPLATE_PATH,
                                              rendered)

                # 2. Recurse through this resource's sub-resources.
                process_resources(path_to_resources + '/' + path_part + '/' + utils.RESOURCES_DIR_NAME)
Beispiel #5
0
    def layers_worfklow():
        """Zip each lambda layer's ``python`` folder and append its rendered
        Terraform layer block to the template file.

        NOTE(review): the name is misspelled ('worfklow') but is kept to
        avoid breaking external callers.
        """
        for layer_name in utils.get_all_sub_directory_names(LAMBDA_LAYERS_DIR):
            layer_root = LAMBDA_LAYERS_DIR + '/' + layer_name

            # 1. Package the layer's 'python' dir; the zip helper appends
            #    '.zip' to the base path (see token substitution below).
            zip_base = layer_root + '/layer'
            utils.zip_lambda_deployable(layer_root + '/python', zip_base)

            # 2. Render the layer template, filling in the placeholder tokens.
            utils.append_new_line(TERRAFORM_TEMPLATE_PATH, '\n')
            with open(TEMPLATE_LAMBDA_LAYER, 'r') as template:
                for raw_line in template:
                    rendered = (raw_line
                                .replace('__LAYER_NAME__', layer_name)
                                .replace('__LAYER_ZIP_PATH__', zip_base + '.zip'))
                    utils.append_new_line(TERRAFORM_TEMPLATE_PATH,
                                          rendered)
Beispiel #6
0
def train(args, env):
    """Per-agent phase/duration training loop.

    Each agent chooses both a phase and how long to hold it; transitions are
    logged to disk and replayed into each agent's memory.

    Relies on module-level names defined elsewhere in this file: ``episodes``,
    ``agents``, ``n_agents``, ``yellow_phase_time``, ``offset_phase``,
    ``flag_default_reward``, ``flag_mean_reward``, ``file_name``, ``u``,
    ``logger``, ``info_file``.  # NOTE(review): inferred from use — confirm

    Args:
        args: parsed options; uses ``args.steps``, ``args.save_rate``,
            ``args.action_interval`` and ``args.save_dir``.
        env: simulation environment exposing ``reset``, ``step``, ``eng``,
            ``world`` and ``metric``.  # presumably CityFlow-style — verify
    """
    for e in range(episodes):

        # Reset per-episode bookkeeping on every agent.
        for agent in agents: agent.reset_episode_infos()

        # Observations are scaled by 0.01 before being fed to the agents.
        first_obs = np.array(env.reset())*0.01
        current_obs = first_obs

        # Save a replay file only on the last episode of each save window.
        if e % args.save_rate == args.save_rate - 1:
            env.eng.set_save_replay(True)
            env.eng.set_replay_file("replay_%s.txt" % e)
        else:
            env.eng.set_save_replay(False)

        episodes_rewards = [0] * n_agents
        episodes_decision_num = [0] * n_agents

        i = 0
        while i < args.steps:

            ### Request a new action (phase + time) once the current action's time is up
            for agent_id, agent in enumerate(agents):
                agent_obs = current_obs[agent_id]
                if agent.episode_action_time <= i:
                    if agent.episode_action_time == i:
                        agent.change_phase()

                        # Skip ahead past phases whose first state component
                        # is 0; stop if we cycle back to the starting phase.
                        initial_phase = agent.actual_phase
                        a_phase = initial_phase
                        obs_te = env.world.get_state_of_three_by_phase(agent.I,a_phase)
                        while obs_te[0] == 0:
                            agent.change_phase()
                            a_phase = agent.actual_phase
                            obs_te = env.world.get_state_of_three_by_phase(agent.I,a_phase)
                            if initial_phase == a_phase:
                                break

                        # Train on the stored transitions at each phase switch.
                        agent.replay()
                        #agent.action_time = -1
                        #print(i,agent.get_phase())

                    # Once the yellow phase + offset have elapsed, ask the
                    # model for the next hold duration.
                    if agent.episode_action_time+yellow_phase_time+offset_phase <= i:

                        #print(first_obs[agent_id], agent_obs)
                        #print("----")
                        first_obs[agent_id] = agent_obs

                        time = agent.get_action(first_obs[agent_id])
                        agent.action_time = time
                        agent.episode_action_time += (time+1)*5 ## starts at 0 seconds + the duration chosen by the model (0,5,10,15,20...)
                        phase = agent.I.current_phase
                        #print(i,agent_obs,time,phase,agent.actual_phase)
                        #print(time)

            ### For each action interval
            for _ in range(args.action_interval):
                actions = [agent.get_phase() for agent in agents]
                # NOTE(review): `dones`/`current_info` are never used below.
                current_obs, current_rewards, dones, current_info = env.step(actions)
                current_obs = np.array(current_obs)*0.01
                i += 1

                #u.append_new_line_states(file_name+"_0",[e,i,first_obs,current_obs,[agents[0].get_phase(),agents[0].I.current_phase],[current_rewards[0],agents[0].real_reward(first_obs[0],current_obs[0])]])

                for agent_id, agent in enumerate(agents):

                    # Either use the environment's reward or a custom reward
                    # computed from the observation delta.
                    reward = agent.real_reward(first_obs[agent_id],current_obs[agent_id])
                    #print(reward,current_rewards[agent_id])

                    agent.current_reward.append(current_rewards[agent_id]) if flag_default_reward else agent.current_reward.append(reward)

                    # A decision window just ended: aggregate the collected
                    # rewards, log the transition and store it in memory.
                    if agent.episode_action_time+yellow_phase_time+offset_phase == i:
                        action_time = agent.action_time

                        agent_reward = np.mean(agent.current_reward) if flag_mean_reward else agent.current_reward[-yellow_phase_time]
                        #print('----------------')
                        #print("Reward: ", agent_reward,"; min:",np.min(agent.current_reward),"; Méd:",np.mean(agent.current_reward),"; Max:",np.max(agent.current_reward),"; Contagem:",len(agent.current_reward) )
                        #print('----------------')
                        agent.current_reward = []

                        phase = agent.actual_phase
                        next_p = agent.next_phase(phase)

                        u.append_new_line(file_name+f"_{agent_id}",[[first_obs[agent_id],phase], action_time, agent_reward, [current_obs[agent_id],next_p],e,i])
                        ob = first_obs[agent_id].tolist()
                        nob = current_obs[agent_id].tolist()
                        agent.remember( [ob,phase] , action_time, agent_reward, [nob,next_p])

                        episodes_rewards[agent_id] += agent_reward
                        episodes_decision_num[agent_id] += 1

        # NOTE(review): `agent` here is the leaked loop variable, i.e. only
        # the LAST agent decays epsilon / syncs its target network — confirm
        # this is intended.
        if agent.total_decision > agent.learning_start:
            agent.decay_epsilon()
            #agent.replay()
            agent.update_target_network()
        #if agent.total_decision > agent.learning_start and not(agent.total_decision%agent.update_target_model_freq) :

        # Persist model checkpoints every `save_rate` episodes (incl. e == 0).
        if not (e % args.save_rate):
            if not os.path.exists(args.save_dir):
                os.makedirs(args.save_dir)
            for agent in agents:
                agent.save_model(args.save_dir)

        # Episode summary: log metrics and push them to the tracking backend.
        eval_dict = {}

        logger.info(f"episode:{e}/{episodes-1}, steps:{i}")
        eval_dict["episode"]=e
        eval_dict["steps"]=i

        for metric in env.metric:
            logger.info(f"{metric.name}: {metric.eval()}")
            eval_dict[metric.name]=metric.eval()

        for agent_id, agent in enumerate(agents):
            logger.info(f"agent:{agent_id}, epsilon:{agent.epsilon}, mean_episode_reward:{episodes_rewards[agent_id] / episodes_decision_num[agent_id]}")

        # Only agent 0's stats are exported to the tracker.
        eval_dict["epsilon"]=agents[0].epsilon
        eval_dict["mean_episode_reward"]=episodes_rewards[0] / episodes_decision_num[0]

        u.wand_log(eval_dict)

    # Final run-configuration summary (representative: agent 0).
    logger.info("Parametros Utilizados")
    agent = agents[0]
    #logger.info(f"BUFFER: buffer_size:{agent.buffer_size}; batch_size:{agent.batch_size}; learning_start:{agent.learning_start};")
    #logger.info(f"MODEL UPDATE: update_model_freq:{agent.update_model_freq}; update_target_model_freq:{agent.update_target_model_freq};")
    #logger.info(f"LEARNING: gamma:{agent.gamma}; epsilon:{agent.epsilon_start}; epsilon_min:{agent.epsilon_min}; epsilon_decay:{agent.epsilon_decay}; learning_rate:{agent.learning_rate};")
    logger.info(f"PHASE: n_phases:{agent.n_phases}; start_phase:{agent.start_phase};")
    logger.info(f"TRAINING: total_decision:{agent.total_decision};")
    #logger.info(f"ACTIVATION: activation:{agent.activation};")
    logger.info(f"STATE: ob_generator:{agent.ob_generator.fns[0]};")
    logger.info(f"REWARD: reward_generator:{agent.reward_generator.fns[0]};")
    logger.info(str(info_file))
Beispiel #7
0
def train(args, env):
    """Shared-agent training loop: a single ``main_agent`` picks an action
    for every intersection and learns from all of their transitions.

    Relies on module-level names defined elsewhere in this file:
    ``episodes``, ``agents``, ``main_agent``, ``action_interval``,
    ``file_name``, ``u``, ``logger``.  # NOTE(review): inferred from use — confirm

    Args:
        args: parsed options; uses ``args.steps``, ``args.save_rate`` and
            ``args.save_dir``.
        env: simulation environment exposing ``reset``, ``step``, ``eng``
            and ``metric``.  # presumably CityFlow-style — verify
    """
    total_decision_num = 0
    for e in range(episodes):

        last_obs = env.reset()
        # Save a replay file only on the last episode of each save window.
        if e % args.save_rate == args.save_rate - 1:
            env.eng.set_save_replay(True)
            env.eng.set_replay_file("replay_%s.txt" % e)
        else:
            env.eng.set_save_replay(False)
        episodes_rewards = [0 for i in agents]
        episodes_decision_num = 0
        i = 0

        while i < args.steps:

            # One decision every `action_interval` steps; `i` advances in
            # multiples of it, so this is true on every outer iteration.
            if i % action_interval == 0:
                actions = []
                for agent_id, agent in enumerate(agents):
                    # Explore via sampling until enough decisions are stored.
                    if total_decision_num > agent.learning_start:
                        actions.append(main_agent.get_action(last_obs[agent_id]))
                    else:
                        actions.append(main_agent.sample())

                # Hold the chosen actions for the whole interval, averaging
                # the per-step rewards.
                rewards_list = []
                for _ in range(action_interval):
                    obs, rewards, dones, _ = env.step(actions)
                    i += 1
                    rewards_list.append(rewards)

                rewards = np.mean(rewards_list, axis=0)

                # Log and store every intersection's transition into the one
                # shared replay memory.
                for agent_id, agent in enumerate(agents):
                    u.append_new_line(file_name+f"_{agent_id}",[[last_obs[agent_id],-1], actions[agent_id], rewards[agent_id], [obs[agent_id],-1],e,i])
                    main_agent.remember(last_obs[agent_id], actions[agent_id], rewards[agent_id], obs[agent_id])
                    episodes_rewards[agent_id] += rewards[agent_id]
                    episodes_decision_num += 1

                total_decision_num += 1
                last_obs = obs

                #for agent_id, agent in enumerate(agents):
                # Periodic learning / target-network sync on the shared agent.
                if total_decision_num > main_agent.learning_start and total_decision_num % main_agent.update_model_freq == main_agent.update_model_freq - 1:
                    main_agent.replay()
                if total_decision_num > main_agent.learning_start and total_decision_num % main_agent.update_target_model_freq == main_agent.update_target_model_freq - 1:
                    main_agent.update_target_network()

            if all(dones):
                break

        # Checkpoint the shared model at the end of each save window.
        if e % args.save_rate == args.save_rate - 1:
            if not os.path.exists(args.save_dir):
                os.makedirs(args.save_dir)

            main_agent.save_model(args.save_dir)

        # Episode summary: log metrics and push them to the tracking backend.
        eval_dict = {}

        logger.info(f"episode:{e}/{episodes-1}, steps:{i}")
        eval_dict["episode"]=e
        eval_dict["steps"]=i

        for agent_id, agent in enumerate(agents):
            logger.info("\tagent:{}, mean_episode_reward:{}".format(agent_id, episodes_rewards[agent_id] / episodes_decision_num))

        for metric in env.metric:
            logger.info(f"\t{metric.name}: {metric.eval()}")
            eval_dict[metric.name]=metric.eval()

        eval_dict["epsilon"]=main_agent.epsilon
        eval_dict["mean_episode_reward"]=episodes_rewards[0] / episodes_decision_num

        u.wand_log(eval_dict)

    #for agent in agents:
    main_agent.save_model(args.save_dir)
Beispiel #8
0
from podparser.parser import Parser
from utils import append_new_line
import re

# Parse pages 71-340 of the 1861-1862 Post Office directory and dump every
# entry, prefixed with its page number, to a plain-text file.
p = Parser(config='1861-1862/config', dir_path='1861-1862/', verbose = False, start = 71, end = 340)
# p = Parser(config='1861-1862/config', dir_path='1861-1862/', verbose = False, start = 340, end = 340)
directory = p.run_parser()  # renamed from 'dir' to avoid shadowing the builtin
for page in directory.pages:
    for entry in page.entries:
        # Entries stringify with a trailing newline; strip it so each entry
        # occupies exactly one line of output.
        entry = re.sub(r"\n$", "", str(entry))
        entry = "Page {0}\t".format(page.number) + entry
        append_new_line('1861-1862/general-directory.txt', entry)