Example no. 1
def create_two_layer_dense_model():
    a = Config(config_dict=None,
               standard_key_list=TwoLayerDenseModel.standard_key_list)
    a.load_config(path=CONFIG_PATH +
                  '/model/testTwoLayerDenseAttackConfig.json')
    model = TwoLayerDenseModel(config=a)
    return model
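These examples share a construct-then-load Config pattern: build the object with a standard_key_list, populate config_dict from JSON, then read typed values out of config_dict. A minimal sketch of that pattern; the key names and file name below are placeholders, not taken from any of the examples.

# Hedged sketch of the shared Config pattern; keys and path are illustrative.
conf = Config(config_dict=None, standard_key_list=['LEARNING_RATE', 'BATCH_SIZE'])
conf.load_config(path=CONFIG_PATH + '/model/myModelConfig.json')
learning_rate = conf.config_dict['LEARNING_RATE']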
Example no. 2
    def __init__(self):
        self._config = Config()
        self._db_server = self._config.get_db_server()
        self._db_port = self._config.get_db_port()
        self._db_database = self._config.get_db_database()
        self._client = MongoClient(self._db_server, self._db_port)
        self._date_helper = DateHelper()
Example no. 3
    def enviar_email_notificacao(self,
                                 cc: Optional[List[str]] = None,
                                 nome: str = '',
                                 grupo: str = ''):

        if not self.__status:
            return

        reports = get_info('reports', grupo)

        assunto = Config.get('email', grupo, 'assunto')
        to = [Config.get('email', grupo, 'email')]
        conteudo = Config.get('email', grupo, 'conteudo')

        # Build the message headers
        if cc is None:
            cc = []
        msg = MIMEMultipart('related')  # subtype only; charset belongs to the parts
        msg['Subject'] = f'{assunto}'
        msg['From'] = self.__from
        msg['To'] = ','.join(to)
        msg['Cc'] = ','.join(cc)

        # Load the template file
        template = str(Path(assets.__path__[0]) / 'email_notificacao.html')
        msg.attach(self.__carregar_template(template, nome, conteudo))
        for i in reports:
            msg.attach(self.__carregar_anexo(i))
            os.remove(i)
            print('removed attachment')

        # Send the email
        self.__enviar_email(to + cc, msg)
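The private __enviar_email helper is not shown. A minimal smtplib sketch of the sending step it presumably wraps, reusing the host/port/TLS fields loaded in Example no. 15; the function name and parameters here are assumptions.

import smtplib

def _send_email(host, port, use_tls, user, password, from_addr, to_addrs, msg):
    # Hypothetical equivalent of self.__enviar_email(to + cc, msg).
    with smtplib.SMTP(host, port) as server:
        if use_tls:
            server.starttls()  # upgrade to TLS before authenticating
        server.login(user, password)
        server.sendmail(from_addr, to_addrs, msg.as_string())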
Example no. 4
    def __init__(self):
        logger.info("Initializing Trello crawler")

        factory_controller = FactoryController()
        self._config = Config()
        self._request_helper = RequestHelper()
        self._date_helper = DateHelper()

        self._trello_card_controller = factory_controller.get_trello_card()
Example no. 5
def create_data():
    conf = Config(standard_key_list=FISData.standard_key_list,
                  config_dict=None)
    config = utils.load_json(file_path=CONFIG_PATH +
                             '/data/testFisAttackDataConfig.json')
    config['FILE_PATH'] = DATASET1_PATH + '/Attack.csv'
    conf.load_config(path=None, json_dict=config)
    data = FISData(config=conf)
    data.load_data()
    return data
Example no. 6
def load_all_config(config_path):
    res = []
    file_list = glob.glob(pathname=config_path + '/**/*.json', recursive=True)
    for file_path in file_list:
        conf = Config.load_json(file_path=file_path)
        res.append((file_path, conf))
    return res
Example no. 7
def abstractToMatlab(event_list, output_file):
    UI().objectUI.showMessage("Starting to write mat file", "w")
    file_data = {}
    c = Config()

    if c.matlab:
        struct_type = c.config_data["matlab"]["default"]
    else:
        struct_type = UI().objectUI.chooseWindow(
            "Which struct type do you prefer? ", cte.MATLAB_TYPES_ADMITTED)

    arrays = [[], [], [], []]
    for ev in event_list:
        arrays[0].append(double(ev.x))
        arrays[1].append(double(ev.y))
        arrays[2].append(double(ev.pol))
        arrays[3].append(double(secsToNsecs(ev.ts)))

    # 1 struct
    if struct_type == cte.MATLAB_TYPES_ADMITTED[0]:
        if c.matlab:
            struct_name = c.config_data["matlab"]["1 struct"]["struct_name"]
            data_names = c.config_data["matlab"]["1 struct"]["names"]
        else:
            struct_name = UI().objectUI.simpleInput(
                "What is the name of the struct?: ")
            data_names = UI().objectUI.multiInputsWindow(
                "What are the names of these parameters?",
                cte.MATLAB_STRUCT_NAMES)

        file_data[struct_name] = {}
        for arr, name in zip(arrays, data_names):
            file_data[struct_name][name] = arr

    # Matrix nx4
    elif struct_type == cte.MATLAB_TYPES_ADMITTED[1]:
        if c.matlab:
            struct_name = c.config_data["matlab"]["Matrix nx4"]["struct_name"]
        else:
            struct_name = UI().objectUI.simpleInput(
                "What is the name of the struct?: ")

        file_data[struct_name] = column_stack(
            (arrays[0], arrays[1], arrays[2], arrays[3]))

    # 4 structs (one for each event's parameter)
    elif struct_type == cte.MATLAB_TYPES_ADMITTED[2]:
        if c.matlab:
            struct_names = c.config_data["matlab"]["4 structs"]["names"]
        else:
            struct_names = UI().objectUI.multiInputsWindow(
                "What are the names of these structs?", cte.MATLAB_STRUCT_NAMES)

        for arr, name in zip(arrays, struct_names):
            file_data[name] = arr

    UI().objectUI.showMessage(
        "Starting to save the data (no progress bar available)", "w")
    sio.savemat(output_file, file_data, oned_as="column")
    UI().objectUI.showMessage("Finishing writing the mat file", "c")
Example no. 8
def abstractToRosbag(event_list, output_file):
    UI().objectUI.showMessage("Starting to write bag file", "w")
    bag = rosbag.Bag(output_file, "w")
    c = Config()

    if c.rosbag:
        topic = c.config_data["rosbag"]["topic"]
    else:
        topic = UI().objectUI.simpleInput(
            "Enter the name of the topic where the events are going to be written: "
        )

    num_progress = getNumProgress(len(event_list))
    for i, event in enumerate(event_list):
        if i % num_progress == 0:
            UI().objectUI.sumProgress()

        e = _Event()
        e.x = event.x
        e.y = event.y
        e.polarity = event.pol
        e.ts = rospy.Time.from_sec(event.ts)

        bag.write(topic, e)

    UI().objectUI.sumProgress(True)
    bag.close()
    UI().objectUI.showMessage("Finishing writing the bag file", "c")
Example no. 9
class ElectricalCarQLearningModel(TabularQLearningModel):
    key_list = Config.load_json(file_path=CONFIG_KEY + '/tabularQLearingModelKey.json')

    def return_table_value(self, action, state):
        return self.q_table[action][state[0]][state[1]][state[4]][state[5]][state[6]][state[7]][state[8]]

    def set_table_value(self, action, state, val):
        self.q_table[action][state[0]][state[1]][state[4]][state[5]][state[6]][state[7]][state[8]] = val
Example no. 10
class DBHelper:
    def __init__(self):
        self._config = Config()
        self._db_server = self._config.get_db_server()
        self._db_port = self._config.get_db_port()
        self._db_database = self._config.get_db_database()
        self._client = MongoClient(self._db_server, self._db_port)
        self._date_helper = DateHelper()

    def _open(self, collection_name):
        db = self._client[self._db_database]
        collection = db[collection_name]
        return collection

    def _close(self):
        self._client.close()

    def insert(self, collection_name, item):
        collection = self._open(collection_name)
        result = collection.insert_one(item)
        self._close()
        return result

    def get(self, collection_name, filters, sort=None):
        if sort is None:
            sort = [("_id", 1)]
        collection = self._open(collection_name)
        result = collection.find(filters).sort(sort)
        items = list(result)
        self._close()
        return items

    def update(self, collection_name, filters, entity):
        collection = self._open(collection_name)
        entity["$set"]["updated_date"] = self._date_helper.now()
        result = collection.update_many(filters, entity)
        self._close()
        return result

    def delete(self, collection_name, filters):
        collection = self._open(collection_name)
        result = collection.delete_many(filters)
        self._close()
        return result

    @staticmethod
    def to_object_id(id):
        return ObjectId(id)
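A usage sketch of DBHelper, assuming Config supplies valid MongoDB connection details; the collection name and documents are placeholders. Note that every call closes the client; with pymongo 3.x the client transparently reconnects on the next operation, while pymongo 4 would raise.

helper = DBHelper()
helper.insert('users', {'name': 'ada'})
items = helper.get('users', {'name': 'ada'})
helper.update('users', {'name': 'ada'}, {'$set': {'name': 'ada lovelace'}})
helper.delete('users', {'name': 'ada lovelace'})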
Example no. 11
    def __init__(self, parameters_file, obs_bound):
        self.par = Config.load_json(file_path=parameters_file)
        self.degree = int(self.par[0])
        self.coef = self.par[1:-1]
        self.intec = self.par[-1]  # intercept term
        self.poly_feature = PolynomialFeatures(degree=self.degree)
        self.obs_low = obs_bound['STATE_LOW']
        self.obs_high = obs_bound['STATE_HIGH']
Example no. 12
def main():

    UI("terminal")

    Config("src/config/config.json")

    # testFileAndType("data/aedat/aedat4/Cars_sequence.aedat4", "aedat")

    testTypes("aedat", "aedat")
Example no. 13
    def __init__(self, config, data=None):
        super(Model, self).__init__(config)
        self.config = config
        if not self.config:
            self.config = Config(standard_key_list=[], config_dict={})
        self.data = data
        self.input = None
        self.delta_state_output = None
        self.snapshot_var = []
        self.save_snapshot_op = []
        self.load_snapshot_op = []
Example no. 14
def get_info(raiz: str, grupo: str):
    info = Config.get(raiz, grupo)
    lista = list(info.keys())

    if lista:
        lista_url = []
        for i in lista:
            lista_url.append(Config.get(raiz, grupo, i))

        return lista_url


# def get_info(raiz: str, items: List[str]):
#     if items:
#         lista_url = []
#         for i in items:
#             lista_url.append(Config.get(raiz, i))
#
#         return lista_url
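For orientation, a sketch of the nested layout that get_info assumes, inferred from the Config.get calls above; the group and report names are illustrative, not taken from the source.

config_json = {
    'reports': {
        'grupo_a': {
            'report_1': 'https://example.invalid/report_1.pdf',
            'report_2': 'https://example.invalid/report_2.pdf',
        }
    }
}
# get_info('reports', 'grupo_a') would return the two URL values.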
Example no. 15
    def __init__(self):

        self.__host = Config.get('mailer', 'server')
        self.__port = Config.get('mailer', 'port')
        self.__ttls = Config.get('mailer', 'ttls') == 'true'
        self.__status = Config.get('mailer', 'status')

        self.__user = Config.get('mailer', 'login')
        self.__pass = Config.get('mailer', 'password')
        self.__from = Config.get('mailer', 'from')
Example no. 16
def _basic_init(env_id, bound_file):
    real_env = make_env(env_id)
    test_env = make_env(env_id)
    import config as cfg

    if 'SWIMMER_HORIZON' in cfg.config_dict and env_id == 'Swimmer-v1':
        real_env._max_episode_steps = cfg.config_dict['SWIMMER_HORIZON']
    bound = Config.load_json(file_path=bound_file)

    action_bound = (np.array(bound['ACTION_LOW']),
                    np.array(bound['ACTION_HIGH']))
    obs_bound = (np.array(bound['STATE_LOW']), np.array(bound['STATE_HIGH']))

    return real_env, test_env, bound, action_bound, obs_bound
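The bound file is expected to carry per-dimension limits under the keys read above; a sketch of its assumed JSON shape, shown as a Python dict with placeholder values.

bound_json = {
    'ACTION_LOW': [-1.0, -1.0],
    'ACTION_HIGH': [1.0, 1.0],
    'STATE_LOW': [-5.0, -5.0, -5.0],
    'STATE_HIGH': [5.0, 5.0, 5.0],
}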
Example no. 17
class IntelligentRandomTrainerAgent(Agent):
    key_list = Config.load_json(file_path=CONFIG_KEY + '/intelligentRandomTrainerAgentKey.json')

    def __init__(self, config, model, env):
        super(IntelligentRandomTrainerAgent, self).__init__(config=config,
                                                            model=model,
                                                            env=env)
        self.sample_count = 0
        self.sess = tf.get_default_session()
        self.action_space = MultiDiscrete([[0, 1], [0, 1], [0, 1]])

    def predict(self, state, *args, **kwargs):
        res = self.action_space.sample()
        for i in range(3):
            if np.random.rand() <= 0.5:
                res[i] = 1.0
            else:
                res[i] = 0.2
        self.sample_count += 1
        return np.array(res)

    def update(self):
        # TODO finish your own update by using API with self.model
        pass
        # self.model.update()

    def store_one_sample(self, state, next_state, action, reward, done, *arg, **kwargs):
        # TODO store the one sample to whatever you want

        # self.model.store_one_sample(state=state,
        #                             next_state=next_state,
        #                             action=action,
        #                             reward=reward,
        #                             done=done)
        self.log_file_content.append({
            'STATE': np.array(state).tolist(),
            'NEW_STATE': np.array(next_state).tolist(),
            'ACTION': np.array(action).tolist(),
            'REWARD': reward,
            'DONE': done,
            'INDEX': self.log_print_count
        })
        self.log_print_count += 1

    def init(self):
        # TODO init your agent and your model
        # this function will be called at the start of the whole train process
        # self.model.init()
        pass
Example no. 18
class BaselineTrainerAgent(Agent):
    key_list = Config.load_json(file_path=CONFIG_KEY +
                                '/baselineTrainerAgentKey.json')

    def __init__(self, config, model, env):
        super(BaselineTrainerAgent, self).__init__(config=config,
                                                   model=model,
                                                   env=env)

    def predict(self, state, *args, **kwargs):
        if self.assigned_action is not None:
            ac = list(self.assigned_action)
            self.assigned_action = None
            state = np.reshape(state, [1, -1])
            re = np.array(self.model.predict(state=state))
            if len(re) > len(ac):
                for i in range(len(ac), len(re)):
                    ac.append(re[i])
            return np.array(ac)
        else:
            ac = np.array(self.model.predict(state=state))
            if 'F1=0' in cfg.config_dict and cfg.config_dict['F1=0'] is True:
                ac[0] = 0.0
            if 'F2=0' in cfg.config_dict and cfg.config_dict['F2=0'] is True:
                ac[1] = 0.0
            return ac

    def init(self):
        self.model.init()
        super().init()

    def store_one_sample(self, state, next_state, action, reward, done, *arg,
                         **kwargs):
        # TODO store the one sample to whatever you want

        self.model.store_one_sample(state=state,
                                    next_state=next_state,
                                    action=action,
                                    reward=reward,
                                    done=done)
        self.log_file_content.append({
            'STATE': np.array(state).tolist(),
            'NEW_STATE': np.array(next_state).tolist(),
            'ACTION': np.array(action).tolist(),
            'REWARD': reward,
            'DONE': done,
            'INDEX': self.log_print_count
        })
        self.log_print_count += 1
Example no. 19
def abstractToAedat(event_list, output_file):
    c = Config()

    if c.aedat:
        version = c.config_data["aedat"]["version"]
    else:
        version = UI().objectUI.chooseWindow("Choose a version: ",
                                             cte.AEDAT_ACCEPTED_VERSIONS)

    if version == cte.AEDAT_ACCEPTED_VERSIONS[0]:
        abstractToAedat2(event_list, output_file)
    elif version == cte.AEDAT_ACCEPTED_VERSIONS[1]:
        abstractToAedat3(event_list, output_file)
    elif version == cte.AEDAT_ACCEPTED_VERSIONS[2]:
        abstractToAedat4(event_list, output_file)
Example no. 20
    def __init__(self, db_name: str = 'data_main'):
        """Constructor for Mongo()

        :param db_name: default database overwrite, defaults to 'data_main'
        :type db_name: str, optional
        """

        config = Config()

        client = pymongo.MongoClient(config.MONGO_URI)
        logger.info(f"Connected to MongoDB with {config.MONGO_URI}")
        db = client.get_database(db_name)
        logger.info(f"Connected to database {db_name}")

        self._client = client
        self._db = db
Example no. 21
class CryptoHelper:
    def __init__(self):
        logger.info("Initialize CryptoHelper")
        self._config = Config()

    def to_sha256(self, text):
        logger.info("Initialize encoding")

        text_encoded = text.encode()
        salt = self._config.get_salt()

        text_to_transform = text_encoded + salt.encode()

        sha256_text = hashlib.sha256(text_to_transform).hexdigest()

        return sha256_text
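The hashing step is equivalent to this standard-library sketch; the salt and text are placeholders for what Config supplies.

import hashlib

salt = 'pepper'
digest = hashlib.sha256(('secret' + salt).encode()).hexdigest()
# Identical to text.encode() + salt.encode() for UTF-8 strings.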
Example no. 22
class GridMapTurnCostEnvironment(GridMapEnvironment):
    key_list = Config.load_json(file_path=CONFIG_KEY + '/turnCostGridWorldEnvironmentKey.json')

    def __init__(self, config):
        super().__init__(config)
        self.turn_cost_dict = {
            "S": ("E", "N"),
            "N": ("W", "S"),
            "W": ("S", "E"),
            "E": ("N", "W")
        }

    def computer_reward(self, bound_hit_flag, old_state, new_state):
        reward = super().computer_reward(bound_hit_flag, old_state, new_state)
        if new_state['DIRECTION'] in self.turn_cost_dict[old_state['DIRECTION']]:
            print("Turn occurred!!!")
            reward -= self.config.config_dict['TURN_LEFT_COST']
        return reward
Example no. 23
    def __init__(self, cost_fn=None, config=None):
        super().__init__(config=config)
        if self.config is None:
            self.config = Config(standard_key_list=['REAL_ENVIRONMENT_STATUS',
                                                    'CYBER_ENVIRONMENT_STATUS', 'TEST_ENVIRONMENT_STATUS'])
        self.config.config_dict['REAL_ENVIRONMENT_STATUS'] = 1
        self.config.config_dict['CYBER_ENVIRONMENT_STATUS'] = 0
        self.config.config_dict['TEST_ENVIRONMENT_STATUS'] = 2

        self._test_data = SamplerData()
        self._cyber_data = SamplerData()
        self._real_data = SamplerData()
        self.cost_fn = cost_fn

        self.data = None

        self._env_status = None

        self.env_status = 1
Example no. 24
def main():
    # Args parse.
    (input_file, output_file, input_type, output_type, use_config,
     config_path, ui_type) = parseArguments()

    # Init config features.
    if use_config:
        Config(config_path)

    # Create UI.
    if ui_type == "graphic":
        UI("graphic")
    elif ui_type == "terminal":
        UI("terminal")

    # Init UI.
    try:
        UI().objectUI.initialWindow(convert, input_file, output_file,
                                    input_type, output_type, use_config,
                                    config_path)
    except Exception as e:
        UI().objectUI.errorWindow(e)
Example no. 25
class FixedOutputModel(Model):
    key_list = Config.load_json(file_path=CONFIG_KEY +
                                '/fixedOutputModelKey.json')

    def __init__(self, config):
        super(FixedOutputModel, self).__init__(config)

    def predict(self, sess=None, state=None):
        action = [0 for _ in range(self.config.config_dict['ACTION_SPACE'][0])]
        action[0] = self.config.config_dict['F1']
        action[1] = self.config.config_dict['PROB_SAMPLE_ON_REAL']
        action[2] = self.config.config_dict['PROB_TRAIN_ON_REAL']

        return np.array(action)

    def reset(self):
        pass

    def update(self, *args, **kwargs):
        pass

    def print_log_queue(self, status):
        pass
Example no. 26
def rosbagToAbstract(input_file):
    UI().objectUI.showMessage("Starting to read bag file", "w")
    bag = rosbag.Bag(input_file)
    c = Config()

    if c.rosbag:
        topic = c.config_data["rosbag"]["topic"]
    else:
        topics = bag.get_type_and_topic_info().topics
        topic = UI().objectUI.chooseWindow(
            "Which is the topic that contains the events?: ", topics)

    event_list = []

    num_progress = getNumProgress(bag.get_message_count(topic))
    i = 0
    for topic, msg, t in bag.read_messages(topics=topic):

        if i % num_progress == 0:
            UI().objectUI.sumProgress()
        i += 1

        aux_list = []
        if "EventArray" in str(type(msg)):  # msg._type
            aux_list = msg.events
        else:
            aux_list.append(msg)

        for event in aux_list:
            event_list.append(
                Event(event.x, event.y, event.polarity,
                      combine(event.ts.secs, event.ts.nsecs)))

    bag.close()
    UI().objectUI.sumProgress(True)
    UI().objectUI.showMessage("Finishing reading the bag file", "c")
    return event_list
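combine and secsToNsecs (used in Example no. 7) are not shown; plausible definitions, given that ROS timestamps split seconds and nanoseconds. These are assumptions, not the source's code.

def combine(secs, nsecs):
    # Merge a ROS (secs, nsecs) pair into float seconds.
    return secs + nsecs * 1e-9

def secsToNsecs(ts):
    # Convert float seconds back to integer nanoseconds.
    return int(ts * 1e9)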
Example no. 27
            n_units=self.config.config_dict['DENSE_LAYER_2_UNIT'],
            act=tf.nn.leaky_relu,
            name=name_prefix + 'DENSE_LAYER_2')

        net = tl.layers.DenseLayer(
            layer=net,
            n_units=self.config.config_dict['OUTPUT_DIM'],
            act=tf.nn.softmax,
            name=name_prefix + 'OUTPUT_LAYER')

        return net

    def create_training_method(self):
        # weight_decay = tf.add_n([self.config.config_dict['L2'] * tf.nn.l2_loss(var) for var in self.var_list])
        # loss = tf.reduce_mean(tf.square(self.label - self.net.outputs)) + weight_decay
        loss = tf.reduce_mean(tf.square(self.label - self.net.outputs))
        optimizer = tf.train.AdamOptimizer(
            self.config.config_dict['LEARNING_RATE'])
        return loss, optimizer


if __name__ == '__main__':
    from src.config.config import Config
    from src.configuration import CONFIG_PATH

    a = Config(config_dict=None,
               standard_key_list=DenseModel.standard_key_list)
    a.load_config(path=CONFIG_PATH + '/testDenseConfig.json')
    actor = DenseModel(config=a)
    pass
Example no. 28
class IntelligentTrainerAgent(Agent):
    key_list = Config.load_json(file_path=CONFIG_KEY +
                                '/intelligentTrainerAgentKey.json')

    def __init__(self, config, model, env):
        super(IntelligentTrainerAgent, self).__init__(config=config,
                                                      model=model,
                                                      env=env)
        self.sample_count = 0
        self.sess = tf.get_default_session()

    def predict(self, state, *args, **kwargs):
        if self.assigned_action is not None:
            ac = list(self.assigned_action)
            self.assigned_action = None
            state = np.reshape(state, [1, -1])
            re = np.array(self.model.predict(self.sess, state))
            if len(re) > len(ac):
                for i in range(len(ac), len(re)):
                    ac.append(re[i])
            self.sample_count += 1
            if 'F1=0' in cfg.config_dict and cfg.config_dict['F1=0'] is True:
                ac[0] = 0.0
            if 'F2=0' in cfg.config_dict and cfg.config_dict['F2=0'] is True:
                ac[1] = 0.0
            return np.array(ac)
        else:
            state = np.reshape(state, [1, -1])
            count = self.sample_count
            eps = 1.0 - (self.config.config_dict['EPS'] - self.config.config_dict['EPS_GREEDY_FINAL_VALUE']) * \
                  (count / self.config.config_dict['EPS_ZERO_FLAG'])
            if eps < 0:
                eps = 0.0
            rand_eps = np.random.rand(1)
            if self.config.config_dict[
                    'EPS_GREEDY_FLAG'] == 1 and rand_eps < eps:
                res = self.model.action_iterator[np.random.randint(
                    len(self.model.action_iterator))]
            else:
                res = np.array(self.model.predict(self.sess, state))
            if 'F1=0' in cfg.config_dict and cfg.config_dict['F1=0'] is True:
                res[0] = 0.0
            if 'F2=0' in cfg.config_dict and cfg.config_dict['F2=0'] is True:
                res[1] = 0.0
            self.sample_count += 1
            return res

    def update(self):
        # TODO finish your own update by using API with self.model
        self.model.update()

    def store_one_sample(self, state, next_state, action, reward, done, *arg,
                         **kwargs):
        # TODO store the one sample to whatever you want
        if self.model and hasattr(self.model, 'print_log_queue') and callable(
                self.model.print_log_queue):
            self.model.print_log_queue(status=self.status)
        self.model.store_one_sample(state=state,
                                    next_state=next_state,
                                    action=action,
                                    reward=reward,
                                    done=done)
        self.log_file_content.append({
            'STATE': np.array(state).tolist(),
            'NEW_STATE': np.array(next_state).tolist(),
            'ACTION': np.array(action).tolist(),
            'REWARD': reward,
            'DONE': done,
            'INDEX': self.log_print_count
        })
        self.log_print_count += 1

    def init(self):
        # TODO init your agent and your model
        # this function will be called at the start of the whole train process
        self.model.init()
Example no. 29
            n_units=self.config.config_dict['LSTM_DENSE_LAYER_2_UNIT'],
            act=tf.nn.tanh,
            name=name_prefix + 'LSTM_DENSE_LAYER_2')

        net = tl.layers.DenseLayer(
            layer=lstm_fc2,
            n_units=self.config.config_dict['MERGED_LAYER_1_UNIT'],
            act=tf.nn.relu,
            name=name_prefix + 'MERGED_DENSE_LAYER_1')
        net = tl.layers.DenseLayer(
            layer=net,
            n_units=self.config.config_dict['MERGED_LAYER_2_UNIT'],
            act=tf.nn.relu,
            name=name_prefix + 'MERGED_DENSE_LAYER_2')
        return net


if __name__ == '__main__':
    from src.config.config import Config
    from configuration import CONFIG_PATH

    a = Config(config_dict=None,
               standard_key_list=LSTMCritic.standard_key_list)
    a.load_config(path=CONFIG_PATH + '/testLSTMCriticconfig.json')
    critic = LSTMCritic(config=a)
    with tf.Session() as sess:
        with sess.as_default():
            tl.layers.initialize_global_variables(sess)
            critic.net.print_params()
    pass
Example no. 30
class DQNModel(TensorflowBasedModel):
    key_list = Config.load_json(file_path=None)

    def __init__(self, config, action_bound):
        super(DQNModel, self).__init__(config=config)
        self.proposed_action_list = []
        self.action_bound = action_bound
        action_list = []
        for i in range(len(action_bound[0])):
            low = action_bound[0][i]
            high = action_bound[1][i]
            action_list.append(
                np.arange(start=low,
                          stop=high,
                          step=(high - low) /
                          self.config.config_dict['ACTION_SPLIT_COUNT']))
        action_iterator = itertools.product(*action_list)
        self.action_selection_list = []
        for action_sample in action_iterator:
            self.action_selection_list.append(tf.constant(action_sample))

        self.reward_input = tf.placeholder(shape=[None, 1], dtype=tf.float32)

        self.state_input = tf.placeholder(
            shape=[None] + list(self.config.config_dict['STATE_SPACE']),
            dtype=tf.float32)
        self.next_state_input = tf.placeholder(
            shape=[None] + list(self.config.config_dict['STATE_SPACE']),
            dtype=tf.float32)
        self.action_input = tf.placeholder(
            shape=[None] + list(self.config.config_dict['ACTION_SPACE']),
            dtype=tf.float32)
        self.done_input = tf.placeholder(shape=[None, 1], dtype=tf.bool)
        self.input = tf.concat([self.state_input, self.action_input], axis=1)
        self.done = tf.cast(self.done_input, dtype=tf.float32)

        self.q_value_list = []
        var_list = None
        for action_sample in self.action_selection_list:
            # tf.concat takes a list of tensors plus an axis; the action
            # constant presumably needs tiling to the batch dimension first.
            q_net, q_output, var_list = NetworkCreator.create_network(
                input=tf.concat([self.state_input, action_sample], axis=1),
                network_config=self.config.config_dict['NET_CONFIG'],
                net_name=self.config.config_dict['NAME'])
            self.q_value_list.append(q_output)
        self.var_list = var_list

        self.target_q_value_list = []
        for action_sample in self.action_selection_list:
            q_net, q_output, var_list = NetworkCreator.create_network(
                input=tf.concat([self.next_state_input, action_sample], axis=1),
                network_config=self.config.config_dict['NET_CONFIG'],
                net_name='TARGET' + self.config.config_dict['NAME'])
            self.target_q_value_list.append(q_output)
        self.target_var_list = var_list

        self.loss, self.optimizer, self.optimize = self.create_training_method()
        self.update_target_q_op = self.create_target_q_update()
        self.memory = Memory(
            limit=1e100,
            action_shape=self.config.config_dict['ACTION_SPACE'],
            observation_shape=self.config.config_dict['STATE_SPACE'])
        self.sess = tf.get_default_session()

    def update(self):
        for i in range(self.config.config_dict['ITERATION_EVER_EPOCH']):
            batch_data = self.memory.sample(
                batch_size=self.config.config_dict['BATCH_SIZE'])
            loss, _ = self.sess.run(fetches=[self.loss, self.optimize],
                                 feed_dict={
                                     self.reward_input: batch_data['rewards'],
                                     self.action_input: batch_data['actions'],
                                     self.state_input: batch_data['obs0'],
                                     self.done_input: batch_data['terminals1']
                                 })

    def predict(self, obs, q_value):
        pass

    def print_log_queue(self, status):
        self.status = status
        while self.log_queue.qsize() > 0:
            log = self.log_queue.get()
            print("%s: Critic loss %f: " %
                  (self.name, log[self.name + '_CRITIC']))
            log['INDEX'] = self.log_print_count
            self.log_file_content.append(log)
            self.log_print_count += 1

    def create_training_method(self):
        l1_l2 = tfcontrib.layers.l1_l2_regularizer()
        loss = tf.reduce_sum((self.predict_q_value - self.q_output) ** 2) + \
               tfcontrib.layers.apply_regularization(l1_l2, weights_list=self.var_list)
        optimizer = tf.train.AdadeltaOptimizer(
            learning_rate=self.config.config_dict['LEARNING_RATE'])
        optimize_op = optimizer.minimize(loss=loss, var_list=self.var_list)
        return loss, optimizer, optimize_op

    def create_predict_q_value_op(self):

        predict_q_value = (1. - self.done) * self.config.config_dict['DISCOUNT'] * self.target_q_output \
                          + self.reward_input
        return predict_q_value

    def create_target_q_update(self):
        # Soft (Polyak) update: target <- decay * target + (1 - decay) * online.
        op = []
        for var, target_var in zip(self.var_list, self.target_var_list):
            new_val = self.config.config_dict['DECAY'] * target_var + (
                1.0 - self.config.config_dict['DECAY']) * var
            op.append(tf.assign(target_var, new_val))
        return op

    def store_one_sample(self, state, next_state, action, reward, done, *arg,
                         **kwargs):
        self.memory.append(obs0=state,
                           obs1=next_state,
                           action=action,
                           reward=reward,
                           terminal1=done)
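create_target_q_update implements the usual soft (Polyak) target update. In plain numpy the per-variable rule reads as follows; the decay value is illustrative.

import numpy as np

decay = 0.99
online = np.array([1.0, 2.0])
target = np.zeros(2)
target = decay * target + (1.0 - decay) * online  # -> [0.01, 0.02]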
Example no. 31
    def __init__(self, fileConfName="camio.conf"):
        Config.__init__(self, fileConfName)