    def learn(self, knowledge):
        # If the neuron list is about to run out of capacity, double its size
        if self._index_ready_to_learn == (len(self.neuron_list) - 1):
            # Fill the extension with new RelNeuron instances

            # 3.2.2.2 todo: parallel
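            # NOTE (assumption): the worker callables here are lambdas, which
            # multiprocessing.Pool cannot pickle; this code therefore assumes
            # Pool is a thread pool, e.g. imported as
            # "from multiprocessing.dummy import Pool".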
            # Detect system and determine threads number to use
            detect_system = DetectSystem()
            # Initialize the thread pool with the detected CPU count
            pool = Pool(detect_system.cpu_count())

            # Create the new neurons in parallel; the index argument is unused
            new_list = pool.map(lambda _index: RelNeuron(),
                                range(len(self.neuron_list)))
            pool.close()
            pool.join()
            # Sequential equivalent:
            # for index in range(len(self.neuron_list)):
            #     new_list.append(RelNeuron())
            self.neuron_list = self.neuron_list + new_list
        # Check whether a learned neuron already holds the given pair of knowledge ids
        for index in range(self._index_ready_to_learn):
            if self.neuron_list[index].has_ids(knowledge.get_h_id(),
                                               knowledge.get_s_id()):
                return False
        # If no neuron holds the given pair of ids, store it in the next free neuron
        self.neuron_list[self._index_ready_to_learn].learn(knowledge)
        self._index_ready_to_learn += 1
        return True
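    # Usage sketch (hypothetical values; assumes this class is the RelNetwork
    # used elsewhere in this file and that `knowledge` exposes the
    # get_h_id()/get_s_id() accessors seen above):
    #   net = RelNetwork(neuron_count=100)
    #   if net.learn(knowledge):
    #       pass  # new relation stored in the next free neuron
    #   else:
    #       pass  # the (h_id, s_id) pair was already known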
    def _filter_biology(self):

        # 3.3.3.1  todo: parallel
        # Helper executed in parallel for each input memory
        def _process_input(memory):
            # BCF for every memory is stored in the tail of the cultural group
            bcf = memory.get_tail_knowledge()
            distance = abs(
                (bcf.get_biology() + self.internal_state.get_biology()) / 2.0 -
                self.desired_state.get_biology())
            return distance, memory
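        # Worked example (illustrative values): with internal biology 0.5,
        # desired biology 1.0, and a memory whose BCF biology is 0.9, the
        # distance is |(0.9 + 0.5) / 2 - 1.0| = 0.3; the memory with the
        # smallest such distance is selected below.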

        # Initialize the pool with the detected processor count
        pool = Pool(DetectSystem().cpu_count())
        # Compute a (distance, memory) pair for every input in parallel
        distance_memory_pairs = pool.map(_process_input, self.inputs)
        pool.close()
        pool.join()
        # Keep the memory whose combined biology is closest to the desired state
        _, best_biology = min(distance_memory_pairs, key=lambda pair: pair[0])

        # Sequential equivalent:
        # best_biology = self.inputs[0]
        # bcf = best_biology.get_tail_knowledge()
        # min_distance = abs((bcf.get_biology() + self.internal_state.get_biology()) / 2.0
        #                    - self.desired_state.get_biology())
        # for memory in self.inputs:
        #     # The BCF of every memory is stored in the tail of its cultural group
        #     bcf = memory.get_tail_knowledge()
        #     distance = abs((bcf.get_biology() + self.internal_state.get_biology()) / 2.0
        #                    - self.desired_state.get_biology())
        #     if distance < min_distance:
        #         best_biology = memory
        #         min_distance = distance
        return best_biology
    def _filter_feelings(self):
        # 3.3.3.3 todo: parallel

        # Helper executed in parallel for each input memory
        def _process_input(memory):
            # BCF for every memory is stored in the tail of the cultural group
            bcf = memory.get_tail_knowledge()
            feeling = bcf.get_feelings()
            return feeling, memory

        # Initialize the pool with the detected processor count
        pool = Pool(DetectSystem().cpu_count())
        # Compute a (feeling, memory) pair for every input in parallel
        feeling_memory_pairs = pool.map(_process_input, self.inputs)
        pool.close()
        pool.join()
        # Keep the memory with the highest BCF feeling
        _, best_feelings = max(feeling_memory_pairs, key=lambda pair: pair[0])

        # Sequential equivalent:
        # best_feelings = self.inputs[0]
        # bcf = best_feelings.get_tail_knowledge()
        # max_feeling = bcf.get_feelings()
        # for memory in self.inputs:
        #     # The BCF of every memory is stored in the tail of its cultural group
        #     bcf = memory.get_tail_knowledge()
        #     if bcf.get_feelings() > max_feeling:
        #         best_feelings = memory
        #         max_feeling = bcf.get_feelings()
        return best_feelings
    def __init__(self, neuron_count):
        # Create the neuron list, filled with new RelNeuron instances

        # 3.2.2.1 todo: parallel
        # Detect system and determine threads number to use
        detect_system = DetectSystem()
        # Initialize the thread pool with the detected CPU count
        pool = Pool(detect_system.cpu_count())

        # Create the neurons in parallel; the index argument is unused
        self.neuron_list = pool.map(lambda _index: RelNeuron(),
                                    range(neuron_count))
        pool.close()
        pool.join()
        # Sequential equivalent:
        # for index in range(neuron_count):
        #     self.neuron_list.append(RelNeuron())
        # Index of the next neuron that is ready to learn
        self._index_ready_to_learn = 0
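        # Note: Pool.map returns a plain list (in both multiprocessing and
        # multiprocessing.dummy), so neuron_list still supports the len() and
        # indexing operations the rest of the class relies on.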
    def retrieve_exact_memory(self, trigger):
        # Use bbcc protocol
        self.bum()
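        # CAUTION (assumption): the bbcc protocol (bum, bip..., check) is
        # stateful, so mapping bip() in parallel is only safe if the order in
        # which the bip() calls complete does not matter here.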

        # 3.2.7.1 TODO: parallel
        # Initialize the pool with the detected processor count
        pool = Pool(DetectSystem().cpu_count())
        # Feed every trigger element except the last through bip()
        pool.map(lambda index: self.bip(trigger[index]),
                 range(len(trigger) - 1))
        pool.close()
        pool.join()
        # The last element goes through check(), which yields the memory index
        return self.group_list[self.check(trigger[-1])]
    def resize(self):
        # Double capacity: extend the group list with new CulturalGroup instances
        # 3.2.5.2  todo: parallel

        # Initialize the pool with the detected processor count
        pool = Pool(DetectSystem().cpu_count())
        # Create the new groups in parallel; the index argument is unused
        new_list = pool.map(lambda _index: CulturalGroup(),
                            range(len(self.group_list)))
        pool.close()
        pool.join()
        # Sequential equivalent:
        # for index in range(len(self.group_list)):
        #     new_list.append(CulturalGroup())
        self.group_list = self.group_list + new_list
    def __init__(self, group_count=1):
        # 3.2.5.1 todo: parallel

        # Initialize the pool with the detected processor count
        pool = Pool(DetectSystem().cpu_count())
        # Create the groups in parallel; the index argument is unused
        self.group_list = pool.map(lambda _index: CulturalGroup(),
                                   range(group_count))
        pool.close()
        pool.join()
        # Sequential equivalent:
        # for index in range(group_count):
        #     self.group_list.append(CulturalGroup())
        self._index_ready_to_learn = 0
        self._clack = False
        self._recognized_indexes = []
    def get_sight_rels(self, s_id):
        # 3.2.2.3 todo: parallel
        # Detect the system and create the thread pool
        pool = Pool(DetectSystem().cpu_count())

        # For every learned neuron, return its knowledge if it recognizes the
        # sight id, otherwise None; the Nones are dropped below
        sight_rels = pool.map(
            lambda index: self.neuron_list[index].get_knowledge()
            if self.neuron_list[index].recognize_sight(s_id) else None,
            range(self._index_ready_to_learn))
        pool.close()
        pool.join()
        # Drop the Nones, keeping a plain list under both Python 2 and 3
        # (a bare filter() would return an iterator under Python 3)
        sight_rels = [rel for rel in sight_rels if rel is not None]
        # Sequential equivalent:
        # sight_rels = []
        # for index in range(self._index_ready_to_learn):
        #     if self.neuron_list[index].recognize_sight(s_id):
        #         sight_rels.append(self.neuron_list[index].get_knowledge())
        return sight_rels
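    # Usage sketch (hypothetical sight id):
    #   for rel in net.get_sight_rels(s_id=7):
    #       h_id = rel.get_h_id()  # hearing id related to the given sight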
    def __init__(self):
        # Desired state
        self.desired_state = InternalState()
        self.desired_state.set_state([0.5, 1, 1])
        # Initial internal state
        self.internal_state = InternalState([0.5, 0.5, 0.5])

        # Decision by prediction network
        self.decision_prediction_block = DecisionByPredictionBlock()
        self.decision_prediction_block.set_desired_state(self.desired_state)
        self.decision_prediction_block.set_internal_state(self.internal_state)

        # DEFAULT TRAINING, IT CAN LATER BE OVERRIDDEN
        # Create a random training set so that the net can learn the relation
        # prediction = (ei + choice.bcf) / 2; a minimum of 18 points is
        # required, so 20 are generated below
        # 3.2.4.1 todo: parallelize

        # Helper executed in parallel to generate one training pair
        def _generate_training(_index):
            ei = [random.random(), random.random(), random.random()]
            choice_bcf = [random.random(), random.random(), random.random()]
            prediction = [ei_j / 2.0 + choice_bcf_j / 2.0 for ei_j, choice_bcf_j in zip(ei, choice_bcf)]
            return ei + choice_bcf, prediction
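        # Worked example (illustrative values): ei = [0.2, 0.4, 0.6] and
        # choice_bcf = [0.8, 0.0, 0.2] yield the training pair
        # ([0.2, 0.4, 0.6, 0.8, 0.0, 0.2], [0.5, 0.2, 0.4]), since each
        # prediction component is the average of the matching ei and
        # choice_bcf components.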

        # Initialize the pool with the detected processor count
        pool = Pool(DetectSystem().cpu_count())
        # Generate the 20 training pairs in parallel
        training_set = pool.map(_generate_training, range(20))
        pool.close()
        pool.join()

        # Sequential equivalent:
        # training_set = []
        # for index in range(20):
        #     ei = [random.random(), random.random(), random.random()]
        #     choice_bcf = [random.random(), random.random(), random.random()]
        #     prediction = [ei_j / 2.0 + choice_bcf_j / 2.0 for ei_j, choice_bcf_j in zip(ei, choice_bcf)]
        #     training_set.append((ei + choice_bcf, prediction))

        # Remodel predictive net
        self.decision_prediction_block.remodel_predictive_net(training_set)

        self._inputs = None
        self._new_inputs = False
        self.decision = None
        self._last_decision_type = None
        self._last_selected_input = None
        self._last_decision_internal_state = None
    def get_output_memory(self):
        self.unconscious_block.set_internal_state(self.internal_state)
        self.unconscious_block.set_desired_state(self.desired_state)
        self.unconscious_block.set_inputs(self.input_memories)
        self.unconscious_output = self.unconscious_block.get_outputs()
        self.conscious_block.set_desired_state(self.desired_state)
        self.conscious_block.set_internal_state(self.internal_state)
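        # Flow recap: the unconscious block pre-filters the input memories;
        # the conscious block then decides over their BCF tails, and the
        # chosen index selects the final output memory below.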
        # 3.2.6.1  todo: parallel
        # Initialize the pool with the detected processor count
        pool = Pool(DetectSystem().cpu_count())
        # Extract the BCF tail of every unconscious output in parallel
        conscious_inputs = pool.map(lambda memory: memory.get_tail_knowledge(),
                                    self.unconscious_output)
        pool.close()
        pool.join()
        # Sequential equivalent:
        # conscious_inputs = []
        # for memory in self.unconscious_output:
        #     conscious_inputs.append(memory.get_tail_knowledge())
        self.conscious_block.set_inputs(conscious_inputs)
        conscious_output_index = self.conscious_block.get_decision()
        self.conscious_output = self.unconscious_output[conscious_output_index]
        return self.conscious_output
    def __init__(self):
        grid_size = 16
        # HEURISTICS: radius = (1/3) * 2^(ENCODING_SIZE),
        # where ENCODING_SIZE is the bit size of every pattern element (8 bits here)
        radius = 24
        # Calculate the pattern size from grid_size and the size of a nibble (4);
        # integer division keeps pattern_size an int under Python 3 as well
        pattern_size = pow(grid_size, 2) // 4
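        # Worked arithmetic: a 16x16 grid gives 16^2 = 256 elements; packed
        # as 4-bit nibbles that is 256 / 4 = 64, so pattern_size = 64.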
        # Set neural network data size
        RbfNetwork.PATTERN_SIZE = pattern_size
        # Set neural network default radius
        RbfNetwork.DEFAULT_RADIUS = radius
        # Set pattern size in RBF knowledge
        RbfKnowledge.PATTERN_SIZE = pattern_size

        # If the persistent-memory files do not exist yet, create them
        if not os.path.isfile("persistent_memory/sight_snb.p"):
            self.erase_all_knowledge()

        # 3.2.1.1 TODO: use detected processor number, and equation logic 3.1.3.
        # Detect the system and determine the number of threads to use
        detect_system = DetectSystem()
        # Initialize the thread pool with the determined thread count
        pool = Pool(detect_system.thread_number(12))
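        # Note (assumption about thread_number's semantics): the argument 12
        # matches the number of persistent blocks deserialized below, so each
        # load can run on its own thread.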

        # Submit every load first and collect the results afterwards, so the
        # deserializations can actually overlap instead of serializing on an
        # immediate .get() after each apply_async()

        # SNB
        snb_async = pool.apply_async(
            lambda: SensoryNeuralBlock("persistent_memory/sight_snb.p",
                                       "persistent_memory/hearing_snb.p"))
        # Relational Neural Block
        rnb_async = pool.apply_async(lambda: RelNetwork.deserialize("persistent_memory/rnb.p"))
        # Analytical neuron
        analytical_n_async = pool.apply_async(lambda: AnalyticalNeuron())
        # Addition by memory network
        am_net_async = pool.apply_async(lambda: CulturalNetwork.deserialize("persistent_memory/am_net.p"))
        # Geometric Neural Block
        gnb_async = pool.apply_async(lambda: GeometricNeuralBlock.deserialize("persistent_memory/gnb.p"))
        # Syllables net
        syllables_net_async = pool.apply_async(lambda: CulturalNetwork.deserialize("persistent_memory/syllables_net.p"))
        # Words net
        words_net_async = pool.apply_async(lambda: CulturalNetwork.deserialize("persistent_memory/words_net.p"))
        # Sight-Syllables rel network
        ss_rnb_async = pool.apply_async(lambda: RelNetwork.deserialize("persistent_memory/ss_rnb.p"))

        # ################### INTENTIONS MODULES ########################################################################
        episodic_memory_async = pool.apply_async(lambda: EpisodicMemoriesBlock.deserialize("persistent_memory/episodic_memory.p"))
        decisions_block_async = pool.apply_async(lambda: DecisionsBlock.deserialize("persistent_memory/decisions_block.p"))
        internal_state_async = pool.apply_async(lambda: InternalState.deserialize("persistent_memory/internal_state.p"))
        desired_state_async = pool.apply_async(lambda: InternalState.deserialize("persistent_memory/desired_state.p"))

        # Collect the loaded blocks
        self.snb = snb_async.get()
        self.rnb = rnb_async.get()
        self.analytical_n = analytical_n_async.get()
        self.am_net = am_net_async.get()
        self.gnb = gnb_async.get()
        self.syllables_net = syllables_net_async.get()
        self.words_net = words_net_async.get()
        self.ss_rnb = ss_rnb_async.get()
        self.episodic_memory = episodic_memory_async.get()
        self.decisions_block = decisions_block_async.get()
        self.internal_state = internal_state_async.get()
        self.desired_state = desired_state_async.get()
        pool.close()
        pool.join()

        # Internal state "ports" (three-component real-valued vector)
        self._internal_state_in = None

        # Memory that stores short term bip inputs for making a decision
        self._intentions_short_term_memory = []
        self._output_memory = None
        # ###############################################################################################################

        # bbcc protocol flags (word and syllable learning)
        self._learning_words = False
        self._learning_syllables = False
        self._enable_bbcc = False

        # Output "ports" (related to senses)
        self.s_knowledge_out = None
        self.h_knowledge_out = None

        # Input "ports" (senses)
        self.s_knowledge_in = None
        self.h_knowledge_in = None

        self._working_domain = "ADDITION"
        self.state = "MISS"