def __init__(self):
    """Create an empty network: a fresh container, no samples, all counters zeroed."""
    self.container = NeuroContainer()
    self.samples = []
    # Bookkeeping for the training loop.
    self.current_tick = 0
    self.current_epoch = 0
    self.num_epochs = 0
    self.batch_size = 5  # default mini-batch size
def __init__(self, environment: Environment, **kwargs):
    """Attach the agent to *environment*, set up its data feed, and build zones."""
    # Local import: avoids a circular dependency at module load time.
    from lang.assembly_builder import AssemblyBuilder

    self.config = kwargs
    self.container = NeuroContainer(self)

    provider = DataProvider(environment.filename)
    environment.scenario_length = provider.scenario_length
    self.assembly_builder = AssemblyBuilder(agent=self, data_provider=provider)

    self.samples = []
    self.environment = environment
    self.loop_ended = False
    self._messages = []  # pending inter-area messages

    self.init_zones()

    # Neurotransmitter schedules: ticks at which dopamine / cortisol act.
    self.doped_ticks = []
    self.stressed_ticks = []
def main():
    """Build a two-layer clump network, fire a random pattern, and run it."""
    container = NeuroContainer()

    sensory_layer = Layer(container=container)
    sensory_layer.allocate_neurons()
    sensory_layer.fire_random_pattern(28)

    # Kept for debugging a specific neuron; the potential override stays disabled.
    troubled_neuron = container.get_neuron_by_id('27')
    # troubled_neuron.potential = 1

    clumps = ClumpLayer(container=container)
    clumps.allocate_clumps()
    clumps.connect_to_layer(sensory_layer)

    network = Network(container=container)
    network.run(max_ticks=15)
class NetworkBuilder:
    """Builds a clump network from a text file of whitespace-separated tokens."""

    def __init__(self):
        self.nodes = []
        self.stop_words = ['a', 'is', 'in']  # tokens to skip entirely
        self.container = NeuroContainer()

    @staticmethod
    def load_list_from_file(filename):
        """Return the stripped lines of *filename* as a list of strings.

        Bug fix: the original definition took ``filename`` without ``self``
        and was not a ``@staticmethod``, so instance calls would have broken.
        """
        with open(filename, 'r', encoding='utf-8') as file:
            return [line.strip() for line in file]

    def build_net(self, filename):
        """Create (or reuse) one clump per non-stop-word token in *filename*.

        Bug fix: the original called ``Builder.load_list_from_file`` —
        ``Builder`` is undefined here and raised NameError; call the method
        on ``self`` instead.
        """
        lines = self.load_list_from_file(filename)
        for line in lines:
            for token in line.split():
                if token in self.stop_words:
                    continue
                self._check_make_clump(token)
            # NOTE(review): the original sketched (commented-out) logic to
            # group >2 clumps per line into a higher-level node; it was never
            # finished and is intentionally not reproduced here.

    def _check_make_clump(self, token):
        """Return the container's clump for *token*, allocating one if absent."""
        clump = self.container.get_clump_by_pattern(token)
        if not clump:
            clump = Clump(self.container.next_clump_id(),
                          pattern=token,
                          container=self.container,
                          abstract=False)
            clump.allocate_neurons()
            self.container.append_clump(clump)
        return clump

    def store(self, filename):
        """Serialize the container's clumps, neurons, and synapses to *filename*."""
        out_val = {
            'clumps': self.container.clumps,
            'neurons': self.container.neurons,
            'synapses': self.container.synapses
        }
        with open(filename, mode='wt', encoding='utf-8') as output_file:
            print(json_serialize(out_val), file=output_file)
def __init__(self, environment: Environment, **kwargs):
    """Attach the agent to *environment*, register it on the container, build zones."""
    # Local import: avoids a circular dependency at module load time.
    from lang.assembly_builder import AssemblyBuilder

    self.config = kwargs
    self.container = NeuroContainer(self)
    self.container.network = self  # back-reference for container-side lookups

    provider = DataProvider(environment.filename)
    environment.scenario_length = provider.scenario_length
    self.assembly_builder = AssemblyBuilder(agent=self, data_provider=provider)

    self.samples = []
    self.num_epochs = 0
    self.environment = environment
    self.loop_ended = False

    # GABA release state: when active, on_negative_reward neurons may fire.
    self._gaba_release = False
    self._gaba_release_start_tick = 0

    self._messages = []  # pending inter-area messages

    self.init_zones()

    # Neurotransmitter schedules: ticks at which dopamine / cortisol act.
    self.doped_ticks = []
    self.stressed_ticks = []
class Agent:
    """ A baby-learner agent """

    def __init__(self, environment: Environment, **kwargs):
        # Local import: avoids a circular dependency at module load time.
        from lang.assembly_builder import AssemblyBuilder
        self.config = kwargs
        self.container = NeuroContainer(self)
        dp = DataProvider(environment.filename)
        environment.scenario_length = dp.scenario_length
        self.assembly_builder = AssemblyBuilder(agent=self, data_provider=dp)
        self.samples = []
        self.environment = environment
        self.loop_ended = False
        self._messages = []  # pending inter-area messages, drained each tick
        self.init_zones()
        # Neurotransmitter schedules: ticks at which dopamine / cortisol act.
        self.doped_ticks = []
        self.stressed_ticks = []

    def reset(self):
        """Rewind the simulation clock on the container."""
        self.container.current_tick = 0

    def queue_message(self, msg_name: str, data: dict = None):
        """Append an InterAreaMessage named *msg_name* carrying *data* to the queue."""
        msg = InterAreaMessage(msg_name)
        msg.data = data
        self._messages.append(msg)

    def update(self):
        """Advance one tick: announce it, load assemblies, then update state."""
        self.queue_message('on_tick_beginning')
        self.load_assemblies()
        self._update_state()

    def build_predefined_assemblies(self):
        """Ask every zone to create its predefined assemblies."""
        for zone in self.container.zones:
            zone.build_predefined_assemblies()

    def _handle_message_queue(self):
        """Offer each queued message to the areas (first handler wins), then drop
        the whole queue — handled or not.

        Cleanup: the original also collected handled messages into a
        ``messages_to_delete`` list that was never used; removed.
        """
        for msg in self._messages:
            for area in self.container.areas:
                if area.handle_message(msg):
                    break
        self._messages.clear()

    def load_assemblies(self):
        """Feed this tick's assembly source (if any) to every zone."""
        current_tick = self.environment.current_tick
        assembly_source = self.assembly_builder.get_assembly_source(
            current_tick)
        if assembly_source:
            if self.environment.verbosity > 0:
                print()
                print(f'Scenario line: {assembly_source.source_line}')
            for zone in self.container.zones:
                zone.prepare_assemblies(assembly_source, current_tick)

    def _update_state(self):
        """Run one full simulation step: zones, assemblies, connections, chemistry."""
        self._handle_message_queue()
        for zone in self.container.zones:
            zone.before_assemblies_update(self.environment.current_tick)
        self._handle_message_queue()
        for na in self.container.assemblies:
            na.update(self.environment.current_tick)
        for conn in self.container.connections:
            conn.update()
        self._handle_message_queue()
        self.assembly_builder.build_new_assemblies()
        self.absorb_neurotransmitters()
        if self.environment.verbosity > 0:
            self._report_fired_assemblies()

    def _report_fired_assemblies(self):
        """Report fired assemblies and inhibited areas through the environment."""
        for na in self.container.assemblies:
            if na.fired:
                self.environment.report_on_area(
                    na.area, f'area {na.area} assembly {na} fired')
        for area in self.container.areas:
            if self.environment.current_tick in area.inhibited_at_ticks:
                self.environment.report_on_area(area, f'area {area} is inhibited')

    def utter(self, utterance: str):
        """Send *utterance* to the environment on behalf of this agent."""
        self.environment.receive_utterance(self, utterance)

    def receive_dope(self):
        """Schedule a dopamine release for the next tick."""
        self.doped_ticks.append(self.environment.current_tick + 1)

    def absorb_neurotransmitters(self):
        """Apply any dopamine / cortisol scheduled for the current tick."""
        # Dopamine-induced excitation
        if self.environment.current_tick in self.doped_ticks:
            for zone in self.container.zones:
                zone.receive_dope()
        # Stress-induced inhibition
        if self.environment.current_tick in self.stressed_ticks:
            for zone in self.container.zones:
                zone.receive_cortisol()

    def current_assembly_source(self):
        """Return the latest assembly source at or before the current tick.

        NOTE(review): assumes iterating ``data_provider`` yields ticks in
        ascending order — confirm against DataProvider.
        """
        current_tick = self.environment.current_tick
        assembly_source = None
        for tick in self.assembly_builder.data_provider:
            if tick > current_tick:
                return assembly_source
            assembly_source = self.assembly_builder.data_provider[tick]
        return assembly_source

    def save_model(self, filename):
        """Dump the container's neurons and synapses to *filename* as JSON."""
        out_val = {
            'neurons': self.container.neurons,
            'synapses': self.container.synapses
        }
        with open(filename, mode='wt', encoding='utf-8') as output_file:
            print(json_serialize(out_val), file=output_file)

    def get_state(self):
        """One-line dump of every neuron, prefixed with the current tick.

        Bug fix: the original read ``self.current_tick``, which is never set
        on Agent (AttributeError); the tick is kept on the container.
        """
        state = ' '.join(str(neuron) for neuron in self.container.neurons)
        return '{}: {}'.format(str(self.container.current_tick), state)

    def init_zones(self):
        """Create all processing zones, wire them together, and register them."""
        # zones
        pr = PhoneticRecognitionZone(agent=self)
        vr = VisualRecognitionZone(agent=self)
        vl = VisualLexiconZone(agent=self)
        nvo = NamedVisualObjectsZone(agent=self)
        nvo.connect_to(vr, pr)
        vl.connect_to([nvo, vr])
        phrase_integrator = PhraseIntegratorZone(agent=self)
        phrase_integrator.connect_to(pr, nvo)
        phrase_enc = PhraseEncoderZone(agent=self)
        phrase_enc.connect_to(phrase_integrator)
        speech_production = SpeechProductionZone(agent=self)
        speech_production.connect_to([vl])
        self.container.add_zone(vr)
        self.container.add_zone(pr)
        self.container.add_zone(vl)
        self.container.add_zone(phrase_enc)
        self.container.add_zone(phrase_integrator)
        self.container.add_zone(nvo)
        self.container.add_zone(speech_production)

        # Gates: lexicon output gated into speech production
        vl_syntax_gate = NeuralGate(agent=self, source=vl.output_area,
                                    target=speech_production.input_area)
        self.container.add_gate(vl_syntax_gate)

        # controller zones
        # speech_controller = SpeechControllerZone(agent=self)
        phrase_repeater_controller = PhraseRepeaterControllerZone(agent=self)
        phrase_repeater_controller.connect_to_sensors(pr.output_areas())
        speech_production.connect_to([phrase_repeater_controller])
        self.container.add_zone(phrase_repeater_controller)

        vl_controller = VisualLexiconControllerZone(agent=self)
        vl_controller.connect_to_sensors([vl.output_tone_area])
        vl_controller.connect_to_gate(vl_syntax_gate)
        vl_controller.connect_to_master_action(phrase_repeater_controller)
        self.container.add_zone(vl_controller)
class Agent:
    """ A baby-learner agent """

    def __init__(self, environment: Environment, **kwargs):
        # Local import: avoids a circular dependency at module load time.
        from lang.assembly_builder import AssemblyBuilder
        self.config = kwargs
        self.container = NeuroContainer(self)
        self.container.network = self  # back-reference for container-side lookups
        dp = DataProvider(environment.filename)
        environment.scenario_length = dp.scenario_length
        self.assembly_builder = AssemblyBuilder(agent=self, data_provider=dp)
        self.samples = []
        self.num_epochs = 0
        self.environment = environment
        self.loop_ended = False
        # GABA release state: when active, on_negative_reward neurons may fire.
        # (A gaba_release property pair existed but was commented out in the
        # original; the raw attributes are kept for compatibility.)
        self._gaba_release = False
        self._gaba_release_start_tick = 0
        self._messages = []  # pending inter-area messages, drained each tick
        self.init_zones()
        # Neurotransmitter schedules: ticks at which dopamine / cortisol act.
        self.doped_ticks = []
        self.stressed_ticks = []

    def reset(self):
        """Rewind the simulation clock on the container."""
        self.container.current_tick = 0

    def queue_message(self, msg_name: str, data: dict = None):
        """Append an InterAreaMessage named *msg_name* carrying *data* to the queue."""
        msg = InterAreaMessage(msg_name)
        msg.data = data
        self._messages.append(msg)

    def update(self):
        """Advance one tick: announce it, load assemblies, then update state."""
        self.queue_message('on_tick_beginning')
        self.load_assemblies()
        self._update_state()

    def build_predefined_assemblies(self):
        """Ask every zone to create its predefined assemblies."""
        for zone in self.container.zones:
            zone.build_predefined_assemblies()

    def _handle_message_queue(self):
        """Offer each queued message to the areas (first handler wins), then drop
        the whole queue — handled or not.

        Cleanup: the original also collected handled messages into a
        ``messages_to_delete`` list that was never used; removed.
        """
        for msg in self._messages:
            for area in self.container.areas:
                if area.handle_message(msg):
                    break
        self._messages.clear()

    def load_assemblies(self):
        """Feed this tick's assembly source (if any) to every zone."""
        current_tick = self.environment.current_tick
        assembly_source = self.assembly_builder.get_assembly_source(
            current_tick)
        if assembly_source:
            for zone in self.container.zones:
                zone.prepare_assemblies(assembly_source, current_tick)

    def _update_state(self):
        """Run one full simulation step: zones, assemblies, connections, chemistry."""
        self._handle_message_queue()
        for zone in self.container.zones:
            zone.before_assemblies_update(self.environment.current_tick)
        self._handle_message_queue()
        for na in self.container.assemblies:
            na.update(self.environment.current_tick)
        for conn in self.container.connections:
            conn.update()
        self._handle_message_queue()
        self.assembly_builder.build_new_assemblies()
        self.absorb_neurotransmitters()
        if self.environment.verbosity > 0:
            self._report_fired_assemblies()

    def _report_fired_assemblies(self):
        """Print fired assemblies and areas inhibited at the current tick."""
        for na in self.container.assemblies:
            if na.fired:
                print(f'area {na.area} assembly {na} fired')
        for area in self.container.areas:
            if self.environment.current_tick in area.inhibited_at_ticks:
                print(f'area {area} is inhibited')

    def utter(self, utterance: str):
        """Send *utterance* to the environment on behalf of this agent."""
        self.environment.receive_utterance(self, utterance)

    def receive_dope(self):
        """Schedule a dopamine release for the next tick."""
        self.doped_ticks.append(self.environment.current_tick + 1)

    def absorb_neurotransmitters(self):
        """Apply any dopamine / cortisol scheduled for the current tick."""
        # Dopamine-induced excitation
        if self.environment.current_tick in self.doped_ticks:
            for zone in self.container.zones:
                zone.receive_dope()
        # Stress-induced inhibition
        if self.environment.current_tick in self.stressed_ticks:
            for zone in self.container.zones:
                zone.receive_cortisol()

    def save_model(self, filename):
        """Dump the container's neurons and synapses to *filename* as JSON."""
        out_val = {
            'neurons': self.container.neurons,
            'synapses': self.container.synapses
        }
        with open(filename, mode='wt', encoding='utf-8') as output_file:
            print(json_serialize(out_val), file=output_file)

    def get_state(self):
        """One-line dump of every neuron, prefixed with the current tick.

        Bug fix: the original read ``self.current_tick``, whose assignment is
        commented out in ``__init__`` (AttributeError); the tick is kept on
        the container.
        """
        state = ' '.join(str(neuron) for neuron in self.container.neurons)
        return '{}: {}'.format(str(self.container.current_tick), state)

    def clear_state(self):
        """Zero every neuron, stop all synapse pulses, and wipe histories."""
        for neuron in self.container.neurons:
            neuron.potential = 0
            neuron.history.clear()
        for synapse in self.container.synapses:
            synapse.pulsing = False
        for sab in self.container.sabs:
            sab.history.clear()

    def init_zones(self):
        """Create all processing zones, wire them together, and register them."""
        # zones
        pr = PhoneticRecognitionZone(agent=self)
        phrase_rec = PhraseRecognitionZone(agent=self)
        phrase_rec.connect_to(pr)
        thought_controller = ThoughtControllerZone(agent=self)
        # syntax_production = SyntaxProductionZone(agent=self)
        vr = VisualRecognitionZone(agent=self)
        vl = VisualLexiconZone(agent=self)
        semantic = SemanticStorageZone(agent=self)
        semantic.connect_to(vr, pr)
        vl.connect_to([semantic, vr])
        # syntax_production.connect_to([vl])
        speech_production = SpeechProductionZone(agent=self)
        # speech_production.connect_to([syntax_production])
        speech_production.connect_to([vl])
        self.container.add_zone(vr)
        self.container.add_zone(pr)
        self.container.add_zone(phrase_rec)
        self.container.add_zone(thought_controller)
        # self.container.add_zone(syntax_production)
        self.container.add_zone(semantic)
        self.container.add_zone(speech_production)

        # Gates: lexicon output gated into speech production
        # vl_syntax_gate = NeuralGate(agent=self, source=vl.output_area, target=syntax_production.input_area)
        vl_syntax_gate = NeuralGate(agent=self, source=vl.output_area,
                                    target=speech_production.input_area)
        self.container.add_gate(vl_syntax_gate)

        # controller zones
        speech_controller = SpeechControllerZone(agent=self)
        speech_controller.connect_to_sensors(vl.output_areas())
        speech_controller.connect_to_gate(vl_syntax_gate)
        self.container.add_zone(speech_controller)
class NeuralNetwork:
    """A spiking network trained by batched random-threshold search plus
    reward/punishment weight updates."""

    def __init__(self):
        self.container = NeuroContainer()
        self.samples = []
        # Bookkeeping for the training loop.
        self.current_tick = 0
        self.current_epoch = 0
        self.num_epochs = 0
        self.batch_size = 5  # NOTE(review): unused — fit() reads module BATCH_SIZE

    def load(self, filename):
        """Load the container and the training samples from a JSON model file."""
        self.container.load(filename)
        with open(filename, 'r', encoding='utf-8') as data_file:
            content = json.load(data_file)
        self.samples = content['samples']

    def fit(self, num_epochs=10, verbose=True):
        """Train for *num_epochs* epochs in batches of BATCH_SIZE.

        Each batch runs with one random threshold assignment; the assignment
        with the most correct results across batches is kept at the end.
        """
        self.num_epochs = num_epochs
        # Floor division replaces int(num_epochs / BATCH_SIZE) — same result,
        # clearer intent.
        number_of_batches = num_epochs // BATCH_SIZE
        batch_thresholds = []
        thresholds_are_set = False
        for batch in range(number_of_batches):
            # Re-roll thresholds at the start of every batch after the first;
            # the first assignment happens lazily below, after firing inputs.
            if thresholds_are_set:
                thresholds = self.container.assign_random_thresholds()
            results = []
            for _ in range(BATCH_SIZE):
                self.current_epoch += 1
                for sample in self.samples:
                    self.fire_input(sample)
                    if not thresholds_are_set:
                        thresholds = self.container.assign_random_thresholds()
                        thresholds_are_set = True
                    result = self._fit_on_sample(sample, verbose)
                    results.append(int(result))
            batch_thresholds.append((thresholds, sum(results)))
        # Keep the threshold set that produced the most correct results.
        batch_thresholds.sort(key=lambda x: x[1], reverse=True)
        thresholds = batch_thresholds[0][0]
        self.container.set_thresholds(thresholds)

    def _fit_on_sample(self, sample, verbose):
        """Reset histories and run the network once on *sample*."""
        self._reset_histories()
        return self._run(sample, verbose=verbose)

    def _run(self, sample, max_ticks=10, verbose=True):
        """Tick the network until a result is reached or *max_ticks* elapse,
        then apply reward or punishment. Returns True on a correct result."""
        loop_ended = False
        result = False
        self.current_tick = 0
        while self.current_tick <= max_ticks and not loop_ended:
            self.current_tick += 1
            self._update_step()
            loop_ended, result = self._check_result(sample)
        if verbose:
            print(f'epoch {self.current_epoch}/{self.num_epochs}: {result}')
        if result:
            self._update_on_reward()
        else:
            self._update_on_punishment()
        return result

    def _update_on_reward(self):
        """Strengthen synapses and adapt thresholds after a correct result."""
        for synapse in self.container.synapses:
            synapse.update_weight(LEARNING_RATE)
        for neuron in self.container.neurons:
            neuron.update_threshold()

    def _update_on_punishment(self):
        """Weaken synapses after an incorrect result (thresholds untouched)."""
        for synapse in self.container.synapses:
            synapse.update_weight(-LEARNING_RATE)
        # for neuron in self.container.neurons:
        #     neuron.update_threshold()

    def _reset_histories(self):
        """Clear firing/pulse histories on every synapse and neuron."""
        for synapse in self.container.synapses:
            synapse.reset_history()
        for neuron in self.container.neurons:
            neuron.reset_history()

    def _check_result(self, sample):
        """Compare the output neuron's state against the sample's expectation.

        A '~'-prefixed output id means the neuron must NOT fire.
        Returns (loop_ended, correct).
        """
        output = sample['output']
        negative = False
        if output.startswith('~'):
            negative = True
            output = output[1:]
        neuron = self.container.get_neuron_by_id(output)
        if negative:
            # Firing ends the loop but counts as a failure.
            if neuron.fired:
                return True, False
            return False, True
        if neuron.fired:
            return True, True
        return False, False

    def _update_step(self):
        """Advance every neuron, then every synapse, by one tick."""
        for neuron in self.container.neurons:
            neuron.update()
        for synapse in self.container.synapses:
            synapse.update()

    def fire_input(self, sample):
        """Force-fire every input neuron listed in *sample*."""
        for nrn in sample['input']:
            neuron = self.container.get_neuron_by_id(nrn)
            neuron.initial = True
            neuron.fire()

    def save_model(self, filename):
        """Dump the container's neurons and synapses to *filename* as JSON."""
        out_val = {'neurons': self.container.neurons,
                   'synapses': self.container.synapses}
        with open(filename, mode='wt', encoding='utf-8') as output_file:
            print(json_serialize(out_val), file=output_file)

    def get_state(self):
        """One-line dump of every neuron, prefixed with the current tick.

        Cleanup: local renamed from ``repr`` to avoid shadowing the builtin.
        """
        state = ' '.join(str(neuron) for neuron in self.container.neurons)
        return '{}: {}'.format(str(self.current_tick), state)
def __init__(self):
    """Start with a fresh container, no nodes, and a minimal stop-word list."""
    self.container = NeuroContainer()
    self.nodes = []
    # Tokens ignored when building the network.
    self.stop_words = ['a', 'is', 'in']
def main():
    # Build a receptive -> hidden (pooling) -> output SAB network, train it on
    # a few letter images ('П' and 'р'), then run inference.
    # Fixed seed so random wiring/thresholds are reproducible across runs.
    random.seed(24)
    container = NeuroContainer()
    receptive_layer = ReceptiveLayer(container=container)
    receptive_layer.allocate()
    num_ticks = 20

    # Output layer: 10 densely interconnected SAB units (one per candidate class).
    output_sab_params = SabParameters()
    output_sab_params.interconnection_density = 0.8
    output_sab_params.receptive_synapse_weight = 0.6
    output_sab_params.inter_synapse_weight = 1
    output_sab_params.num_sad_neurons = 1  # NOTE(review): 'sad' — possibly a typo for 'sab'; confirm
    output_sab_params.inhibitory_neurons_uppermost_threshold = 7
    output_sab_params.inhibitory_neurons_lowest_threshold = 4
    output_layer = SabLayer(layer_id=3, container=container, num_units=10,
                            sab_params=output_sab_params)
    output_layer.is_output = True
    output_layer.allocate()

    # Hidden layer: sparser interconnection, 3x3 pooling regions, very high
    # (effectively fixed) inhibitory thresholds.
    hidden_sab_params = SabParameters()
    hidden_sab_params.interconnection_density = 0.3
    hidden_sab_params.receptive_synapse_weight = 1
    hidden_sab_params.inter_synapse_weight = 1
    hidden_sab_params.inhibitory_neurons_uppermost_threshold = 19
    hidden_sab_params.inhibitory_neurons_lowest_threshold = 19
    hidden_layer = SabPoolingLayer(layer_id=2, container=container,
                                   regions_shape=(3, 3),
                                   sab_params=hidden_sab_params)
    hidden_layer.allocate()

    # Wire layers top-down: output <- hidden <- receptive.
    output_layer.connect_to(hidden_layer, connection_density=2)
    hidden_layer.connect_to(receptive_layer)
    print(
        f'Allocated {len(container.neurons)} neurons and {len(container.synapses)} synapses'
    )
    network = Network(container=container)
    sab001 = container.get_sab_by_id('001')

    # Load the training/inference images (two fonts of 'П', three of 'р').
    path = '../data/images'
    imgP1 = Image(imageio.imread(os.path.join(path, 'П1.png')))
    imgP2 = Image(imageio.imread(os.path.join(path, 'П2.png')))
    imgR1 = Image(imageio.imread(os.path.join(path, 'р1.png')))
    imgR2 = Image(imageio.imread(os.path.join(path, 'р2.png')))
    imgR3 = Image(imageio.imread(os.path.join(path, 'р3.png')))

    # Debugging helpers, kept disabled.
    # _print_sab_summary(sab001)
    # show_receptive_map(receptive_layer, Orientation.horizontal)
    receptive_neurons = list(receptive_layer.firing_history.keys())
    hidden_sabs = hidden_layer.get_all_sabs()
    sab002 = container.get_sab_by_id('002')
    # _print_hidden_sab_summary(sab24, receptive_neurons)
    # for sab in hidden_sabs:
    #     _print_hidden_sab_summary(sab, receptive_neurons)

    # Training: mostly correct labels, with one deliberately wrong pairing
    # (imgP1 labelled 'р') as a negative example.
    train_on_image(receptive_layer, network, imgP1, label='П',
                   label_is_correct=True, max_ticks=num_ticks)
    train_on_image(receptive_layer, network, imgR1, label='р',
                   label_is_correct=True, max_ticks=num_ticks)
    train_on_image(receptive_layer, network, imgR2, label='р',
                   label_is_correct=True, max_ticks=num_ticks)
    # _print_sab_summary(sab002)
    train_on_image(receptive_layer, network, imgP1, label='р',
                   label_is_correct=False, max_ticks=num_ticks)
    train_on_image(receptive_layer, network, imgR1, label='р',
                   label_is_correct=True, max_ticks=num_ticks)
    train_on_image(receptive_layer, network, imgP2, label='П',
                   label_is_correct=True, max_ticks=num_ticks)

    print('')
    print(Colors.bold('Inferencing..'))
    print('')
    # Inference on one seen image and one unseen variant ('р3').
    infer_on_image(receptive_layer, network, imgP1)
    infer_on_image(receptive_layer, network, imgR3)
    winning_sab = output_layer.get_winning_sab()
    receptive_map = receptive_layer.get_firing_map(Orientation.horizontal)
    # NOTE(review): winning_sab / receptive_map are assigned but not used in
    # the visible portion — the function may continue beyond this chunk.