# to give you trouble) and then use "pip install scipy --no-use-wheel". Or, if you can figure out how to get PyCharm
# to ignore warnings, that's fine too.
#
# More info here:
# https://stackoverflow.com/questions/40845304/runtimewarning-numpy-dtype-size-changed-may-indicate-binary-incompatibility

import psyneulink as pnl
import numpy as np

### building the LeabraMechanism
n_input = 4  # don't change this!
n_output = 2  # don't change this!
n_hidden = 0
Leab = pnl.LeabraMechanism(
    input_size=n_input,
    output_size=n_output,
    hidden_layers=n_hidden,
    hidden_sizes=None,
    training_flag=True,
    quarter_size=20,
)

### building the PsyNeuLink network
T_input = pnl.TransferMechanism(size=n_input)
T_target = pnl.TransferMechanism(size=n_output)

# target_projection connects T_target to the TARGET InputPort of Leab
target_projection = pnl.MappingProjection(sender=T_target, receiver=Leab.input_ports[1])

p_input = pnl.Process(pathway=[T_input, Leab])
p_target = pnl.Process(pathway=[T_target, target_projection, Leab])
sys = pnl.System(processes=[p_input, p_target])
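
# A minimal usage sketch (not part of the original script): run the System above for a single
# presentation. The input and target values below are illustrative placeholders, chosen only to
# match the sizes n_input = 4 and n_output = 2 used above.
example_input = np.array([[0.0, 0.5, 1.0, 0.25]])
example_target = np.array([[0.0, 1.0]])
example_results = sys.run(inputs={T_input: example_input, T_target: example_target})
print('Example LeabraMechanism output:', example_results)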
train_flag = False  # should the LeabraMechanism and leabra network learn?

# NOTE: there is currently a bug with training, in which the output may differ between trials, randomly
# ending up in one of two possible outputs. Running this script repeatedly will make this behavior clear.
# The leabra network and the LeabraMechanism experience this bug equally.
# NOTE: TransferMechanisms are used below because there is currently a bug where a LeabraMechanism
# (or any other Mechanism with multiple input states) cannot be used as an origin Mechanism for a System.
# If you want to use a LeabraMechanism as an origin Mechanism, you can work around this bug by creating two
# TransferMechanisms as origin Mechanisms instead, and having those two TransferMechanisms pass their output
# to the InputStates of the LeabraMechanism.

import time

# NOTE: the definitions of input_size, output_size, hidden_layers, hidden_sizes, input_pattern, and
# training_pattern are not included in this excerpt; the values below are illustrative placeholders
# so the script runs on its own.
input_size = 4
output_size = 2
hidden_layers = 2
hidden_sizes = [3, 3]
num_trials = 10
input_pattern = np.repeat(np.array([[0., 1., 3., 4.]]), num_trials, axis=0)
training_pattern = np.repeat(np.array([[0., 1.]]), num_trials, axis=0)

# create a LeabraMechanism in PsyNeuLink
L = pnl.LeabraMechanism(
    input_size=input_size,
    output_size=output_size,
    hidden_layers=hidden_layers,
    hidden_sizes=hidden_sizes,
    name='L',
    training_flag=train_flag,
)

T1 = pnl.TransferMechanism(name='T1', size=input_size, function=pnl.Linear)
T2 = pnl.TransferMechanism(name='T2', size=output_size, function=pnl.Linear)

p1 = pnl.Process(pathway=[T1, L])
proj = pnl.MappingProjection(sender=T2, receiver=L.input_states[1])
p2 = pnl.Process(pathway=[T2, proj, L])
s = pnl.System(processes=[p1, p2])

print('Running Leabra in PsyNeuLink...')
start_time = time.process_time()
outputs = s.run(inputs={T1: input_pattern.copy(), T2: training_pattern.copy()})
end_time = time.process_time()
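
# Illustrative follow-up (not in the original excerpt): report how long the PsyNeuLink run took and
# show the final trial's output, so repeated runs can be compared (e.g. to observe the training bug
# noted above).
print('PsyNeuLink Leabra run took {:.3f} CPU seconds'.format(end_time - start_time))
print('Output of final trial:', outputs[-1])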