Example #1
def cool_single_cpg(mode):
    """
    main function to cool single recurrent network
    loads previously fitted output weights
    computes correlation between neural trajectories as a similarity measure
    :return: nothing
    """

    # load output weights
    out_dir = '/Users/robert/project_src/cooling/single_cpg_manipulation/weights'
    # weight_suffix1 = 'outunit1_weights.npy'
    # weight_suffix2 = 'outunit2_weights.npy'
    weight_suffix1 = 'outunit1_weights_force.npy'
    weight_suffix2 = 'outunit2_weights_force.npy'
    weight_suffix3 = 'Wrec_weights_force.npy'
    # weight_suffix1 = 'outunit_weights_twotimescales_force.npy'
    # weight_suffix2 = 'Wrec_weights_twotimescales_force.npy'
    # weight_suffix1 = 'outunit1_weights_circle.npy'
    # weight_suffix2 = 'outunit2_weights_circle.npy'
    Wout1 = np.load(os.path.join(out_dir, weight_suffix1))
    Wout2 = np.load(os.path.join(out_dir, weight_suffix2))
    Wrec = np.load(os.path.join(out_dir, weight_suffix3))

    # create network with same parameters
    t_max = 2000.0
    dt = 1.0
    # nw = rn.Network(N=800, g=1.5, pc=0.1)
    nw = rn.Network(N=800, g=1.5, pc=1.0)
    # nw = rn.Network(N=1000, g=1.5, pc=1.0)
    nw.Wrec = Wrec
    # nw = Network(N=500, g=1.2, pc=0.5)
    # nw = rn.Network(N=50, g=0.5/np.sqrt(0.2), pc=1.0)
    # Laje and Buonomano: 50 ms step
    # def ext_inp(t):
    #     if 0.0 <= t <= 50.0:
    #         return 5.0 * np.ones(nw.N)
    #     else:
    #         return np.zeros(nw.N)
    def ext_inp(t):
        return np.zeros(nw.N)

    # run dynamics at reference temperature and compute neural/behavioral trajectory
    ref_t, ref_rates = nw.simulate_network(T=t_max, dt=dt, external_input=ext_inp)
    ref_mean_ = np.mean(ref_rates, axis=1)
    ref_mean = ref_mean_.transpose()
    # ref trajectory: first three PCs (sufficient?)
    pcs, ref_trajectory = rn.compute_neural_trajectory(ref_rates)
    neuron_out1 = np.dot(Wout1, ref_rates)
    neuron_out2 = np.dot(Wout2, ref_rates)
    ref_behavior = neuron_out1 * neuron_out2
    # ref_behavior = np.array([neuron_out1, neuron_out2])
    # ref_behavior = np.array(neuron_out)

    fig1 = plt.figure(1)
    ax1 = fig1.add_subplot(2, 1, 1)
    ax1.plot(ref_trajectory[0, :], ref_trajectory[1, :], 'k', linewidth=0.5, label='ref')
    ax2 = fig1.add_subplot(2, 1, 2)
    # ax2.plot(neuron_out1, neuron_out2, 'k', linewidth=0.5, label='ref')
    ax2.plot(ref_t, ref_behavior, 'k', linewidth=0.5, label='ref')


    # run dynamics at different temperatures using some Q10 for tau
    # and compute neural/behavioral trajectories
    if mode == 'sweep':
        dT_steps = [-0.2, -0.5, -1.0, -1.5, -2.0, -2.5, -3.0, -3.5, -4.0, -4.5, -5.0]
    elif mode == 'vis':
        dT_steps = [-1.0, -3.0, -5.0]
    # dT_steps = [-5.0, -10.0, -20.0]
    # dT_steps = [-0.5, -1.0, -2.0]
    fig2 = plt.figure(2)
    ax3 = fig2.add_subplot(len(dT_steps) + 1, 1, 1)
    for i in range(10):
        ax3.plot(ref_t, ref_rates[i, :], linewidth=0.5)
    cooled_trajectories = []
    cooled_behaviors = []
    for i, dT in enumerate(dT_steps):
        # cooled_tau = _tau_q10(nw.tau, dT)
        # cooled_nw = Network(N=800, g=1.5, pc=0.1, tau=cooled_tau)
        cooled_q = _q(dT)
        # cooled_nw = rn.Network(N=800, g=1.5, pc=0.1, q=cooled_q)
        cooled_nw = rn.Network(N=800, g=1.5, pc=1.0, q=cooled_q)
        # cooled_nw = rn.Network(N=1000, g=1.5, pc=1.0, q=cooled_q)
        cooled_nw.Wrec = Wrec
        # cooled_nw = Network(N=500, g=1.2, pc=0.5, q=cooled_q)
        # cooled_nw = rn.Network(N=50, g=0.5/np.sqrt(0.2), pc=1.0, q=cooled_q)
        cooled_t, cooled_rates = cooled_nw.simulate_network(T=t_max/cooled_q, dt=dt, external_input=ext_inp)

        # behavior = np.array([np.dot(Wout1, cooled_rates), np.dot(Wout2, cooled_rates)])
        behavior = np.dot(Wout1, cooled_rates) * np.dot(Wout2, cooled_rates)
        # behavior = np.array(np.dot(Wout, cooled_rates))
        cooled_behaviors.append(behavior)
        projected_rates = rn.project_neural_trajectory(cooled_rates, ref_mean, pcs)
        cooled_trajectories.append(projected_rates)

        label_str = 'dT = %.1f' % dT
        ax1.plot(projected_rates[0, :], projected_rates[1, :], linewidth=0.5, label=label_str)
        # ax2.plot(behavior[0], behavior[1], linewidth=0.5, label=label_str)
        ax2.plot(cooled_t, behavior, linewidth=0.5, label=label_str)
        ax3 = fig2.add_subplot(len(dT_steps) + 1, 1, 2 + i)
        for j in range(10):
            ax3.plot(cooled_t, cooled_rates[j, :], linewidth=0.5)

    ax1.legend()
    ax1.set_xlabel('PC 1 (a.u.)')
    ax1.set_ylabel('PC 2 (a.u.)')
    ax2.legend()
    ax2.set_xlabel('Time (ms)')
    ax2.set_ylabel('Output (a.u.)')

    # measure similarity of neural/behavioral trajectories as a function of temperature
    trajectory_similarities = []
    behavior_similarities = []
    for i in range(len(dT_steps)):
        similarity1 = rn.measure_trajectory_similarity(ref_trajectory, cooled_trajectories[i])
        similarity2 = rn.measure_trajectory_similarity(ref_behavior, cooled_behaviors[i])
        trajectory_similarities.append(similarity1)
        behavior_similarities.append(similarity2)
    fig4 = plt.figure(4)
    ax4 = fig4.add_subplot(1, 1, 1)
    ax4.plot(dT_steps, trajectory_similarities, 'ko-', label='neural activity')
    ax4.plot(dT_steps, behavior_similarities, 'ro-', label='behavior')
    ax4.set_xlim(ax4.get_xlim()[::-1])
    ax4.set_xlabel('Temperature change')
    ax4.set_ylabel('Corr. coeff.')
    ax4.legend()

    plt.show()
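
The listings on this page are excerpted from a larger module, so the module-level imports and several small helpers (e.g. _q, _tau_q10) are not shown. A minimal sketch of the assumed setup follows; the recurrent_network module name and the Q10 value of 2 are assumptions, chosen only to be consistent with how _q(dT) is used above (a multiplicative slowdown of the dynamics, with the simulated duration stretched as T=t_max/cooled_q).

# Assumed module-level setup for the examples on this page (sketch, not shown
# in the excerpts).
import os

import numpy as np
import numpy.random as rng
import matplotlib.pyplot as plt
from scipy.linalg import lstsq
from scipy.signal import resample_poly

import recurrent_network as rn  # hypothetical module providing rn.Network etc.


def _q(dT, q10=2.0):
    """Hypothetical Q10 speed factor: factor by which the dynamics slow down
    (q < 1) for a temperature change dT < 0 degrees."""
    return q10 ** (dT / 10.0)


def _tau_q10(tau, dT, q10=2.0):
    """Equivalent formulation acting on the time constant directly."""
    return tau / q10 ** (dT / 10.0)
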
Example #2
def manipulate_network(manipulation, mode):
    """
    main function to manipulate neurons/synapses in single recurrent network
    loads previously fitted output weights
    computes correlation between neural trajectories as a similarity measure
    :return: nothing
    """
    # load output weights
    out_dir = '/Users/robert/project_src/cooling/single_cpg_manipulation/weights'
    # weight_suffix1 = 'outunit1_weights.npy'
    # weight_suffix2 = 'outunit2_weights.npy'
    weight_suffix1 = 'outunit1_weights_force.npy'
    weight_suffix2 = 'outunit2_weights_force.npy'
    weight_suffix3 = 'Wrec_weights_force.npy'
    # weight_suffix1 = 'outunit_weights_twotimescales_force.npy'
    # weight_suffix2 = 'Wrec_weights_twotimescales_force.npy'
    # weight_suffix1 = 'outunit1_weights_circle.npy'
    # weight_suffix2 = 'outunit2_weights_circle.npy'
    Wout1 = np.load(os.path.join(out_dir, weight_suffix1))
    Wout2 = np.load(os.path.join(out_dir, weight_suffix2))
    Wrec = np.load(os.path.join(out_dir, weight_suffix3))

    # create network with same parameters
    t_max = 2000.0
    dt = 1.0
    # nw = rn.Network(N=800, g=1.5, pc=0.1)
    nw = rn.Network(N=800, g=1.5, pc=1.0)
    # nw = rn.Network(N=1000, g=1.5, pc=1.0)
    nw.Wrec = Wrec

    # nw = Network(N=500, g=1.2, pc=0.5)
    # nw = rn.Network(N=50, g=0.5/np.sqrt(0.2), pc=1.0)
    # Laje and Buonomano: 50 ms step
    # def ext_inp(t):
    #     if 0.0 <= t <= 50.0:
    #         return 5.0 * np.ones(nw.N)
    #     else:
    #         return np.zeros(nw.N)
    def ext_inp(t):
        return np.zeros(nw.N)

    # run dynamics at reference temperature and compute neural/behavioral trajectory
    ref_t, ref_rates = nw.simulate_network(T=t_max,
                                           dt=dt,
                                           external_input=ext_inp)
    ref_mean_ = np.mean(ref_rates, axis=1)
    ref_mean = ref_mean_.transpose()
    # ref trajectory: first three PCs (sufficient?)
    pcs, ref_trajectory = rn.compute_neural_trajectory(ref_rates)
    neuron_out1 = np.dot(Wout1, ref_rates)
    neuron_out2 = np.dot(Wout2, ref_rates)
    ref_behavior = neuron_out1 * neuron_out2
    # ref_behavior = np.array([neuron_out1, neuron_out2])
    # ref_behavior = np.array(neuron_out)

    fig1 = plt.figure(1)
    ax1 = fig1.add_subplot(2, 1, 1)
    ax1.plot(ref_trajectory[0, :],
             ref_trajectory[1, :],
             'k',
             linewidth=0.5,
             label='ref')
    ax2 = fig1.add_subplot(2, 1, 2)
    # ax2.plot(neuron_out1, neuron_out2, 'k', linewidth=0.5, label='ref')
    ax2.plot(ref_t, ref_behavior, 'k', linewidth=0.5, label='ref')

    # run dynamics at different fractions of manipulated neurons
    # and compute neural/behavioral trajectories
    if manipulation == 'activation':
        if mode == 'sweep':
            fractions = [
                5.e-3, 1.e-2, 2.e-2, 5.e-2, 8.e-2, 1.e-1, 1.5e-1, 2.e-1
            ]
        elif mode == 'vis':
            fractions = [1.e-2, 5.e-2, 1.e-1]
    elif manipulation == 'inactivation':
        if mode == 'sweep':
            fractions = [
                1.e-3, 2.e-3, 5.e-3, 1.e-2, 2.e-2, 5.e-2, 8.e-2, 1.e-1, 1.5e-1,
                2.e-1
            ]
        elif mode == 'vis':
            fractions = [2.e-3, 1.e-2, 5.e-2, 2.e-1]

    fig2 = plt.figure(2)
    ax3 = fig2.add_subplot(len(fractions) + 1, 1, 1)
    for i in range(10):
        ax3.plot(ref_t, ref_rates[i, :], linewidth=0.5)

    n_repetitions = 10  # simulate multiple synapse removals
    manipulated_trajectories = []
    manipulated_behaviors = []
    for i, fraction in enumerate(fractions):
        # manipulated_nw = rn.Network(N=800, g=1.5, pc=0.1)
        # manipulated_nw = Network(N=500, g=1.2, pc=0.5)
        fraction_behaviors = []
        fraction_trajectories = []
        inputs = []
        zero_active = []
        networks = []
        # drives = []
        fraction_rates = np.zeros((n_repetitions, nw.N, int(t_max / dt + 0.5)))
        for j in range(n_repetitions):
            # manipulated_nw = rn.Network(N=800, g=1.5, pc=0.1)
            manipulated_nw = rn.Network(N=800, g=1.5, pc=1.0)
            # manipulated_nw = rn.Network(N=1000, g=1.5, pc=1.0)
            manipulated_nw.Wrec = np.copy(Wrec)
            # manipulated_nw = rn.Network(N=50, g=0.5/np.sqrt(0.2), pc=1.0)

            if manipulation == 'activation':
                nr_activate = int(manipulated_nw.N * fraction)
                activate_ids_ = rng.choice(range(manipulated_nw.N),
                                           nr_activate,
                                           replace=False)
                if 0 in activate_ids_:
                    zero_active.append(1)
                else:
                    zero_active.append(0)
                activate_ids = np.zeros(manipulated_nw.N, dtype=bool)
                activate_ids[activate_ids_] = True
                # activate_ids = np.array([i % (j + 2) for i in range(manipulated_nw.N)], dtype=bool)
                manipulated_nw.Win = np.zeros(manipulated_nw.N)
                manipulated_nw.Win[activate_ids] = 0.016 * rng.rand()
                manipulated_nw.Win[~activate_ids] = 0.0
                manipulated_nw.activate_ids = activate_ids

                inputs.append(manipulated_nw.activate_ids)
                # manipulated_t, manipulated_rates = manipulated_nw.simulate_network(T=t_max, dt=dt,
                #                                                                    external_input=activation_func)
                # func = lambda t: activation(t, manipulated_nw)
                manipulated_t, manipulated_rates = manipulated_nw.simulate_network(
                    T=t_max, dt=dt, external_input=activation)
                fraction_rates[j, :, :] = manipulated_rates[:, :]
                behavior = np.dot(Wout1, manipulated_rates) * np.dot(
                    Wout2, manipulated_rates)

            # drives.append(activation)

            elif manipulation == 'inactivation':
                # for visualization, run 600 ms of regular sim, then inactivate from that state
                baseline = 600.0
                default_nw = rn.Network(N=800, g=1.5, pc=1.0)
                default_nw.Wrec = np.copy(Wrec)
                default_t, default_rates = default_nw.simulate_network(
                    T=baseline, dt=dt)
                _remove_neurons(manipulated_nw, fraction)
                new_x0 = np.arctanh(default_rates[:, -1])
                manipulated_t_, manipulated_rates = manipulated_nw.simulate_network(
                    T=t_max - baseline, dt=dt, x0=new_x0)
                manipulated_t = np.arange(0.0, t_max, dt)
                # stitch together our Frankenstein network output
                fraction_rates[j, :, :len(default_t)] = default_rates[:, :]
                fraction_rates[j, :, len(default_t):] = manipulated_rates[:, :]
                behavior = np.dot(Wout1, fraction_rates[j, :, :]) * np.dot(
                    Wout2, fraction_rates[j, :, :])

            networks.append(manipulated_nw)
            fraction_behaviors.append(behavior)
            projected_rates = rn.project_neural_trajectory(
                fraction_rates[j, :, :], ref_mean, pcs)
            fraction_trajectories.append(projected_rates)
        manipulated_behaviors.append(fraction_behaviors)
        manipulated_trajectories.append(fraction_trajectories)

        # hack: simply plot last simulation
        plot_index = 7  # 1 (then 7) best for w1 = 200 / w2 = 2 w1
        label_str = 'fraction = %.3f' % fraction
        ax1.plot(fraction_trajectories[plot_index][0, :],
                 fraction_trajectories[plot_index][1, :],
                 linewidth=0.5,
                 label=label_str)
        # ax2.plot(fraction_behaviors[-1][0], fraction_behaviors[-1][1], linewidth=0.5, label=label_str)
        ax2.plot(manipulated_t,
                 fraction_behaviors[plot_index],
                 linewidth=0.5,
                 label=label_str)
        ax3 = fig2.add_subplot(len(fractions) + 1, 1, 2 + i)
        for j in range(10):
            ax3.plot(manipulated_t,
                     fraction_rates[plot_index, j, :],
                     linewidth=0.5)

    ax1.legend()
    ax2.legend()

    # measure similarity of neural/behavioral trajectories as a function of the manipulated fraction
    trajectory_similarities = []
    trajectory_errors = []
    behavior_similarities = []
    behavior_errors = []
    for i in range(len(fractions)):
        tmp_similarities1 = []
        tmp_similarities2 = []
        for j in range(n_repetitions):
            similarity1 = rn.measure_trajectory_similarity(
                ref_trajectory, manipulated_trajectories[i][j])
            similarity2 = rn.measure_trajectory_similarity(
                ref_behavior, manipulated_behaviors[i][j])
            tmp_similarities1.append(similarity1)
            tmp_similarities2.append(similarity2)
        trajectory_similarities.append(np.mean(tmp_similarities1))
        trajectory_errors.append(
            np.std(tmp_similarities1) / np.sqrt(n_repetitions))
        behavior_similarities.append(np.mean(tmp_similarities2))
        behavior_errors.append(
            np.std(tmp_similarities2) / np.sqrt(n_repetitions))
    fig4 = plt.figure(4)
    ax4 = fig4.add_subplot(1, 1, 1)
    ax4.errorbar(fractions,
                 trajectory_similarities,
                 yerr=trajectory_errors,
                 fmt='ko-',
                 label='neural activity')
    ax4.errorbar(fractions,
                 behavior_similarities,
                 yerr=behavior_errors,
                 fmt='ro-',
                 label='behavior')
    ax4.set_xlabel('Fraction of neurons manipulated')
    ax4.set_ylabel('Corr. coeff.')
    ax4.legend()

    plt.show()
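
The inactivation branch calls _remove_neurons, a helper outside this excerpt (as is the activation drive passed as external_input=activation). A plausible sketch, assuming inactivation means silencing a random fraction of units by zeroing their recurrent weights in place:

def _remove_neurons(nw, fraction, rand=np.random):
    """Hypothetical inactivation helper: pick a random fraction of units and
    cut them out of the recurrent dynamics by zeroing the corresponding rows
    and columns of Wrec; readout weights are left untouched."""
    n_remove = int(nw.N * fraction)
    remove_ids = rand.choice(nw.N, n_remove, replace=False)
    nw.Wrec[remove_ids, :] = 0.0  # removed units no longer drive the network
    nw.Wrec[:, remove_ids] = 0.0  # and no longer receive recurrent input
    return remove_ids
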
Example #3
def cool_hierarchical_cpgs(mode):
    """
    main function to cool single recurrent network with external top-down input.
    cool either external input or cpg itself.
    loads previously fitted output weights, computes correlation between neural trajectories
    and output before/during cooling as similarity measures.
    :return: nothing
    """

    # load output weights
    out_dir = '/Users/robert/project_src/cooling/single_cpg_manipulation/weights'
    weight_suffix1 = 'outunit1_weights_force_w200_2w_input_w200.npy'
    weight_suffix2 = 'outunit2_weights_force_w200_2w_input_w200.npy'
    weight_suffix3 = 'Wrec_weights_force_w200_2w_input_w200.npy'
    Wout1 = np.load(os.path.join(out_dir, weight_suffix1))
    Wout2 = np.load(os.path.join(out_dir, weight_suffix2))
    Wrec = np.load(os.path.join(out_dir, weight_suffix3))

    # create network with same parameters
    t_max = 2000.0
    dt = 1.0
    nw = rn.Network(N=800, g=1.5, pc=1.0)
    nw.Wrec = Wrec

    w_ext = 2 * np.pi / 200.0 # control slow timescale like this maybe?
    amp_ext = 0.05
    def ext_inp(t):
        # w = 2 * np.pi / 200.0 # control slow timescale like this maybe?
        return amp_ext * np.ones(nw.N) * np.sin(w_ext * t)

    # run dynamics at reference temperature and compute neural/behavioral trajectory
    ref_t, ref_rates = nw.simulate_network(T=t_max, dt=dt, external_input=ext_inp)
    ref_mean_ = np.mean(ref_rates, axis=1)
    ref_mean = ref_mean_.transpose()
    # ref trajectory: first three PCs (sufficient?)
    pcs, ref_trajectory = rn.compute_neural_trajectory(ref_rates)
    neuron_out1 = np.dot(Wout1, ref_rates)
    neuron_out2 = np.dot(Wout2, ref_rates)
    ref_behavior = neuron_out1 * neuron_out2

    fig1 = plt.figure(1)
    ax1 = fig1.add_subplot(2, 1, 1)
    ax1.plot(ref_trajectory[0, :], ref_trajectory[1, :], 'k', linewidth=0.5, label='ref')
    ax2 = fig1.add_subplot(2, 1, 2)
    ax2.plot(ref_t, ref_behavior, 'k', linewidth=0.5, label='ref')


    # run dynamics at different temperatures using some Q10 for tau
    # and compute neural/behavioral trajectories
    if mode == 'sweep':
        dT_steps = [-0.2, -0.5, -1.0, -1.5, -2.0, -2.5, -3.0, -3.5, -4.0, -4.5, -5.0]
    elif mode == 'vis':
        dT_steps = [-1.0, -3.0, -5.0]
    # dT_steps = [-5.0, -10.0, -20.0]
    # dT_steps = [-0.5, -1.0, -2.0]
    fig2 = plt.figure(2)
    ax3 = fig2.add_subplot(len(dT_steps) + 1, 1, 1)
    for i in range(10):
        ax3.plot(ref_t, ref_rates[i, :], linewidth=0.5)
    cooled_trajectories = []
    cooled_behaviors = []
    for i, dT in enumerate(dT_steps):
        cooled_q = _q(dT)
        cooled_nw = rn.Network(N=800, g=1.5, pc=1.0)
        cooled_nw.Wrec = Wrec

        # only cool upstream network
        def ext_inp_cool(t):
            w_cool = w_ext * cooled_q
            return amp_ext * np.ones(nw.N) * np.sin(w_cool * t)

        cooled_t, cooled_rates = cooled_nw.simulate_network(T=t_max/cooled_q, dt=dt, external_input=ext_inp_cool)

        behavior = np.dot(Wout1, cooled_rates) * np.dot(Wout2, cooled_rates)
        cooled_behaviors.append(behavior)
        projected_rates = rn.project_neural_trajectory(cooled_rates, ref_mean, pcs)
        cooled_trajectories.append(projected_rates)

        label_str = 'dT = %.1f' % dT
        ax1.plot(projected_rates[0, :], projected_rates[1, :], linewidth=0.5, label=label_str)
        ax2.plot(cooled_t, behavior, linewidth=0.5, label=label_str)
        ax3 = fig2.add_subplot(len(dT_steps) + 1, 1, 2 + i)
        for j in range(10):
            ax3.plot(cooled_t, cooled_rates[j, :], linewidth=0.5)

    ax1.legend()
    ax1.set_xlabel('PC 1 (a.u.)')
    ax1.set_ylabel('PC 2 (a.u.)')
    ax2.legend()
    ax2.set_xlabel('Time (ms)')
    ax2.set_ylabel('Output (a.u.)')

    # measure similarity of neural/behavioral trajectories as a function of temperature
    trajectory_similarities = []
    behavior_similarities = []
    for i in range(len(dT_steps)):
        similarity1 = rn.measure_trajectory_similarity(ref_trajectory, cooled_trajectories[i])
        similarity2 = rn.measure_trajectory_similarity(ref_behavior, cooled_behaviors[i])
        trajectory_similarities.append(similarity1)
        behavior_similarities.append(similarity2)
    fig4 = plt.figure(4)
    ax4 = fig4.add_subplot(1, 1, 1)
    ax4.plot(dT_steps, trajectory_similarities, 'ko-', label='neural activity')
    ax4.plot(dT_steps, behavior_similarities, 'ro-', label='behavior')
    ax4.set_xlim(ax4.get_xlim()[::-1])
    ax4.set_xlabel('Temperature change')
    ax4.set_ylabel('Corr. coeff.')
    ax4.legend()

    plt.show()
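
Note that the loop above only cools the upstream drive: the input frequency is scaled by cooled_q (via ext_inp_cool) while the CPG keeps its native time constant. The complementary case mentioned in the docstring, cooling the CPG itself while the drive stays at the reference frequency, would be a small change inside the loop; a sketch, with the same assumptions about the q argument as in Example #1:

# Alternative to ext_inp_cool: cool the CPG itself and keep the upstream
# input at its reference frequency w_ext (sketch).
cooled_nw = rn.Network(N=800, g=1.5, pc=1.0, q=cooled_q)
cooled_nw.Wrec = Wrec
cooled_t, cooled_rates = cooled_nw.simulate_network(
    T=t_max / cooled_q, dt=dt, external_input=ext_inp)
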
Example #4
def cool_cpg_lif_output(mode):
    """
    main function to cool single recurrent network with time-varying output that is mapped through LIF neuron
    onto singing mouse EMG for motor output.
    loads previously fitted output weights, computes correlation between neural trajectories
    and output before/during cooling as similarity measures.
    :return: nothing
    """

    # load output weights
    out_dir = '/Users/robert/project_src/cooling/single_cpg_manipulation/weights'
    weight_suffix1 = 'outunit_weights_singing_mouse_ramp.npy'
    weight_suffix2 = 'Wrec_weights_singing_mouse_ramp.npy'
    Wout = np.load(os.path.join(out_dir, weight_suffix1))
    Wrec = np.load(os.path.join(out_dir, weight_suffix2))

    # create network with same parameters
    t_max = 2000.0
    # t_max = 1370.0
    dt = 1.0
    nw = rn.Network(N=800, g=1.5, pc=1.0)
    nw.Wrec = Wrec
    def ext_inp(t):
        return np.zeros(nw.N)

    # run dynamics at reference temperature and compute neural/behavioral trajectory
    ref_t, ref_rates = nw.simulate_network(T=t_max, dt=dt, external_input=ext_inp)
    ref_mean_ = np.mean(ref_rates, axis=1)
    ref_mean = ref_mean_.transpose()
    # ref trajectory: first three PCs (sufficient?)
    pcs, ref_trajectory = rn.compute_neural_trajectory(ref_rates)
    neuron_out_ = np.dot(Wout, ref_rates)
    # normalize?
    neuron_out = neuron_out_ / np.max(neuron_out_)
    ref_behavior, ref_spikes = _lif_behavior(neuron_out)

    fig1 = plt.figure(1)
    ax1 = fig1.add_subplot(1, 2, 1)
    ax1.plot(ref_trajectory[0, :], ref_trajectory[1, :], 'k', linewidth=0.5, label='ref')
    ax2 = fig1.add_subplot(2, 2, 2)
    ax2.plot(ref_t, neuron_out, 'k', linewidth=0.5, label='ref neuron')
    ax3 = fig1.add_subplot(2, 2, 4)
    ax3.plot(ref_t, ref_behavior, 'k', linewidth=0.5, label='ref LIF')

    # run dynamics at different temperatures using some Q10 for tau
    # and compute neural/behavioral trajectories
    if mode == 'sweep':
        dT_steps = [-0.2, -0.5, -1.0, -1.5, -2.0, -2.5, -3.0, -3.5, -4.0, -4.5, -5.0]
    elif mode == 'vis':
        dT_steps = [-1.0, -5.0, -10.0]
    fig2 = plt.figure(2)
    ax2_1 = fig2.add_subplot(len(dT_steps) + 1, 2, 1)
    ax2_2 = fig2.add_subplot(len(dT_steps) + 1, 2, 2)
    ax2_1.plot(ref_t, neuron_out, 'k', linewidth=0.5, label='ref neuron')
    ax2_2.plot(ref_t, ref_behavior, 'k', linewidth=0.5, label='ref LIF')
    ax2_1.set_xlim([0, 4000])
    ax2_2.set_xlim([0, 4000])
    fig3 = plt.figure(3)
    ax3_1 = fig3.add_subplot(1, 2, 1)
    ax3_2 = fig3.add_subplot(1, 2, 2)
    ax3_1.vlines(ref_spikes, [-0.5] * len(ref_spikes), [0.5] * len(ref_spikes), colors='k', linewidth=0.5)
    ref_dist = [len(ref_spikes[:i + 1]) for i in range(len(ref_spikes))]
    ax3_2.step(ref_spikes, ref_dist, color='k', linewidth=0.5)
    cooled_trajectories = []
    cooled_behaviors = []
    cooled_spikes = []
    for i, dT in enumerate(dT_steps):
        cooled_q = _q(dT)
        cooled_nw = rn.Network(N=800, g=1.5, pc=1.0, q=cooled_q)
        cooled_nw.Wrec = Wrec
        cooled_t, cooled_rates = cooled_nw.simulate_network(T=t_max / cooled_q, dt=dt, external_input=ext_inp)

        # behavior = np.array([np.dot(Wout1, cooled_rates), np.dot(Wout2, cooled_rates)])
        neuron_out_cooled_ = np.dot(Wout, cooled_rates)
        # normalize?
        neuron_out_cooled = neuron_out_cooled_ / np.max(neuron_out_cooled_)
        # behavior_cooled, spikes_cooled = _lif_behavior(neuron_out_cooled)
        behavior_cooled, spikes_cooled = _lif_behavior(neuron_out_cooled, cooled_q)
        cooled_behaviors.append(behavior_cooled)
        cooled_spikes.append(spikes_cooled)
        projected_rates = rn.project_neural_trajectory(cooled_rates, ref_mean, pcs)
        cooled_trajectories.append(projected_rates)

        label_str = 'dT = %.1f' % dT
        ax1.plot(projected_rates[0, :], projected_rates[1, :], linewidth=0.5, label=label_str)
        ax2.plot(cooled_t, neuron_out_cooled, linewidth=0.5, label=label_str)
        ax3.plot(cooled_t, behavior_cooled, linewidth=0.5, label=label_str)
        tmp_ax1 = fig2.add_subplot(len(dT_steps) + 1, 2, 2 * i + 3)
        tmp_ax2 = fig2.add_subplot(len(dT_steps) + 1, 2, 2 * i + 4)
        tmp_ax1.plot(cooled_t, neuron_out_cooled, linewidth=0.5, label=label_str)
        tmp_ax2.plot(cooled_t, behavior_cooled, linewidth=0.5, label=label_str)
        tmp_ax1.set_xlim([0, 4000])
        tmp_ax2.set_xlim([0, 4000])
        ax3_1.vlines(spikes_cooled, [-0.5 + i + 1] * len(spikes_cooled), [0.5 + i + 1] * len(spikes_cooled),
                     linewidth=0.5)
        cooled_dist = [len(spikes_cooled[:i + 1]) for i in range(len(spikes_cooled))]
        ax3_2.step(spikes_cooled, cooled_dist, linewidth=0.5)

    ax1.legend()
    ax1.set_xlabel('PC 1 (a.u.)')
    ax1.set_ylabel('PC 2 (a.u.)')
    ax2.legend()
    ax2.set_xlabel('Time (ms)')
    ax2.set_ylabel('Output (a.u.)')
    ax3.legend()
    ax3.set_xlabel('Time (ms)')
    ax3.set_ylabel('LIF output (mV)')
    ax3_1.set_xlabel('Time (ms)')
    ax3_2.set_xlabel('Time (ms)')
    ax3_1.set_ylabel('Spikes')
    ax3_2.set_ylabel('Steps taken')

    # let's see what changes - behavior duration (here: time between first and last spikes) vs. individual steps taken
    # (here: individual ISIs)
    durations = []
    ISIs = []
    durations.append(ref_spikes[-1] - ref_spikes[0])
    ISIs.append(np.diff(ref_spikes))
    for i in range(len(dT_steps)):
        tmp_spikes = cooled_spikes[i]
        durations.append(tmp_spikes[-1] - tmp_spikes[0])
        ISIs.append(np.diff(tmp_spikes))
    fig4 = plt.figure(4)
    ax4 = fig4.add_subplot(1, 1, 1)
    ref_ISI_mean = np.mean(ISIs[0][np.where(ISIs[0] < 200.0)])
    for i in range(len(durations)):
        # ax4.plot([durations[i]] * len(ISIs[i]), ISIs[i], 'o')
        short_ISIs = ISIs[i][np.where(ISIs[i] < 200.0)]
        ax4.errorbar(durations[i] / durations[0], np.mean(short_ISIs) / ref_ISI_mean,
                     np.std(short_ISIs) / ref_ISI_mean, fmt='o')
    ax4.set_xlabel('Behavior duration (norm.)')
    ax4.set_ylabel('ISIs (norm.)')
    ax4.set_ylim([0, 2.5])
    ax4.set_xlim([0.8, 2.0])

    plt.show()
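
_lif_behavior is another helper outside the excerpt. Judging from how it is called (normalized readout in; membrane trace and spike times out; optional q for cooling), a minimal leaky integrate-and-fire sketch could look like the following; all parameter values are assumptions:

def _lif_behavior(drive, q=1.0, dt=1.0, tau=20.0, v_thresh=1.0,
                  v_reset=0.0, gain=2.0):
    """Hypothetical LIF readout: integrate the normalized network output as an
    input current; q < 1 slows the membrane time constant as if cooled.
    Returns the membrane trace and the spike times (in ms)."""
    tau_eff = tau / q
    v = np.zeros(len(drive))
    spikes = []
    for i in range(1, len(drive)):
        v[i] = v[i - 1] + (-v[i - 1] + gain * drive[i - 1]) * dt / tau_eff
        if v[i] >= v_thresh:
            spikes.append(i * dt)
            v[i] = v_reset
    return v, np.array(spikes)
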
Example #5
def cool_distributed_cpgs(mode):
    """
    main function to cool one of two distributed (parallel) recurrent networks
    loads previously fitted output weights
    computes correlation between neural trajectories as a similarity measure
    :return: nothing
    """
    # load output weights
    out_dir = '/Users/robert/project_src/cooling/single_cpg_manipulation'
    weight_suffix1 = 'outunit1_parallel_weights_force.npy'
    weight_suffix2 = 'outunit2_parallel_weights_force.npy'
    weight_suffix3 = 'Wrec1_parallel_weights_force.npy'
    weight_suffix4 = 'Wrec2_parallel_weights_force.npy'
    Wout1 = np.load(os.path.join(out_dir, weight_suffix1))
    Wout2 = np.load(os.path.join(out_dir, weight_suffix2))
    Wrec1 = np.load(os.path.join(out_dir, weight_suffix3))
    Wrec2 = np.load(os.path.join(out_dir, weight_suffix4))

    # create network with same parameters
    t_max = 2000.0
    dt = 1.0
    nw1 = rn.Network(N=800, g=1.5, pc=1.0)
    nw1.Wrec = Wrec1
    nw2 = rn.Network(N=800, g=1.5, pc=1.0, seed=5432)
    nw2.Wrec = Wrec2
    def ext_inp(t):
        return np.zeros(nw1.N)

    # run dynamics at reference temperature and compute neural/behavioral trajectory
    ref_t1, ref_rates1 = nw1.simulate_network(T=t_max, dt=dt, external_input=ext_inp)
    ref_t2, ref_rates2 = nw2.simulate_network(T=t_max, dt=dt, external_input=ext_inp)
    neuron_out1 = np.dot(Wout1, ref_rates1)
    neuron_out2 = np.dot(Wout2, ref_rates2)
    ref_behavior = np.array([neuron_out1, neuron_out2])

    fig1 = plt.figure(1)
    ax1 = fig1.add_subplot(1, 1, 1)
    ax1.plot(neuron_out1, neuron_out2, 'k', linewidth=0.5, label='ref')

    # run dynamics at different temperatures using some Q10 for tau
    # and compute neural/behavioral trajectories
    if mode == 'sweep':
        dT_steps = [-0.2, -0.5, -1.0, -1.5, -2.0, -2.5, -3.0, -3.5, -4.0, -4.5, -5.0]
    elif mode == 'vis':
        dT_steps = [-1.0, -3.0, -5.0]
    # dT_steps = [-0.5, -1.0, -2.0]
    cooled_behaviors = []
    for i, dT in enumerate(dT_steps):
        cooled_q = _q(dT)
        cooled_nw = rn.Network(N=800, g=1.5, pc=1.0, q=cooled_q)
        cooled_nw.Wrec = Wrec1
        cooled_t, cooled_rates = cooled_nw.simulate_network(T=t_max/cooled_q, dt=dt, external_input=ext_inp)

        cooled_behavior = np.dot(Wout1, cooled_rates)
        target_length = len(ref_behavior[1])
        original_length = len(cooled_behavior)
        cooled_behavior_resampled = resample_poly(cooled_behavior, target_length, original_length)
        cooled_behaviors.append(np.array([cooled_behavior_resampled, ref_behavior[1]]))

        label_str = 'dT = %.1f' % dT
        ax1.plot(cooled_behavior_resampled, ref_behavior[1], linewidth=0.5, label=label_str)

    ax1.legend()
    ax1.set_xlabel('Output 1 (a.u.)')
    ax1.set_ylabel('Output 2 (a.u.)')

    # measure similarity of neural/behavioral trajectories as a function of temperature
    behavior_similarities = []
    for i in range(len(dT_steps)):
        similarity = rn.measure_trajectory_similarity(ref_behavior, cooled_behaviors[i])
        behavior_similarities.append(similarity)
    fig2 = plt.figure(2)
    ax2 = fig2.add_subplot(1, 1, 1)
    ax2.plot(dT_steps, behavior_similarities, 'ro-', label='behavior')
    ax2.set_xlim(ax2.get_xlim()[::-1])
    ax2.set_xlabel('Temperature change')
    ax2.set_ylabel('Corr. coeff.')
    ax2.legend()

    plt.show()
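
Every example scores the effect of a manipulation with rn.measure_trajectory_similarity, described in the docstrings only as a correlation between trajectories. The following is a sketch of what such a measure could look like (an assumption, not the actual rn implementation): trajectories are truncated to their common length and the Pearson correlation is averaged over trajectory dimensions.

def measure_trajectory_similarity(ref, test):
    """Hypothetical similarity measure: mean Pearson correlation across
    trajectory dimensions, truncated to the common length."""
    ref = np.atleast_2d(ref)
    test = np.atleast_2d(test)
    n = min(ref.shape[1], test.shape[1])
    corrs = [np.corrcoef(ref[d, :n], test[d, :n])[0, 1]
             for d in range(ref.shape[0])]
    return float(np.mean(corrs))
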
Example #6
def run_output_fit_force(save=False):
    """
    learn weights onto an output unit to generate a target output trajectory
    using the FORCE algorithm (a two-output variant is kept in the comments below)
    :return: nothing
    """
    t_max = 2000.0
    dt = 0.1
    nw = rn.Network(N=800, g=1.5, pc=1.0)

    # def ext_inp(t):
    #     return np.zeros(nw.N)

    # figure 8
    # def behavior(t):
    #     w = 2 * np.pi / 500.0 # chaotic dynamics
    #     phi = np.pi / 100.0
    #     target_neuron1 = 0.5*np.sin(w * t + phi)
    #     target_neuron2 = 0.5*np.sin(5 * w * t + phi)
    #     # target_neuron2 = 0.5*np.cos(w * t + phi)
    #     return target_neuron1, target_neuron2

    # output for singing mouse toy model 1: ramp
    def behavior(t):
        amp_start = 0.5
        amp_stop = 0.5
        t_end = 1000.0
        w = 2 * np.pi / 500.0
        # t_plateau = 0.0
        # if t < t_plateau:
        #     return amp_start
        # return amp_start + (amp_stop - amp_start) * (t - t_plateau) / (t_end - t_plateau)
        # return amp_start + (amp_stop - amp_start) * t / t_end
        # return amp_stop + amp_start * np.exp(-1.0 * t / t_end) * np.sin(w * t)
        return amp_stop + amp_start * np.sin(w * t)

    # output for singing mouse toy model 2: step
    # TODO: for training, need to expose to periodic steps, otherwise seems to learn last constant amplitude
    # def behavior(t):
    #     t_step = 1000.0
    #     amp_start = 0.8
    #     amp_stop = 0.2
    #     return amp_start if t <= t_step else amp_stop

    # # two time scales - fast oscillations and slowly varying envelope - needs ~ 2000 neurons
    # def behavior(t):
    #     w1 = 2 * np.pi / 200.0 # fast
    #     w2 = 2 * np.pi / 2000.0 # slow
    #     offset = 1000.0
    #     amp = 0.6
    #     base = 0.2
    #     # return base + amp * np.sin(w * t) * np.exp(-(t - offset)**2 / (2 * s**2))
    #     return base + amp * np.sin(w1 * t) * np.sin(w2 * t)

    # # original FORCE paper output: sum of 4 sine waves
    # def behavior(t):
    #     w = 2 * np.pi / 120.0
    #     amp = 0.5
    #     f = amp * (np.sin(w * t) + 1 / 2.0 * np.sin(2.0 * w * t) +
    #         1 / 6.0 * np.sin(3.0 * w * t) + 1 / 3.0 * np.sin(4.0 * w * t))
    #     return f

    # two outputs
    # force_result = nw.simulate_learn_network_two_outputs(behavior, T=t_max)
    # Wout1, Wout2, Wrec_new = force_result
    #
    # nw_test = rn.Network(N=800, g=1.5, pc=1.0)
    # nw_test.Wrec = Wrec_new
    # t, rates = nw_test.simulate_network(T=t_max, dt=dt)
    #
    # neuron_out1 = np.dot(Wout1, rates)
    # neuron_out2 = np.dot(Wout2, rates)
    # out_behavior = neuron_out1 * neuron_out2 # make 1D behavior with 2 timescales
    # target_neuron1, target_neuron2 = behavior(t)
    # target_behavior = target_neuron1 * target_neuron2
    # fig2 = plt.figure(2)
    # ax2 = fig2.add_subplot(1, 1, 1)
    # # ax2.plot(neuron_out1, neuron_out2, 'r-', linewidth=1, label='learned')
    # # ax2.plot(target_neuron1, target_neuron2, 'k--', linewidth=0.5, label='target')
    # ax2.plot(t, out_behavior, 'r-', linewidth=1, label='learned')
    # ax2.plot(t, target_behavior, 'k--', linewidth=0.5, label='target')
    # ax2.legend()
    # ax2.set_xlabel('Output 1 activity')
    # ax2.set_ylabel('Output 2 activity')
    # ax2.set_title('Output')
    #
    # plt.show()
    #
    # if save:
    #     out_dir = '/Users/robert/project_src/cooling/single_cpg_manipulation/weights'
    #     out_suffix1 = 'outunit1_weights_force_w500_5w.npy'
    #     out_suffix2 = 'outunit2_weights_force_w500_5w.npy'
    #     out_suffix3 = 'Wrec_weights_force_w500_5w.npy'
    #     np.save(os.path.join(out_dir, out_suffix1), Wout1)
    #     np.save(os.path.join(out_dir, out_suffix2), Wout2)
    #     np.save(os.path.join(out_dir, out_suffix3), Wrec_new)

    # one output, two time scales
    force_result = nw.simulate_learn_network(behavior, T=t_max)
    Wout, Wrec_new = force_result

    nw_test = rn.Network(N=800, g=1.5, pc=1.0)
    nw_test.Wrec = Wrec_new
    t, rates = nw_test.simulate_network(T=t_max, dt=dt)

    neuron_out = np.dot(Wout, rates)
    # def behavior_step_plot(t):
    #     t_step = 1000.0
    #     amp_start = 0.8
    #     amp_stop = 0.2
    #     amp_array = np.zeros(t.shape)
    #     amp_array[np.where(t <= t_step)] = 0.8
    #     amp_array[np.where(t > t_step)] = 0.2
    #     return amp_array
    # def behavior_step_plot(t):
    #     t_step = 500.0
    #     amp_start = 0.8
    #     amp_stop = 0.2
    #     amp_array = np.zeros(t.shape)
    #     amp_array[np.where(t <= t_step)] = 0.8
    #     amp_array[np.where(t > t_step)] = 0.2
    #     return amp_array
    # target_neuron = behavior_step_plot(t)
    target_neuron = behavior(t)
    fig2 = plt.figure(2)
    ax2 = fig2.add_subplot(1, 1, 1)
    ax2.plot(t, neuron_out, 'r-', linewidth=1, label='learned')
    ax2.plot(t, target_neuron, 'k--', linewidth=0.5, label='target')
    ax2.legend()
    ax2.set_xlabel('Time (ms)')
    ax2.set_ylabel('Output activity (a.u.)')

    plt.show()

    if save:
        out_dir = '/Users/robert/project_src/cooling/single_cpg_manipulation/weights'
        out_suffix1 = 'outunit_weights_singing_mouse_ramp.npy'
        out_suffix2 = 'Wrec_weights_singing_mouse_ramp.npy'
        np.save(os.path.join(out_dir, out_suffix1), Wout)
        np.save(os.path.join(out_dir, out_suffix2), Wrec_new)
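
The heavy lifting in this example happens inside nw.simulate_learn_network, which is assumed to implement FORCE learning (Sussillo and Abbott, 2009). For orientation, the core recursive-least-squares update applied to the readout at each training step looks roughly like the sketch below (not the actual rn implementation); P is typically initialized to the identity matrix divided by a regularization constant.

def force_step(r, P, w, target):
    """One FORCE / RLS update. r: firing rates (N,), P: running estimate of
    the inverse rate-correlation matrix (N, N), w: readout weights (N,),
    target: desired output at this time step."""
    z = np.dot(w, r)                 # current readout
    Pr = np.dot(P, r)
    k = Pr / (1.0 + np.dot(r, Pr))   # RLS gain vector
    P -= np.outer(k, Pr)             # update inverse correlation estimate
    w -= (z - target) * k            # shrink the instantaneous error
    return z, P, w
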
Example #7
def run_output_fit(save=False):
    """
    fit weights onto 2 output units to generate target output trajectory
    :return: nothing
    """

    t_max = 2000.0
    dt = 1.0
    # nw = rn.Network(N=800, g=1.5, pc=0.1)
    # nw = Network(N=500, g=1.2, pc=0.5)
    nw = rn.Network(N=50, g=0.5 / np.sqrt(0.2), pc=1.0)

    # def ext_inp(t):
    #     return 1.0 * np.sin(2 * np.pi / 220.0 * t + 0.1) * np.ones(nw.N)
    # Laje and Buonomano: 50 ms step
    # def ext_inp(t):
    #     if 0.0 <= t <= 50.0:
    #         return 5.0 * np.ones(nw.N)
    #     else:
    #         return np.zeros(nw.N)
    def ext_inp(t):
        return np.zeros(nw.N)

    t, rates = nw.simulate_network(T=t_max, dt=dt, external_input=ext_inp)

    fig1 = plt.figure(1)
    ax1 = fig1.add_subplot(2, 1, 1)
    # for i in range(nw.N):
    for i in range(10):
        ax1.plot(t, rates[i, :], linewidth=0.5)
    ax1.set_xlabel('Time (ms)')
    ax1.set_title('Network activity')

    w = 2 * np.pi / 215.0  # CPG dynamics
    # w = 2 * np.pi / 2000.0 # chaotic dynamics
    phi = np.pi / 100.0
    target_neuron1 = 0.5 * np.sin(w * t + phi)
    # target_neuron2 = 0.5*np.sin(2 * w * t + phi)
    target_neuron2 = 0.5 * np.cos(w * t + phi)

    # target(t) =  y(t) * Wout
    # target: 1 x M; y: N x M; Wout: 1 x N; N << M
    tmp_solution = lstsq(rates.transpose(), target_neuron1, cond=1.0e-4)
    Wout1 = tmp_solution[0]
    tmp_solution = lstsq(rates.transpose(), target_neuron2, cond=1.0e-4)
    Wout2 = tmp_solution[0]

    neuron_out1 = np.dot(Wout1, rates)
    neuron_out2 = np.dot(Wout2, rates)
    # fig2 = plt.figure(2)
    ax2 = fig1.add_subplot(2, 1, 2)
    ax2.plot(neuron_out1, neuron_out2, 'r-', linewidth=1, label='fit')
    ax2.plot(target_neuron1,
             target_neuron2,
             'k--',
             linewidth=0.5,
             label='target')
    ax2.legend()
    ax2.set_xlabel('Output 1 activity')
    ax2.set_ylabel('Output 2 activity')
    ax2.set_title('Output')

    plt.show()

    if save:
        out_dir = '/Users/robert/project_src/cooling/single_cpg_manipulation'
        out_suffix1 = 'outunit1_weights_circle.npy'
        out_suffix2 = 'outunit2_weights_circle.npy'
        np.save(os.path.join(out_dir, out_suffix1), Wout1)
        np.save(os.path.join(out_dir, out_suffix2), Wout2)
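
The cooling and manipulation examples also rely on rn.compute_neural_trajectory and rn.project_neural_trajectory. Consistent with how they are called above (leading PCs plus a low-dimensional trajectory for the reference run, then projection of a new run into the reference PC space using the reference mean), a PCA-based sketch might be:

def compute_neural_trajectory(rates, n_pcs=3):
    """Hypothetical PCA trajectory: SVD of the mean-subtracted rates (N x T),
    returning the leading PCs (n_pcs x N) and the projected trajectory
    (n_pcs x T)."""
    centered = rates - rates.mean(axis=1, keepdims=True)
    u, s, _ = np.linalg.svd(centered, full_matrices=False)
    pcs = u[:, :n_pcs].T
    return pcs, np.dot(pcs, centered)


def project_neural_trajectory(rates, ref_mean, pcs):
    """Project a new run into the reference PC space using the reference mean."""
    return np.dot(pcs, rates - ref_mean[:, np.newaxis])
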
Example #8
def run_output_fit_force_parallel_networks(save=False):
    """
    learn weights from 2 recurrent networks onto 2 output units
    (i.e., independent: 1 network controls 1 output) to generate
    target output trajectory using FORCE algorithm
    :return: nothing
    """
    t_max = 4000.0
    dt = 0.1
    nw1 = rn.Network(N=800, g=1.5, pc=1.0)
    nw2 = rn.Network(
        N=800, g=1.5, pc=1.0,
        seed=5432)  # let's start in different initial conditions...

    # def ext_inp(t):
    #     return np.zeros(nw.N)

    w = 2 * np.pi / 200.0  # chaotic dynamics
    phi = np.pi / 100.0

    def behavior1(t):
        target_neuron1 = 0.3 * np.sin(w * t + phi) + 0.5
        return target_neuron1

    def behavior2(t):
        target_neuron2 = 0.3 * np.sin(2 * w * t + phi) + 0.5
        return target_neuron2

    force_result1 = nw1.simulate_learn_network(behavior1, T=t_max)
    Wout1, Wrec1_new = force_result1
    force_result2 = nw2.simulate_learn_network(behavior2, T=t_max)
    Wout2, Wrec2_new = force_result2

    nw1_test = rn.Network(N=800, g=1.5, pc=1.0)
    nw1_test.Wrec = Wrec1_new
    t1, rates1 = nw1_test.simulate_network(T=t_max, dt=dt)
    nw2_test = rn.Network(N=800, g=1.5, pc=1.0, seed=5432)
    nw2_test.Wrec = Wrec2_new
    t2, rates2 = nw2_test.simulate_network(T=t_max, dt=dt)

    neuron_out1 = np.dot(Wout1, rates1)
    neuron_out2 = np.dot(Wout2, rates2)
    target_neuron1, target_neuron2 = behavior1(t1), behavior2(t2)
    fig2 = plt.figure(2)
    ax2 = fig2.add_subplot(1, 1, 1)
    ax2.plot(neuron_out1, neuron_out2, 'r-', linewidth=1, label='learned')
    ax2.plot(target_neuron1,
             target_neuron2,
             'k--',
             linewidth=0.5,
             label='target')
    ax2.legend()
    ax2.set_xlabel('Output 1 activity')
    ax2.set_ylabel('Output 2 activity')
    ax2.set_title('Output')

    plt.show()

    if save:
        out_dir = '/Users/robert/project_src/cooling/single_cpg_manipulation'
        out_suffix1 = 'outunit1_parallel_weights_force.npy'
        out_suffix2 = 'outunit2_parallel_weights_force.npy'
        out_suffix3 = 'Wrec1_parallel_weights_force.npy'
        out_suffix4 = 'Wrec2_parallel_weights_force.npy'
        np.save(os.path.join(out_dir, out_suffix1), Wout1)
        np.save(os.path.join(out_dir, out_suffix2), Wout2)
        np.save(os.path.join(out_dir, out_suffix3), Wrec1_new)
        np.save(os.path.join(out_dir, out_suffix4), Wrec2_new)
Example #9
def run_output_fit_force_hierarchy(save=False):
    """
    learn weights onto 2 output units to generate target output trajectory
    using FORCE algorithm.
    this time, we have external drive (sinusoid? ramp?) that can be controlled (i.e., cooled)
    independently and should only affect one of the two timescales.
    :return: nothing
    """
    # try the following: train network on two timescales of external input
    # and see if cooling can interpolate between these
    t_max = 4000.0
    dt = 0.1
    nw = rn.Network(N=800, g=1.5, pc=1.0)

    w_ext = 2 * np.pi / 200.0  # control slow timescale like this maybe?
    amp_ext = 0.05

    def ext_inp(t):
        # w = 2 * np.pi / 200.0 # control slow timescale like this maybe?
        if t < 0.5 * t_max:
            return amp_ext * np.ones(nw.N) * np.sin(w_ext * t)
        else:
            return amp_ext * np.ones(nw.N) * np.sin(0.5 * w_ext * t)

    # figure 8
    def behavior(t):
        w = 2 * np.pi / 200.0  # chaotic dynamics
        phi = np.pi / 100.0
        if t < 0.5 * t_max:
            target_neuron1 = 0.5 * np.sin(w * t + phi)
        else:
            target_neuron1 = 0.5 * np.sin(0.5 * w * t + phi)
        target_neuron2 = 0.5 * np.sin(2 * w * t + phi)
        # target_neuron2 = 0.5*np.cos(w * t + phi)
        return target_neuron1, target_neuron2

    # two outputs
    force_result = nw.simulate_learn_network_two_outputs(
        behavior, T=t_max, external_input=ext_inp)
    Wout1, Wout2, Wrec_new = force_result

    nw_test = rn.Network(N=800, g=1.5, pc=1.0)
    nw_test.Wrec = Wrec_new
    t, rates = nw_test.simulate_network(T=t_max, dt=dt, external_input=ext_inp)

    neuron_out1 = np.dot(Wout1, rates)
    neuron_out2 = np.dot(Wout2, rates)
    out_behavior = neuron_out1 * neuron_out2  # make 1D behavior with 2 timescales

    def plot_behavior(t):
        w = 2 * np.pi / 200.0  # chaotic dynamics
        phi = np.pi / 100.0
        tmp1 = np.zeros(len(t))
        t1 = t < 0.5 * t_max
        t2 = t >= 0.5 * t_max
        tmp1[t1] = 0.5 * np.sin(w * t[t1] + phi)
        tmp1[t2] = 0.5 * np.sin(0.5 * w * t[t2] + phi)
        tmp2 = 0.5 * np.sin(2 * w * t + phi)
        return tmp1 * tmp2

    target_behavior = plot_behavior(t)
    fig2 = plt.figure(2)
    ax1 = fig2.add_subplot(2, 1, 1)
    ax1.set_xlabel('Time')
    ax1.set_ylabel('Input activity')
    ax1.set_title('Input')

    def plot_input(t):
        out = np.zeros(len(t))
        t1 = t < 0.5 * t_max
        t2 = t >= 0.5 * t_max
        out[t1] = amp_ext * np.sin(w_ext * t[t1])
        out[t2] = amp_ext * np.sin(0.5 * w_ext * t[t2])
        return out

    ax1.plot(t, plot_input(t), 'k-', linewidth=0.5)
    ax2 = fig2.add_subplot(2, 1, 2, sharex=ax1)
    # ax2.plot(neuron_out1, neuron_out2, 'r-', linewidth=1, label='learned')
    # ax2.plot(target_neuron1, target_neuron2, 'k--', linewidth=0.5, label='target')
    ax2.plot(t, out_behavior, 'r-', linewidth=1, label='learned')
    ax2.plot(t, target_behavior, 'k--', linewidth=0.5, label='target')
    ax2.legend()
    ax2.set_xlabel('Time')
    ax2.set_ylabel('Output activity')
    ax2.set_title('Output')

    plt.show()

    if save:
        out_dir = '/Users/robert/project_src/cooling/single_cpg_manipulation/weights'
        out_suffix1 = 'outunit1_weights_force_w200_2w_input_w200.npy'
        out_suffix2 = 'outunit2_weights_force_w200_2w_input_w200.npy'
        out_suffix3 = 'Wrec_weights_force_w200_2w_input_w200.npy'
        np.save(os.path.join(out_dir, out_suffix1), Wout1)
        np.save(os.path.join(out_dir, out_suffix2), Wout2)
        np.save(os.path.join(out_dir, out_suffix3), Wrec_new)
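
The comment at the top of this example suggests testing whether cooling can interpolate between the two trained input timescales. After training, that test amounts to driving nw_test with an intermediate input frequency; a sketch, where w_probe is a hypothetical value between the two trained frequencies w_ext and 0.5 * w_ext:

# Probe the trained network with an input frequency between the two it was
# trained on (sketch).
w_probe = 0.75 * w_ext

def ext_inp_probe(t):
    return amp_ext * np.ones(nw_test.N) * np.sin(w_probe * t)

t_probe, rates_probe = nw_test.simulate_network(T=t_max, dt=dt,
                                                external_input=ext_inp_probe)
probe_behavior = np.dot(Wout1, rates_probe) * np.dot(Wout2, rates_probe)
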