Example #1
0
def train_readout(SORN,
                  steps_train,
                  steps_test,
                  source,
                  display=True,
                  stdp_off=True,
                  storage_manager=None,
                  same_timestep_without_feedback_loop=False):
    """Record activity for ``steps_train + steps_test`` iterations and fit a
    music readout layer on the recorded data.

    Parameters:
        SORN: network object, queried by tag ('prediction_source',
            'text_input_group', 'prediction_rec', 'index_rec').
        steps_train: number of recorded iterations used for training.
        steps_test: number of recorded iterations used for testing.
        source: input source forwarded to train_music (unused by the
            same-timestep branch).
        display: if True, print progress and measure simulation block time.
        stdp_off: if True, deactivate STDP before recording.
            NOTE(review): STDP is NOT reactivated before returning (the
            reactivation call below is commented out) — callers must
            re-enable it themselves if needed. Confirm this is intended.
        storage_manager: unused here; kept for interface symmetry with the
            related train/score functions.
        same_timestep_without_feedback_loop: choose same-step training
            (True) or lag-1 training via train_music (False).

    Returns:
        (readout_layer, X_train, Y_train, X_test, Y_test)
    """

    if display:
        print("\nRecord predictions...")

    if stdp_off:
        SORN.deactivate_mechanisms('STDP')

    # Attach temporary recorders: network output and input pattern index.
    for ng in SORN['prediction_source']:
        SORN.add_behaviours_to_neuron_group(
            {100: Recorder(['n.output'], tag='prediction_rec')}, ng)
    for ng in SORN['text_input_group']:
        SORN.add_behaviours_to_neuron_group(
            {101: Recorder(['n.pattern_index'], tag='index_rec')}, ng)

    SORN.simulate_iterations(int(steps_train + steps_test),
                             100,
                             measure_block_time=display)

    # Fit the readout on the recorded activity (train + test split is done
    # inside the train_* helpers).
    if same_timestep_without_feedback_loop:
        readout_layer,X_train, Y_train, X_test, Y_test = \
        train_same_step_music(SORN['prediction_rec'], 'n.output', SORN['index_rec', 0], 'n.pattern_index', 0, steps_train, steps_test)  # train
    else:
        readout_layer,X_train, Y_train, X_test, Y_test = \
        train_music(SORN['prediction_rec'], 'n.output', SORN['index_rec', 0], 'n.pattern_index', 0, steps_train, steps_test, source, lag=1)  # steps_plastic, steps_plastic + steps_readout

    SORN.clear_recorder()
    SORN.recording_off()

    #if stdp_off:
    #    SORN.activate_mechanisms('STDP')
    if display:
        print('\nTrain readout')

    # Clear and disable the temporary recorders before returning.
    SORN.clear_recorder(['prediction_rec', 'index_rec'])
    SORN.deactivate_mechanisms(['prediction_rec', 'index_rec'])

    return readout_layer, X_train, Y_train, X_test, Y_test
Example #2
0
def train_and_generate_text(SORN,
                            steps_plastic,
                            steps_train,
                            steps_spont,
                            steps_recovery=0,
                            display=True,
                            stdp_off=True,
                            storage_manager=None,
                            same_timestep_without_feedback_loop=False,
                            return_key='total_score'):
    """Run plastic learning, train a text readout, generate spontaneous text
    and return its (non-negative) score.

    Phases:
      1. ``steps_plastic`` iterations with plasticity on.
      2. ``steps_train`` recorded iterations (STDP optionally off) used to
         fit the readout layer.
      3. ``steps_spont`` free-running iterations whose decoded output is
         scored against the grammar activator.

    Parameters:
        SORN: network object, queried by tag ('prediction_source',
            'text_input_group', 'grammar_act', 'prediction_rec',
            'index_rec').
        steps_plastic: plastic warm-up iterations (skipped when <= 0).
        steps_train: iterations used to train the readout.
        steps_spont: length of the spontaneously generated sequence.
        steps_recovery: optional recovery iterations before generation
            (same-timestep branch only).
        display: print progress / measure simulation block time.
        stdp_off: deactivate STDP for the readout phase; reactivated at the
            end when set.
        storage_manager: optional result storage for the score dict.
        same_timestep_without_feedback_loop: selects same-step readout and
            generation (True) vs. lag-1 feedback prediction (False).
        return_key: key of score_dict whose value is returned.

    Returns:
        Score value from score_dict[return_key], clamped to a minimum of 0.
    """
    if steps_plastic > 0:
        SORN.simulate_iterations(
            steps_plastic, 100,
            measure_block_time=display)

    if stdp_off:
        SORN.deactivate_mechanisms('STDP')

    # Attach temporary recorders: network output and input pattern index.
    for ng in SORN['prediction_source']:
        SORN.add_behaviours_to_neuron_group(
            {100: Recorder(['n.output'], tag='prediction_rec')}, ng)
    for ng in SORN['text_input_group']:
        SORN.add_behaviours_to_neuron_group(
            {101: Recorder(['n.pattern_index'], tag='index_rec')}, ng)

    SORN.simulate_iterations(steps_train, 100, measure_block_time=display)

    # Fit the readout on the recorded activity.
    if same_timestep_without_feedback_loop:
        readout_layer = train_same_step(SORN['prediction_rec'], 'n.output',
                                        SORN['index_rec', 0],
                                        'n.pattern_index', 0,
                                        steps_train)
    else:
        readout_layer = train(
            SORN['prediction_rec'],
            'n.output',
            SORN['index_rec', 0],
            'n.pattern_index',
            0,
            steps_train,
            lag=1)  # steps_plastic, steps_plastic + steps_readout

    # Optional diagnostics (branching ratio, SVD of the recorded activity);
    # disabled by default. NOTE(review): this branch requires a non-None
    # storage_manager when enabled.
    additional_info = False

    if additional_info:
        act = SORN['prediction_rec', 0]['n.output', 0, 'np']

        mean_act_1 = np.mean(act, axis=1)

        print(mean_act_1.shape)

        group_mr_A = MR_estimation([mean_act_1], 1, 150)

        storage_manager.save_np('r_k', group_mr_A['r_k'])
        storage_manager.save_param('branching_ratio',
                                   group_mr_A['branching_ratio'])

        singluar_values, components, explained_variance, explained_variance_ratio = get_activity_singular_values_and_components(
            act)

        storage_manager.save_np('singluar_values', singluar_values)
        storage_manager.save_np('components', components)
        storage_manager.save_np('explained_variance', explained_variance)
        storage_manager.save_np('explained_variance_ratio',
                                explained_variance_ratio)

    # Spontaneous generation: in the same-step branch the grammar input is
    # switched off so the network runs freely.
    if same_timestep_without_feedback_loop:
        SORN['grammar_act', 0].active = False
        if steps_recovery > 0:
            SORN.simulate_iterations(steps_recovery,
                                     100,
                                     measure_block_time=display,
                                     disable_recording=True)
        spont_output = get_simu_sequence(SORN,
                                         SORN['prediction_source'],
                                         'n.output',
                                         readout_classifyer=readout_layer,
                                         seq_length=steps_spont,
                                         source=SORN['grammar_act',
                                                     0])  # output generation
    else:
        spont_output = predict_sequence(readout_layer,
                                        SORN['prediction_source'],
                                        'n.output',
                                        steps_spont,
                                        SORN,
                                        SORN['grammar_act', 0],
                                        lag=1)

    SORN['grammar_act', 0].active = True

    if additional_info:
        mean_act = np.mean(SORN['prediction_rec', 0]['n.output', 0, 'np'],
                           axis=1)
        storage_manager.save_np('act_exc', mean_act)

    # Detach the temporary recorders again.
    SORN.remove_behaviours_from_neuron_groups(SORN['prediction_source'],
                                              tags=['prediction_rec'])
    SORN.remove_behaviours_from_neuron_groups(SORN['text_input_group'],
                                              tags=['index_rec'])

    if display:
        print(spont_output)
    SORN.recording_on()

    if stdp_off:
        SORN.activate_mechanisms('STDP')

    score_dict = SORN['grammar_act', 0].get_text_score(spont_output)

    if storage_manager is not None:
        storage_manager.save_param_dict(score_dict)

    # Negative scores are reported as 0.
    return max(score_dict[return_key], 0)


#act = np.random.rand(10000)+1
#transition_error = np.mean(np.abs(act[0:-2]-act[1:-1])/np.mean(act))
#print(transition_error)
Example #3
0
def _write_drum_multitrack(curr_pianoroll, source, storage_manager, filename,
                           set_program=False):
    """Write one drum pianoroll as a multi-track MIDI file.

    One track is created per instrument (alphabet entry) that actually
    plays; pianoroll column ``inst`` is mapped to MIDI pitch/program
    ``source.alphabet[inst]``.

    Parameters:
        curr_pianoroll: 2D array, time steps x alphabet size.
        source: music source providing the ``alphabet`` mapping.
        storage_manager: when not None, its absolute_path is used as output
            directory; otherwise the file lands in the working directory.
        filename: output MIDI file name.
        set_program: if True, also set each track's MIDI program number.
    """
    instruments_non_zero = np.nonzero(curr_pianoroll)
    instruments_non_zero = list(set(
        instruments_non_zero[1]))  # columns for which tracks are needed

    tracks = []
    for inst in instruments_non_zero:
        track = np.zeros((len(curr_pianoroll), 128))
        # Map the alphabet column onto its MIDI pitch row.
        track[:, source.alphabet[inst]] = curr_pianoroll[:, inst]
        track = piano.Track(track)
        if set_program:
            track.program = source.alphabet[inst]
        track.binarize()
        track.beat_resolution = 4
        track.is_drum = True
        tracks.append(track)

    multitrack = piano.Multitrack(tracks=tracks, beat_resolution=4)

    if storage_manager is not None:
        path = storage_manager.absolute_path
        multitrack.write(path + filename)
    else:
        multitrack.write(filename)
        print(
            'warning: no results path defined through storagemanager, MIDI will be saved in code repo'
        )


def get_score_spontaneous_music(SORN,
                                source,
                                readout_layer,
                                steps_spont,
                                split_tracks=False,
                                seen=None,
                                steps_recovery=0,
                                display=True,
                                stdp_off=True,
                                storage_manager=None,
                                same_timestep_without_feedback_loop=False,
                                create_MIDI=False):
    """Generate spontaneous music output with a trained readout and score it.

    The network runs freely (the input source is disabled in the
    same-timestep branch); the readout decodes a symbol/pianoroll sequence
    which is optionally written out as MIDI and scored against the corpus.

    Parameters:
        SORN: network object, queried by tag for groups and recorders.
        source: music activator (provides alphabet, token flags, scoring
            and MIDI meta data such as instrument and beat_resolution).
        readout_layer: trained readout classifier used for prediction.
        steps_spont: number of spontaneous generation steps.
        split_tracks: drum mode only — split the generated pianoroll into
            several MIDI files instead of one long multitrack.
        seen: unused; kept for interface compatibility.
        steps_recovery: optional recovery simulation steps before
            generation (same-timestep branch only).
        display: print progress / measure simulation block time.
        stdp_off: deactivate STDP during generation; reactivated at the end.
        storage_manager: result storage; when None, MIDI files are written
            to the working directory.
        same_timestep_without_feedback_loop: selects the generation routine.
        create_MIDI: if True, write the generated pianoroll as MIDI file(s).

    Returns:
        dict of score values from source.get_music_score.
    """
    if display:
        print('\nGenerate spontaneous output...')

    if stdp_off:
        SORN.deactivate_mechanisms('STDP')

    # Attach temporary recorders: network output and input pattern index.
    for ng in SORN['prediction_source']:
        SORN.add_behaviours_to_neuron_group(
            {100: Recorder(['n.output'], tag='prediction_rec')}, ng)
    for ng in SORN['text_input_group']:
        SORN.add_behaviours_to_neuron_group(
            {101: Recorder(['n.pattern_index'], tag='index_rec')}, ng)

    SORN.clear_recorder()
    SORN.recording_on()

    if same_timestep_without_feedback_loop:
        source.behaviour_enabled = False  # mute external input while generating
        if steps_recovery > 0:
            SORN.simulate_iterations(steps_recovery,
                                     100,
                                     measure_block_time=display,
                                     disable_recording=True)
        spont_output, pianoroll = get_simu_music_sequence(
            SORN,
            SORN['prediction_source'],
            'n.output',
            readout_classifyer=readout_layer,
            seq_length=steps_spont,
            source=source)  # output generation
    else:
        spont_output, pianoroll = predict_music_sequence(
            readout_layer,
            SORN['prediction_source'],
            'n.output',
            steps_spont,
            SORN,
            source,
            lag=1)

    source.behaviour_enabled = True

    if create_MIDI and source.is_drum:  # create a percussion track!
        # pianoroll is a sequence of vectors of length len(alphabet); each
        # letter in the alphabet stands for one instrument.

        if split_tracks == False and source.offtoken == False:
            # One long multitrack over the whole generated output.
            _write_drum_multitrack(pianoroll, source, storage_manager,
                                   'sample.mid')

        else:  # n tracks of input-track length (or variable length if
               # start/stop tokens are active)

            if source.offtoken and not source.ontoken:
                # Last alphabet entry is a stop token.
                stop_tokens = np.nonzero(
                    pianoroll[:, -1])[0]  # time steps when a track ends
                if stop_tokens.size == 0:  # no stop token was ever predicted
                    start_tokens = [len(pianoroll)
                                    ]  # generate one long track
                else:
                    start_tokens = stop_tokens + 1  # so that indexing works

                n_tracks = int(len(start_tokens)) - 1
                # Note: nothing is generated from the steps after the last
                # stop token or before the first one.

            elif source.ontoken and not source.offtoken:
                start_tokens = np.nonzero(
                    pianoroll[:, -1])[0]  # time steps when a track starts
                n_tracks = int(len(start_tokens)) - 1

            elif source.ontoken and source.offtoken:
                # Pair up start (column -1) and stop (column -2) tokens.
                # BUGFIX: the previous version tested `if start and stop`,
                # which treated a token at time step 0 as "no token";
                # None sentinels make index 0 a valid token position.
                n_tracks = 0
                start_tokens = []
                stop_tokens = []
                start = None  # index of the pending start token
                stop = None  # index of the pending stop token
                for i in range(len(pianoroll)):
                    if pianoroll[i, -1]:  # start token
                        # A new start overwrites a previous unmatched start
                        # and discards any stop seen before it.
                        start = i
                        stop = None
                    if pianoroll[i, -2]:  # stop token
                        stop = i

                    if stop is not None and start is None:
                        stop = None  # stop without a preceding start

                    if start is not None and stop is not None:
                        if stop > start:
                            start_tokens.append(start)
                            stop_tokens.append(stop)
                            n_tracks += 1
                        # Either way the pair is consumed; parts between
                        # repeated start or stop tokens are ignored.
                        start = None
                        stop = None

            else:
                # Fixed-length split: length of one track in the corpus.
                len_track = len(source.corpus_blocks[0])
                n_tracks = int(len(pianoroll) / len_track)

            for j in range(n_tracks):
                if source.offtoken and source.ontoken:
                    curr_pianoroll = pianoroll[
                        start_tokens[j]:stop_tokens[j], :int(
                            source.A -
                            2)]  # drop the two token columns
                elif source.offtoken or source.ontoken:
                    curr_pianoroll = pianoroll[start_tokens[j]:start_tokens[
                        j + 1], :int(
                            source.A -
                            1)]  # drop the single token column
                else:
                    curr_pianoroll = pianoroll[j * len_track:(j * len_track) +
                                               len_track, :]
                if np.any(curr_pianoroll):  # skip all-silence segments
                    # BUGFIX: the previous version copied
                    # curr_pianoroll[:, i] (a position in the nonzero-
                    # instrument list) instead of the instrument's own
                    # pianoroll column, unlike the single-track branch.
                    _write_drum_multitrack(curr_pianoroll, source,
                                           storage_manager,
                                           'sample{}.mid'.format(j + 1),
                                           set_program=True)

    elif create_MIDI:  # single-instrument MIDI track (MusicActivator)
        track = piano.Track(pianoroll)
        track.program = source.instrument
        track.binarize()
        track.beat_resolution = source.beat_resolution
        track = piano.Multitrack(tracks=[track],
                                 beat_resolution=source.beat_resolution)
        if storage_manager is not None:
            path = storage_manager.absolute_path
            track.write(path + 'sample.mid')
        else:
            track.write('sample.mid')
            print(
                'warning: no results path defined through storagemanager, MIDI will be saved in code repo'
            )

    if display:
        print(spont_output)
    SORN.recording_on()

    if stdp_off:
        SORN.activate_mechanisms('STDP')

    score_dict = source.get_music_score(spont_output, pianoroll)

    if storage_manager is not None:
        storage_manager.save_param_dict(score_dict)

    # Clear and disable the temporary recorders before returning.
    SORN.clear_recorder(['prediction_rec', 'index_rec'])
    SORN.deactivate_mechanisms(['prediction_rec', 'index_rec'])

    return score_dict
    def start_rec(self, group):
        """Start recording afferent synapse weights of the currently
        selected neuron in ``group`` and create one plot curve per enabled
        synapse connection.

        If a recording for a different neuron or group is already active,
        it is torn down first (UI recorders removed from the synapses, list
        widget and plot cleared) before the new one is set up.
        """

        #if self.current_ng is not None and len(self.current_ng[self.rec_name]) > 0 and self.Network_UI.neuron_select_id != self.neuron_index:
        #    self.Network_UI.network.remove_behaviours_from_object(self.current_ng, tags=[self.rec_name])
        #    self.my_syn_rec = None
        #    print('stop')

        # Tear down the previous recording if the selection changed.
        if self.current_ng is not None and (
                self.neuron_index != self.Network_UI.neuron_select_id
                or self.current_ng != group):

            for s in self.current_ng.afferent_synapses["All"]:
                s.UI_recorder = None
                s.cb = None
                self.Network_UI.network.remove_behaviours_from_object(
                    s, tags=[self.key])

            self.listWidget.clear()
            self.plot.clear()

            self.current_ng = None
            self.neuron_index = -1
            print('removed')

        # Start a fresh recording for the current selection.
        if self.current_ng is None:  #len(self.Network_UI.network[self.key]) == 0:

            self.current_ng = group
            self.neuron_index = self.Network_UI.neuron_select_id

            # self.key = 'np.concatenate([s.W[:,np.where(s.dst.id=='+str(self.neuron_index)+')[0]][s.enabled[:,np.where(s.dst.id=='+str(self.neuron_index)+')[0]]] for s in n.afferent_synapses["All"]])'
            # self.my_syn_rec = Recorder([self.key], tag=self.rec_name)

            #if self.my_syn_rec is not None:
            #    self.Network_UI.network.remove_behaviours_from_neuron_groups([self.current_ng], tags=[self.rec_name])
            #        print('stop')

            print('started')
            self.plot.clear()

            # Expression recorded per synapse: one weight row of the
            # selected neuron, e.g. 's.W[5]'.
            self.key = 's.' + self.weight_attr + '[' + str(
                self.neuron_index) + ']'

            self.checkboxes = {}
            self.listWidget.clear()

            for s in group.afferent_synapses["All"]:
                s.UI_recorder = Recorder([self.key, 's.iteration'],
                                         tag=self.rec_name,
                                         gapwidth=10,
                                         save_as_numpy=True)
                self.Network_UI.network.add_behaviours_to_object(
                    {10001: s.UI_recorder}, s)

                s.UI_curves = []
                # NOTE(review): eval of self.key relies on the loop
                # variable `s` being in scope; used here only to size the
                # curve list below.
                syn_data = eval(self.key)

                # One checkable list entry per synapse group.
                s.cb = QListWidgetItem(str(s.tags[0]))
                s.cb.setCheckState(2)
                self.listWidget.addItem(s.cb)

                # One plot curve per enabled connection; s.enabled may be a
                # plain bool or a per-connection mask.
                for i in range(len(syn_data)):
                    if (type(s.enabled) is bool
                            and s.enabled) or s.enabled[self.neuron_index, i]:
                        color = (s.src.color[0], s.src.color[1],
                                 s.src.color[2], 255)
                        curve = pg.PlotCurveItem(
                            [], pen=color)  # pen=colors[i%len(colors)]
                        curve.index = i
                        self.plot.addItem(curve)
                        s.UI_curves.append(curve)

                #self.curves[k] = []
                #self.keys.append(k)

                #self.checkboxes[k] = QListWidgetItem(str(s.src.tags[0]))
                #self.checkboxes[k].setCheckState(2)
                #self.listWidget.addItem(self.checkboxes[k])

                #for i in range(syn_count):
                '''
Example #5
0
            def train_click(event):
                """Button callback: record 5000 iterations of network
                activity and (re)train the text readout classifier(s).

                The training interval is marked with vertical lines in the
                plot. NOTE(review): the recorder tag 'pediction_rec' looks
                like a typo for 'prediction_rec' but is used consistently
                within this callback, so behavior is unaffected.
                """

                # Mark the start of the training window in the plot.
                item = pg.InfiniteLine(pos=Network_UI.network.iteration,
                                       movable=False,
                                       angle=90)
                self.wnr_plot.addItem(item)

                #Network_UI.network.deactivate_mechanisms('STDP')

                #Network_UI.network.recording_off()

                # Attach temporary recorders: network output and input
                # pattern index.
                Network_UI.network.add_behaviours_to_neuron_groups(
                    {100: Recorder(['n.output'], tag='pediction_rec')},
                    Network_UI.network['prediction_source'])
                Network_UI.network.add_behaviours_to_neuron_groups(
                    {101: Recorder(['n.pattern_index'], tag='index_rec')},
                    Network_UI.network['text_input_group'])

                #for ng in Network_UI.network['prediction_source']:
                #    Network_UI.network.add_behaviours_to_neuron_group({100: Recorder(['n.output'], tag='pediction_rec')}, ng)
                #for ng in Network_UI.network['text_input_group']:
                #    Network_UI.network.add_behaviours_to_neuron_group({101: Recorder(['n.pattern_index'], tag='index_rec')}, ng)

                Network_UI.network['grammar_act', 0].active = True

                # Record activity to train on.
                steps = 5000
                Network_UI.network.simulate_iterations(steps,
                                                       100,
                                                       measure_block_time=True)

                # Lag-1 readout (next-step prediction).
                if self.next_p:
                    self.readout = train(Network_UI.network['pediction_rec'],
                                         'n.output',
                                         Network_UI.network['index_rec', 0],
                                         'n.pattern_index',
                                         0,
                                         steps,
                                         lag=1)

                # Same-timestep readout.
                if self.simu_p:
                    self.readout_simu = train_same_step(
                        Network_UI.network['pediction_rec'], 'n.output',
                        Network_UI.network['index_rec',
                                           0], 'n.pattern_index', 0, steps)

                # Detach the temporary recorders again.
                Network_UI.network.remove_behaviours_from_neuron_groups(
                    Network_UI.network['prediction_source'],
                    tags=['pediction_rec'])
                Network_UI.network.remove_behaviours_from_neuron_groups(
                    Network_UI.network['text_input_group'], tags=['index_rec'])

                #Network_UI.network.clear_recorder(['pediction_rec', 'index_rec'])
                #Network_UI.network.deactivate_mechanisms(['pediction_rec', 'index_rec'])

                #Network_UI.network.recording_on()

                #Network_UI.network.activate_mechanisms('STDP')

                self.input_select_box.setCurrentIndex(1)

                print('training_finished')

                # Mark the end of training and the expected end of the
                # following 1000-step window in the plot.
                item = pg.InfiniteLine(pos=Network_UI.network.iteration,
                                       movable=False,
                                       angle=90)
                self.wnr_plot.addItem(item)

                item = pg.InfiniteLine(pos=Network_UI.network.iteration + 1000,
                                       movable=False,
                                       angle=90)
                self.wnr_plot.addItem(item)