def amp_sensitivity(ode_func, params, j, percent,
                    y_init, t0, dt, tf, mid=True):
    # Perturb parameter j by the given fractional amount.
    params_star = copy.copy(params)
    params_star[j] = (1 + percent) * params[j]
    # Simulate with the nominal and the perturbed parameter sets.
    t, sol = ode15s(ode_func, y_init, t0, dt, tf, params)
    t_star, sol_star = ode15s(ode_func, y_init, t0, dt, tf, params_star)
    if mid:
        # Drop the first half of the trajectory to skip transients.
        half = int(len(t) / 2)
        amp = get_amps(sol[half:])
        amp_star = get_amps(sol_star[half:])
    else:
        amp = get_amps(sol)
        amp_star = get_amps(sol_star)
    # Forward-difference estimate of the amplitude sensitivity.
    return (np.sum(amp_star) - np.sum(amp)) / percent
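A minimal usage sketch, assuming `params` already holds a valid parameter vector for the `goldbeter_fly` model used in the examples below (the index and step size here are placeholders, not values from the source):

yinit = [1, 1, 1, 1, 1]      # M, P0, P1, P2, PN, as in the cost function below
t0, dt, tf = 0, .1, 800

# Sensitivity of the summed amplitudes to a 5% increase in parameter 0.
s0 = amp_sensitivity(goldbeter_fly, params, j=0, percent=0.05,
                     y_init=yinit, t0=t0, dt=dt, tf=tf)
print(s0)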
Example #2
def goldbeter_fly_cost_function(params):
    # initial conditions
    M = 1
    P0 = 1
    P1 = 1
    P2 = 1
    PN = 1

    yinit = [M, P0, P1, P2, PN]

    t0 = 0
    tf = 800
    dt = .1
    RelTol = 1e-8
    with warnings.catch_warnings():
        warnings.filterwarnings('error')
        try:
            t, sol = ode15s(goldbeter_fly,
                            yinit,
                            t0,
                            dt,
                            tf,
                            params,
                            rtol=RelTol)
        except (ValueError, UserWarning) as e:
            cost = math.inf
            return cost
    mid = int(len(t) / 2)  # analyze only the second half, past transients

    desired_per = 23.6   # target period (hours)
    desired_amp = .1     # target minimum amplitude
    with warnings.catch_warnings():
        warnings.filterwarnings('error')
        try:
            per = get_period(t[mid:], sol[mid:, 1])
            amps = get_amps(sol[mid:])
        except RuntimeWarning as e:
            # something went wrong, most likely no oscillation was created
            cost = math.inf
            return cost

    # Soft amplitude threshold: penalty 1 at amplitude 0, 0.001 at 0.1.
    rate = math.log(.001) / .1
    amperrvals = np.exp(np.multiply(amps, rate))
    amperrval = np.sum(amperrvals)

    # Squared relative deviation from the target period.
    perrval = math.pow(((per - desired_per) / desired_per), 2)

    return (perrval + amperrval)
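Because the function maps a parameter vector to a single scalar cost, and returns `math.inf` for failed or non-oscillating runs, it can be handed directly to a black-box optimizer. A minimal sketch using SciPy's differential evolution; the parameter count and bounds are placeholders, not values from the model:

from scipy.optimize import differential_evolution

n_params = 18                       # hypothetical size of the parameter vector
bounds = [(0.01, 10.0)] * n_params  # hypothetical search box per parameter

result = differential_evolution(goldbeter_fly_cost_function, bounds,
                                maxiter=50, seed=0)
print(result.x, result.fun)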
Example #3
def gonzeGoodwinFullCircadianError2(params):
    M0 = 1
    P0 = 1
    I0 = 1

    yinit = [M0, P0, I0]

    t0 = 0
    tf = 800
    dt = .1

    # Simulate the model
    RelTol = 1e-8
    with warnings.catch_warnings():
        warnings.filterwarnings('error')
        try:
            t, sol = ode15s(gonze_goodwin,
                            yinit,
                            t0,
                            dt,
                            tf,
                            params,
                            rtol=RelTol)
        except (ValueError, UserWarning) as e:
            cost = math.inf
            return cost
    mid = int(len(t) / 2)  # analyze only the second half, past transients

    with warnings.catch_warnings():
        warnings.filterwarnings('error')
        try:
            per = get_period(t[mid:], sol[mid:, 1])
            amps = get_amps(sol[mid:])
        except RuntimeWarning as e:
            # something went wrong, most likely no oscillation was created
            cost = math.inf
            return cost

    # Squared relative deviation from a 24 h circadian period.
    perrval = math.pow((per - 24) / 24, 2)

    # an amplitude larger than 0.1 is going to have low cost
    rate = math.log(0.001) / 0.1
    amp_errvals = np.exp(np.multiply(rate, amps))

    errval = perrval + np.sum(amp_errvals)

    return errval
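The amplitude term acts as a soft threshold: with rate = log(0.001)/0.1, each amplitude a contributes exp(rate * a), which is 1 for a flat trajectory and decays to 0.001 at the 0.1 target, so only sub-threshold amplitudes add noticeable cost. A quick check:

import math

rate = math.log(0.001) / 0.1
print(math.exp(rate * 0.0))   # 1.0   -> full penalty, no oscillation
print(math.exp(rate * 0.1))   # 0.001 -> near-zero penalty at the target
print(math.exp(rate * 0.2))   # 1e-06 -> negligible beyond the target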
Example #4
def new_learn_audio(host, debug=False):
    context = zmq.Context()

    mic = context.socket(zmq.SUB)
    mic.connect('tcp://{}:{}'.format(host, IO.MIC))
    mic.setsockopt(zmq.SUBSCRIBE, b'')

    dreamQ = context.socket(zmq.PUSH)
    dreamQ.connect('tcp://{}:{}'.format(host, IO.DREAM))


    stateQ, eventQ, brainQ = _three_amigos(context, host)

    sender = context.socket(zmq.PUSH)
    sender.connect('tcp://{}:{}'.format(host, IO.EXTERNAL))

    counterQ = context.socket(zmq.REQ)
    counterQ.connect('tcp://{}:{}'.format(host, IO.COUNTER))
    
    poller = zmq.Poller()
    poller.register(mic, zmq.POLLIN)
    poller.register(stateQ, zmq.POLLIN)
    poller.register(eventQ, zmq.POLLIN)

    audio = deque()
    NAPs = []
    wavs = []
    wav_audio_ids = {}
    NAP_hashes = {}

    audio_classifier = []
    audio_recognizer = []
    global_audio_recognizer = []
    mixture_audio_recognizer = []
    maxlen = []

    deleted_ids = []
    
    state = stateQ.recv_json()
    
    black_list = open('black_list.txt', 'a')

    audio_memory = AudioMemory()
    
    if debug:
        import matplotlib.pyplot as plt
        plt.ion()

    while True:
        events = dict(poller.poll())
        
        if stateQ in events:
            state = stateQ.recv_json()

        if mic in events:
            new_audio = utils.recv_array(mic)
            if state['record']:
                audio.append(new_audio)

        if eventQ in events:
            pushbutton = eventQ.recv_json()
            if 'learn' in pushbutton:
                try:
                    t0 = time.time()
                    filename = pushbutton['filename']
                    audio_segments = utils.get_segments(filename)

                    print('Learning {} duration {} seconds with {} segments'.format(filename, audio_segments[-1], len(audio_segments)-1))
                    new_sentence = utils.csv_to_array(filename + 'cochlear')
                    norm_segments = np.rint(new_sentence.shape[0]*audio_segments/audio_segments[-1]).astype('int')

                    audio_ids = []
                    new_audio_hash = []
                    amps = utils.get_amps(filename)
                    most_significant_value = -np.inf
                    most_significant_audio_id = []

                    original_NAP_length = len(NAPs)
                    
                    for segment, new_sound in enumerate([ utils.trim_right(new_sentence[norm_segments[i]:norm_segments[i+1]]) for i in range(len(norm_segments)-1) ]):
                        # We filter out short, abrupt sounds with lots of noise.
                        if np.mean(new_sound) < 2 or new_sound.shape[0] == 0:
                            black_list.write('{} {}\n'.format(filename, segment))
                            print('BLACKLISTED segment {} in file {}'.format(segment, filename))
                            continue

                        if debug:
                            utils.plot_NAP_and_energy(new_sound, plt)

                        audio_id = audio_memory.learn(new_sound, filename, [ audio_segments[segment], audio_segments[segment+1] ])

                        # START LEGACY
                        try:
                            wavs[audio_id].append(filename)
                        except IndexError:
                            # First occurrence of this audio_id: start a new list.
                            wavs.append([filename])
                        wav_audio_ids[(filename, audio_id)] = [ audio_segments[segment], audio_segments[segment+1] ]
                        # END LEGACY
                        
                        audio_ids.append(audio_id)
                        if amps[segment] > most_significant_value:
                            most_significant_audio_id = audio_id
                            most_significant_value = amps[segment]

                    black_list.flush()
                    print('AUDIO IDs after blacklisting {}'.format(audio_ids))
                    if len(audio_ids):
                        # while len(NAPs) - len(deleted_ids) > AUDIO_MEMORY_SIZE:
                        #     utils.delete_loner(counterQ, NAPs, 'audio_ids_counter', int(AUDIO_MEMORY_SIZE*PROTECT_PERCENTAGE), deleted_ids)

                        # maxlen = max([ m.shape[0] for memory in NAPs for m in memory if len(m) ])
                        # memories = [ np.ndarray.flatten(utils.zero_pad(m, maxlen)) for memory in NAPs for m in memory if len(m) ]

                        # targets = [ i for i,f in enumerate(NAPs) for k in f if len(k) ]
                        # audio_classifier = train_rPCA_SVM(memories, targets)

                        # all_hammings = [ utils.hamming_distance(new_audio_hash[i], new_audio_hash[j])
                        #                                         for i in range(len(new_audio_hash)) for j in range(len(new_audio_hash)) if i > j ]
                    
                        # print 'RHYME VALUE', np.mean(sorted(all_hammings)[int(len(all_hammings)/2):])
                        # rhyme = np.mean(sorted(all_hammings)[int(len(all_hammings)/2):]) < RHYME_HAMMERTIME

                        # sender.send_json('rhyme {}'.format(rhyme))

                        brainQ.send_pyobj(['audio_learn', filename, audio_ids, audio_memory, most_significant_audio_id, wavs, wav_audio_ids])
                        print('Audio learned from {} in {} seconds'.format(filename, time.time() - t0))
                    else:
                        print('SKIPPING fully blacklisted file {}'.format(filename))
                except Exception:
                    utils.print_exception('Audio learning aborted.')

                audio.clear()

            if 'dream' in pushbutton:
                new_dream(audio_memory)
                     
            if 'save' in pushbutton:
                utils.save('{}.{}'.format(pushbutton['save'], mp.current_process().name), [ deleted_ids, NAPs, wavs, wav_audio_ids, NAP_hashes, audio_classifier, maxlen, audio_memory ])
                
            if 'load' in pushbutton:
                deleted_ids, NAPs, wavs, wav_audio_ids, NAP_hashes, audio_classifier, maxlen, audio_memory = utils.load('{}.{}'.format(pushbutton['load'], mp.current_process().name))
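For context, the loop is driven by JSON messages keyed on a command ('learn', 'dream', 'save', 'load'), with 'learn' carrying the filename to analyze. A driver could trigger learning roughly as below; the PUB transport and port are assumptions, since the `_three_amigos` helper that wires up `eventQ` is not shown in this snippet:

import time
import zmq

context = zmq.Context()
events = context.socket(zmq.PUB)
events.bind('tcp://*:5561')   # hypothetical port; IO.EVENT is not shown above

time.sleep(1)                 # let SUB sockets connect (PUB/SUB slow joiner)
events.send_json({'learn': True, 'filename': 'recording_001.wav'})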
Example #5
        PN = .4

        yinit = [M, P0, P1, P2, PN]

        t0 = 0
        tf = 800
        dt = .1

        t, sol = ode15s(goldbeter_fly, yinit, t0, dt, tf, params)

        sols.append(sol)

        j = 2

        # Trim the last j points before analysis; get_period expects
        # (t, signal), matching its use in the cost functions above.
        print(get_period(t[:-j], np.sum(sol[:-j, 1:4], axis=1)))
        print(get_amps(sol[:-j]))

        # plt.plot(t[:-j], sol[:-j, 0], 'b', label='M')
        # plt.plot(t[:-j], sol[:-j, 1], 'g', label='P0')
        # plt.plot(t[:-j], sol[:-j, 2], 'm', label='P1')
        # plt.plot(t[:-j], sol[:-j, 3], 'r', label='P2')
        # plt.plot(t[:-j], sol[:-j, 4], 'k', label='PN')
        # plt.plot(t[:-j], np.sum(sol[:-j, 1:4], axis=1), 'c', label='PT')
        # plt.legend(loc='best')
        # plt.xlabel('time / h')
        # plt.ylabel('PER forms or M')
        # plt.ylim(ymin=0, ymax=5.5)
        # plt.title('Oscillations in PER over Time')
        # plt.grid()
        # plt.show()