Example #1
def main(argv=None):
    if argv is None:
        argv = sys.argv

    (t0, tf, dt, paramFn, graphFn, outFn, abs_error, rel_error, save_full,
     save_spikes, quiet, spike_thresh, refractory) = parse_args(argv)
    # compute the number of steps required
    Nstep = int(np.ceil(tf / dt))
    if not quiet:
        print "Loading parameters, graph, and setting up IC's"
    my_params = prebotc.params(paramFn)
    num_vertices, num_edges, graph_params = prebotc.graph(graphFn)
    y, N = prebotc.ics(num_vertices, num_edges)
    #y, N = prebotc.ics(num_vertices, num_edges, random=False)
    # f is the RHS of the ODE with parameters evaluated
    f = lambda t, y: prebotc.rhs(t, y, graph_params, my_params)
    # data structure to output, timeseries or sparse raster
    if save_full:
        # all state variables
        save_state = np.zeros((N, Nstep + 1))
    elif save_spikes:
        # just spike times, as sparse matrix
        # not boolean because type conversion bug in matlab output
        save_state = scipy.sparse.dok_matrix((num_vertices, Nstep + 1))
        last_spike = np.ones(num_vertices) * (-np.inf)
    else:
        # timeseries of membrane voltages
        save_state = np.zeros((num_vertices, Nstep + 1))
    r = scipy.integrate.ode(f)
    r.set_initial_value(y, t0)
    # # other integration methods
    # # method 1: BDF
    # r.set_integrator(
    #     'vode',
    #     method='bdf',
    #     with_jacobian = False,
    #     order=3,
    #     rtol= rel_error,
    #     atol= abs_error
    #     )
    # # method 2: Dormand-Prince
    # r.set_integrator(
    #     'dopri5',
    #     rtol = rel_error,
    #     atol = abs_error
    #     )
    # # method 3: VODE
    # r.set_integrator('vode',
    #                  rtol = rel_error,
    #                  atol = abs_error)
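    # NOTE: if none of the set_integrator() calls above are enabled,
    # scipy.integrate.ode falls back to its default 'vode' integrator with the
    # non-stiff Adams method; for a stiff system the BDF variant (method 1) is
    # usually the safer choice.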
    if not quiet:
        print "Running integration loop...."
        t = time.time()
        bar_updates = 100
        widgets = [
            progressbar.Bar('=', '[', ']'), ' ',
            progressbar.Percentage()
        ]
        bar = progressbar.ProgressBar(maxval=bar_updates, widgets=widgets)
        bar.start()
        j = 0
    i = 0
    while r.successful() and r.t < tf:
        r.integrate(r.t + dt)
        y = r.y.copy()
        if save_full:
            save_state[:, i] = y
        elif save_spikes:
            spikers = prebotc.spiking(y, num_vertices, spike_thresh)
            for neur in spikers:
                # only count if the new trigger occurs after reasonable delay
                if dt * (float(i) - last_spike[neur]) > refractory:
                    save_state[neur, i] = 1
                    last_spike[neur] = i
        else:
            save_state[:, i] = prebotc.voltages(y, num_vertices)
        i += 1
        if not quiet:
            if (i % np.floor(Nstep / bar_updates)) == 0:
                bar.update(j)
                j += 1
    if not save_spikes:
        save_state = save_state[:, 0:(i - 1)]
    else:
        save_state.resize((num_vertices, i - 1))
    if not quiet:
        bar.finish()
        elapsed = time.time() - t
        print "Done!\nElapsed: %1.2fs" % elapsed
        # Time saving
        t = time.time()
        print "Saving output...."
    if save_full:
        save_str = 'full'
    elif save_spikes:
        save_str = 'spikes'
    else:
        save_str = 'V'
    # save output
    scipy.io.savemat(outFn,
                     mdict={
                         'Y': save_state,
                         'dt': dt,
                         't0': t0,
                         'tf': tf,
                         'paramFn': os.path.abspath(paramFn),
                         'graphFn': os.path.abspath(graphFn),
                         'absErr': abs_error,
                         'relErr': rel_error,
                         'saveStr': save_str,
                         'finalState': y
                     },
                     oned_as='column')
    if not quiet:
        elapsed = time.time() - t
        print "Done!\nSave time: %1.2fs" % elapsed
Example #2
def main(argv=None):
    should_save = True
    if argv is None:
        argv = sys.argv
    else:
        should_save = False


    (t0, tf, dt, param_fn, graph_fn, outFn, abs_error, rel_error, save_full,
    save_spikes, quiet, spike_thresh, refractory, ic_str) = parse_args(argv)


    # compute the number of steps required
    Nstep = int(np.ceil(tf/dt))
    if not quiet:
        print("Loading parameters, graph, and setting up ICs")
    my_params = prebotc.params(param_fn)
    num_vertices, num_edges, graph_params = prebotc.graph(graph_fn)

    # Check whether to load a set of initial conditions or generate random ones
    if ic_str == 'random':
        if not quiet:
            print('Setting random ICs')
        y = prebotc.ics(num_vertices, num_edges)
    elif ic_str == 'testing_ic':
        if not quiet:
            print('Setting deterministic test ICs')
        y = prebotc.ics(num_vertices, num_edges, random=False)
    else:
        if not quiet:
            print('Loading ICs from ' + ic_str)
        y, graph_fn_loaded = prebotc.load_ics(ic_str)
        if os.path.abspath(graph_fn_loaded) != os.path.abspath(graph_fn):
            warnings.warn('simulation is running on a graph which differs '
                          'from the one the ICs were generated on')
            print(graph_fn_loaded)
            print(graph_fn)
    N = y.size
    # f is the RHS of the ODE with parameters evaluated
    f = lambda t, y: prebotc.rhs(t, y, 
                                 graph_params,
                                 my_params)
    # data structure to output, timeseries or sparse raster
    if save_full:
        # all state variables
        save_state = np.zeros((N, Nstep+1))
    elif save_spikes:
        # just spike times, as sparse matrix
        # not boolean because type conversion bug in matlab output
        save_state = scipy.sparse.dok_matrix((num_vertices, Nstep+1))
        last_spike = np.ones(num_vertices) * (-np.inf)
    else:
        # timeseries of membrane voltages
        save_state = np.zeros((num_vertices, Nstep+1)) 

    # Create and configure the integrator. Use VODE with backward differentiation
    # formulas (BDF), since the ODE system is stiff; VODE's default Adams method is
    # implicit but intended for non-stiff problems.
    r = scipy.integrate.ode(f)
    r.set_initial_value(y, t0)
    
    r.set_integrator(
        'vode',
        method='bdf',
        atol=abs_error,
        rtol=rel_error
    )

    #Visual representation of running progress
    if not quiet:
        print("Running integration loop....")
        t = time.time()
        bar_updates = 100
        widgets=[progressbar.Bar('=', '[', ']'), ' ', progressbar.Percentage()]
        bar = progressbar.ProgressBar(maxval=bar_updates, widgets=widgets)
        bar.start()
        j = 0

    i = 0
    while r.successful() and r.t < tf:
        r.integrate(r.t + dt)
        y = r.y.copy()
        if save_full:
            save_state[:, i] = y.flatten()
        elif save_spikes:
            spikers = prebotc.spiking(y, num_vertices, spike_thresh)
            for neur in spikers:
                # only count if the new trigger occurs after reasonable delay
                if dt*( float(i) - last_spike[neur] ) >  refractory:
                    save_state[neur, i] = 1
                    last_spike[neur] = i
        else:
            save_state[:, i] = prebotc.voltages(y, num_vertices)
        i += 1
        if not quiet:
            if ( i % np.floor(Nstep/bar_updates) ) == 0:
                bar.update(j)
                j += 1

    if not save_spikes:
        save_state = save_state[:, 0:(i-1)]
    else:
        save_state.resize( (num_vertices, i-1) )

    if not quiet:
        bar.finish()
        elapsed = time.time() - t
        print("Done!\nElapsed: %1.2fs" % elapsed)
        # Time saving
        t = time.time()
        print("Saving output....")

    if save_full:
        save_str = 'full'
    elif save_spikes:
        save_str = 'spikes'
    else:
        save_str = 'V'

    mdict = {'Y': save_state,
             'dt': dt,
             't0': t0,
             'tf': tf,
             'paramFn': os.path.abspath(param_fn),
             'graphFn': os.path.abspath(graph_fn),
             'absErr': abs_error,
             'relErr': rel_error,
             'saveStr': save_str,
             'finalState': y,
             'icStr': ic_str}

    # Save output to a .mat file with the variable names given in mdict above.
    # When run for testing (argv passed in directly), skip saving and return the
    # results instead, so the caller does not have to reload the file from disk.
    if should_save:
        savemat(outFn, mdict, oned_as='column', do_compression=True)
    else:
        return mdict

    if not quiet:
        elapsed = time.time() - t
        print("Done!\nSave time: %1.2fs" % elapsed)
Example #3
def main(argv=None):
    if argv is None:
        argv = sys.argv

    (t0, tf, dt, paramFn, graphFn, outFn, abs_error, rel_error, save_full, 
     save_spikes, quiet, spike_thresh, refractory) = parse_args(argv)
    # compute the number of steps required
    Nstep = int(np.ceil(tf/dt))
    if not quiet:
        print "Loading parameters, graph, and setting up IC's"
    my_params = prebotc.params(paramFn)
    num_vertices, num_edges, graph_params = prebotc.graph(graphFn)
    y, N = prebotc.ics(num_vertices, num_edges)
    #y, N = prebotc.ics(num_vertices, num_edges, random=False)
    # f is the RHS of the ODE with parameters evaluated
    f = lambda t, y: prebotc.rhs(t, y, 
                                 graph_params,
                                 my_params)
    # data structure to output, timeseries or sparse raster
    if save_full:
        # all state variables
        save_state = np.zeros( (N, Nstep+1) )
    elif save_spikes:
        # just spike times, as sparse matrix
        # not boolean because type conversion bug in matlab output
        save_state = scipy.sparse.dok_matrix( (num_vertices, Nstep+1) )
        last_spike = np.ones( num_vertices ) * (-np.inf)
    else:
        # timeseries of membrane voltages
        save_state = np.zeros( (num_vertices, Nstep+1) ) 
    r = scipy.integrate.ode(f)
    r.set_initial_value(y, t0)
    # # other integration methods
    # # method 1: BDF
    # r.set_integrator(
    #     'vode', 
    #     method='bdf', 
    #     with_jacobian = False,
    #     order=3,
    #     rtol= rel_error,
    #     atol= abs_error
    #     )
    # # method 2: Dormand-Prince
    # r.set_integrator(
    #     'dopri5', 
    #     rtol = rel_error,
    #     atol = abs_error
    #     )
    # # method 3: VODE
    # r.set_integrator('vode',
    #                  rtol = rel_error,
    #                  atol = abs_error)
    if not quiet:
        print "Running integration loop...."
        t = time.time()
        bar_updates = 100
        widgets=[progressbar.Bar('=', '[', ']'), ' ', progressbar.Percentage()]
        bar = progressbar.ProgressBar(maxval=bar_updates, widgets=widgets)
        bar.start()
        j = 0
    i = 0
    while r.successful() and r.t < tf:
        r.integrate(r.t + dt)
        y = r.y.copy()
        if save_full:
            save_state[:, i] = y
        elif save_spikes:
            spikers = prebotc.spiking(y, num_vertices, spike_thresh)
            for neur in spikers:
                # only count if the new trigger occurs after reasonable delay
                if dt*( float(i) - last_spike[neur] ) >  refractory:
                    save_state[neur, i] = 1
                    last_spike[neur] = i
        else:
            save_state[:, i] = prebotc.voltages(y, num_vertices)
        i += 1
        if not quiet:
            if ( i % np.floor(Nstep/bar_updates) ) == 0:
                bar.update(j)
                j += 1
    if not save_spikes:
        save_state = save_state[:, 0:(i-1)]
    else:
        save_state.resize( (num_vertices, i-1) )
    if not quiet:
        bar.finish()
        elapsed = time.time() - t
        print "Done!\nElapsed: %1.2fs" % elapsed
        # Time saving
        t = time.time()
        print "Saving output...."
    if save_full:
        save_str = 'full'
    elif save_spikes:
        save_str = 'spikes'
    else:
        save_str = 'V'
    # save output
    scipy.io.savemat(outFn, 
                     mdict={'Y': save_state,
                            'dt': dt,
                            't0': t0,
                            'tf': tf,
                            'paramFn': os.path.abspath(paramFn),
                            'graphFn': os.path.abspath(graphFn),
                            'absErr': abs_error,
                            'relErr': rel_error,
                            'saveStr': save_str,
                            'finalState': y
                            },
                     oned_as='column')
    if not quiet:
        elapsed = time.time() - t
        print "Done!\nSave time: %1.2fs" % elapsed