def evalDir(path, i):
    print(path)
    rows = []
    modelPaths = [os.path.join(path, name) for name in os.listdir(path)]
    onlyPickles = filter(lambda x: x.endswith('pickle'), modelPaths)
    for modelPath in onlyPickles:
        with open(modelPath, 'rb') as f:
            model = pickle.load(f)
        model = dist.Distribution(model.v, model.m, type='canonical')

        row = {'Dim': model.m.shape[0]}
        row['Sparsity'] = model.sparsity()
        row['mccc'] = timing.marginalizeCanonicalConditionCanonical(model)
        row['mccm'] = timing.marginalizeCanonicalConditionMean(model)
        row['mmcm'] = timing.marginalizeMeanConditionMean(model)
        row['mmcc'] = timing.marginalizeMeanConditionCanonical(model)

        row['conditionOnlyMean'] = timing.conditionOnlyMean(model)
        row['conditionOnlyCanonical'] = timing.conditionOnlyCanonical(model)

        row['mmccConvert'] = timing.marginalizeMeanConditionCanonicalConvert(
            model)
        row['convertMmcc'] = timing.ConvertMarginalizeMeanConditionCanonical(
            model)

        rows.append(row)
    # DataFrame.append was removed in pandas 2.0; build the frame from the
    # collected rows instead.
    frame = pd.DataFrame(rows)
    frame.to_csv(os.path.join(path, 'inferenceOperationsSparse' + str(i) + '.csv'),
                 index=False)
Example #2
    def __init__(self, width, height,
                 origin=(37.484101,-122.149242),
                 y_dir=(37.484577,-122.148812),
                 x_dir=(37.483828,-122.148796)):
        self.player_cloud = {}
        self.player_angles = {}
        self.player_connections = {}
        self.player_speeds = {}
        self.ghost_cloud = {}
        self.ghost_cloud["Ghost1"] = distribution.Distribution(emission_function=self.ghost_observation)

        # rotate a geo-frame angle clockwise by this many degrees to get the simple-frame angle
        self.geo_to_simp_angle = degrees(math.atan2((y_dir[1]-origin[1]),(y_dir[0]-origin[0])))
        self.simp_to_geo = transform_mtx(width, height, origin, x_dir, y_dir)
        self.geo_to_simp = inverse(self.simp_to_geo)

        self.receiving = False
        self.compass_queue = Queue()
        self.snap_queue = Queue()

        self.thread = Thread(target=self.run_thread)
        self.thread.daemon = True # thread dies with the program
        self.thread.start()
        self.time_since_tick = time.time()
        self.plotthread = Thread(target=self.plot_particles)
        self.plotthread.daemon = True
        self.plotthread.start()
def genModel(sparsity, dim):
    print(str(dim) + " " + str(sparsity))
    #Start with identity matrix
    model = numpy.eye(dim)
    #Calculate how many entries we have to set for given sparsity
    numOfNNZ = ((1 - sparsity) * (dim * dim)) - dim
    #Divide because the matrix is symmetric
    numOfNNZsym = math.ceil(numOfNNZ / 2)
    #Generate index set of elements in the upper triangle
    ind = [(i, j) for i in range(dim) for j in range(i + 1, dim)]
    #Randomly sample elements from the index set of the upper triangle
    r = random.sample(list(range(0, len(ind))), numOfNNZsym)
    #Retrieve the index pairs
    chosenInd = [ind[i] for i in r]
    #For every index pair (x,y) set m[x,y] and m[y,x] to attain symmetry
    for i in chosenInd:
        randomNum = random.random()
        model[i[0], i[1]] = randomNum
        model[i[1], i[0]] = randomNum

    #Calculate information vector
    inf = numpy.linalg.inv(model).dot(numpy.random.rand(dim))
    #Construct compressed sparse column structure
    model = scipy.sparse.csc_matrix(model)
    d = dst.Distribution(inf, model, 'canonical')
    with open(modelsPath + str(dim) + '/s' + str(sparsity) + '.pickle',
              'wb') as f:
        pickle.dump(d, f)
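
# A self-contained sketch of the same recipe genModel() uses above, shown for a
# tiny 5x5 matrix at 60% sparsity. It deliberately avoids the project-specific
# dst.Distribution and modelsPath helpers and only checks the achieved sparsity;
# the function and variable names below are illustrative, not part of the project.
import math
import random

import numpy
import scipy.sparse


def sparsePrecisionSketch(sparsity=0.6, dim=5, seed=0):
    rng = random.Random(seed)
    m = numpy.eye(dim)
    # Off-diagonal non-zeros needed for the requested sparsity, halved (and
    # rounded up) because the matrix is symmetric.
    numOfNNZ = (1 - sparsity) * dim * dim - dim
    numOfNNZsym = math.ceil(numOfNNZ / 2)
    upper = [(i, j) for i in range(dim) for j in range(i + 1, dim)]
    for i, j in rng.sample(upper, numOfNNZsym):
        m[i, j] = m[j, i] = rng.random()
    sparse_m = scipy.sparse.csc_matrix(m)
    return sparse_m, 1 - sparse_m.nnz / (dim * dim)


_, achieved = sparsePrecisionSketch()
print(achieved)  # roughly 0.6; slightly lower because the symmetric pair count is rounded up
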
def evalDir(path, i):
    #print(path)
    rows = []
    modelPaths = [os.path.join(path, name) for name in os.listdir(path)]
    onlyPickles = filter(lambda x: x.endswith('pickle'), modelPaths)
    for modelPath in onlyPickles:
        with open(modelPath, 'rb') as f:
            model = pickle.load(f)
        model = dist.Distribution(model.v, model.m, type='canonical')

        row = {'Dim': model.m.shape[0]}
        row['Sparsity'] = model.sparsity()

        # Timings on the sparse (canonical) representation
        row['inversionTimeSparse'] = timing.inversionTimeSparse(model)
        #row['inversionTimeSparseCSC'] = timing.inversionTimeSparse(model)
        #row['inversionTimeSparseToDense'] = timing.inversionTimeSparseToDense(model)
        #row['inversionTimeToDenseSplit'] = timing.inversionTimeSparseToDenseSplit(model)

        row['sparseMatrixDotVectorWithScipySparse'] = \
            timing.sparseMatrixDotVectorWithScipySparse(model)
        #row['sparseMatrixDotVectorWithNumpy'] = timing.sparseMatrixDotVectorWithNumpy(model)

        row['sparseSubsetFI'] = timing.sparseSubsetFI(model)
        row['sparseSubsetIX'] = timing.sparseSubsetIX(model)

        row['sparseMatrixDotMatrix'] = timing.sparseMatrixDotMatrix(model)

        # Timings on the dense (mean) representation
        model.meanForm()
        row['inversionTimeDense'] = timing.inversionTimeDense(model)
        #row['inversionTimeDenseToSparse'] = timing.inversionTimeDenseToSparse(model)
        row['denseMatrixDotVector'] = timing.denseMatrixDotVector(model)
        #row['denseMatrixDotVectorFlattened'] = timing.denseMatrixDotVectorFlattened(model)
        row['denseMatrixDotMatrix'] = timing.denseMatrixDotMatrix(model)

        rows.append(row)
    # DataFrame.append was removed in pandas 2.0; build the frame from the
    # collected rows instead.
    frame = pd.DataFrame(rows)
    frame.to_csv(os.path.join(path, 'atomicOperations' + str(i) + '.csv'),
                 index=False)
Example #5
def histogram(distrib,
              pathname,
              normalised=True,
              colours=None,
              title=None,
              xaxis_name=None,
              faxis_name=None):
    if isinstance(distrib, dict):  # 'distrib' is a plain histogram.
        distrib = distribution.Distribution(distrib)
    assert isinstance(distrib, distribution.Distribution)
    if title is None:
        title = get_title_placeholder()
    if normalised:
        title_addon = "[normalised] "
    else:
        title_addon = ""
    title_addon += (
        "#bars=" + str(len(distrib.get_histogram())) + ", "
        "sum bars=" +
        format(sum([value
                    for _, value in distrib.get_histogram().items()]), ".3f") +
        ", "
        "sum x*bar=" + format(
            sum([x * value
                 for x, value in distrib.get_histogram().items()]), ".3f") +
        ", "
        "median=" + format(distrib.get_median(), ".3f") + ", "
        "mean=" + format(distrib.get_mean(), ".3f") + ", "
        "mean freq.=" + format(
            1.0 / (distrib.get_mean() if abs(distrib.get_mean()) > 0.00001 else
                   0.00001), ".3f") + ", "
        "variance=" + format(distrib.get_variance(), ".3f") + ", "
        "std. deviation=" + format(distrib.get_standard_deviation(), ".3f") +
        ", "
        "CV=" + format(distrib.get_coefficient_of_variation(), ".3f"))
    title = title.replace(get_title_placeholder(), title_addon)
    if normalised:
        points = distrib.get_probability_points()
    else:
        points = distrib.get_points()

    if len(distrib.get_events()) < 2:
        bar_width = 0.8
    else:
        bar_width = min(
            map(lambda x: abs(x[1] - x[0]),
                zip(distrib.get_events()[:-1],
                    distrib.get_events()[1:])))
    __write_xplot(
        lambda ax, x, y: ax.bar(x, y, bar_width, color=colours, align="center"
                                ), points, pathname, title, xaxis_name,
        faxis_name)
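
# The bar width chosen by histogram() above is the smallest gap between
# consecutive event values. A tiny self-contained illustration of that rule
# (plain Python, no plotting; the event values are invented):
events = [0.010, 0.012, 0.020, 0.021]
bar_width = min(abs(b - a) for a, b in zip(events[:-1], events[1:]))
print(bar_width)  # ~0.001, the gap between 0.020 and 0.021
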
Example #6
def test_copy():
    sigma = hlp.randomPositiveSemidefinite(dims)
    mu = np.random.rand(dims)
    d = dst.Distribution(mu, sigma, 'moment')
    c = d.copy()
    # Check if different objects
    assert c is not d
    # Check if entities hold equal values
    assert c.equalsVerbose(d)
    # Check if deep-copy succeeded
    assert c.v is not d.v
    # Check if deep-copy succeeded
    assert c.m is not d.m
Example #7
def test_Conversion():
    sigma = hlp.randomPositiveSemidefinite(dims)
    mu = np.random.rand(dims).T
    d = dst.Distribution(mu, sigma, 'mean')
    dd = ddst.DistributionDense(mu, sigma, 'mean')
    print(type(dd))

    a = d.copy()
    b = dd.copy()
    # Check if multiple conversions don't change equality
    for i in range(1, 100):
        #print(i)
        a.canonicalForm()
        a.meanForm()

        b.canonicalForm()
        b.meanForm()
    assert a.isCloseVerbose(d)
    assert b.isCloseVerbose(dd)
Example #8
    def doit(hist, n, ofile):
        xhist = hist.copy()
        for k in xhist.keys():
            xhist[k] = 0
        isi = distribution.Distribution(hist)
        print(isi)
        ofile.write(str(isi) + "\n")
        for _ in range(n):
            e = isi.next_event()
            assert e in hist.keys()
            xhist[e] += 1
        osum = 0.0
        xsum = 0.0
        for k in hist.keys():
            osum += hist[k]
            xsum += xhist[k]
        if xsum > 0:
            for k in xhist.keys():
                xhist[k] *= osum / xsum
        return xhist
Example #9
    def histogram(self, distrib, colours=None, normalised=None, legend=None):
        if isinstance(distrib, dict):  # 'distrib' is a plain histogram.
            distrib = distribution.Distribution(distrib)
        assert isinstance(distrib, distribution.Distribution)
        if normalised:
            points = distrib.get_probability_points()
        else:
            points = distrib.get_points()

        if len(distrib.get_events()) < 2:
            bar_width = 0.8
        else:
            bar_width = min(
                map(lambda x: abs(x[1] - x[0]),
                    zip(distrib.get_events()[:-1],
                        distrib.get_events()[1:])))
        self.make_plot(
            lambda ax, x, y: ax.bar(x,
                                    y,
                                    bar_width,
                                    color=self._choose_colours(colours),
                                    align="center",
                                    label=legend), points, legend is not None)
Example #10
def main(massfunction = 0, starformationhistory = 0, A_v = 10.0, sfr = .01, apera = 24000,\
 maxage = 2000000., distance = 8.0, appendix='default', quiet=0, precise=0):
    """main(massfunction = 0, starformationhistory = 0, A_v = 10.0, sfr = .01, apera = 24000,\
          maxage = 2000000., distance = 8.0, appendix='default', quiet=0, precise=0)

    Creates a sample of stars

    Parameters
    ----------
    massfunction            distribution:
        mass distribution with lower and upper bounds; see below for what the
        distribution object must provide
    starformationhistory    distribution:
        age distribution with lower and upper bounds; see below for what the
        distribution object must provide
    A_v       float:
        value of the visual extinction
    sfr       float:
        average star formation rate in M_sun/year (only used if precise = True)
    apera     float:
        aperture size used for selecting the fluxes of the protostars
    maxage    float:
        age of the star formation site; the sfr is assumed to be constant
    distance  float:
        distance to the simulated star formation site
    appendix  String:
        sets the output filename; the default is the starting time (via time.time())
    quiet     boolean:
        if true (=1) suppresses all standard output
    precise   boolean:
        if true (=1), single stars are sampled until the expected mass is reached,
        based on the integrated starformationhistory times the star formation rate;
        otherwise sfr is the number of expected stars and the star formation rate is
        calculated from the cumulated mass divided by the formation time

    The distributions must provide an object which has the following members
    (a minimal sketch of such an object follows this example):
        float    cdf(float x)   returns the integrated distribution up to x, used to
                                calculate the expected mass
        float    _upperbound    the upper limit of the distribution, used to
                                calculate the expected mass
        float[]  sample(int n)  returns an array of n floats sampled from the distribution
        float    mean()         returns the mean value of the distribution


    Returns
    ----------
    Writes a fits file into the out folder, using either the appendix as filename or the
        starting time of the script, in order to prevent overwriting existing files.
        The header of the fits file records the values A_v, sfr, apera, maxage and distance;
        the data part contains the age, mass, model number and the uncorrected and
        corrected fluxes.
    """

    if quiet:
        output_stream = StringIO()
    else:
        output_stream = sys.stdout

    t0 = time()
    if appendix == 'default':  # making sure not to overwrite former output
        appendix = t0  # by using the starting time as an unique id
    #parameter settings
    k_v = 211.4  # opacity in v_band in cm^2/g
    # wavelength of the corresponding filterband in microns
    wavelength = [
        1.235, 1.662, 2.159, 3.550, 4.493, 5.731, 7.872, 23.68, 71.42, 155.9
    ]
    models = ['2H', '2J', '2K', 'I1', 'I2', 'I3', 'I4', 'M1', 'M2', 'M3']

    if massfunction == 0 and starformationhistory == 0:
        # star mass function
        kroupa = np.vectorize(functions.kroupa)
        massfunction = dist.Distribution(kroupa, .1, 50.)

        #star formation history
        constant_sfr = np.vectorize(functions.constant_sfr)
        starformationhistory = dist.Distribution(constant_sfr, 1000., maxage)

    cumass = 0.  #sampled mass
    stars = []  #storing for the sample
    sfh = starformationhistory

    t1 = time()  #startup completed

    if precise:
        n = 0
        exmass = sfh.cdf()(sfh._upperbound) * sfr  #expected mass formed
        while cumass < exmass:
            mass, age = massfunction.sample(), sfh.sample()
            cumass = cumass + mass
            stars.append([n, age, mass])
            if n % 10000 == 0:
                print(n, cumass, file=output_stream)  #reporting progress
            n = n + 1
    else:
        n = sfr
        mass, age = massfunction.sample(n), sfh.sample(n)
        cumass = np.sum(mass)
        exmass = n * massfunction.mean()
        stars = [[i, age[i], mass[i]] for i in range(n)]
    sfr = cumass / (sfh._upperbound - sfh._lowerbound)  # average star formation rate

    print('number of sampled stars: %s' % n, file=output_stream)
    print('mass of sampled stars: %s' % cumass, file=output_stream)
    print('mean mass: %s' % (cumass / n), file=output_stream)
    print('expected mass of stars: %s' % exmass, file=output_stream)
    t2 = time()  # sampling completed

    # python code for model contact
    #initial parameters
    model = [fits.open('models/%s.fits' % mod)
             for mod in models]  # fits-data for the model
    param = fits.open('models/parameters.fits.gz')  # modelparameter
    # nearest aperture index per model (np.interp returns a float, so cast to int)
    app_num = [
        int(round(np.interp(apera, model[i][2].data.field(0),
                            range(model[i][2].data.field(0).size))))
        for i in range(len(models))
    ]

    # sampling viewing angle
    angle = np.random.randint(0, 10, len(stars))  # 0..9 inclusive, as random_integers(0, 9) did
    #reading model grid
    mass = param[1].data['MASSC'][::10]
    age = param[1].data['TIME'][::10]
    grid = np.vstack([age, mass]).transpose()

    #converting to logspace
    stars = np.asarray(stars)
    grid = np.log10(grid)
    stars[:, 1:] = np.log10(stars[:, 1:])

    output = stars.tolist()  #creating output

    # normalizing the (age, mass) coordinates column-wise for the nearest neighbour search
    age_range = grid[:, 0].max() - grid[:, 0].min()
    mass_range = grid[:, 1].max() - grid[:, 1].min()
    grid[:, 0] = grid[:, 0] / age_range
    grid[:, 1] = grid[:, 1] / mass_range
    stars[:, 1] = stars[:, 1] / age_range
    stars[:, 2] = stars[:, 2] / mass_range

    t3 = time()  #model data load complete

    tree = scipy.spatial.cKDTree(grid, leafsize=10)  #search tree
    matches = [tree.query(star[1:], k=1)[1]
               for star in stars]  # keep only the index of the nearest model

    t4 = time()  #matching sample to data complete

    # extracting fluxes
    fluxes = [0 for j in range(len(models))]
    indices = 10 * np.asarray(matches) + angle
    for j in range(len(models)):
        fluxes[j] = model[j][1].data[indices]['TOTAL_FLUX'][:, app_num[j]]

    # applying extinction
    extinction = np.loadtxt('models/extinction_law.ascii')
    k_lambda = np.interp(wavelength, extinction[:, 0], extinction[:, 1])
    correctionfactor = 10.**(-.4 * A_v * k_lambda / k_v)

    newfluxes = [0 for j in range(len(models))]
    for j in range(len(models)):
        newfluxes[j] = np.asarray(
            fluxes[j]) * correctionfactor[j] * (1. / distance)**2

    t5 = time()  #extracting fluxes complete

    # saving data
    fluxes = np.asarray(fluxes)
    newfluxes = np.asarray(newfluxes)
    output = np.vstack(
        [np.asarray(output).transpose(), matches, fluxes,
         newfluxes]).transpose()

    # create table
    # data table
    t = Table()
    t.add_column(Column(name='age', data=output[:, 1]))
    t.add_column(Column(name='mass', data=output[:, 2]))
    t.add_column(Column(name='model', data=output[:, 3]))
    for i in range(len(models)):
        t.add_column(Column(name='%s' % models[i], data=output[:, 4 + i]))
    for i in range(len(models)):
        t.add_column(
            Column(name='c%s' % models[i], data=output[:,
                                                       4 + len(models) + i]))
    # head table
    header = Table()
    header.add_column(Column(name='AV', data=[A_v]))
    header.add_column(Column(name='SFR', data=[sfr]))
    header.add_column(Column(name='APPERA', data=[apera]))
    header.add_column(Column(name='MAXAGE', data=[maxage]))
    header.add_column(Column(name='DIST', data=[distance]))

    # astropy renamed 'clobber' to 'overwrite'
    fits.writeto('out/%s' % appendix, np.array(t), overwrite=True)
    fits.append('out/%s' % appendix, np.array(header))

    t6 = time()  #saving complete

    # timing possibility for optimization efforts

    print('starting script at %f' % (t0), file=output_stream)
    print('initializing       %f' % (t1 - t0), file=output_stream)
    print("sampling           %f" % (t2 - t1), file=output_stream)
    print("model data load    %f" % (t3 - t2), file=output_stream)
    print("matching model     %f" % (t4 - t3), file=output_stream)
    print("extracting fluxes  %f" % (t5 - t4), file=output_stream)
    print("saving             %f" % (t6 - t5), file=output_stream)
    print("________________________", file=output_stream)
    print("total runtime      %f" % (t6 - t0), file=output_stream)
    print("finishing script   %f" % t6, file=output_stream)


#main(sfr = .08)  # for testing purposes and directly called from bash
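
# The docstring of main() above lists the members a distribution object must
# provide (cdf, _upperbound, sample, mean). Below is a minimal, hypothetical
# stand-in -- a uniform distribution on [lo, hi] -- written only to illustrate
# that interface as main() actually uses it (e.g. sfh.cdf()(sfh._upperbound)).
# It is a sketch, not the dist.Distribution class used in this project.
import numpy as np


class UniformDistributionSketch:
    def __init__(self, lo, hi):
        self._lowerbound = lo
        self._upperbound = hi

    def cdf(self):
        # main() calls cdf() and evaluates the returned callable at a point.
        span = self._upperbound - self._lowerbound
        return lambda x: min(max((x - self._lowerbound) / span, 0.0), 1.0)

    def sample(self, n=None):
        # Both the scalar form sample() and the array form sample(n) are used above.
        if n is None:
            return np.random.uniform(self._lowerbound, self._upperbound)
        return np.random.uniform(self._lowerbound, self._upperbound, n)

    def mean(self):
        return 0.5 * (self._lowerbound + self._upperbound)
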
Example #11
def _test_alignment_of_aligned_spike_trains(info):
    """
    Checks alignment of spike trains all aligned to a pivot
    spike train. Test is conducted for several alignment
    coefficients.
    """
    assert isinstance(info, TestInfo)

    seed = 0

    def get_next_seed():
        nonlocal seed
        seed += 1
        return seed

    numpy.random.seed(get_next_seed())

    def make_spike_train(is_excitatory=True,
                         mean_frequency=None,
                         percentage_of_regularity_phases=None,
                         seed=None):
        if mean_frequency is None:
            mean_frequency = 15.0 if is_excitatory else 60.0
        if percentage_of_regularity_phases is None:
            percentage_of_regularity_phases = 0.0
        return spike_train.create(
            distribution.hermit_distribution_with_desired_mean(
                1.0 / mean_frequency, 0.003, 0.3, 0.0001, pow_y=2, seed=seed)
            if is_excitatory else
            distribution.hermit_distribution_with_desired_mean(1.0 /
                                                               mean_frequency,
                                                               0.001,
                                                               0.08,
                                                               0.0001,
                                                               bin_size=0.0002,
                                                               pow_y=2,
                                                               seed=seed),
            percentage_of_regularity_phases)

    pivot_trains = [
        (make_spike_train(True, seed=get_next_seed()), "epivot"),
        (make_spike_train(False, seed=get_next_seed()), "ipivot"),
    ]

    nsteps = 1000000
    dt = 0.0001
    start_time = 0.0

    def simulate_trains(trains):
        t = start_time
        for step in range(nsteps):
            utility.print_progress_string(step, nsteps)
            for train, _ in trains:
                train.on_time_step(t, dt)
            t += dt

    print("Simulating pivot trains")
    simulate_trains(pivot_trains)

    def make_aligned_train(pivot,
                           alignment_coefficient,
                           dispersion_fn,
                           is_excitatory=True,
                           mean_frequency=None,
                           seed=None):
        assert isinstance(pivot, spike_train.SpikeTrain)
        train = make_spike_train(is_excitatory, mean_frequency, 0.0, seed)
        train.set_spike_history_alignment(pivot.get_spikes_history(),
                                          alignment_coefficient, dispersion_fn)
        return train

    dispersion_fn = datalgo.AlignmentDispersionToSpikesHistory(2.0)

    other_trains = [[
        (make_aligned_train(pivot,
                            coef,
                            dispersion_fn,
                            kind,
                            seed=get_next_seed()),
         kname + str(idx) + "_" + format(coef, ".2f"))
        for kind, kname in [(True, "e"), (False, "i")] for idx in range(2)
        for coef in [-1.0, -0.75, -0.5, -0.25, 0.0, 0.25, 0.5, 0.75, 1.0]
    ] for pivot, _ in pivot_trains]

    print("Simulating other trains")
    simulate_trains(
        [train for pivot_trains in other_trains for train in pivot_trains])

    print("Saving results:")
    for pivot_desc_pair, pivot_trains in zip(pivot_trains, other_trains):
        pivot = pivot_desc_pair[0]
        pivot_desc = pivot_desc_pair[1]
        outdir = os.path.join(info.output_dir, pivot_desc)
        os.makedirs(outdir, exist_ok=True)
        print("    Saving statistics of spike train: " + pivot_desc)
        with open(os.path.join(outdir, "stats_" + pivot_desc + ".json"),
                  "w") as ofile:
            ofile.write(
                json.dumps(
                    {
                        "configuration": pivot.get_configuration(),
                        "statistics": pivot.get_statistics()
                    },
                    sort_keys=True,
                    indent=4))
        print("    Saving spike event distributions of spike train: " +
              pivot_desc)
        plot.histogram(datalgo.make_histogram(
            datalgo.make_difference_events(pivot.get_spikes_history()), dt,
            start_time),
                       os.path.join(outdir, "distrib_" + pivot_desc + ".png"),
                       normalised=False)
        for i in range(len(pivot_trains)):
            train_i, desc_i = pivot_trains[i]
            print("    Saving statistics of spike train: " + desc_i)
            with open(os.path.join(outdir, "stats_" + desc_i + ".json"),
                      "w") as ofile:
                ofile.write(
                    json.dumps(
                        {
                            "configuration": train_i.get_configuration(),
                            "statistics": train_i.get_statistics()
                        },
                        sort_keys=True,
                        indent=4))
            print("    Saving spike event distributions of spike train: " +
                  desc_i)
            plot.histogram(datalgo.make_histogram(
                datalgo.make_difference_events(train_i.get_spikes_history()),
                dt, start_time),
                           os.path.join(outdir, "distrib_" + desc_i + ".png"),
                           normalised=False)

            print("    Saving alignment histogram: " + desc_i + " VS " +
                  pivot_desc)
            hist_pair = datalgo.make_alignment_histograms_of_spike_histories(
                train_i.get_spikes_history(), pivot.get_spikes_history(),
                0.005, dt / 2.0)
            plot.histogram(
                hist_pair[0],
                os.path.join(outdir,
                             "hist_" + desc_i + "_vs_" + pivot_desc + ".png"),
                normalised=False)

            for j in range(i + 1, len(pivot_trains)):
                train_j, desc_j = pivot_trains[j]

                print("    Saving alignment histogram: " + desc_i + " VS " +
                      desc_j)
                hist_pair = datalgo.make_alignment_histograms_of_spike_histories(
                    train_i.get_spikes_history(), train_j.get_spikes_history(),
                    0.005, dt / 2.0)
                # plot.histogram(
                #     hist_pair[0],
                #     os.path.join(outdir, "hist_" + desc_i + "_vs_" + desc_j + ".png"),
                #     normalised=False
                #     )
                # plot.histogram(
                #     hist_pair[1],
                #     os.path.join(outdir, "hist_" + desc_j + "_vs_" + desc_i + ".png")
                #     )

                print("    Saving alignment curve: " + desc_i + " VS " +
                      desc_j)
                with plot.Plot(
                        os.path.join(
                            outdir, "curve_" + desc_i + "_vs_" + desc_j +
                            ".png")) as plt:
                    plt.curve(datalgo.interpolate_discrete_function(
                        datalgo.approximate_discrete_function(
                            distribution.Distribution(
                                hist_pair[0]).get_probability_points())),
                              legend=desc_i + " -> " + desc_j)
                    plt.curve(datalgo.interpolate_discrete_function(
                        datalgo.approximate_discrete_function(
                            distribution.Distribution(
                                hist_pair[1]).get_probability_points())),
                              legend=desc_j + " -> " + desc_i)
        plot.event_board_per_partes(
            [pivot.get_spikes_history()] +
            [train.get_spikes_history() for train, _ in pivot_trains],
            os.path.join(outdir, "spikes_board.png"), start_time,
            start_time + nsteps * dt, 1.0, 5, lambda p: print(
                "    Saving spikes board part: " + os.path.basename(p)),
            [[plot.get_random_rgb_colour()] * len(pivot.get_spikes_history())]
            + [[plot.get_random_rgb_colour()] * len(train.get_spikes_history())
               for train, _ in pivot_trains],
            " " + plot.get_title_placeholder() + " SPIKES BOARD")

    return 0  # No failures
Example #12
def _test_aligned_spike_trains(info):
    """
    Checks alignment of spike trains for different alignment coefficients.
    """
    assert isinstance(info, TestInfo)

    # def make_alignment_histogram(name, lo_event, hi_event, dt, num_events=None):
    #     if num_events is None:
    #         num_events = int((hi_event - lo_event) / dt) + 1
    #     raw_events = [lo_event + (i / float(num_events - 1)) * (hi_event - lo_event) for i in range(0, num_events)]
    #     events = []
    #     for e in raw_events:
    #         t = 0.0
    #         while e > t + 1.5 * dt:
    #             t += dt
    #         events.append(t)
    #     half_epsilon = 0.5 * (dt / 10)
    #     coefs = []
    #     for i in range(len(events)):
    #         for j in range(i + 1, len(events)):
    #             dist = events[j] - events[i]
    #             shift = dt
    #             while shift < dist:
    #                 if not (shift < half_epsilon or shift > dist - half_epsilon):
    #                     # print(format(shift, ".3f") + " / " + format(dist, ".3f") + " = " + format(shift/dist, ".3f") +
    #                     #       "  --->  " +
    #                     #       format(shift/dt, ".3f") + " / " + format(dist / dt, ".3f") + " = " + format((shift/dt)/(dist/dt), ".3f"))
    #                     coef = max(-1.0, min(2.0 * shift / dist - 1.0, 1.0))
    #                     coefs.append(coef)
    #                 shift += dt
    #     coefs = sorted(coefs)
    #     hist = datalgo.make_histogram(coefs, 0.005, 0.0, 1)
    #     plot.histogram(
    #         hist,
    #         os.path.join(info.output_dir, name + ".png"),
    #         normalised=False
    #         )
    #     return 0
    # make_alignment_histogram("xhist", 0.003, 0.030, 0.0001)
    # make_alignment_histogram("yhist", 0.003, 0.130, 0.0001)
    # return 0

    seed = 0

    def get_next_seed():
        nonlocal seed
        seed += 1
        return seed

    numpy.random.seed(get_next_seed())

    def make_spike_train(is_excitatory=True,
                         mean_frequency=None,
                         percentage_of_regularity_phases=None,
                         seed=None):
        # min_event = 0.003
        # max_event = 0.030
        # num_events = 28
        # return spike_train.create(
        #             distribution.Distribution(
        #                 {(min_event + (i / float(num_events - 1)) * (max_event - min_event)): 1.0 / float(num_events)
        #                  for i in range(0, num_events)},
        #                 seed
        #                 ),
        #             0.0
        #             )
        if mean_frequency is None:
            mean_frequency = 15.0 if is_excitatory else 60.0
        if percentage_of_regularity_phases is None:
            percentage_of_regularity_phases = 0.0
        return spike_train.create(
            distribution.hermit_distribution_with_desired_mean(
                1.0 / mean_frequency, 0.003, 0.3, 0.0001, pow_y=2, seed=seed)
            if is_excitatory else
            distribution.hermit_distribution_with_desired_mean(1.0 /
                                                               mean_frequency,
                                                               0.001,
                                                               0.08,
                                                               0.0001,
                                                               bin_size=0.0002,
                                                               pow_y=2,
                                                               seed=seed),
            percentage_of_regularity_phases)

    pivot_trains = [
        (make_spike_train(True, seed=get_next_seed()), "epivot"),
        (make_spike_train(False, seed=get_next_seed()), "ipivot"),
    ]

    nsteps = 1000000
    dt = 0.0001
    start_time = 0.0

    def simulate_trains(trains):
        t = start_time
        for step in range(nsteps):
            utility.print_progress_string(step, nsteps)
            for train, _ in trains:
                train.on_time_step(t, dt)
            t += dt

    print("Simulating pivot trains")
    simulate_trains(pivot_trains)

    def make_aligned_train(pivot,
                           alignment_coefficient,
                           dispersion_fn,
                           is_excitatory=True,
                           mean_frequency=None,
                           seed=None):
        assert isinstance(pivot, spike_train.SpikeTrain)
        train = make_spike_train(is_excitatory, mean_frequency, 0.0, seed)
        train.set_spike_history_alignment(pivot.get_spikes_history(),
                                          alignment_coefficient, dispersion_fn)
        return train

    dispersion_fn = datalgo.AlignmentDispersionToSpikesHistory(2.0)

    other_trains = [
        (make_spike_train(True, seed=get_next_seed()), "eunaligned"),
        (make_spike_train(False, seed=get_next_seed()), "iunaligned"),
    ] + [(make_aligned_train(
        pivot, coef, dispersion_fn, kind, seed=get_next_seed()),
          kname + "aligned(" + desc + "," + format(coef, ".2f") + ")")
         for pivot, desc in pivot_trains
         for kind, kname in [(True, "e"), (False, "i")]
         for coef in [-1.0, -0.75, -0.5, -0.25, 0.0, 0.25, 0.5, 0.75, 1.0]]

    print("Simulating other trains")
    simulate_trains(other_trains)

    print("Saving results:")
    for pivot, pivot_desc in pivot_trains:
        print("    Saving statistics of spike train: " + pivot_desc)
        with open(
                os.path.join(info.output_dir, "stats_" + pivot_desc + ".json"),
                "w") as ofile:
            ofile.write(
                json.dumps(
                    {
                        "configuration": pivot.get_configuration(),
                        "statistics": pivot.get_statistics()
                    },
                    sort_keys=True,
                    indent=4))
        print("    Saving spike event distributions of spike train: " +
              pivot_desc)
        plot.histogram(datalgo.make_histogram(
            datalgo.make_difference_events(pivot.get_spikes_history()), dt,
            start_time),
                       os.path.join(info.output_dir,
                                    "distrib_" + pivot_desc + ".png"),
                       normalised=False)
    for other, other_desc in other_trains:
        print("    Saving statistics of spike train: " + other_desc)
        with open(
                os.path.join(info.output_dir, "stats_" + other_desc + ".json"),
                "w") as ofile:
            ofile.write(
                json.dumps(
                    {
                        "configuration": other.get_configuration(),
                        "statistics": other.get_statistics()
                    },
                    sort_keys=True,
                    indent=4))
        print("    Saving spike event distributions of spike train: " +
              other_desc)
        plot.histogram(datalgo.make_histogram(
            datalgo.make_difference_events(other.get_spikes_history()), dt,
            start_time),
                       os.path.join(info.output_dir,
                                    "distrib_" + other_desc + ".png"),
                       normalised=False)
    for pivot, pivot_desc in pivot_trains:
        for other, other_desc in other_trains:
            print("    Saving alignment histograms: " + pivot_desc + " VS " +
                  other_desc)
            hist_pair = datalgo.make_alignment_histograms_of_spike_histories(
                pivot.get_spikes_history(), other.get_spikes_history(), 0.005,
                dt / 2.0)
            plot.histogram(hist_pair[0],
                           os.path.join(
                               info.output_dir, "hist_" + pivot_desc + "_vs_" +
                               other_desc + ".png"),
                           normalised=False)
            plot.histogram(
                hist_pair[1],
                os.path.join(
                    info.output_dir,
                    "hist_" + pivot_desc + "_vs_" + other_desc + "_inv.png"))
            print("    Saving alignment curve: " + pivot_desc + " VS " +
                  other_desc)
            with plot.Plot(
                    os.path.join(
                        info.output_dir, "curve_" + pivot_desc + "_vs_" +
                        other_desc + ".png")) as plt:
                plt.curve(datalgo.interpolate_discrete_function(
                    datalgo.approximate_discrete_function(
                        distribution.Distribution(
                            hist_pair[0]).get_probability_points())),
                          legend=pivot_desc + " -> " + other_desc)
                plt.curve(datalgo.interpolate_discrete_function(
                    datalgo.approximate_discrete_function(
                        distribution.Distribution(
                            hist_pair[1]).get_probability_points())),
                          legend=other_desc + " -> " + pivot_desc)
        plot.event_board_per_partes(
            [pivot.get_spikes_history()] +
            [other.get_spikes_history() for other, _ in other_trains],
            os.path.join(info.output_dir,
                         "spikes_board_" + pivot_desc + ".png"), start_time,
            start_time + nsteps * dt, 1.0, 5, lambda p: print(
                "    Saving spikes board part: " + os.path.basename(p)),
            [[plot.get_random_rgb_colour()] * len(pivot.get_spikes_history())]
            + [[plot.get_random_rgb_colour()] * len(other.get_spikes_history())
               for other, _ in other_trains],
            " " + plot.get_title_placeholder() + " SPIKES BOARD")

    return 0  # No failures
Example #13
    def verify2(self):
        """
        Verify that ciphertext = Encrypt(plaintext, key) and delete traces which fail this test (due to comms errors)
        """
        count = 0
        # Recompute each ciphertext from the recorded plaintext and collect the
        # indices of traces whose recorded ciphertext does not match; deleting
        # after the loop avoids shifting indices while iterating.
        bad = []
        for i in range(self.acq.numTraces):
            ptStr = self.ptNumpyArray2String(self.acq.inputtext[i])
            ctStr1 = (des_block.des_block(ptStr, 64)).encipher(self.key)
            ctStr2 = self.ptNumpyArray2String(self.acq.outputtext[i])
            if ctStr1 != ctStr2:
                bad.append(i)
                count += 1
        self.acq.traces = np.delete(self.acq.traces, bad, axis=0)
        self.acq.inputtext = np.delete(self.acq.inputtext, bad, axis=0)
        self.acq.outputtext = np.delete(self.acq.outputtext, bad, axis=0)
        print("Deleted %d traces" % count)

    def ptNumpyArray2String(self, inputtext):
        ptStr = ""
        for i in range(self.acq.blockSize):
            ptStr += hex2str(inputtext[i])
        return ptStr


if __name__ == "__main__":
    #acq = LoadTraces.LoadTraces('cw_tc6+trim.trs')
    acq = LoadTraces.LoadTraces('traces/0005.trs', 100000)
    v = Verify(acq)
    v.verify2()
    d = distribution.Distribution(acq)
    print("sbox 1 of 0005")
    d.sboxInputs(0x1f, 1)
Example #14
    def add_player(self, name, connection):
        self.player_cloud[name] = distribution.Distribution(
            num_particles=200,
            emission_function=self.player_observation,
            transition_function=lambda x: self.player_transition(name, x))
        self.player_angles[name] = 0.0
        self.player_connections[name] = connection
        self.player_speeds[name] = 0.0
Example #15
def test_operationsSparse():
    marginalizeTo = [1, 2, 4, 5]
    condition = [0, 1]
    condVal = [0.5, 0.5]

    cov = np.matrix([[3, 0, 0, 2, 0, 2], [0, 1, 0.5, 0, 0, 0],
                     [0, 0.5, 1, 0, 0, 0], [2, 0, 0, 2, 0, 1],
                     [0, 0, 0, 0, 1, 0], [2, 0, 0, 1, 0, 2]])

    mean = [0.5, 0.5, 0.5, 0.5, 0.5, 0.5]

    # Invert matrix and compute information vector
    prec = np.linalg.inv(cov)
    inf = prec.dot(mean)

    # Construct Distribution objects
    sparse_dist = dst.Distribution(mean, cov, 'mean')
    sparse_dist_canonical = dst.Distribution(inf, sp.csc_matrix(prec),
                                             'canonical')

    # Marginalize "by hand"
    cov_marginalized = np.take(np.take(cov, marginalizeTo, axis=0),
                               marginalizeTo,
                               axis=1)
    mean_marginalized = np.take(mean, marginalizeTo)

    # Marginalize both distributions with methods
    sparse_dist_marginalized = sparse_dist.marginalize(marginalizeTo)
    sparse_dist_canonical_marginalized = sparse_dist_canonical.marginalize(
        marginalizeTo)

    # models are close
    assert (np.allclose(sparse_dist_marginalized.m, cov_marginalized))
    assert (np.allclose(sparse_dist_marginalized.v, mean_marginalized))

    #Convert canonical to mean and check if close
    temp = sparse_dist_canonical_marginalized.copy()
    temp.meanForm()
    assert (np.allclose(temp.m, sparse_dist_marginalized.m))
    assert (np.allclose(temp.v, sparse_dist_marginalized.v))

    #Compute the canonical form 'by hand' (somewhat redundant)
    prec_marginalized = np.linalg.inv(cov_marginalized)
    inf_marginalized = prec_marginalized.dot(mean_marginalized)
    assert (np.allclose(prec_marginalized,
                        sparse_dist_canonical_marginalized.m.todense()))
    assert (np.allclose(inf_marginalized,
                        sparse_dist_canonical_marginalized.v))

    # Compute conditioned distribution by hand
    complementary_conditioning_set = list(
        set(range(0, prec_marginalized.shape[0])) - set(condition))

    prec_final = np.take(np.take(prec_marginalized,
                                 complementary_conditioning_set,
                                 axis=0),
                         complementary_conditioning_set,
                         axis=1)

    l_ji = np.take(np.take(prec_marginalized,
                           complementary_conditioning_set,
                           axis=0),
                   condition,
                   axis=1)
    l_jj_i = np.linalg.inv(
        np.take(np.take(prec_marginalized,
                        complementary_conditioning_set,
                        axis=0),
                complementary_conditioning_set,
                axis=1))

    inf_final = np.squeeze(
        np.asarray(
            np.subtract(
                np.take(inf_marginalized, complementary_conditioning_set),
                l_ji.dot(l_jj_i).dot(condVal))))

    cov_final = np.linalg.inv(prec_final)
    mean_final = cov_final.dot(inf_final.T)

    # Check conditioning on canonical form
    canonicalMarginalizedConditioned = sparse_dist_canonical_marginalized.condition(
        condition, condVal)
    assert (np.allclose(prec_final,
                        canonicalMarginalizedConditioned.m.todense()))
    assert (np.allclose(inf_final, canonicalMarginalizedConditioned.v))

    # Check conditioning on mean form
    sparse_dist_marginalized_conditioned = sparse_dist_marginalized.condition(
        condition, condVal)
    print('--------------------------------------')
    print(cov_final)
    print('--------------------------------------')
    print(sparse_dist_marginalized_conditioned.m)
    print('--------------------------------------')
    print(mean_final)
    print('--------------------------------------')
    print(sparse_dist_marginalized_conditioned.v)

    assert (np.allclose(cov_final, sparse_dist_marginalized_conditioned.m))
    assert (np.allclose(mean_final, sparse_dist_marginalized_conditioned.v))

    #Convert to canonical Form and check again
    sparse_dist_marginalized_conditioned.canonicalForm()

    assert (np.allclose(prec_final,
                        sparse_dist_marginalized_conditioned.m.todense()))
    assert (np.allclose(inf_final, sparse_dist_marginalized_conditioned.v))
Example #16
def main(n = 150000, quiet = False):
    """main(n = 150000, quiet = False)

    This script produces a grid of expected numbers of stars according to the selection
        criteria of Yusef-Zadeh et al. 2009, The Astrophysical Journal, 702, 178-225.
        The grid is in av for the visual extinction, apera for the aperture size and age
        for the maxage of the star formation site

    Parameters
    ----------
    n       integer:
        number of stars to be sampled per parameter set
    quiet   boolean:
        if true suppresses all standard output


    Returns
    ----------
    A number of fits-files with the sampled stars for different parameters to be specified in
    this file.
    Standard output is used to report progress; it prints the parameter set to be
    processed next and the completeness of the script as
    AV aperture-size maxage completeness ETA
    where ETA is the estimated time to completion in seconds, based on the single last operation
    """
    t0 = time()         #timing possibility

    if quiet:
        output_stream = StringIO()
    else:
        output_stream = sys.stdout


    print(t0,file=output_stream)
    

    sfr = .01
    # star mass function
    kroupa = np.vectorize(functions.kroupa)
    mf = dist.Distribution(kroupa, .1, 50.)

    #star formation history
    constant_sfr = np.vectorize(functions.constant_sfr)
    
    ages = np.logspace(5,7,7)
    sf = [dist.Distribution(constant_sfr, 1000., ages[i]) for i in range(len(ages))]
    #sfr = [150000*mf.mean()/(ages[i]-1000.) for i in range(len(ages))]

    t1 = time()                 # finished reading the distributions
    print(t1,file=output_stream)


    # setting up model data
    aperas = np.logspace(2, 5, 4)
    avs = np.linspace(10.0, 50.0, 5)
    l = 1
    tmpold, tmpnew = 0., time()
    parameters = []
    for i in range(len(avs)):
        for j in range(len(aperas)):
            for k in range(len(ages)):
                tmpold, tmpnew = tmpnew, time()
                starformation.main(massfunction = mf, starformationhistory = sf[k], \
                    A_v = avs[i], sfr = n, apera = aperas[j], maxage = ages[k], \
                    appendix = "%s_%03d_%06d_%09d" % ('sim',avs[i],aperas[j],ages[k]), quiet=True, precise=False)
                print(avs[i],aperas[j],ages[k], l/len(avs)/len(aperas)/len(ages), (len(avs)*len(aperas)*len(ages)-l)*(tmpnew-tmpold),file=output_stream)
                l = l+1
                
                parameters.append([avs[i],aperas[j],ages[k]])

    t2 = time()                 # end of simulation
    print(t2, t1, t2-t1)
    
    print('number of simulations run: %s' % (l - 1), file=output_stream)
    head = ['#','AV', 'Aperature_size', 'Age']
    f = open('out/__head', 'w')
    f.write( ','.join(head)+'\n' )
    np.savetxt(f, parameters)
    f.close()

    t3 = time()                 # end of saving data

    analysis.main('out')
    print ('analysis complete' , file=output_stream)  
    
    t4 = time()                 # end of analysing data



    print( 'starting script at %f'  %(t0), file=output_stream)
    print( 'initializing       %f'  %(t1-t0), file=output_stream)
    print( "running simulation %f"  %(t2-t1), file=output_stream)
    print( "writing data       %f"  %(t3-t2), file=output_stream)
    print( "analysing data     %f"  %(t4-t3), file=output_stream)
    print( "________________________", file=output_stream)
    print( "total runtime      %f"  %(t4-t0), file=output_stream)
    print( "finishing script   %f"  %t4, file=output_stream)
Example #17
File: spike_train.py  Project: wrmsr/E2
    def __init__(self, spiking_distribution, percentage_of_regularity_phases,
                 noise_length_distribution, regularity_length_distribution,
                 max_spikes_buffer_size, regularity_chunk_size,
                 recording_controller, seed):
        """
        :param spiking_distribution:
            The distribution used for generation of spike events.
        :param percentage_of_regularity_phases:
            Specifies a percentage (in range 0..100) of whole simulation time, when algorithm enforcing
            correlations of inter-spike-intervals will be active. In the remaining time the algorithm will
            be inactive.
        :param noise_length_distribution:
            A distribution specifying durations of phases when the spikes-correlation algorithm is inactive.
        :param regularity_length_distribution:
            A distribution specifying durations of phases when the spikes-correlation algorithm is active.
        :param max_spikes_buffer_size:
            Spikes generated by `spiking_distribution' are stored in a buffer before they proceed to the
            spike train algorithms. The size of the buffer affects the correlation degree of spikes in the
            spike train: a greater value gives a greater degree of correlation. Values in the range 75..150
            give good results, and 100 can be considered the default. However, we recommend some
            experimentation to tune the value to the correlation degree you need.
        :param regularity_chunk_size:

        :param recording_controller:
            A function taking two arguments, `last_recording_time' and `current_time', and returning a bool.
            It tells the train whether a spike event should be recorded at `current_time' or not.
            The first parameter, `last_recording_time', is the last time this function returned True
            to the train. Note that the initial time of the simulation is treated as a time when the function
            returned True. Here are two basic examples of the function (a periodic variant is sketched
            after this example):
                lambda last_recording_time, current_time: True      # Record all generated spikes
                lambda last_recording_time, current_time: False     # Disable recording of spikes

        Look also to assertions below to see requirements (preconditions) of the parameters described above.
        """
        assert isinstance(spiking_distribution, distribution.Distribution)
        assert type(percentage_of_regularity_phases) in [int, float]
        assert 0 <= percentage_of_regularity_phases <= 100
        assert isinstance(noise_length_distribution, distribution.Distribution)
        assert isinstance(regularity_length_distribution,
                          distribution.Distribution)
        assert regularity_length_distribution.get_mean() > 0.0001
        assert isinstance(max_spikes_buffer_size,
                          int) and max_spikes_buffer_size >= 1
        assert isinstance(regularity_chunk_size,
                          int) and regularity_chunk_size in range(
                              1, max_spikes_buffer_size)
        self._spiking_distribution = spiking_distribution
        self._phases_distribution = distribution.Distribution(
            {
                "regularity": percentage_of_regularity_phases,
                "noise": 100 - percentage_of_regularity_phases
            }, seed)
        self._noise_length_distribution = noise_length_distribution
        self._regularity_length_distribution = regularity_length_distribution
        self._regularity_chunk_index_low = -1
        self._regularity_chunk_index_high = -1
        self._regularity_chunk_size = regularity_chunk_size
        self._spikes_buffer = []
        self._spikes_buffer_creation_times = []
        self._max_spikes_buffer_size = max_spikes_buffer_size
        self._is_noise_phase_active = True
        self._next_spike_time = None
        self._phase_start_time = None
        self._phase_end_time = None
        self._last_recording_time = None
        self._spikes_history = []
        self._recording_controller = recording_controller
        self._statistics = SpikeTrain.get_initial_statistics()
        self._rnd_generator = random.Random(seed)
        self._alignment_spike_history = None
        self._alignment_coefficient = None
        self._alignment_dispersion_fn = None
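
# The docstring above describes `recording_controller' as a predicate over
# (last_recording_time, current_time). Besides the two constant examples given
# there, a controller that records at most once per `period' seconds could be
# sketched as follows (a hypothetical helper, not part of spike_train.py):
def make_periodic_recording_controller(period):
    return lambda last_recording_time, current_time: \
        current_time - last_recording_time >= period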