Example No. 1
def varRed(idat,odat,A,bootstrap = None):
    """
    computes the variance reduction when using A*idat[:,x].T as predictor for odat[:,x].T.
    If bootstrap is an integer > 1, a bootstrap with the given number of iterations
    will be performed.
    returns
    tVred, sVred: the total relative variance after prediction (all coordinates)
       and the variance reduction for each coordinate separately. These are a
       scalar and an array, or lists of scalars and arrays when a bootstrap is performed.
       
    Note: in the bootstrapped results, the first element refers to the "full" 
    data variance reduction.
    """
    
    nBoot = bootstrap if type(bootstrap) is int else 0
    if nBoot < 2:
        nBoot = 0        
    
    odat_pred = dot(A,idat.T)
    rdiff = odat_pred - odat.T # remaining difference
    rvar = var(rdiff,axis=1)/var(odat.T,axis=1) # relative variance
    trvar = var(rdiff.flat)/var(odat.T.flat)    # total relative variance
    
    if nBoot > 0:
        rvar = [rvar,]
        trvar = [trvar,]
    for rep in range(nBoot-1):
        indices = randint(0,odat.T.shape[1],odat.T.shape[1])
        odat_pred = dot(A,idat[indices,:].T)
        rdiff = odat_pred - odat[indices,:].T # remaining difference
        rvar.append( var(rdiff,axis=1)/var(odat.T,axis=1) ) # relative variance
        trvar.append (var(rdiff.flat)/var(odat.T.flat) )    # total relative variance
        
    return trvar, rvar
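A minimal usage sketch (not from the original source): it assumes varRed above is in scope and that dot, var and randint come from numpy, as they would after a pylab-style star import.

import numpy as np
from numpy import dot, var
from numpy.random import randint

idat = np.random.randn(200, 3)                           # 200 measurements, 3 input dimensions
A = np.array([[1.0, 0.5, 0.0],
              [0.0, 1.0, 0.2]])                          # maps 3 inputs to 2 outputs
odat = dot(idat, A.T) + 0.1 * np.random.randn(200, 2)    # noisy linear response

trvar, rvar = varRed(idat, odat, A)                      # single estimate
trvar_b, rvar_b = varRed(idat, odat, A, bootstrap=100)   # lists; first entry is the full-data result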
Example No. 2
def hist_values(parameter, group, strategy, decay_type, label, y_limit=None):
    values = group[parameter]
    values = list(values)
    
    binsize = 0.05
    if 'zoom' in label:
        binsize = 0.01
    
    if y_limit is None:
        y_limit = max(values)
    cutoff = 1
    #weights = np.ones_like(values)/float(len(values))
    
    weights = group['num_lines'] / sum(group['num_lines'])
    weights = np.array(weights)
    
    mu = pylab.average(values, weights=weights)
    sigma2 = pylab.var(values)
    
    pylab.figure()
    pylab.hist(values, weights=weights, bins=np.arange(0, cutoff + binsize, binsize))
    title_items = []
    title_items.append('%s maximum likelihood values %s %s %s' % (parameter, strategy, decay_type, label))
    title_items.append('mean of estimates = %f' % mu)
    title_items.append('variance of estimates = %f' % sigma2)
    title_str = '\n'.join(title_items)
    #pylab.title(parameter + ' maximum likelihood values ' + str(strategy) + ' ' + str(outname))
    #pylab.title(title_str)
    print title_str
    pylab.xlabel('%s mle' % parameter, fontsize=20)
    pylab.ylabel('weighted proportion', fontsize=20)
    pylab.xlim((0, 1))
    pylab.ylim((0, y_limit))
    pylab.savefig('repair_ml_hist_%s_%s_%s_%s.pdf' % (parameter, strategy, decay_type, label), format='pdf')
    pylab.close()
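A toy call with made-up data; it assumes group is a pandas DataFrame carrying both the parameter column and the num_lines column used for the weights (all names below are placeholders).

import numpy as np
import pandas as pd

group = pd.DataFrame({'explore':   np.random.uniform(0, 1, 50),
                      'num_lines': np.random.randint(1, 100, 50)})
hist_values('explore', group, 'uniform', 'linear', 'toy')   # writes repair_ml_hist_explore_uniform_linear_toy.pdf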
Example No. 3
 def __calculate__(self):
     global USE_IDENTITY_LINE
     sd1 = (self.signal_plus - self.signal_minus) / pl.sqrt(2)
     if USE_IDENTITY_LINE:
         return pl.sqrt(pl.sum((sd1**2)) / len(self.signal_plus))
     else:
         return pl.sqrt(pl.var(sd1))
Example No. 4
def vRedPartial(idat, odat, maps, idcs, vaxis=0):
    """
    computes the variance reduction for each coordinate after prediction,
    given input data idat, output data odat, the maps that map idat to odat,
    and the indices that were used to compute the regression.
    An out-of-sample prediction is performed.
    idat, odat: shape n x d, n: number of measurements, d: number of dimensions
    maps: a list of matrices A where idat*A predicts odat
    """
    rvar = []
    for A, idx in zip(maps,idcs):
        tidx = otheridx(idx,idat.shape[0])
        pred = dot(idat[tidx,:],A)
        rvar.append(var(odat[tidx,:] - pred, axis=vaxis) /
                        var(odat[tidx,:], axis=vaxis))
    return rvar
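The otheridx helper used above is not part of the snippet; a plausible minimal stand-in, assuming it simply returns the complement of the training indices (i.e. the out-of-sample rows), would be:

import numpy as np

def otheridx(idx, n):
    # hypothetical helper: all row indices in range(n) that are not in idx
    mask = np.ones(n, dtype=bool)
    mask[np.asarray(idx)] = False
    return np.where(mask)[0]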
Example No. 5
 def __calculate__(self):
     global USE_IDENTITY_LINE
     sd1 = (self.signal_plus - self.signal_minus) / pl.sqrt(2)
     if USE_IDENTITY_LINE:
         return pl.sqrt(pl.sum((sd1 ** 2)) / len(self.signal_plus))
     else:
         return pl.sqrt(pl.var(sd1))
Example No. 6
 def calZsocre(self,core,surface,sampleSize):
     coreMean=mean(core)
     s=[]
     for i in range(sampleSize):
         s.append(mean(sample(surface,len(core))))
     sig= sqrt(var(s))
     return (coreMean-mean(s))/sig
Example No. 7
def optimize_segment(data, dt, interval_start, ftol=1e-3):
    """
    Estimate the recurrence time of a periodic signal within data.

    data : 1d numpy.ndarray
        the periodic signal

    dt : float
        sampling interval for data

    interval_start : float
        the interval that is taken as initial guess for
        the optimization (in the same unit as dt)

    ftol : float
        tolerance on the solution (passed to scipy.optimize.fmin)
    """

    i = fmin(
        lambda interval: p.sum(
            p.var(
                segment(data, dt, interval),
                axis=0)),
        interval_start,
        ftol=ftol)
    return i[0]
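optimize_segment relies on a segment helper from the same module (see also Example No. 40), plus scipy.optimize's fmin and pylab imported as p. A simplified stand-in for segment, assuming an integer number of samples per interval and truncation of the trailing remainder, could look like this:

import numpy as np

def segment(data, dt, interval):
    # cut data into consecutive windows of round(interval / dt) samples and
    # stack them row-wise, so that var(..., axis=0) measures the spread
    # across repetitions of the presumed period
    n = int(round(float(interval) / dt))
    n_seg = len(data) // n
    return np.reshape(data[:n_seg * n], (n_seg, n))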
Example No. 8
def print_variance(df, strategies, decay_types):
    df2 = df[(df['strategy'].isin(strategies)) & (df['decay_type'].isin(decay_types))]
    for name, group in df2.groupby(['strategy', 'decay_type']):
        print name
        max_explores = []
        max_decays = []
        for sheet in group['sheet'].unique():
            group2 = group[group['sheet'] == sheet]
            max_likelihood = max(group2['likelihood'])
            group2 = group2[group2['likelihood'] == max_likelihood]
            #print group2
            explores = list(group2['explore'])
            decays = list(group2['decay'])
            max_explores += explores
            max_decays += decays
        print "explore", pylab.var(max_explores, ddof=1)
        print "decay", pylab.var(max_decays, ddof=1)
def get_coefficient_of_determination(real_y_vals, predicted_y_vals):
    """
    :param real_y_vals: ndarray
    :param predicted_y_vals: ndarray
    :return: float
    """
    mean_error = ((real_y_vals - predicted_y_vals) ** 2).sum() / len(real_y_vals)
    variance = pylab.var(real_y_vals)
    return 1 - (mean_error / variance)
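A quick numeric check of the formula R^2 = 1 - MSE / Var(y), with made-up values:

import pylab

real = pylab.array([1.0, 2.0, 3.0, 4.0])
pred = pylab.array([1.1, 1.9, 3.2, 3.8])
r2 = get_coefficient_of_determination(real, pred)   # 0.98, close to 1 for a good fit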
Example No. 10
def estimate_skew_angle(image, angles):
    estimates = []
    for a in angles:
        v = mean(interpolation.rotate(image, a, order=0, mode='constant'),
                 axis=1)
        v = var(v)
        estimates.append((v, a))
    _, a = max(estimates)
    return a
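A hedged usage sketch with a synthetic striped image (mean, var and scipy.ndimage's interpolation module are assumed to be imported in the original file):

import numpy as np
from numpy import mean, var
from scipy.ndimage import interpolation

image = np.zeros((200, 200))
image[::10, :] = 1.0                                   # horizontal "text lines"
skewed = interpolation.rotate(image, 2.0, order=0, mode='constant')

angles = np.linspace(-5, 5, 21)
angle = estimate_skew_angle(skewed, angles)            # expected near -2, the angle that undoes the skew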
Example No. 11
def icc(df, category, mu=None):
    if mu is None:
        mu = pop_mean(df)

    df2 = df.drop_duplicates(subset=['name', category])
    betas = []
    errors = []
    for cat_val in df[category].unique():
        alphas = df2['alpha'][df2[category] == cat_val]
        alphas -= mu
        beta = pylab.mean(alphas)
        betas.append(beta)
        epsilon = alphas - beta
        errors += list(epsilon)

    beta_var = pylab.var(betas, ddof=1)
    error_var = pylab.var(errors, ddof=1)

    return beta_var / (beta_var + error_var)
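A toy call with made-up data (column names are placeholders; mu is passed explicitly so the pop_mean helper referenced above is not needed):

import pandas as pd
import pylab

df = pd.DataFrame({'name':      ['a', 'b', 'c', 'd', 'e', 'f'],
                   'condition': ['x', 'x', 'x', 'y', 'y', 'y'],
                   'alpha':     [0.9, 1.1, 1.0, 1.9, 2.1, 2.0]})
icc_value = icc(df, 'condition', mu=pylab.mean(df['alpha']))   # close to 1: most variance is between conditions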
Example No. 12
def variance_of_laplacian(filename, heightDivisor, widthDivisor, counter):
    # Variables
    image = Image.open(filename)  # open file
    (rWidth,
     rHeight) = image.size  # returns width, height or x, y or cols, rows
    image.close()
    # close file
    totalImageRows = pl.arange(rHeight / heightDivisor)
    totalImageColumns = pl.arange(rWidth / widthDivisor)
    numRows_MiniMatrix = pl.arange(heightDivisor)
    numCols_MiniMatrix = pl.arange(widthDivisor)
    miniMatrix = pl.zeros(
        (heightDivisor, widthDivisor
         ))  # heightDivisor x widthDivisor Matrix to copy elements to
    varianceMatrix = pl.zeros((rHeight / heightDivisor, rWidth / widthDivisor))
    Laplacian_Kernel = (pl.array([[0., -1., 0.], [-1., 4., -1.], [0., -1., 0.]
                                  ])) * (1. / 60)

    # 1. Convert image to matrix.
    toBeConverted = pil.Image.open(filename)
    # resize (columns, width)
    toBeConverted = toBeConverted.resize(
        [(rWidth / widthDivisor) * widthDivisor,
         (rHeight / heightDivisor) * heightDivisor],
        resample=Image.LANCZOS)  # resize using LANCZOS filtering
    # truncating the width and height so that they're divisible by heightDivisor & widthDivisor
    imageMatrix = pl.asarray(toBeConverted.convert(
        'L'))  # convert image to greyscale; return matrix
    toBeConverted.close()
    # close file

    # 2. Split Image into sub-matrices, each of size heightDivisor x widthDivisor. For each
    #    heightDivisor x widthDivisor sub-matrix, convolve with the kernel. Calculate the variance
    #    of this convolution. Place variance in a (rHeight / heightDivisor) x (rWidth / widthDivisor) matrix.

    print "\t[", counter, "] Convolving subdivisions of image with Laplacian Kernel... Calculating Variance... ",
    for subset_of_rows in totalImageRows:  # TOTAL Image Matrix
        for subset_of_columns in totalImageColumns:
            image_row = subset_of_rows * heightDivisor  # keeps track of larger matrix's row index to copy from
            image_col = subset_of_columns * widthDivisor  # keeps track of larger matrix's column index to copy from
            for row in numRows_MiniMatrix:
                for col in numCols_MiniMatrix:
                    miniMatrix[row][col] = imageMatrix[image_row +
                                                       row][image_col + col]
            # 3. Convolve part of the image with the Laplacian kernel
            Convolve = signal.fftconvolve(miniMatrix,
                                          Laplacian_Kernel,
                                          mode='full')
            # 4. Compute the variance of the convolution.
            Variance = pl.var(Convolve)
            # 5. Store variance in entry of varianceMatrix
            varianceMatrix[subset_of_rows][subset_of_columns] = Variance
    # 6. return the varianceMatrix
    print "Done."
    return varianceMatrix
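The imports this function needs are not part of the snippet; a reasonable guess is pylab as pl, scipy.signal as signal, and PIL imported both as Image and as pil. A hypothetical call (the filename and block size below are placeholders):

# assumed imports for the module that defines variance_of_laplacian (not shown in the snippet)
import pylab as pl
import PIL as pil
from PIL import Image
from scipy import signal

focus_map = variance_of_laplacian('example_photo.jpg', 8, 8, 1)   # per-block (8x8 pixel) focus measure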
Example No. 13
    def estimate_skew_angle(self, image, angles):
        param = self.param
        estimates = []

        for a in angles:
            v = mean(interpolation.rotate(image, a, order=0, mode='constant'), axis=1)
            v = var(v)
            estimates.append((v, a))
        if param['debug'] > 0:
            plot([y for x, y in estimates], [x for x, y in estimates])
            ginput(1, param['debug'])
        _, a = max(estimates)
        return a
Example No. 14
def r_squared(y, estimated):
    """
    Calculate the R-squared error term.
    Args:
        y: list with length N, representing the y-coords of N sample points
        estimated: a list of values estimated by the regression model
    Returns:
        a float for the R-squared error term
    """
    yVals = pylab.array(y)
    estimatedVals = pylab.array(estimated)
    error = ((estimatedVals - yVals)**2).sum()
    meanError = error / len(yVals)
    return 1 - (meanError / pylab.var(yVals))
Example No. 15
def plot_edge_lengths():
    import pandas as pd
    df = pd.read_csv('imaris_lengths.csv', names = ['neuron', 'length'])
    pylab.hist(df['length'])
    pylab.savefig('imaris/imaris_lengths.pdf', format='pdf')
    pylab.close()
    mu = pylab.mean(df['length'])
    sigma2 = pylab.var(df['length'], ddof=1)
    l = 1.0 / mu
    l2 = l ** 2
    print "mean", mu
    print "lambda", l
    print 'variance', sigma2
    print 'var-hat', 1.0 / l2
Example No. 16
def visualize ():
    sample_rate, snd = load_sample(".\\hh-closed\\dh9.WAV")
    print snd.dtype
    data = normalize(snd)
    print data.shape
    n = data.shape[0]
    length = float(n)
    print length / sample_rate, "s"
    timeArray = arange(0, length, 1)
    timeArray = timeArray / sample_rate
    timeArray = timeArray * 1000  #scale to milliseconds
    ion()
    if False:
        plot(timeArray, data, color='k')
        ylabel('Amplitude')
        xlabel('Time (ms)')
        raw_input("press enter")
        exit()
    p = fft(data) # take the fourier transform
    nUniquePts = int(ceil((n+1)/2.0))
    print nUniquePts
    p = p[0:nUniquePts]
    p = abs(p)
    p = p / float(n) # scale by the number of points so that
                 # the magnitude does not depend on the length
                 # of the signal or on its sampling frequency
    p = p**2  # square it to get the power

    # multiply by two (see technical document for details)
    # odd nfft excludes Nyquist point
    if n % 2 > 0: # we've got odd number of points fft
        p[1:len(p)] = p[1:len(p)] * 2
    else:
        p[1:len(p) -1] = p[1:len(p) - 1] * 2 # we've got even number of points fft

    print p
    freqArray = arange(0, nUniquePts, 1.0) * (sample_rate / n);
    plot(freqArray/1000, 10*log10(p), color='k')
    xlabel('Frequency (kHz)')
    ylabel('Power (dB)')
    raw_input("press enter")

    m = average(freqArray, weights = p)
    v = average((freqArray - m)**2, weights= p)
    r = sqrt(mean(data**2))
    s = var(data**2)
    print "mean freq", m #TODO: IMPORTANT: this is currently the mean *power*, not the mean freq.  What we want is mean freq weighted by power
    print "var freq", v
    print "rms", r
    print "squared variance", s
Example No. 17
def parse_times(infile):

    if not os.path.exists(infile):
        exit(1)

    time_relative = {}
    time_delta = {}

    with open(infile) as f:
        for line in f:
            if line.find(" = ") != -1:
                rawinfo = line.split(" = ")
                if "Ping stat" in rawinfo[0]:
                    time_relative[rawinfo[0]] = ast.literal_eval(rawinfo[1])
                else:
                    time_relative[rawinfo[0]] = float(rawinfo[1])
    time_delta["test"] = time_relative["Stop Test"]
    time_delta["network-uptime"] = time_relative[
        "Network stopped"] - time_relative["Network start"]
    time_delta["network-start"] = time_relative[
        "Network started"] - time_relative["Network start"]
    time_delta["network-stop"] = time_relative[
        "Network stopped"] - time_relative["Network stop"]
    time_delta["mote-uptime"] = time_relative["Mote stopped"] - time_relative[
        "Mote start"]
    #time_delta["mote-start"] = time_relative["Mote detect start"] - time_relative["Mote start"]
    time_delta["mote-stop"] = time_relative["Mote stopped"] - time_relative[
        "Mote reached"]
    time_delta["mote-detect"] = time_relative["Mote detected"] - time_relative[
        "Mote start"]

    time_delta[
        "ping1"] = time_relative["Mote reached"] - time_relative["Mote ping"]

    if time_relative.has_key("Moved mote ping"):
        time_delta["pingm"] = time_relative[
            "Moved mote reached"] - time_relative["Moved mote ping"]
    elif time_relative.has_key("Mote ping2"):
        time_delta["ping2"] = time_relative["Mote reached2"] - time_relative[
            "Mote ping2"]

    if time_relative.has_key("Ping stat"):
        #time_relative["Ping stat"] = ast.literal_eval(time_relative["Ping stat"])
        time_delta["ping2-stat"] = time_relative["Ping stat"]
        time_delta["ping2-mean"] = pylab.mean(time_relative["Ping stat"])
        time_delta["ping2-std"] = pylab.std(time_relative["Ping stat"])
        time_delta["ping2-var"] = pylab.var(time_relative["Ping stat"])

    return time_delta
def r_squared(y, estimated):
    """
    Calculate the R-squared error term.

    Args:
        y: 1-d pylab array with length N, representing the y-coordinates of the
            N sample points
        estimated: an 1-d pylab array of values estimated by the regression
            model

    Returns:
        a float for the R-squared error term
    """
    error = ((y - estimated)**2).sum()
    meanError = error / len(y)
    return 1 - (meanError / pylab.var(y))
Example No. 19
def varRed(idat, odat, A, bootstrap=None):
    """
    computes the variance reduction when using A*idat[:,x].T as predictor for odat[:,x].T.
    If bootstrap is an integer > 1, a bootstrap with the given number of iterations
    will be performed.
    returns
    tVred, sVred: the total relative variance after prediction (all coordinates)
       and the variance reduction for each coordinate separately. These are a
       scalar and an array, or lists of scalars and arrays when a bootstrap is performed.
       
    Note: in the bootstrapped results, the first element refers to the "full" 
    data variance reduction.
    """

    nBoot = bootstrap if type(bootstrap) is int else 0
    if nBoot < 2:
        nBoot = 0

    odat_pred = dot(A, idat.T)
    rdiff = odat_pred - odat.T  # remaining difference
    rvar = var(rdiff, axis=1) / var(odat.T, axis=1)  # relative variance
    trvar = var(rdiff.flat) / var(odat.T.flat)  # total relative variance

    if nBoot > 0:
        rvar = [
            rvar,
        ]
        trvar = [
            trvar,
        ]
    for rep in range(nBoot - 1):
        indices = randint(0, odat.T.shape[1], odat.T.shape[1])
        odat_pred = dot(A, idat[indices, :].T)
        rdiff = odat_pred - odat[indices, :].T  # remaining difference
        rvar.append(var(rdiff, axis=1) /
                    var(odat.T, axis=1))  # relative variance
        trvar.append(var(rdiff.flat) /
                     var(odat.T.flat))  # total relative variance

    return trvar, rvar
Example No. 20
def parse_times(infile):

    if not os.path.exists(infile):
        exit(1)

    time_relative = {}
    time_delta = {}
    
    with open(infile) as f:
        for line in f:
            if line.find(" = ") != -1:
                rawinfo = line.split(" = ")
                if "Ping stat" in rawinfo[0]:
                    time_relative[rawinfo[0]] = ast.literal_eval(rawinfo[1])
                else:
                    time_relative[rawinfo[0]] = float(rawinfo[1])
    time_delta["test"] = time_relative["Stop Test"]
    time_delta["network-uptime"] = time_relative["Network stopped"] - time_relative["Network start"]
    time_delta["network-start"] = time_relative["Network started"] - time_relative["Network start"]
    time_delta["network-stop"] = time_relative["Network stopped"] - time_relative["Network stop"]
    time_delta["mote-uptime"] = time_relative["Mote stopped"] - time_relative["Mote start"]
    #time_delta["mote-start"] = time_relative["Mote detect start"] - time_relative["Mote start"]
    time_delta["mote-stop"] = time_relative["Mote stopped"] - time_relative["Mote reached"]
    time_delta["mote-detect"] = time_relative["Mote detected"] - time_relative["Mote start"]

    time_delta["ping1"] = time_relative["Mote reached"] - time_relative["Mote ping"]

    if time_relative.has_key("Moved mote ping"):
        time_delta["pingm"] = time_relative["Moved mote reached"] - time_relative["Moved mote ping"]
    elif time_relative.has_key("Mote ping2"):
        time_delta["ping2"] = time_relative["Mote reached2"] - time_relative["Mote ping2"]

    if time_relative.has_key("Ping stat"):
        #time_relative["Ping stat"] = ast.literal_eval(time_relative["Ping stat"])
        time_delta["ping2-stat"] = time_relative["Ping stat"]
        time_delta["ping2-mean"] = pylab.mean(time_relative["Ping stat"])
        time_delta["ping2-std"] = pylab.std(time_relative["Ping stat"])
        time_delta["ping2-var"] = pylab.var(time_relative["Ping stat"])

    return time_delta
Example No. 21
def process_window(sample_rate, data):
    # print "processing window"
    # print data.dtype
    # print data.shape
    n = data.shape[0]
    length = float(n)
    # print length / sample_rate, "s"
    p = fft(data) # take the fourier transform
    nUniquePts = int(ceil((n+1)/2.0))
    p = p[0:nUniquePts]
    p = abs(p)
    p = p / float(n) # scale by the number of points so that
                 # the magnitude does not depend on the length
                 # of the signal or on its sampling frequency
    p = p**2  # square it to get the power

    # multiply by two (see technical document for details)
    # odd nfft excludes Nyquist point
    if n % 2 > 0: # we've got odd number of points fft
        p[1:len(p)] = p[1:len(p)] * 2
    else:
        p[1:len(p) -1] = p[1:len(p) - 1] * 2 # we've got even number of points fft
    freqArray = arange(0, nUniquePts, 1.0) * (sample_rate / n);

    if sum(p) == 0:
        raise Silence
    m = average(freqArray, weights = p)
    v = sqrt(average((freqArray - m)**2, weights= p))
    r = sqrt(mean(data**2))
    s = var(data**2)
    print "mean freq", m #TODO: IMPORTANT: this is currently the mean *power*, not the mean freq.  What we want is mean freq weighted by power
    # print freqArray
    # print (freqArray - m)
    # print p
    print "var freq", v
    print "rms", r
    print "squared variance", s
    return [m, v, r, s]
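A usage sketch for process_window with one second of a pure 440 Hz tone (made-up data; fft, arange, average, mean, sqrt, var and ceil are assumed to come from a pylab/numpy star import in the original file):

import numpy as np

sample_rate = 8000
t = np.arange(sample_rate) / float(sample_rate)
tone = np.sin(2 * np.pi * 440 * t)
m, v, r, s = process_window(sample_rate, tone)   # m should land close to 440 Hz for this tone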
goldnet = Network()
goldnet.read_goldstd(goldnets[data.keys()[0]])

# Get the variances overall and per time point
# Per time point variances
tp_vars = {}
for name in data.keys():
    values_by_tp = []
    for i in range(len(data[name][0].experiments)):
        tp_vals = []
        for j, rep in enumerate(data[name]):
            tp_vals.append([])
            for gene in rep.gene_list:
                tp_vals[j].append(rep.experiments[i].ratios[gene])
        values_by_tp.append(pylab.var(tp_vals))
        print tp_vals

    tp_vars[name] = pylab.mean(values_by_tp)

print "Variance by TimePoint:"
for name in sorted(tp_vars.keys()):
    print name, tp_vars[name]

print "Variance by Gene:"
overall_gene_vars = {}
for name in data.keys():
    gene_vars = {}
    for j, rep in enumerate(data[name]):
        gv = {}
        for i in range(len(data[name][0].experiments)):
Example No. 23
def figure1 ( ):
    w,h = 28,15
    fig = pl.figure ( figsize=(fullwidth,h*fullwidth/w) )

    a_,b_,d_ = place_axes ( fig, 1.5,9, [9,8,9], [6]*3,
            [True]+[False]+[True], [.5,1.5,1.], (w,h) )
    c1,c2,e1,e2,e3 = place_axes ( fig, 1.5,2, [5.2,5.2,5.2,5.2,5.2], [6]*5,
            [False]*5, [.5]*5, (w,h) )
    a_.text ( .05, laby, r"\textbf{a}", transform=a_.transAxes )
    b_.text ( .05, laby, r"\textbf{b}", transform=b_.transAxes )
    d_.text ( .05, laby, r"\textbf{c}", transform=d_.transAxes )
    c1.text ( .05, laby, r"\textbf{d}", transform=c1.transAxes )
    c2.text ( .05, laby, r"\textbf{e}", transform=c2.transAxes )
    e1.text ( .05, laby, r"\textbf{f}", transform=e1.transAxes )
    e2.text ( .05, laby, r"\textbf{g}", transform=e2.transAxes )
    e3.text ( .05, laby, r"\textbf{h}", transform=e3.transAxes )


    # Figure 1A ==============================
    print "nu0",results['model_nohist'].w,results['model_nohist'].nu
    print results['model_nohist'].applythreshold
    plotinfo_ = dict ( plotinfo )
    if observer in ["KP",'sim_KP','sim_KP_nh']:
        plotinfo_['conditions'] = (0,5)
    plotinfo_['xmin'] = -25
    plotinfo_['xmax'] =  25
    if observer in ['nico','konrad']:
        plotinfo_['xmin'] = -100
        plotinfo_['xmax'] =  100
    print plotinfo_
    convenience.pmfplot ( data, results, plotinfo_, a_, errors=False )
    if observer == 'pk':
        a_.set_xlim ( -30, 30  )
        a_.set_xticks ( (-30,-20,-10,0,10,20,30) )
    a_.set_title ('')
    a_.set_ylim ( -.03, 1.03 )
    a_.yaxis.set_major_formatter ( myformatter )
    a_.xaxis.set_major_formatter ( myformatter )
    a_.set_ylabel ( "Probability" )
    a_.set_xlabel ( "Transduced stimulus intensity" )

    # Figure 1B ===============================
    textfile.write ("Figure 1B:\n" )
    l_obs,c95,caic,cpe = convenience.permutationplot ( data, results, plotinfo, b_, noaic=True )
    b_.set_title ( '' )
    b_.set_xlabel ( "Log-likelihood" )
    b_.xaxis.set_major_formatter ( myformatter )
    b_.set_xlim ( trimmed_hlim ( results['permutation_wh'][:,0], l_obs, (-2000,1000),20 ) )
    for l in b_.get_children():
        if isinstance ( l, matplotlib.legend.Legend ):
            pl.setp(l, visible=False )
    print l_obs,c95,caic,cpe,caic-6
    textfile.write ( "  l_obs = %g\n  l_95%% = %g\n  l_AIC = %g\n  cpe = %g\n" % (l_obs,c95,caic,cpe) )

    if getattr ( data, 'audio', False ):
        easy,difficult = data.performance_filter ()
        M = statistics.EvaluationCollector ( results['model_w_hist'], easy, difficult )
    else:
        M = statistics.EvaluationCollector ( results['model_w_hist'] )
    M(results['model_w_hist'])

    print results['permutation_wh'].shape

    # Figure 1Ca
    textfile.write ( "Figure 1C:\n" )
    # Variance explained on difficult trials
    hist,c95 = statistics.historytest ( results['permutation_wh'][:,3] )
    graphics.montecarlo_test ( M.vdifficult, hist, c95, ax=c1, labeling='other' )
    c1.set_xlabel ( "Var. explained [\%]\n difficult stimuli", fontsize=8, multialignment='center' )
    c1.xaxis.set_major_formatter ( prcformatter )
    c1.set_xlim ( 0, max(M.vdifficult*1.1, hist[1].max()) )
    textfile.write ( "  variance explained on difficult trials: %g, crit: %g, cpe: %g\n" %\
            (M.vdifficult,c95,pl.mean ( results['permutation_wh'][:,3]<M.vdifficult)) )

    # figure 1Cb
    # Variance explained on easy trials
    hist,c95 = statistics.historytest ( results['permutation_wh'][:,4] )
    graphics.montecarlo_test ( M.veasy, hist, c95, ax=c2, labeling='other' )
    c2.set_xlabel ( "Var. explained [\%]\n easy stimuli", fontsize=8, multialignment='center' )
    c2.xaxis.set_major_formatter ( prcformatter )
    c2.set_xlim ( 0, max(M.veasy*1.1, hist[1].max()) )
    textfile.write ( "  variance explained on easy trials: %g, crit: %g, cpe: %g\n" % \
            (M.veasy,c95,pl.mean( results['permutation_wh'][:,4]<M.veasy)) )
    textfile.write ( "  variance explained by stimulus on easy trials: %g\n" % \
            (M.vstimulus,) )

    Mh = results['model_w_hist']
    M0 = results['model_nohist']
    print "LL/trial=",(Mh.loglikelihood-results['permutation_wh'][:,0].mean())
    current_stimulus = pl.dot ( Mh.X[:,1:Mh.hf0], Mh.w[1:Mh.hf0] )
    history_features = pl.dot ( Mh.X[:,Mh.hf0:],  Mh.w[Mh.hf0:] )
    decision_signal  = current_stimulus + history_features
    textfile.write ( "  predicted slope reduction: %g\n" % ((1-0.25*pl.var(history_features)),) )
    textfile.write ( "  actual slope reductions (al_wh/al_nh): %s\n" %  (str((Mh.w[1:Mh.hf0]/M0.w[1:M0.hf0]).tolist()),) )


    # figure 1D
    S,C,V = M.stimuli,M.conditions,M.variance_explained
    conditions = pl.unique ( C )
    print conditions,plotinfo_
    for j,c in enumerate(plotinfo_['conditions']):
        print j,c,plotinfo['colors']
        i = C==plotinfo_['indices'][c][1]
        d_.plot ( S[i], V[i], '.', color=plotinfo['colors'][j] )
    pl.setp ( d_, xlim=(0,max(-plotinfo['xmin'],plotinfo['xmax'])), xlabel='Stimulus intensity',
            ylabel='Var. explained [\%]', ylim=(-.03,1.03) )
    d_.yaxis.set_major_formatter ( prcformatter )
    d_.xaxis.set_major_formatter ( myformatter )
    d_.set_xticks ( (0,10,20,30) )
    d_.set_xlim ( -.1, 30.1 )

    # Figure 1E
    textfile.write ( "Figure 1E:\n" )
    # prediction from history+stimulus
    pS_samples = results['permutation_wh'][:,6]
    pSH_samples = results['permutation_wh'][:,7]
    hist,c95 = statistics.historytest ( results['permutation_wh'][:,7] )
    if pl.var ( pSH_samples ) < 1e-7:
        mpSH = pl.mean ( pSH_samples )
        hist = (pl.array ( [len(pSH_samples)] ),pl.array([mpSH-1e-3,mpSH+1e-3]))
    graphics.montecarlo_test ( M.pSH, hist, c95, Caic=M.pstim, ax=e1, labeling='other' )
    e1.set_xlabel ( 'Prediction acc. [\%]\ndifficult stimuli', fontsize=8, multialignment='center' )
    e1.xaxis.set_major_formatter ( prcformatter )
    e1.set_xlim ( trimmed_hlim ( pSH_samples, (M.pSH,M.pstim)))
    print pSH_samples.mean(),pSH_samples.std(),"---"

    textfile.write ( "  prediction accuracy H+S, difficult: %g, crit: %g, cpe: %g\n" %\
            (M.pSH, c95, pl.mean(pSH_samples<M.pSH)) )
    textfile.write ( "  prediction accuracy S, difficult: %g, crit: %g, cpe: %g\n" %\
            (M.pstim, pl.prctile(pS_samples,95), pl.mean(pS_samples<M.pstim)) )


    hist,c95 = statistics.historytest ( results['permutation_wh'][:,5] )
    graphics.montecarlo_test ( M.phist, hist, c95, Caic=M.pstim, ax=e2, labeling='other' )
    e2.set_xlabel ( 'Prediction acc. [\%]\ndifficult stimuli', fontsize=8, multialignment='center' )
    e2.xaxis.set_major_formatter ( prcformatter )
    pH_samples = results['permutation_wh'][:,5]
    e2.set_xlim ( trimmed_hlim ( pH_samples, M.phist))
    textfile.write ( "  prection accuracy H, difficult: %g, crit: %g, cpe: %g\n" %\
            (M.phist,c95,pl.mean ( pH_samples<M.phist)) )

    hist,c95 = statistics.historytest ( results['permutation_wh'][:,8] )
    graphics.montecarlo_test ( M.peasy, hist, c95, ax=e3, labeling='other' )
    e3.set_xlabel ( 'Prediction acc. [\%]\neasy stimuli', fontsize=8, multialignment='center' )
    e3.xaxis.set_major_formatter ( prcformatter )
    peasy_samples = results['permutation_wh'][:,8]
    e3.set_xlim ( trimmed_hlim ( peasy_samples, M.peasy))
    textfile.write ( "  prection accuracy H, easy: %g, crit: %g, cpe: %g\n" %\
            (M.peasy,c95,pl.mean ( peasy_samples<M.peasy)) )

    # a_.xaxis.set_major_locator (
    #         tckr ( density=0.45, figure=fig, which=0 ) )
    b_.xaxis.set_major_locator (
            tckr ( density=0.2, figure=fig, which=0 ) )
    for ax in (c1,c2):
        ax.xaxis.set_major_locator (
                tckr ( density=0.3, figure=fig, which=0 ) )
        ax.set_xlim ( 0, None )
    d_.xaxis.set_major_locator ( tckr ( density=0.4, figure=fig,which=0 ) )
    d_.set_xlim ( -.2, None )
    for ax in (e1,e2,e3):
        ax.xaxis.set_major_locator (
                MaxNLocator ( 5 ) )
                # tckr ( density=0.3, figure=fig, which=0 ) )
    if observer in ['pk']:
        e1.set_xticks ( pl.array((60,62,64,66))/100. )
        e1.set_xlim ( .60,.67 )

    pl.savefig ( "figures/%s1.pdf" % (figname,) )
    pl.savefig ( "figures/%s1.eps" % (figname,) )
Example No. 24
    def _stop_training(self, debug=False):
        """
        Finish the training, i.e. for the time series plots: take the
        accumulated time series and divide by the number of samples per
        condition.
        For the
        """
        # Compute avg
        for label in self.mean_time_series.keys():
            self.mean_time_series[label] /= self.samples_per_condition[label]
            self.time_series_histo[label] = \
                pylab.array(self.time_series_histo[label])

            # Compute error of desired type - strip the numerals:
            if self.error_type is not None:
                if self.error_type.strip('0123456789.') == 'SampleStdDev':
                    self.error[label] = \
                     pylab.sqrt(pylab.var(self.time_series_histo[label],0))
                elif self.error_type.strip('0123456789.') == 'StdError':
                    self.error[label] = \
                     pylab.sqrt(pylab.var(self.time_series_histo[label],0)) /\
                     pylab.sqrt(pylab.shape(self.time_series_histo[label])[0])

                multiplier = float(''.join([
                    nr for nr in self.error_type if (nr.isdigit() or nr == ".")
                ]))
                self.error[label] = multiplier * self.error[label]

        # other plots only if features were passed
        if self.feature_vector is not None:
            self.feature_time_series = \
                convert_feature_vector_to_time_series(self.feature_vector,
                                                      self.sample_data)

            # in the alternative scaling space, the feature "importance" is
            # determined by the feature values
            # weighted by the expected difference in time series values
            # between the two classes (difference of avg std and avg target)
            # The standard P3 and LRP cases are handled separately to make
            # sure that the sign of the difference is consistent
            if self.alternative_scaling:
                if all([
                        True if label_iter in ['Target', 'Standard'] else False
                        for label_iter in self.mean_time_series.keys()
                ]):
                    self.feature_time_series *= (
                        self.mean_time_series['Target'] -
                        self.mean_time_series['Standard'])
                elif all([
                        True if label_iter in ['LRP', 'NoLRP'] else False
                        for label_iter in self.mean_time_series.keys()
                ]):
                    self.feature_time_series *= (
                        self.mean_time_series['LRP'] -
                        self.mean_time_series['NoLRP'])
                else:
                    self.feature_time_series *= (
                        self.mean_time_series[self.mean_time_series.keys()[0]]
                        -
                        self.mean_time_series[self.mean_time_series.keys()[1]])
                    print "AverageFeatureVis (alternative_scaling): " +\
                      "Present classes don't match the standards " +\
                      "(Standard/Target or LRP/NoLRP). Used the difference "+\
                      "%s - %s" % (self.mean_time_series.keys()[0],
                       self.mean_time_series.keys()[1]) +" for computation "+\
                       "of the alternative scaling."

            # greatest feature val that occurs is used for the normalization
            # of the color-representation of the feature values
            self.max_feature_val = \
                (abs(self.feature_time_series)).max(0).max(0)
            self.normalizer = colors.Normalize(vmin=-self.max_feature_val,
                                               vmax=self.max_feature_val)
            cdict = {
                'red': [(0.0, 1.0, 1.0), (0.5, 1.0, 1.0), (1.0, 0.0, 0.0)],
                'green': [(0.0, 0.0, 0.0), (0.5, 1.0, 1.0), (1.0, 0.0, 0.0)],
                'blue': [(0.0, 0.0, 0.0), (0.5, 1.0, 1.0), (1.0, 1.0, 1.0)]
            }
            self.own_colormap = \
                colors.LinearSegmentedColormap('owncm', cdict, N=256)

            # sort the features with descending importance
            self.indexlist = pylab.transpose(
                self.feature_time_series.nonzero())
            indexorder = abs(
                self.feature_time_series[abs(self.feature_time_series) > self.
                                         important_feature_thresh]).argsort()

            self.indexlist = self.indexlist[indexorder[-1::-1]]  #reverse order
            self.indexlist = map(
                list, self.indexlist[:len(self.feature_vector) *
                                     self.percentage_of_features / 100])

            self.histo_plot = self._generate_histo_plot()

            try:
                # try to generate a plot of the feature crosscorrelation
                # matrix. Might fail if the threshold is set such that no
                # features are left.
                for label in self.mean_time_series.keys():
                    self.labeled_corr_matrix[label] = \
                        self._generate_labeled_correlation_matrix(label)
                    self.corr_plot[label] = \
                        self._get_corr_plot(self.corr_important_feats[label],
                                            label)

                # if 2 class labels exist, also compute the difference in the
                # cross correlation between the classes.
                if len(self.corr_important_feats.keys()) == 2:
                    self.corr_plot['Diff'] = self._get_corr_plot((
                        self.corr_important_feats
                            [self.corr_important_feats.keys()[0]]
                      - self.corr_important_feats
                            [self.corr_important_feats.keys()[1]]),
                        self.corr_important_feats.keys()[0] + ' - ' + \
                            self.corr_important_feats.keys()[1])
            except TypeError:
                import warnings
                warnings.warn("\n\nFeatureVis doesn't have enough important" +
                              " features left for correlation plots..." +
                              " Check threshold.\n")

        # Compute avg time series plot anyway
        self.ts_plot = self._generate_time_series_plot()
Example No. 25
c = [0.8916583583 ,0.9364599092 ,0.9418026692 ,0.9660107754 ,0.9735619037 
,0.9752730086 ,0.9795233774 ,0.9736945491 ,0.983412122 ,0.8847568897 
,0.937049294 ,0.9556460673 ,0.9521823306 ,0.9457192893 ,0.9755469101 
,0.9781225838 ,0.9804915898 ,0.7425709229 ,0.885471973 ,0.8549843111 
,0.9540545879 ,0.9638071451 ,0.9549170066 ,0.9591822503 ,0.9771572723 
,0.9802537765 ,0.9703582279 ,0.9436619718 ,0.9485614647 ,0.8532666905 
,0.9380387931 ,0.9383123181 ,0.9020750758 ,0.8996929376 ,0.9635932203 
,0.9663973089 ,0.9712227524 ,0.9697056889 ,0.9709112973 ]
import numpy as np
import matplotlib.pyplot as plt
from pylab import var,mean
cc = range(len(c))
plt.figure()
v = var(c)
plt.errorbar(cc, c, xerr=v,label=str(var(c)))
plt.plot(cc,[mean(c)]*len(c),'--',label=str(mean(c)))
plt.plot(cc,[0]*len(c),'.w')
plt.title("Sequential Parallel Ratio")
plt.legend(loc='lower center')
plt.show()

Example No. 26
 def __calculate__(self):
     sd2 = (self.signal_plus + self.signal_minus) / pl.sqrt(2)
     return pl.sqrt(pl.var(sd2))
Example No. 27
 def plot_variance(self, X, mat):
     var = p.var(mat, 0, dtype=p.float64)
     p.plot(X, var)
     p.show()
Example No. 28
def fitQuality(L,N):
	"""
	"""	
	# Load raw data
	pth = '/home/ashivni/Work/SVN_WORK/repos/Fracture/Duxbury/trunk/Convergence/DATA/NoShift/'
	fName = 'FAIL_STRESS_DIA_'+str(L/2)+'x'+str(L)+'.dat'
	d = numpy.array(pylab.loadtxt(pth+fName))
	d.sort()
	
	# Find empirical cdf and scale to avoid zeros in logs
	cf = empCDF(d,d)*len(d)/(1.0 + len(d))	
	#cf = empCDF(d,d)

	# Draw a random sample from the data
	samp = numpy.array(random.sample(d,N))
	samp.sort()

	# Find weibull, gumbel and duxbury fit parameters
	wbl_k, wbl_l = weibull_mle(samp)
	gbl_m, gbl_bt = mingumbel_mle(samp)
	dux_s = duxbury_mle(samp,L)

	# Evaluate the various quantiles with the fit parameters
	wICDF = weibull_icdf(cf,wbl_k,wbl_l)
	gICDF = mingumbel_icdf(cf,gbl_m,gbl_bt)
	dICDF = duxbury_icdf(cf,L,dux_s)
	wCDF = weibull_cdf(d,wbl_k,wbl_l)
	gCDF = mingumbel_cdf(d,gbl_m,gbl_bt)
	dCDF = duxbury_cdf(d,L,dux_s)

	fig = mpl.pyplot.figure()
	# Setup for the semilog plot
	fig.subplots_adjust(bottom=0.2,left=0.2)
	ax = fig.add_subplot(111)
	ax.set_xscale('linear')
	ax.set_yscale('log')
	ax.set_xlabel('stress',fontsize=30)
	ax.set_ylabel('P( Failure )',fontsize=30)
	ax.set_xlim([0.20,0.28])
	ax.set_ylim([1E-5,1E-1])
	ax.xaxis.set_ticks([0.20,0.22,0.24,0.26,0.28])
	for t in ax.get_xticklabels():
		t.set_fontsize(20)
	for t in ax.get_yticklabels():
		t.set_fontsize(20)

	"""
	line, = ax.plot(d,wICDF,'g.')
	line, = ax.plot(d,gICDF,'r.')
	line, = ax.plot(d,dICDF,'b.')
	line, = ax.plot(d,d,'k-')
	"""

	line, = ax.plot(d,wCDF,'g-',lw=3,label='Weibull')
	line, = ax.plot(d,gCDF,'r-',lw=3,label='Gumbel')
	line, = ax.plot(d,dCDF,'b-',lw=3,label='Duxbury')
	line, = ax.plot(d,cf,'k.',label='Empirical')

	ax.legend(loc='lower right',fancybox=True)
	leg = ax.get_legend()
	mpl.pyplot.setp(leg.get_texts(),fontsize=30)
	mpl.pyplot.draw()
	mpl.pyplot.savefig('FailureProb.png')

	nTrial = 10
	qRank = 10
	w_est = numpy.zeros(nTrial)
	g_est = numpy.zeros(nTrial)
	d_est = numpy.zeros(nTrial)
	for j in range(nTrial):
		# Draw a random sample from the data
		samp = numpy.array(random.sample(d,N))
		samp.sort()

		# Find weibull, gumbel and duxbury fit parameters
		wbl_k, wbl_l = weibull_mle(samp)
		gbl_m, gbl_bt = mingumbel_mle(samp)
		dux_s = duxbury_mle(samp,L)

		w_est[j] =  weibull_cdf(d[qRank-1],wbl_k,wbl_l) 
		g_est[j] = mingumbel_cdf(d[qRank-1],gbl_m,gbl_bt)
		d_est[j] = duxbury_cdf(d[qRank-1],L,dux_s)
		

	print "Stress = " + str(d[qRank-1]) 
	print "Emperical Failure Probability = " + str(cf[qRank-1]) + " Equiv to 1 in " +  str((1.0/cf[qRank-1]))
	print "Weibull Estimate = " + str(pylab.mean(w_est)) + " +/- " + str(pylab.var(w_est)**0.5)  
	print " Equiv to 1 in " +  str((1/pylab.mean(w_est))) 
	print "Gumbel  Estimate = " + str(pylab.mean(g_est)) + " +/- " + str((pylab.var(g_est)**0.5))
	print " Equiv to 1 in " +  str((1/pylab.mean(g_est))) 
	print "Duxbury Estimate = " + str(pylab.mean(d_est)) + " +/- " + str(pylab.var(d_est)**0.5) 
	print " Equiv to 1 in " +  str((1/pylab.mean(d_est))) 
Example No. 29
    y += 1.5 * np.random.normal(size=len(x))
    return x, y

#%% e.g.

xx, yy = create_data()

pl.plot(xx, yy, 'o', label='data')

min(xx)
max(xx)
sum(xx)

pl.mean(xx)
pl.median(xx)
pl.var(xx)

#%%
## main program

xx, yy = create_data(0,5,11)
pl.plot(xx, yy, 'o', label='data points')

#%%
## use finer and regular mesh for plot
a, b = min(xx), max(xx)
h = b-a
n = 1000

xfine = np.linspace( a, b, 1000)
type(xfine)
Example No. 30
def main():
    # Parse command line
    parser = optparse.OptionParser(usage="%prog [options] ")
    parser.add_option(
        "-N",
        "--population-size",
        type='int',
        dest="N",
        default=50,
        help="Total number of lineages in the population is 2N (default N=50) "
    )
    parser.add_option("-k",
                      "--num-lineage",
                      dest="k",
                      type='int',
                      default=5,
                      help="Number of lineages to coalesce (default=5)")
    parser.add_option("-u",
                      "--mutation-rate",
                      dest="u",
                      type='float',
                      default='0.01',
                      help="Mutation rate (default=0.01)")
    parser.add_option("-m",
                      "--num-simulation",
                      type='int',
                      dest="m",
                      default=1,
                      help="Number of simulations to run (default=1) ")

    options, args = parser.parse_args()

    if options.m == 1:  # simulate once, will plot the genealogy tree
        lineage, coalesce_time, mutation_time, coalesce_event, mutation_event = coalesce_with_mutation(
            options.N, options.k, options.u)
        drawtree(pylab.figure(), coalesce_time, mutation_time, coalesce_event,
                 mutation_event)
        pylab.title('N=' + str(options.N) + ', k=' + str(options.k) + ', u=' +
                    str(options.u))
    else:
        firstCoalesceTime = []
        finalCoalesceTime = []
        numMutation = []
        for i in range(options.m):
            lineage, coalesce_time, mutation_time, coalesce_event, mutation_event = coalesce_with_mutation(
                options.N, options.k, options.u)
            firstCoalesceTime.append(coalesce_time[0])
            finalCoalesceTime.append(coalesce_time[-1])
            numMutation.append(len(mutation_event))
        print('-----------statistics---------------')
        print('first coalescence time: \tmean =',
              pylab.mean(firstCoalesceTime), '\tvar =',
              pylab.var(firstCoalesceTime))
        print('last coalescence time: \tmean =', pylab.mean(finalCoalesceTime),
              '\tvar =', pylab.var(finalCoalesceTime))
        print('number of mutations: \tmean =', pylab.mean(numMutation),
              '\tvar =', pylab.var(numMutation))
        # plot histgram
        pylab.figure()
        pylab.hist(firstCoalesceTime, bins=20)
        pylab.title('first coalescence time')
        pylab.xlabel('unit time')
        pylab.ylabel('number of simulations')
        pylab.figure()
        pylab.hist(finalCoalesceTime, bins=20)
        pylab.title('last coalescence time')
        pylab.xlabel('unit time')
        pylab.ylabel('number of simulations')
        pylab.figure()
        pylab.hist(numMutation, bins=20)
        pylab.title('number of mutations')
        pylab.xlabel('unit time')
        pylab.ylabel('number of simulations')
    pylab.show()
Example No. 31
    n, state, J, T = fileio.parsefilename(f)
    J1 = J
    N = n
    Ts = [pl.around(0.01 * (x + 1), 3) for x in range(0, 500)]
    ns = [20]
    Js = [1]
    states = [1]
    #This is where results could be filtered according to parameters if necessary
    if fileio.checkparameters([ns, states, Js, Ts], [n, state, J, T]):
        print("Current file: %s" % f)
        sys.stdout.flush()
        Etotals, Stotals = fileio.readdata(join("results", f))
        Eaverages = pl.array(Etotals) / n**2
        Saverages = pl.array(Stotals) / n**2

        chi.append(1 / T * pl.var(Saverages))
        Cv.append(1 / T**2 * pl.var(Eaverages))
        Smean.append(pl.absolute(pl.mean(Saverages)))
        Emean.append(pl.mean(Eaverages))

Tc = 2 * float(J1) / pl.log(1. + pl.sqrt(2.))

Stheory = [(1 - (pl.sinh(pl.log(1 + pl.sqrt(2.)) * Tc / T))**(-4))**(1. / 8)
           for T in Ts if T < Tc]
Stheory += [0 for T in Ts if T >= Tc]

time = datetime.datetime.now()
filename = join("results",
                "results_%s.txt" % time.strftime("%H:%M:%S_%d-%m-%Y"))

fileio.writedata(filename, [Ts, Emean, Smean, Stheory, Cv, chi])
Example No. 32
File: gen.py Project: ansh5441/code
import pylab
def possible_mean(L):
    return sum(L)/len(L)

def possible_variance(L):
    mu = possible_mean(L)
    temp = 0
    for e in L:
        temp += (e-mu)**2
    return temp / len(L)

x =[6,7,11,12,13,15]
print sum(x)/len(x)
print possible_mean(x)
print pylab.var(x)
print possible_variance(x)
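Both printed variances agree because pylab.var defaults to the population variance (ddof=0), which is exactly what possible_variance computes; the sample variance divides by len(L) - 1 instead:

print pylab.var(x, ddof=1)   # sample variance, slightly larger than the population variance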
Example No. 33
 def var_len(group):
     return pylab.var(group['length'], ddof=1)
Example No. 34
S[:,0]=S0    #assign 1st input as S0
S[:,1:]=S0*p.exp(nu*t[1:]+sigma*B[:,1:])   # generate the equation
S_5line=S[0:5] #simulate the first five run and show on plot

p.plot(t,S_5line.transpose())
#label
label = 'Time ' ; p.xlabel(label)
label = 'Stock prices ' ; p.ylabel(label)
p.title('GBM of ' + label + ' ,mu=0.1 ' + ' and sigma=0.26  ' )
p.show()        

#expectation at S3
ST=S[:,1:]
S3=ST[:,-1]
S3_mean=p.mean(S3)
S3_variance=p.var(S3)
print('E(S3) = '+str(S3_mean))

#variance at S3
print('Var(S3) = ' + str(S3_variance))

S3_exceed = S3 > 39
number_exceed = sum (S3_exceed) #number of price exceed 39
print ('number [S(3)>39] = ' + str(number_exceed))

#prob.of exceed 39
print ('(P[S(3)>39]) = ' + str(number_exceed/n_path))

#matrix with end price exceed 39
S3_exceed_price = S3_exceed*S3
Example No. 35
    x2 = aggl[3]
    pylab.title(str(sum([(x1[j]-x2[j])**2 for j in range(len(x2))])))
    pylab.plot(x1)
    pylab.plot(x2)
show()
for i in range(len(aggl)):
    minst,midx = 100,0
    for k in range(len(aggl)):
        dis = sum([(aggl[i][j]-mrrp[k][j])**2 for j in range(len(x2))])
        if dis < minst:
            minst = dis
            midx=i
    print midx


def invBoxMuller(x,mu,sigma):
	y = (x-mu)/sigma
	return erfc(-y/sqrt(2.))-1.


mx = mean(x)
sx = sqrt(var(x))
my = mean(y)
sy = sqrt(var(y))

ux = [invBoxMuller(xx,mx,sx) for xx in x]
uy = [invBoxMuller(yy,my,sy) for yy in y]
plot(ux,uy,'.')
show()
Example No. 36
    n,state,J,T = fileio.parsefilename(f)
    J1 = J
    N = n
    Ts = [pl.around(0.01*(x+1),3) for x in range(0,500)]
    ns = [20]
    Js = [1]
    states = [1]
    #This is where results could be filtered according to parameters if necessary
    if fileio.checkparameters([ns,states,Js,Ts],[n,state,J,T]):
        print("Current file: %s" % f)
        sys.stdout.flush()
        Etotals,Stotals = fileio.readdata(join("results",f))
        Eaverages = pl.array(Etotals) / n**2
        Saverages = pl.array(Stotals) / n**2
        
        chi.append(1/T*pl.var(Saverages))
        Cv.append(1/T**2*pl.var(Eaverages))
        Smean.append(pl.absolute(pl.mean(Saverages)))
        Emean.append(pl.mean(Eaverages))        

Tc = 2*float(J1)/pl.log(1.+pl.sqrt(2.))

Stheory = [(1 - (pl.sinh(pl.log(1+pl.sqrt(2.))*Tc/T))**(-4))**(1./8) for T in Ts if T < Tc]
Stheory += [0 for T in Ts if T >= Tc]

time = datetime.datetime.now()
filename = join("results","results_%s.txt" % time.strftime("%H:%M:%S_%d-%m-%Y"))

fileio.writedata(filename,[Ts,Emean,Smean,Stheory,Cv,chi])

system("git add %s" % filename)
Example No. 37
 def __calculate__(self):
     return pl.sqrt(pl.var(self.signal, ddof=0))
Example No. 38
      str(sdr.center_freq / 1.0e6) + "MHz, sample rate: " +
      str(sdr.sample_rate / 1.0e6) + " MHz")
s = sdr.read_samples(sdr.sample_rate * 1)
# simple way to get 1 second

# remove DC offset:
m = s.mean()
for samp in range(0, len(s)):
    s[samp] = s[samp] - m

# Verify gain did not change during collection:
print("Gain: " + str(sdr.gain) + ", center freq: " +
      str(sdr.center_freq / 1.0e6) + "MHz, sample rate: " +
      str(sdr.sample_rate / 1.0e6) + " MHz")

print("Variance of s: " + str(var(s)) + ", " + str(20 * log10(var(s))) +
      "dBfs")

[power, freq] = psd(s,
                    NFFT=4096 * 2,
                    Fs=sdr.sample_rate / 1e6,
                    Fc=sdr.center_freq / 1e6)
# Note: Freq is now in MHz
#xlabel("Frequency, MHz");
#ylabel("Power, dBfs");
#show();

clf()

plot(freq, 10 * log10(power), 'b')
xlim([cfm - halfbw, cfm + halfbw])
Example No. 39
 def __calculate__(self):
     sd2 = (self.signal_plus + self.signal_minus) / pl.sqrt(2)
     return pl.sqrt(pl.var(sd2))
Example No. 40
"""
Demonstrates the"reshape with non-integer intevals" and the optimization of
the reshape interval by minimizing the standard deviation of the overlap.
"""

import pylab as p
from segment_psp_trace import segment, optimize_segment

if __name__ == '__main__':
    data = p.np.load("traces/spiketrace_1.npz")["arr_0"][:2400000]

    dt = 1.
    p.figure()
    m = lambda interval: p.sum(p.var(segment(data, dt, interval), axis=0))

    shift_values = p.arange(3841, 3842, .01)
    p.plot(shift_values, map(m, shift_values), 'x')
    p.show()

    r = optimize_segment(data, 1., 3840)
    print r

    p.figure()
    seg = segment(data, 1., r)
    print len(seg)
    mean = p.mean(seg, axis=0)
    std = p.std(seg, axis=0)
    dt = 1.
    time = p.arange(len(std)) * dt
    p.plot(time, mean, "r-")
    p.fill_between(time, mean - std, mean + std, alpha=.2)
Example No. 41
    def crop_resize_vol(self):
        """
        crop_resize_vol: crop, resize, and create Laplacian Stack without noise-removing resampling and interpolation methods
        """
        workingDir = rawImagesTestDir if self.TestModeOn else rawImagesRealDir
        imageList = os.listdir(workingDir)
        if not imageList:
            raise SystemExit(
                "Please populate 'rawImages/' with images. It's currently empty."
            )
        print "[x] Initiating the bulk of the program's execution (i.e. crop_resize_vol)"
        counter = 1
        for imStr in imageList:
            im = Vips.Image.new_from_file(workingDir + imStr)
            if im is None:
                raise SystemExit("Image to process not opened successfully.")
            elif self.Parameters.mps > 1.0 or self.Parameters.mps <= 0.0:
                raise ValueError(
                    "You must enter a value in the interval (0.0, 1.0].")
            else:
                # Modularize crop method
                print "\t[", counter, "] ", im.width, ",", im.height,  # Output image's starting size to console.
                upperEdge = int((0.5 - self.Parameters.mps / 2) * im.height)
                im = im.crop(0, upperEdge, im.width,
                             im.height * self.Parameters.mps)  # "Pre" cropping
                background = im.getpoint(0, 0)
                mask = (im.median(3) - background).abs() > self.Parameters.ctl
                columns, rows = mask.project()
                left = columns.profile()[1].min()
                right = columns.width - columns.flip(
                    "horizontal").profile()[1].min()
                top = rows.profile()[0].min()
                bottom = rows.height - rows.flip("vertical").profile()[0].min()
                im = im.crop(left, top, right - left, bottom - top)
                print "cropped to", im.width, ",", im.height  # Output ending size to the console

                # Convert to PIL image; modularize...
                mem_img = im.write_to_memory()
                pil_img = Image.fromarray(pl.fromstring(
                    mem_img, dtype=pl.uint8).reshape(im.height, im.width,
                                                     im.bands),
                                          mode='RGB')

                # Resize PIL image
                print "\t[", counter, "] ", pil_img.size[0], ",", pil_img.size[
                    1], "resized to",
                resizedIm = pil_img.resize(self.ResizeImagesSize,
                                           resample=self.Parameters.rf)
                resizedIm.save(resizedImagesDir + "readyToAnalyze" +
                               str(counter) +
                               ".jpg")  # save resized im to correct dir
                print resizedIm.size[0], ",", resizedIm.size[1]

                # Create Laplacian Stack; modularize
                print "\t[", counter, "]  Performing 'variance_of_laplacian' method on resized image...",
                Laplacian_Kernel = (pl.array([[0., -1., 0.], [-1., 4., -1.],
                                              [0., -1., 0.]])) * (1. / 60)
                miniMatrix = pl.zeros(
                    (self.Parameters.hd, self.Parameters.wd)
                )  # heightDivisor x widthDivisor Matrix to copy elements to
                varianceMatrix = pl.zeros(
                    (resizedIm.size[1] / self.Parameters.hd,
                     resizedIm.size[0] / self.Parameters.wd))
                imageMatrix = pl.asarray(resizedIm.convert(
                    'L'))  # convert image to greyscale; return matrix
                for subset_of_rows in pl.arange(
                        resizedIm.size[1] /
                        self.Parameters.hd):  # TOTAL Image Matrix
                    for subset_of_columns in pl.arange(resizedIm.size[0] /
                                                       self.Parameters.wd):
                        image_row = subset_of_rows * self.Parameters.hd  # keeps track of larger matrix's row index to copy from
                        image_col = subset_of_columns * self.Parameters.wd  # keeps track of larger matrix's column index to copy from
                        for row in pl.arange(self.Parameters.hd):
                            for col in pl.arange(self.Parameters.wd):
                                miniMatrix[row][col] = imageMatrix[
                                    image_row + row][image_col + col]
                        Convolve = signal.fftconvolve(miniMatrix,
                                                      Laplacian_Kernel,
                                                      mode='full')
                        varianceMatrix[subset_of_rows][
                            subset_of_columns] = pl.var(Convolve)
                print "Done."

                # Append to internal laplacian stack; modularize
                self.VoLImageStack.append(varianceMatrix)
            # Lastly, update counter
            counter += 1
        # After all of that, finally just dump it to a file
        with open(internalFilesDir + internalList, 'wb') as f:
            pickle.dump(self.VoLImageStack,
                        f,
                        protocol=pickle.HIGHEST_PROTOCOL)
Example No. 42
 def __calculate__(self):
     return pl.sqrt(pl.var(self.signal, ddof=0))
Example No. 43
    def _stop_training(self, debug=False):
        """
        Finish the training, i.e. for the time series plots: take the
        accumulated time series and divide by the number of samples per
        condition.
        For the
        """
        # Compute avg
        for label in self.mean_time_series.keys():
            self.mean_time_series[label] /= self.samples_per_condition[label]
            self.time_series_histo[label] = \
                pylab.array(self.time_series_histo[label])
 
            # Compute error of desired type - strip the numerals:
            if self.error_type is not None:
                if self.error_type.strip('0123456789.') == 'SampleStdDev':
                    self.error[label] = \
                     pylab.sqrt(pylab.var(self.time_series_histo[label],0))
                elif self.error_type.strip('0123456789.') == 'StdError':
                    self.error[label] = \
                     pylab.sqrt(pylab.var(self.time_series_histo[label],0)) /\
                     pylab.sqrt(pylab.shape(self.time_series_histo[label])[0])
            
                multiplier = float(''.join([nr for nr in self.error_type 
                                            if (nr.isdigit() or nr == ".")]))
                self.error[label] = multiplier * self.error[label]
            
        # other plots only if features were passed
        if self.feature_vector is not None:
            self.feature_time_series = \
                convert_feature_vector_to_time_series(self.feature_vector,
                                                      self.sample_data)
            
            # in the alternative scaling space, the feature "importance" is
            # determined by the feature values
            # weighted by the expected difference in time series values 
            # between the two classes (difference of avg std and avg target)
            # The standard P3 and LRP cases are handled separately to make
            # sure that the sign of the difference is consistent
            if self.alternative_scaling:
                if all(
                    [True if label_iter in ['Target', 'Standard'] else False
                               for label_iter in self.mean_time_series.keys()]):
                    self.feature_time_series*=(
                        self.mean_time_series['Target']-
                                            self.mean_time_series['Standard'])
                elif all(
                    [True if label_iter in ['LRP', 'NoLRP'] else False
                               for label_iter in self.mean_time_series.keys()]):
                    self.feature_time_series*=(
                        self.mean_time_series['LRP']-
                                            self.mean_time_series['NoLRP'])
                else:
                    self.feature_time_series*=(
                       self.mean_time_series[self.mean_time_series.keys()[0]]-
                       self.mean_time_series[self.mean_time_series.keys()[1]])
                    print "AverageFeatureVis (alternative_scaling): " +\
                      "Present classes don't match the standards " +\
                      "(Standard/Target or LRP/NoLRP). Used the difference "+\
                      "%s - %s" % (self.mean_time_series.keys()[0],
                       self.mean_time_series.keys()[1]) +" for computation "+\
                       "of the alternative scaling."

            
            # greatest feature val that occurs is used for the normalization
            # of the color-representation of the feature values
            self.max_feature_val = \
                (abs(self.feature_time_series)).max(0).max(0)
            self.normalizer = colors.Normalize(vmin=-self.max_feature_val,
                                               vmax= self.max_feature_val)            
            cdict={  'red':[(0.0, 1.0, 1.0),(0.5, 1.0, 1.0),(1.0, 0.0, 0.0)],
                   'green':[(0.0, 0.0, 0.0),(0.5, 1.0, 1.0),(1.0, 0.0, 0.0)],
                    'blue':[(0.0, 0.0, 0.0),(0.5, 1.0, 1.0),(1.0, 1.0, 1.0)]}
            self.own_colormap = \
                colors.LinearSegmentedColormap('owncm', cdict, N=256)
            
            # sort the features with descending importance
            self.indexlist=pylab.transpose(self.feature_time_series.nonzero())
            indexorder = abs(self.feature_time_series
                             [abs(self.feature_time_series) > 
                                    self.important_feature_thresh]).argsort()

            self.indexlist = self.indexlist[indexorder[-1::-1]] #reverse order
            self.indexlist = map(list,self.indexlist[
                :len(self.feature_vector)*self.percentage_of_features/100])
            
            self.histo_plot = self._generate_histo_plot()
            
            try:
                # try to generate a plot of the feature crosscorrelation
                # matrix. Might fail if the threshold is set such that no
                # features are left.
                for label in self.mean_time_series.keys():
                    self.labeled_corr_matrix[label] = \
                        self._generate_labeled_correlation_matrix(label)
                    self.corr_plot[label] = \
                        self._get_corr_plot(self.corr_important_feats[label],
                                            label)

                # if 2 class labels exist, also compute the difference in the
                # cross correlation between the classes.
                if len(self.corr_important_feats.keys()) == 2:
                    self.corr_plot['Diff'] = self._get_corr_plot((
                        self.corr_important_feats
                            [self.corr_important_feats.keys()[0]]
                      - self.corr_important_feats
                            [self.corr_important_feats.keys()[1]]),
                        self.corr_important_feats.keys()[0] + ' - ' + \
                            self.corr_important_feats.keys()[1])
            except TypeError:
                import warnings
                warnings.warn("\n\nFeatureVis doesn't have enough important" +
                              " features left for correlation plots..."+
                              " Check threshold.\n")
                              
        # Compute avg time series plot anyway         
        self.ts_plot = self._generate_time_series_plot()
Example No. 44
 def calMeanZ(self,coreR,surfR,zscores):
     z2=[]
     for i in coreR:
         z2.append(zscores[surfR.index(i)])
     return [mean(z2),sqrt(var(z2))]
Example No. 45
 def plot_variance(self, X, mat):
     var = p.var(mat, 0, dtype=p.float64)
     p.plot(X, var)
     p.show()