Example #1
def fig3(x):
  # Figure 3
  # now we create a cumulative histogram of the data
  #
  P.figure()

  n, bins, patches = P.hist(x, 50, normed=1, histtype='step', cumulative=True)

  # add a line showing the expected distribution
  y = P.normpdf(bins, mu, sigma).cumsum()
  y /= y[-1]
  l = P.plot(bins, y, 'k--', linewidth=1.5)

  # create a second data-set with a smaller standard deviation
  sigma2 = 15.
  x = mu + sigma2 * P.randn(10000)

  n, bins, patches = P.hist(x, bins=bins, normed=1, histtype='step', cumulative=True)

  # add a line showing the expected distribution
  y = P.normpdf(bins, mu, sigma2).cumsum()
  y /= y[-1]
  l = P.plot(bins, y, 'r--', linewidth=1.5)

  # finally overplot a reversed cumulative histogram
  n, bins, patches = P.hist(x, bins=bins, normed=1,
                            histtype='step', cumulative=-1)

  P.grid(True)
  P.ylim(0, 1.05)
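A portability note before the remaining examples: pylab.normpdf (an alias for the old matplotlib.mlab.normpdf) and the normed= keyword of hist() were both removed from modern matplotlib; density=True is the current replacement for normed=1. A minimal drop-in sketch of the removed helper, assuming only NumPy (scipy.stats.norm.pdf(x, mu, sigma) is an equivalent):

import numpy as np

def normpdf(x, mu, sigma):
    # Gaussian probability density at x, the same formula the removed helper used
    x = np.asarray(x, dtype=float)
    return np.exp(-0.5 * ((x - mu) / sigma) ** 2) / (sigma * np.sqrt(2.0 * np.pi))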
Example #2
    def plot(self, normed=True, N=1000, Xmin=None, Xmax=None, bins=50, color='red', lw=2, 
            hist_kw={'color':'#5F9EA0'}, ax=None):

        if ax:
            ax.hist(self.data, normed=normed, bins=bins, **hist_kw)
        else:
            pylab.hist(self.data, normed=normed, bins=bins, **hist_kw)
        if Xmin is None:
            Xmin = self.data.min()
        if Xmax is None:
            Xmax = self.data.max()
        X = pylab.linspace(Xmin, Xmax, N)

        if ax:
            ax.plot(X, [self.model.pdf(x, self.results.x) for x in X], color=color, lw=lw)
        else:
            pylab.plot(X, [self.model.pdf(x, self.results.x) for x in X], color=color, lw=lw)

        K = len(self.results.x)
        # The PIs must be normalised
        for i in range(K // 3):
            
            mu, sigma, pi_ = self.results.mus[i], self.results.sigmas[i], self.results.pis[i]
            if ax:
                ax.plot(X, [pi_ * pylab.normpdf(x, mu, sigma) for x in X], 'g--', alpha=0.5)
            else:
                pylab.plot(X, [pi_ * pylab.normpdf(x, mu, sigma) for x in X], 'g--', alpha=0.5)
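The ax-versus-pylab branching above is often collapsed by resolving the target axes once; a sketch of that simplification, assuming the same attributes as the method above:

if ax is None:
    ax = pylab.gca()  # fall back to the current axes
ax.hist(self.data, normed=normed, bins=bins, **hist_kw)
ax.plot(X, [self.model.pdf(x, self.results.x) for x in X], color=color, lw=lw)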
Example #3
def extraHistogram():
    import pylab as P
    import numpy as N
    
    
    P.figure()
    bins = 25
    min = 23310.
    max = 23455.
    nu, binsu, patchesu = P.hist(telfocusOld, bins=bins, rwidth=0.9, range = (min, max) , 
           label = 'UnCorrected Data', fc ='b', alpha = 0.4, normed = True)
    n, bins, patches = P.hist(telfocusCorrected, bins=bins, rwidth=0.7, range = (min, max),
           label='Corrected Data', fc = 'r', alpha = 0.6, normed = True)
    #P.axvline(medianNew, label = 'Corrected Median', color = 'r', lw = 1.1)
    #P.axvline(medianOld, label = 'UnCorrected Median', color = 'b', lw = 1.1)
    y1 = P.normpdf(binsu, N.mean(telfocusOld), N.std(telfocusOld))
    y2 = P.normpdf(bins, N.mean(telfocusCorrected), N.std(telfocusCorrected))
    P.plot(binsu, y1, 'b-', linewidth = 2.5, label='Gaussian Fit')
    P.plot(bins, y2, 'r-', linewidth = 3., label='Gaussian Fit')
    P.xlim(min,max)
    P.xlabel('Telescope Focus + median Offset')
    P.ylabel('Normed Values')
    P.legend(shadow=True, loc ='best')
    P.savefig('TelFocusHistogram.png')
    P.close()
Example #4
def PlotVelSigDist(plotDir, relErrX, relErrY):

    py.clf()
    fontsize1 = 10
    bins = np.arange(-7, 7, 1)
    print(relErrX)
    paxes = py.subplot(1, 2, 1)
    (n, b, p) = py.hist(relErrX, bins, color='b')
    py.axis([-5, 5, 0, 180], fontsize=10)
    py.xlabel('X Residuals (sigma)', fontsize=fontsize1)
    py.ylabel('Number of Trials', fontsize=fontsize1)
    ggx = np.arange(-7, 7, 0.25)
    ggy = py.normpdf(ggx, 0, 1)
    ggamp = ((py.sort(n))[-2:]).sum() / (2.0 * ggy.max())
    py.plot(ggx, ggy * ggamp, 'r-', linewidth=2)

    paxes = py.subplot(1, 2, 2)

    #             subplot(3, 2, 5)
    (n, b, p) = py.hist(relErrY, bins, color='b')
    py.axis([-5, 5, 0, 180], fontsize=10)
    py.xlabel('Y Residuals (sigma)', fontsize=fontsize1)
    py.ylabel('Number of Trials', fontsize=fontsize1)
    ggx = np.arange(-7, 7, 0.25)
    ggy = py.normpdf(ggx, 0, 1)
    ggamp = ((py.sort(n))[-2:]).sum() / (2.0 * ggy.max())
    py.plot(ggx, ggy * ggamp, 'r-', linewidth=2)
    py.savefig(plotDir + '/velDist_a3_m20_w1-4.png')
    py.clf()
Example #5
 def dibuja_t2(self, muestreo, umbral):
   logging.debug('entramos en dibuja')
   plt.figure(2)
   puntoMuestreo = int(muestreo/self.inc_tiempo_t2)
   amp = []
   
   for i in range(len(self.lista_medidas_t2)):  # keep the points within +/- 25 positions of the sampling point, for every stored trace
     for j in range(-25, 25):
       try:
         amp.append(self.lista_medidas_t2[i][puntoMuestreo + j])
       except IndexError:
         logging.debug('oob')
   
   # Split the samples according to the threshold
   val0 = []
   val1 = []
   
   for i in range(len(amp)):
     if(amp[i] < umbral):
       val0.append(amp[i])
     else:
       val1.append(amp[i])
   
   # Draw the histograms and the Gaussians
   self.ax2_t2.cla()
   self.ax2_t2.set_xlabel('amplitud')
   norm0, bins, patches = self.ax2_t2.hist(val0, bins=200,range=[(5/4)*self.intervalo_amplitud_t2[0], (5/4)*self.intervalo_amplitud_t2[1]], normed=True, histtype='step', color='#8181f7', rwidth=100)
   
   norm1, bins, patches = self.ax2_t2.hist(val1, bins=200,range=[(5/4)*self.intervalo_amplitud_t2[0], (5/4)*self.intervalo_amplitud_t2[1]], normed=True, histtype='step', color='#fa5858', rwidth=100)
   
   v0, sigma0 = self.media_y_varianza(val0)
   gauss0 = pylab.normpdf(bins, v0, sigma0)
   self.ax2_t2.plot(bins, gauss0, linewidth=2, color='#0404b4')  # blue
   
   v1, sigma1 = self.media_y_varianza(val1)
   gauss1 = pylab.normpdf(bins, v1, sigma1)
   self.ax2_t2.plot(bins, gauss1, linewidth=2, color='#b40404')  # red
   
   # Compute the BER
   q = math.fabs(v1-v0)/(sigma1+sigma0)
   ber = 0.5*erfc(q/math.sqrt(2))
   
   self.muestra_resultados_t2(v0, sigma0, v1, sigma1, q, ber, len(val0), len(val1))
   
   # Reposition all the marker bars
   self.ax2_t2.add_line(self.barDecision2_t2)  # repaint the threshold bar on each redraw
   self.ax3_t2.add_line(self.bar_q_t2)
   self.ax3_t2.add_line(self.bar_ber_t2)
   self.barMuestreo_t2.set_xdata(muestreo)
   self.barUmbral_t2.set_ydata(umbral)
   self.barDecision2_t2.set_xdata(umbral)
   logging.debug('colocamos las barras en ax3')
   self.bar_q_t2.set_xdata(q)
   self.bar_ber_t2.set_ydata(ber)
   logging.debug('colocadas')
   
   self.canvas_t2.draw()
   logging.debug('ya se ha redibujado')
Example #6
def plotHistPopScore(population, fitness=False):
    """ Population score distribution histogram

   Example:
      >>> Interaction.plotHistPopScore(population)

   :param population: population object (:class:`GPopulation.GPopulation`)
   :param fitness: if True, the fitness score will be used, otherwise, the raw.
   :rtype: None

   """
    score_list = getPopScores(population, fitness)
    n, bins, patches = pylab.hist(score_list,
                                  50,
                                  facecolor='green',
                                  alpha=0.75,
                                  normed=1)
    pylab.plot(
        bins, pylab.normpdf(bins, numpy.mean(score_list),
                            numpy.std(score_list)), 'r--')
    pylab.xlabel('Score')
    pylab.ylabel('Frequency')
    pylab.grid(True)
    pylab.title("Plot of population score distribution")
    pylab.show()
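A self-contained, modern rendering of the same histogram-plus-fit pattern, with synthetic scores standing in for getPopScores() (normed=1 becomes density=True, and scipy supplies the pdf):

import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import norm

score_list = np.random.normal(50.0, 10.0, 2000)  # stand-in for getPopScores()
n, bins, patches = plt.hist(score_list, 50, facecolor='green', alpha=0.75, density=True)
plt.plot(bins, norm.pdf(bins, np.mean(score_list), np.std(score_list)), 'r--')
plt.xlabel('Score')
plt.ylabel('Frequency')
plt.grid(True)
plt.show()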
Example #7
def load_spectrum_gnirs(file, velScale, resolution):
    """
    Load up a spectrum from the Gemini GNIRS library.
    """
    spec, hdr = pyfits.getdata(file, header=True)

    pixScale = hdr['CD1_1'] * 1.0e-4    # microns

    wavelength = np.arange(len(spec), dtype=float)
    wavelength -= (hdr['CRPIX1']-1.0)   # get into pixels relative to the reference pix
    wavelength *= hdr['CD1_1']          # convert to the proper wavelength scale (Ang)
    wavelength += hdr['CRVAL1']         # shift to the wavelength zeropoint

    # Convert from Angstroms to microns
    wavelength *= 1.0e-4

    deltaWave = 2.21344 / resolution         # microns
    resInPixels = deltaWave / pixScale       # pixels
    sigmaInPixels = resInPixels / 2.355
    psfBins = np.arange(-4*math.ceil(resInPixels), 4*math.ceil(resInPixels))
    psf = py.normpdf(psfBins, 0, sigmaInPixels)
    specLowRes = np.convolve(spec, psf, mode='same')

    # Rebin into equals logarithmic steps in wavelength
    logWave, specNew, vel = log_rebin2(wavelength, specLowRes,
                                       inMicrons=True, velScale=velScale)

    return logWave, specNew
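The hand-built PSF above (a normpdf kernel passed to np.convolve) can also be written with scipy's dedicated 1-D Gaussian filter; a sketch under the same definitions of spec and sigmaInPixels:

from scipy.ndimage import gaussian_filter1d

# unit-area Gaussian smoothing; truncate=4 mirrors the +/- 4*resInPixels window above
specLowRes = gaussian_filter1d(spec, sigmaInPixels, mode='nearest', truncate=4.0)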
Example #8
def complicatedHisto(x, dists, var, mean):
    
    '''
    Plot a histogram of a 2-D array x, with each column treated as a separate
    data set drawn in its own color, as in the matplotlib multi-histogram example.
    '''
    fig = plt.figure()
    dist = ['{:.2f}'.format(k) for k in dists]    
    colors = ('g', 'b', 'r','c', 'm','y', 'k','w')    

    n, bins, patches = P.hist(x, 20, normed=1, histtype='bar',
                            color=colors[0:len(dist)],
                            label=dist)
                            
    P.legend()
    P.show()
    
    print "woohoo"
    fig2 = plt.figure(2)
    xbins = np.linspace(-0.5,3,200)
    for sigma, mu,d,c  in zip(var, mean, dists,colors):
        
        y = P.normpdf(xbins,  mu, np.sqrt(sigma))
        l = P.plot(xbins, y, c, label=d, linewidth=1.5)
        P.legend()
Example #9
    def pdf(self, x, params, normalise=True):
        """Expected parameters are


        params is a list of gaussian distribution ordered as mu, sigma, pi, 
        mu2, sigma2, pi2, ...

        """
        assert divmod(len(params), 3)[1] == 0
        assert len(params) >= 3 * self.k
        k = len(params) // 3

        self.k = k

        pis = np.array(params[2::3])

        if any(np.array(pis)<0):
            return 0
        if normalise is True:
            pis /= pis.sum()
        # !!! sum pi must equal 1 otherwise may diverge badly
        data = 0
        for i in range(0, k):
            mu, sigma, pi_ = params[i*3: (i+1)*3]
            pi_ = pis[i]
            if sigma != 0:
                data += pi_ * pylab.normpdf(x, mu, sigma)
        return data
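The flat parameter layout the docstring describes can be exercised without the surrounding class; a sketch of a two-component mixture evaluated the same way pdf() evaluates it:

import numpy as np
from scipy.stats import norm

params = [0.0, 1.0, 0.3, 4.0, 0.5, 0.7]  # mu1, sigma1, pi1, mu2, sigma2, pi2
x = np.linspace(-4.0, 7.0, 200)
mus, sigmas = params[0::3], params[1::3]
pis = np.array(params[2::3])
pis = pis / pis.sum()  # the mixture weights must sum to 1
mix = sum(pi * norm.pdf(x, mu, s) for mu, s, pi in zip(mus, sigmas, pis))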
Example #10
def plot_histogram():
    import numpy as np
    import pylab as P

    # The hist() function now has a lot more options
    #

    #
    # first create a single histogram
    #
    P.figure()
    mu, sigma = 40, 35


    x = abs(np.random.normal(mu, sigma, 1000000))

    # the histogram of the data with histtype='step'
    n, bins, patches = P.hist(x, 100, normed=1, histtype='stepfilled')
    P.setp(patches, 'facecolor', 'g', 'alpha', 0.50)
    P.vlines(np.mean(x), 0, max(n))
    P.vlines(np.median(x), 0, max(n))
    # add a line showing the expected distribution
    y = np.abs(P.normpdf( bins, mu, sigma))
    l = P.plot(bins, y, 'k--', linewidth=1.5)

    P.show()
Example #11
def draw_normal_histogram(x, bins, y_label='', x_label='', title="", body=""):
    "Plot a histogram chart"
    # x are matplotlib pylab arrays, body is a StringIO
    import pylab
    import matplotlib
    # select a non-interactive backend before any drawing
    matplotlib.use('Agg')
    # clear graph
    matplotlib.pyplot.clf()
    n, bins1, patches = pylab.hist(x,
                                   bins,
                                   histtype='bar',
                                   facecolor='green',
                                   alpha=0.75)
    #pylab.setp(patches, 'facecolor', 'g', 'alpha', 0.75)
    pylab.ylabel(y_label)
    pylab.xlabel(x_label)
    # add a line showing the expected distribution
    mu = pylab.mean(x)
    sigma = pylab.std(x)
    y = pylab.normpdf(bins, mu, sigma)
    l = pylab.plot(bins, y, 'k--', linewidth=1.5)

    pylab.title(title)

    pylab.grid(True)
    pylab.savefig(body)
    return body.getvalue()
Example #12
    def pdf(self, x, params, normalise=True):
        """Expected parameters are


        params is a list of gaussian distribution ordered as mu, sigma, pi,
        mu2, sigma2, pi2, ...

        """
        assert divmod(len(params), 3)[1] == 0
        assert len(params) >= 3 * self.k
        k = len(params) // 3

        self.k = k

        pis = np.array(params[2::3])

        if any(np.array(pis) < 0):
            return 0
        if normalise is True:
            pis /= pis.sum()
        # !!! sum pi must equal 1 otherwise may diverge badly
        data = 0
        for i in range(0, int(k)):
            mu, sigma, pi_ = params[i * 3:(i + 1) * 3]
            pi_ = pis[i]
            if sigma != 0:
                data += pi_ * pylab.normpdf(x, mu, sigma)
        return data
Example #13
def main():
	''' Main Function'''
	# Start and End date of the charts
	dt_start = dt.datetime(2011, 1, 1)
	dt_end = dt.datetime(2012, 12, 31)
	
	#goog = pd.io.data.get_data_yahoo("GOOG",  dt_start, dt_end) # not working
	SPY = DataReader("SPY",  "yahoo", dt_start, dt_end)
	#YHOO = DataReader("YHOO",  "yahoo", dt_start, dt_end)
	
	# normalize prices
	nPrice = sc.normalizedPrice(SPY['Adj Close'].values)
	
	#daily return
	daily_ret = sc.computeDailyReturn(nPrice)
	plt.subplot(1,2,1)
	plt.plot(daily_ret*100, 'b-')
	plt.ylabel('Daily return (%)')
	plt.legend(['SPY-Daily Return based on Adjusted close'])
	
	#daily return histogram
	plt.subplot(1,2,2)
	n, bins, patches = plt.hist(daily_ret, 100, normed=1, facecolor='green', alpha=0.5)
	mean = np.mean(daily_ret)
	sigma = np.std(daily_ret)
	y = P.normpdf( bins, mean, sigma)
	plt.plot(bins, y, 'k--', linewidth=1.5)

	plt.ylabel('Probability')
	plt.legend(['normal distribution approximation','SPY-histogram of daily return'])
	
	plt.show()
Example #14
    def plot(self,
             normed=True,
             N=1000,
             Xmin=None,
             Xmax=None,
             bins=50,
             color='red',
             lw=2,
             hist_kw={'color': '#5F9EA0'},
             ax=None):

        if ax:
            ax.hist(self.data, normed=normed, bins=bins, **hist_kw)
        else:
            pylab.hist(self.data, normed=normed, bins=bins, **hist_kw)
        if Xmin is None:
            Xmin = self.data.min()
        if Xmax is None:
            Xmax = self.data.max()
        X = pylab.linspace(Xmin, Xmax, N)

        if ax:
            ax.plot(X, [self.model.pdf(x, self.results.x) for x in X],
                    color=color,
                    lw=lw)
        else:
            pylab.plot(X, [self.model.pdf(x, self.results.x) for x in X],
                       color=color,
                       lw=lw)

        K = len(self.results.x)
        # The PIs must be normalised
        for i in range(self.k):

            mu, sigma, pi_ = self.results.mus[i], self.results.sigmas[
                i], self.results.pis[i]
            if ax:
                ax.plot(X, [pi_ * pylab.normpdf(x, mu, sigma) for x in X],
                        'k--',
                        alpha=0.7,
                        lw=2)
            else:
                pylab.plot(X, [pi_ * pylab.normpdf(x, mu, sigma) for x in X],
                           'k--',
                           alpha=0.7,
                           lw=2)
Example #15
def BinaryClassification(samples, cluster_sizes, mu, sigma):
    pdf = []
    for i in range(2):
        pdf.append(np.transpose(P.normpdf(samples, mu[i], sigma[i])))

    our_classification = P.prod(pdf[0], axis=0) * cluster_sizes[0] < P.prod(
        pdf[1], axis=0) * cluster_sizes[1]
    return our_classification
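A hypothetical call with two synthetic 1-D clusters: rows are samples and columns are features, so the prod(axis=0) after the transpose multiplies the per-feature likelihoods of each sample:

import numpy as np

samples = np.concatenate([np.random.normal(0.0, 1.0, 100),
                          np.random.normal(3.0, 1.0, 100)])
labels = BinaryClassification(samples.reshape(-1, 1), cluster_sizes=[100, 100],
                              mu=[0.0, 3.0], sigma=[1.0, 1.0])
# labels[i] is True where cluster 1 is the likelier source of sample i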
Example #16
def build_hist(ax, dat, title=None):
    mu, sigma = np.average(dat), np.std(dat)
    n, bins, patches = ax.hist(dat, 50, normed=1, histtype='stepfilled')
    P.setp(patches, 'facecolor', 'g', 'alpha', 0.75)

    y = P.normpdf(bins, mu, sigma)
    l = ax.plot(bins, y, 'k--', linewidth=1.5)
    if title is not None:
        ax.set_title(title)
Example #17
 def histDemo(self):
     mu, sigma = 200, 25
     x = mu + sigma*P.randn(10000)
     n, bins, patches = P.hist(x, 50, normed=1, histtype='stepfilled')
     P.setp(patches, 'facecolor', 'g', 'alpha', 0.75)
     
     y = P.normpdf( bins, mu, sigma)
     l = P.plot(bins, y, 'k--', linewidth=1.5)
     P.savefig(inspect.stack()[0][3]+".png")
Example #18
def run_rimea_test7(inifile, trajfile):
    velocities = []
    # Read data
    fps, numpeds, traj = parse_file(trajfile)
    numPedsGr = numpeds / len(sigmas)
    peds = np.unique(traj[:, 0])
    logging.info("=== npeds: %d, numpeds_group = %d, fps: %d", numpeds,
                 numPedsGr, fps)
    for ped in peds:
        # Take only data of ped i
        ptraj = traj[traj[:, 0] == ped]
        # Only when ped i moves
        ptraj2 = np.delete(ptraj, 1, 0)  # last element removed
        #        ptraj = ptraj[np.diff(ptraj[:, 2]) != 0] (original function)
        ptraj = ptraj2[np.diff(ptraj[:, 2]) != 0]  # new version
        # Distance
        dx = np.sqrt(np.sum((ptraj[0, 2:] - ptraj[-1, 2:])**2))
        # Time
        dt = (ptraj[-1, 1] - ptraj[0, 1]) / fps
        # Velocity
        v = dx / dt
        velocities.append(v)

    id_velocity = np.vstack((peds, velocities))
    np.savetxt(csv_file, id_velocity.T, delimiter=',', fmt=["%d", "%f"])

    # Plotting
    i = 0
    j = 1  # Subplot index
    logging.info("Ploting distributions...")
    for (mu, sigma, c) in zip(means, sigmas, colors):
        P.subplot("41%d" % j)
        numPedsGr = int(numPedsGr)
        n, bins, patches = P.hist(velocities[i:i + numPedsGr - 1],
                                  50,
                                  normed=1,
                                  histtype='bar',
                                  facecolor='%s' % c,
                                  alpha=0.7)
        y = P.normpdf(bins, mu, sigma)
        P.plot(bins,
               y,
               'k--',
               linewidth=1.5,
               label=r"$\mathcal{N}(%.2f, %.2f)$" % (mu, sigma))
        P.legend()
        i = int(i + numPedsGr)
        j += 1

    P.tight_layout()
    P.savefig(figname)
    if min(velocities) < min_velocity or max(velocities) > max_velocity:
        logging.critical(
            "%s exits with FAILURE. min_velocity = %.3f (>%.3f?), max_velocity = %.3f (<%.3f?)",
            argv[0], min(velocities), min_velocity, max(velocities),
            max_velocity)
        exit(FAILURE)
Example #19
 def pdf_model(x, p):
     print()
     print("pdf_model()")
     print("  x=%s" % x)
     print("  p=%s" % (p,))
     mu1, sig1, mu2, sig2, pi_1 = p
     print("  mu1:  %s" % mu1)
     print("  sig1: %s" % sig1)
     print("  mu2:  %s" % mu2)
     print("  sig2: %s" % sig2)
     print("  pi_1: %s" % pi_1)
     raw1 = py.normpdf(x, mu1, sig1)
     print("  raw1: %s" % raw1)
     raw2 = py.normpdf(x, mu2, sig2)
     print("  raw2: %s" % raw2)
     ret = pi_1 * raw1 + (1 - pi_1) * raw2
     print("  ret: %s" % ret)
     print()
     return ret
Example #20
def analysis(series_dir):
    files = glob(series_dir + "/*.dicm")
    files.sort()
    series_intensity = [imageIntensity(name) for name in files]
    y = normpdf(series_intensity, min(series_intensity), max(series_intensity))
    plot([i for i in range(len(series_intensity))], series_intensity, 'b')
    plot([i for i in range(len(series_intensity))], deriv2(series_intensity), 'r')
    grid(True)
    show()
    return series_intensity
Example #21
 def pdf_model(x, p):
     print()
     print('pdf_model()')
     print('  x=%s' % x)
     print('  p=%s' % (p, ))
     mu1, sig1, mu2, sig2, pi_1 = p
     print('  mu1:  %s' % mu1)
     print('  sig1: %s' % sig1)
     print('  mu2:  %s' % mu2)
     print('  sig2: %s' % sig2)
     print('  pi_1: %s' % pi_1)
     raw1 = py.normpdf(x, mu1, sig1)
     print('  raw1: %s' % raw1)
     raw2 = py.normpdf(x, mu2, sig2)
     print('  raw2: %s' % raw2)
     ret = pi_1 * raw1 + (1 - pi_1) * raw2
     print('  ret: %s' % ret)
     print()
     return ret
Example #22
def plotIRRChart(irr_values_lst, simulation_no, yearly, country):
    """Plots irr_values histogram and scatter plot for the @country and @simulation_no."""

    irr_values_lst = irr_values_lst[:-1]  # remove simple payback time

    figures = OrderedDict()
    for dic in irr_values_lst:
        dig_values = dic['digit_values']

        title1 = "%s - Sim. N%s. Histogram of %s - %s values" % (
            country, simulation_no, dic['field'], len(dig_values))
        figures[title1] = dig_values

        title2 = "%s - Sim N%s. Chart of %s - %s values" % (
            country, simulation_no, dic['field'], len(dig_values))
        figures[title2] = dig_values

    fig, axeslist = pylab.subplots(ncols=2, nrows=3)

    for ind, title in zip(range(len(figures)), figures):
        if title is not None:
            values = figures[title]
            if ind % 2 == 0:
                mu = numpy.mean(values)
                sigma = numpy.std(values)

                mu_offset = 0.10
                num_of_bins = 150
                weights = numpy.ones_like(values) / float(len(values))
                counts, bins, patches = axeslist.ravel()[ind].hist(
                    values,
                    bins=numpy.linspace(mu - mu_offset, mu + mu_offset,
                                        num_of_bins + 1),
                    weights=weights)

                y = []
                for x in bins:
                    try:
                        y.append(
                            pylab.normpdf(x, mu, sigma) * 2 * mu_offset /
                            num_of_bins)
                    except FloatingPointError:  # underflow encountered
                        y.append(0)

                axeslist.ravel()[ind].plot(bins, y, 'r--', linewidth=2)
                axeslist.ravel()[ind].set_xlim(mu - mu_offset, mu + mu_offset)
            else:
                limx, limy = getLimitValues(range(len(values)), values)
                axeslist.ravel()[ind].plot(values, 'o')
                axeslist.ravel()[ind].set_xlim(limx)
                axeslist.ravel()[ind].set_ylim(limy)

            axeslist.ravel()[ind].set_title(title)

    pylab.show()
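The factor applied to normpdf above deserves a line of explanation: with weights of 1/N each bar's height is the fraction of values in that bin, so the matching curve is the density times the bin width. Restated with the shim from after Example #1:

bin_width = 2 * mu_offset / num_of_bins  # the bins span [mu - mu_offset, mu + mu_offset]
expected_fraction = normpdf(bins, mu, sigma) * bin_width  # pdf mass per bin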
Example #23
def stability(paths, show=False, output=None, annotations=None, aspect=2):
  # COLOR = "#548BE3"
  COLOR = "#8CB8FF"
  figure = p.figure()
  pltnum = len(paths)//2 + len(paths)%2
  for i, fname in enumerate(paths):
    with open(fname) as fd:
      values = []
      t = fd.readline().strip("#")
      for l in fd.readlines():
        values += [float(l.strip())]
      p.subplot(pltnum, 2, i+1)

      ## title
      if annotations:
        p.title(annotations[i])
      else:
        p.title(t)
      avg = average(values)
      percents = list(map(lambda x: avg/x-1, values))
      n, bins, patches = p.hist(percents,
        bins=50, normed=True,
        histtype='bar', color=COLOR)

      mu, sigma = norm.fit(percents)
      y = p.normpdf(bins, mu, sigma)
      p.plot(bins, y, 'r-', linewidth=3)
      p.xlim(min(bins), max(bins))

      ## set aspect
      xmin, xmax = p.xlim()
      ymin, ymax = p.ylim()
      xr = xmax - xmin
      yr = ymax - ymin
      aspect = xr/yr/2
      g = p.gca()
      g.set_aspect(aspect)
      # p.figaspect(aspect)


    ## remove y axis
    yaxis = p.gca().yaxis
    yaxis.set_major_locator(MaxNLocator(nbins=4, prune='lower'))
    # yaxis.set_visible(False)

    ## xaxis
    xaxis = p.gca().xaxis
    xaxis.set_major_formatter(to_percent)
    xaxis.set_major_locator(MaxNLocator(nbins=5, prune='lower'))

  p.tight_layout(pad=0.5)
  if output:
    p.savefig(output, bbox_inches='tight')
  if show:
    p.show()
Example #24
def fig3(x):
    # Figure 3
    # now we create a cumulative histogram of the data
    #
    P.figure()

    n, bins, patches = P.hist(x,
                              50,
                              normed=1,
                              histtype='step',
                              cumulative=True)

    # add a line showing the expected distribution
    y = P.normpdf(bins, mu, sigma).cumsum()
    y /= y[-1]
    l = P.plot(bins, y, 'k--', linewidth=1.5)

    # create a second data-set with a smaller standard deviation
    sigma2 = 15.
    x = mu + sigma2 * P.randn(10000)

    n, bins, patches = P.hist(x,
                              bins=bins,
                              normed=1,
                              histtype='step',
                              cumulative=True)

    # add a line showing the expected distribution
    y = P.normpdf(bins, mu, sigma2).cumsum()
    y /= y[-1]
    l = P.plot(bins, y, 'r--', linewidth=1.5)

    # finally overplot a reversed cumulative histogram
    n, bins, patches = P.hist(x,
                              bins=bins,
                              normed=1,
                              histtype='step',
                              cumulative=-1)

    P.grid(True)
    P.ylim(0, 1.05)
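The cumsum-and-normalise trick above only approximates the cumulative distribution from pdf samples; the exact curve is available directly, assuming the same mu, sigma and bins:

from scipy.stats import norm

y = norm.cdf(bins, mu, sigma)  # exact expected cumulative distribution
l = P.plot(bins, y, 'k--', linewidth=1.5)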
Example #25
def fig1(x):
    # Figure 1
    # first create a single histogram
    #
    # the histogram of the data with histtype='step'
    P.figure()
    n, bins, patches = P.hist(x, 50, normed=1, histtype='stepfilled')
    P.setp(patches, 'facecolor', 'g', 'alpha', 0.75)

    # add a line showing the expected distribution
    y = P.normpdf(bins, mu, sigma)
    l = P.plot(bins, y, 'k--', linewidth=1.5)
Example #26
def fig1(x):
  # Figure 1
  # first create a single histogram
  #
  # the histogram of the data with histtype='step'
  P.figure()
  n, bins, patches = P.hist(x, 50, normed=1, histtype='stepfilled')
  P.setp(patches, 'facecolor', 'g', 'alpha', 0.75)

  # add a line showing the expected distribution
  y = P.normpdf(bins, mu, sigma)
  l = P.plot(bins, y, 'k--', linewidth=1.5)
Example #27
def densityplot(data):
    """
    Plots a histogram of daily returns from data, plus fitted normal density.
    """
    dailyreturns = percent_change(data)
    pylab.hist(dailyreturns, bins=200, normed=True)
    m, M = min(dailyreturns), max(dailyreturns)
    mu = pylab.mean(dailyreturns)
    sigma = pylab.std(dailyreturns)
    grid = pylab.linspace(m, M, 100)
    densityvalues = pylab.normpdf(grid, mu, sigma)
    pylab.plot(grid, densityvalues, 'r-')
    pylab.show()
Example #28
def gaussianSubstractor(frame, back_mu, back_sig):
    height = len(frame)
    width = len(frame[0])
    rgblist = len(frame[0][0])

    #tempFrame = np.zeros(shape=(height, width, rgblist), dtype=float)
    nopdf = 0
    diff_frame = np.zeros(shape=(height, width), dtype=float)

    frame = pylab.normpdf(frame, back_mu, back_sig)

    #limi = lambda px:((px-1)*255)

    #frame = (pow(frame[:][:][0]*frame[:][:][1]*frame[:][:][2], 0.33333333333333333))
    #frame = limi(frame)

    #frame  =  cv2.threshold(frame,125, 255, cv2.THRESH_BINARY)

    for i in range(0, height):
        for j in range(0, width):
            normdr = frame[i][j][0]
            normdg = frame[i][j][1]
            normdb = frame[i][j][2]
            nopdf = pow((normdr * normdg * normdb), 0.3333333333333333)
            px = limiarization(nopdf)
            diff_frame[i][j] = px
    """
    for i in xrange(0, height):
        for j in xrange(0, width):
            frame[i][j][0] = frame[i][j][0] * frame[i][j][1] * frame[i][j][2]
            frame[i][j][0] = pow(frame[i][j][0], 0.3333333333333333)

            frame[i][j][0] = limiarization(nopdf)
            frame[i][j][0] = frame[i][j][0]
    """

    #for i in range(0, len(frame[0][0])):

    #diff_framer = frame[:][:][0]
    #diff_frameg = frame[:][:][1]
    #diff_frameb = frame[:][:][2]

    #diff_frame = diff_frame**0.3333333333333333333333333333

    print('Diff Frame: ')
    print(frame)
    print()

    #cv2.imwrite('images/diff_frame.bmp', diff_frame)
    return diff_frame
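The per-pixel loops amount to a per-channel Gaussian likelihood, a geometric mean across the RGB axis, and a threshold; a vectorised sketch, assuming the normpdf shim from after Example #1 and the example's own limiarization helper:

import numpy as np

likelihood = normpdf(np.asarray(frame, dtype=float), back_mu, back_sig)
nopdf = np.prod(likelihood, axis=2) ** (1.0 / 3.0)  # geometric mean of R, G, B
diff_frame = np.vectorize(limiarization)(nopdf)     # same thresholding, elementwise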
Example #29
def extraHistogram():
    import pylab as P
    import numpy as N

    P.figure()
    bins = 25
    min = 23310.
    max = 23455.
    nu, binsu, patchesu = P.hist(telfocusOld,
                                 bins=bins,
                                 rwidth=0.9,
                                 range=(min, max),
                                 label='UnCorrected Data',
                                 fc='b',
                                 alpha=0.4,
                                 normed=True)
    n, bins, patches = P.hist(telfocusCorrected,
                              bins=bins,
                              rwidth=0.7,
                              range=(min, max),
                              label='Corrected Data',
                              fc='r',
                              alpha=0.6,
                              normed=True)
    #P.axvline(medianNew, label = 'Corrected Median', color = 'r', lw = 1.1)
    #P.axvline(medianOld, label = 'UnCorrected Median', color = 'b', lw = 1.1)
    y1 = P.normpdf(binsu, N.mean(telfocusOld), N.std(telfocusOld))
    y2 = P.normpdf(bins, N.mean(telfocusCorrected), N.std(telfocusCorrected))
    P.plot(binsu, y1, 'b-', linewidth=2.5, label='Gaussian Fit')
    P.plot(bins, y2, 'r-', linewidth=3., label='Gaussian Fit')
    P.xlim(min, max)
    P.xlabel('Telescope Focus + median Offset')
    P.ylabel('Normed Values')
    P.legend(shadow=True, loc='best')
    P.savefig('TelFocusHistogram.png')
    P.close()
Example #30
def probdist1(l):
    '''This function creates an array of random numbers from Gaussian 
    distribution, and then plots a histogram of those random numbers and 
    the expected probability distribution computed solely from the mean
    and variance of the data points.'''
    lstrand = []
    for i in range(l):
        lstrand.append(numpy.random.randn())
    mu = numpy.mean(lstrand)
    sigma = math.sqrt(numpy.var(lstrand))
    n, bins, patches = pylab.hist(lstrand, 100, normed=1)
    pylab.setp(patches, 'facecolor', 'g', 'alpha', 0.75)
    y = pylab.normpdf(bins, mu, sigma)
    line = pylab.plot(bins, y, 'k--', linewidth=1.5)
    pylab.show()
Example #31
def densityplot(data):
    """
    Plots a histogram of daily returns from data, 
    plus fitted normal density.
    """

    dailyreturns = data
    pylab.hist(dailyreturns, bins=200, normed=True)
    m, M = min(dailyreturns), max(dailyreturns)
    mu = pylab.mean(dailyreturns)
    sigma = pylab.std(dailyreturns)
    grid = pylab.linspace(m, M, 100)
    densityvalues = pylab.normpdf(grid, mu, sigma)
    pylab.plot(grid, densityvalues, 'r-')
    pylab.show()
Example #32
def main():
    df = pd.read_csv(INDATA)
    df['basin_id'] = np.arange(15006)+1
    df.set_index('basin_id', inplace=True)
    wdf = pd.read_csv(AREA)
    wdf.set_index('basin_id', inplace=True)
    df = df.join(wdf)  # DataFrame.join has no inplace argument
    df.dropna(inplace=True)
    x = np.asarray(df["2040"])
    w = np.asarray(df["F_AREA"])
    sd = np.std(x)
    n, bins, patches = pl.hist(x, 50, normed=1)
    y = pl.normpdf(bins, 0, sd)
    l = pl.plot(bins, y, 'k--', linewidth=1.5)
    pl.show()
Example #33
def plot_histogram(fig):
    mu, sigma = 100, 15
    x = mu + sigma*randn(10000)

    axes = fig.gca()
    # the histogram of the data
    n, bins, patches = axes.hist(x, 100, normed=1)

    # add a 'best fit' line
    y = normpdf( bins, mu, sigma)
    l = axes.plot(bins, y, 'r--', linewidth=2)

    axes.set_xlim((40, 160))
    axes.set_xlabel('Smarts')
    axes.set_ylabel('P')
    axes.set_title('IQ: mu=100, sigma=15')
Example #34
def plot_histogram(fig):
    mu, sigma = 100, 15
    x = mu + sigma*randn(10000)

    axes = fig.gca()
    # the histogram of the data
    n, bins, patches = axes.hist(x, 100, normed=1)

    # add a 'best fit' line
    y = normpdf( bins, mu, sigma)
    l = axes.plot(bins, y, 'r--', linewidth=2)

    axes.set_xlim((40, 160))
    axes.set_xlabel('Smarts')
    axes.set_ylabel('P')
    axes.set_title('IQ: mu=100, sigma=15')
Example #35
def plot_spectrum_componenten():
    """Plot voor componenten pulshoogte spectrum"""

    multiplot = MultiPlot(3, 1, axis='semilogy', width=r'0.5\linewidth')

    pylab.clf()
    subplot0 = multiplot.get_subplot_at(0, 0)
    subplot1 = multiplot.get_subplot_at(1, 0)
    subplot2 = multiplot.get_subplot_at(2, 0)
    x = pylab.linspace(1e-10, 11, 200)
    th_signal = []
    signal = pylab.zeros(x.shape)
    for N in range(1, 15):
        scale = 100. / N ** 3
        pdf = 80 * scale * pylab.normpdf(x, N, pylab.sqrt(N) * 0.35)
        # pylab.plot(x, pdf)
        subplot1.plot(x, pdf, mark=None)
        # subplot1.add_pin('%d MIP' % N, 'above right', x=N, use_arrow=True,
        #                  style='lightgray')
        subplot2.plot(x, pdf, mark=None, linestyle='lightgray')
        signal += pdf
        th_signal.extend(int(100 * scale) * [N])

    gammas = 1e2 * x ** -3
    subplot1.plot(x, gammas, mark=None)
    subplot2.plot(x, gammas, mark=None, linestyle='lightgray')

    signal += gammas
    pylab.plot(x, signal)
    pylab.plot(x, gammas)
    subplot2.plot(x, signal, mark=None)
    pylab.yscale('log')
    pylab.ylim(ymin=1)

    n, bins = pylab.histogram(th_signal, bins=pylab.linspace(0, 11, 100))
    n = pylab.where(n == 0, 1e-10, n)
    subplot0.histogram(n, bins)

    multiplot.show_xticklabels_for_all([(0, 0), (2, 0)])
    multiplot.set_xticks_for_all([(0, 0), (2, 0)], range(20))
    # multiplot.show_yticklabels_for_all([(0, 0), (1, 0), (2, 0)])
    multiplot.set_xlabel('Aantal deeltjes')
    multiplot.set_ylabel('Aantal events')
    multiplot.set_ylimits_for_all(min=1, max=1e5)
    multiplot.set_xlimits_for_all(min=0, max=10.5)

    multiplot.save('spectrum_componenten')
Example #36
	def test_NormalByCauchyAcceptReject(self, no_of_samples=10000):
		import rpy, math, random, pylab
		sample_list = []
		M = math.pi/(math.sqrt(2*math.pi)) +0.1 #"+0.1" makes it a little bit less efficient, but keeps the density around 0 normal
		for i in range(no_of_samples):
			cauchy_sample = rpy.r.rcauchy(1)
			u = random.random()
			normal_mass = rpy.r.dnorm(cauchy_sample)
			cauchy_mass = 1/(math.pi*(1+cauchy_sample*cauchy_sample))
			if u<= normal_mass/(M*cauchy_mass):
				sample_list.append(cauchy_sample)
		accept_ratio = len(sample_list)/float(no_of_samples)
		print "accept ratio is %s"%(accept_ratio)
		n, bins, patches = pylab.hist(sample_list, 100, normed=1)
		pylab.title("Normal via cauchy, accept_ratio:%s"%accept_ratio)
		y = pylab.normpdf( bins, 0, 1)
		l = pylab.plot(bins, y, 'ro-', linewidth=2)
		pylab.show()
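A NumPy-only sketch of the same accept-reject scheme, without the rpy dependency. Note that the tight envelope constant for a standard-Cauchy proposal is sqrt(2*pi)*exp(-1/2) ~ 1.52, attained at |x| = 1; the example's M ~ 1.35 only dominates the ratio near 0, which is what its comment concedes:

import numpy as np

rng = np.random.default_rng()
M = np.sqrt(2.0 * np.pi) * np.exp(-0.5)  # tight bound on normal pdf / cauchy pdf
proposals = rng.standard_cauchy(10000)
u = rng.random(10000)
normal_mass = np.exp(-proposals ** 2 / 2.0) / np.sqrt(2.0 * np.pi)
cauchy_mass = 1.0 / (np.pi * (1.0 + proposals ** 2))
samples = proposals[u <= normal_mass / (M * cauchy_mass)]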
Example #37
def analyse_residuals(data):
    model = Model12(data)
    iterative_estimation(model, 3)
    bins = 30
    residuals = np.array([ model.error(s,p) for s in range(data.students) for p in range(data.problems) if not math.isnan(model.error(s,p))])
    mean, std = np.mean(residuals), np.std(residuals)
    print "std", std
    n, bins, patches = plt.hist(residuals, bins, normed=1)
    y = plt.normpdf(bins, mean, std)
    plt.rcParams.update({'font.size': 16})
    plt.plot(bins, y, linewidth=3)
    plt.title("Histogram of residuals")
    plt.savefig("results/residuals-hist.svg")
    print "skew", sp.stats.skew(residuals)
    print "skewtest", sp.stats.skewtest(residuals)
    plt.figure()
    sp.stats.probplot(residuals, dist="norm", plot=plt)
    plt.savefig("results/residuals-pp.png", dpi = 300)
Example #38
def plot_afstelling_pmt():
    """Plot voor afstellen spanning"""

    multiplot = MultiPlot(1, 3, width=r'0.4\linewidth')

    x = pylab.linspace(1e-10, 11, 200)

    signal = pylab.zeros(x.shape)

    for N in range(1, 15):
        scale = 100. / N ** 3
        pdf = 80 * scale * pylab.normpdf(x, N, pylab.sqrt(N) * 0.35)
        signal += pdf

    gammas = 1e2 * x ** -3
    signal += gammas

    p = multiplot.get_subplot_at(0, 0)
    v = x * 35
    p.plot(v, pylab.where(v >= 30, signal, 1), mark=None)
    p.draw_vertical_line(200, linestyle='gray')
    p.set_label(r"$V_\mathrm{PMT}$ te laag")

    p = multiplot.get_subplot_at(0, 1)
    v = x * 200
    p.plot(v, pylab.where(v >= 30, signal, 1), mark=None)
    p.draw_vertical_line(200, linestyle='gray')
    p.set_label(r"$V_\mathrm{PMT}$ correct")

    p = multiplot.get_subplot_at(0, 2)
    v = x * 400
    p.plot(v, pylab.where(v >= 30, signal, 1), mark=None)
    p.draw_vertical_line(200, linestyle='gray')
    p.set_label(r"$V_\mathrm{PMT}$ te hoog")

    multiplot.set_xlabel(r"Pulseheight [\si{\milli\volt}]")
    multiplot.set_ylabel("Counts")
    multiplot.set_xlimits_for_all(min=0, max=1000)
    multiplot.set_ylimits_for_all(min=1)
    multiplot.show_xticklabels_for_all()
    multiplot.set_xticklabels_position(0, 1, 'top')
    multiplot.set_yticks_for_all(ticks=None)
    multiplot.save('afstelling_pmt')
Example #39
def plotHistPopScore(population, fitness=False):
   """ Population score distribution histogram

   Example:
      >>> Interaction.plotHistPopScore(population)

   :param population: population object (:class:`GPopulation.GPopulation`)
   :param fitness: if True, the fitness score will be used, otherwise, the raw.
   :rtype: None

   """
   score_list = getPopScores(population, fitness)
   n, bins, patches = pylab.hist(score_list, 50, facecolor='green', alpha=0.75, normed=1)
   pylab.plot(bins, pylab.normpdf(bins, numpy.mean(score_list), numpy.std(score_list)), 'r--')
   pylab.xlabel('Score')
   pylab.ylabel('Frequency')
   pylab.grid(True)
   pylab.title("Plot of population score distribution")
   pylab.show()
Example #40
def plotHistSwarmFitness(topology, bestFitness=False):
    """ Swarm fitness distribution histogram 

       Example:
          >>> Interaction.plotHistSwarmFitness(topology)

   :param topology: topology object subclass of (:class:`TopologyBase.TopologyBase`)
   :param bestFitness: If it's True, the bestFitness score will be used, otherwise, the current one.
   :rtype: None
   
   """
    fitness_list = getSwarmFitness(topology,bestFitness)
    n,bins,patches = pylab.hist(fitness_list,50,facecolor='green',alpha=0.75,normed=1)
    pylab.plot(bins,pylab.normpdf(bins,numpy.mean(fitness_list),numpy.std(fitness_list)),'r--')
    pylab.xlabel('Fitness')
    pylab.ylabel('Frequency')
    pylab.grid(True)
    pylab.title("Plot of the swarm fitness distribution")
    pylab.show()
Example #41
def run_rimea_test7(inifile, trajfile):
    velocities = []
    # Read data
    fps, numpeds, traj = parse_file(trajfile)
    numPedsGr = numpeds // len(sigmas)
    peds = np.unique(traj[:, 0])
    logging.info("=== npeds: %d, numpeds_group = %d, fps: %d", numpeds, numPedsGr, fps)
    for ped in peds:
        # Take only data of ped i
        ptraj = traj[traj[:, 0] == ped]
        # Only when ped i moves
        ptraj = ptraj[np.diff(ptraj[:, 2]) != 0]
        # Distance
        dx = np.sqrt(np.sum((ptraj[0, 2:] - ptraj[-1, 2:])**2))
        # Time
        dt = (ptraj[-1, 1] - ptraj[0, 1])/fps
        # Velocity
        v = dx/dt
        velocities.append(v)

    id_velocity = np.vstack((peds, velocities))
    np.savetxt(csv_file, id_velocity.T, delimiter=',', fmt=["%d", "%f"])
    
    # Plotting
    i = 0
    j = 1 # Subplot index
    logging.info("Ploting distributions...")
    for (mu, sigma, c) in zip(means, sigmas, colors):
        P.subplot("41%d"%j)
        n, bins, patches = P.hist(velocities[i:i+numPedsGr-1],50, normed=1,histtype='bar',facecolor='%s'%c, alpha=0.7)
        y = P.normpdf(bins, mu, sigma)
        P.plot(bins, y, 'k--',linewidth=1.5,label=r"$\mathcal{N}(%.2f, %.2f)$"%(mu, sigma))
        P.legend()
        i += numPedsGr
        j += 1

    P.tight_layout()
    P.savefig(figname)
    if min(velocities) < min_velocity or max(velocities) > max_velocity:
        logging.critical("%s exits with FAILURE. min_velocity = %.3f (>%.3f?), max_velocity = %.3f (<%.3f?)",
                         argv[0], min(velocities), min_velocity, max(velocities), max_velocity)
        exit(FAILURE)
Example #42
def draw_normal_histogram(x, bins, y_label='', x_label='', title="", body=""):
    "Plot a histogram chart"
    # x are matplotlib pylab arrays, body is a StringIO
    import pylab
    import matplotlib
    # select a non-interactive backend before any drawing
    matplotlib.use('Agg')
    # clear graph
    matplotlib.pyplot.clf()
    n, bins1, patches = pylab.hist(x, bins, histtype='bar', facecolor='green', alpha=0.75)
    #pylab.setp(patches, 'facecolor', 'g', 'alpha', 0.75)
    pylab.ylabel(y_label)
    pylab.xlabel(x_label)
    # add a line showing the expected distribution
    mu = pylab.mean(x)
    sigma = pylab.std(x)
    y = pylab.normpdf(bins, mu, sigma)
    l = pylab.plot(bins, y, 'k--', linewidth=1.5)

    pylab.title(title)
    
    pylab.grid(True)
    pylab.savefig(body) 
    return body.getvalue()
Example #43
def main():
    ''' Main Function'''
    # Start and End date of the charts
    dt_start = dt.datetime(2011, 1, 1)
    dt_end = dt.datetime(2012, 12, 31)

    #goog = pd.io.data.get_data_yahoo("GOOG",  dt_start, dt_end) # not working
    SPY = DataReader("SPY", "yahoo", dt_start, dt_end)
    #YHOO = DataReader("YHOO",  "yahoo", dt_start, dt_end)

    # normalize prices
    nPrice = sc.normalizedPrice(SPY['Adj Close'].values)

    #daily return
    daily_ret = sc.computeDailyReturn(nPrice)
    plt.subplot(1, 2, 1)
    plt.plot(daily_ret * 100, 'b-')
    plt.ylabel('Daily return (%)')
    plt.legend(['SPY-Daily Return based on Adjusted close'])

    #daily return histogram
    plt.subplot(1, 2, 2)
    n, bins, patches = plt.hist(daily_ret,
                                100,
                                normed=1,
                                facecolor='green',
                                alpha=0.5)
    mean = np.mean(daily_ret)
    sigma = np.std(daily_ret)
    y = P.normpdf(bins, mean, sigma)
    plt.plot(bins, y, 'k--', linewidth=1.5)

    plt.ylabel('Probability')
    plt.legend(
        ['normal distribution approximation', 'SPY-histogram of daily return'])

    plt.show()
Example #44
def two_qubit_ssro_fidelity(label,
                            fig_format='png',
                            qubit_labels=('q0', 'q1')):
    # extracting data sets
    states = ['00', '01', '10', '11']
    nr_states = len(states)
    namespace = globals()

    data = ma.MeasurementAnalysis(auto=False, label=label)
    data.get_naming_and_values()

    # extract fit parameters for q0

    w0_data = data.measured_values[0]
    w1_data = data.measured_values[1]
    lengths = []
    i = 0
    for nr_state, state in enumerate(states):
        if i == 0:
            namespace['w0_data_r{}'.format(state)] = []
            namespace['w1_data_r{}'.format(state)] = []

    for nr_state, state in enumerate(states):
        namespace['w0_data_sub_{}'.format(
            state)] = w0_data[nr_state::nr_states]
        namespace['w1_data_sub_{}'.format(
            state)] = w1_data[nr_state::nr_states]
        lengths.append(len(w0_data[nr_state::nr_states]))

    # capping off the maximum lengths
    min_len = np.min(lengths)
    for nr_state, state in enumerate(states):
        namespace['w0_data_sub_{}'.format(state)] = namespace[
            'w0_data_sub_{}'.format(state)][0:min_len]
        namespace['w1_data_sub_{}'.format(state)] = namespace[
            'w1_data_sub_{}'.format(state)][0:min_len]
    for nr_state, state in enumerate(states):
        namespace['w0_data_r{}'.format(state)] += list(
            namespace['w0_data_sub_{}'.format(state)])
        namespace['w1_data_r{}'.format(state)] += list(
            namespace['w1_data_sub_{}'.format(state)])

    for nr_state, state in enumerate(states):
        namespace['w0_data_{}'.format(state)] = namespace['w0_data_r{}'.format(
            state)]
        namespace['w1_data_{}'.format(state)] = namespace['w1_data_r{}'.format(
            state)]

    min_len_all = min_len / 2

    ###########################################################################
    # Extracting and plotting the results for q0 (first weight function)
    ###########################################################################

    ma.SSRO_Analysis(label=label,
                     auto=True,
                     channels=[data.value_names[0]],
                     sample_0=0,
                     sample_1=1,
                     nr_samples=4,
                     rotate=False)
    ana = ma.MeasurementAnalysis(label=label, auto=False)
    ana.load_hdf5data()
    Fa_q0 = ana.data_file['Analysis']['SSRO_Fidelity'].attrs['F_a']
    Fd_q0 = ana.data_file['Analysis']['SSRO_Fidelity'].attrs['F_d']
    mu0_0 = ana.data_file['Analysis']['SSRO_Fidelity'].attrs['mu0_0']
    mu1_0 = ana.data_file['Analysis']['SSRO_Fidelity'].attrs['mu1_0']
    mu0_1 = ana.data_file['Analysis']['SSRO_Fidelity'].attrs['mu0_1']
    mu1_1 = ana.data_file['Analysis']['SSRO_Fidelity'].attrs['mu1_1']

    sigma0_0 = ana.data_file['Analysis']['SSRO_Fidelity'].attrs['sigma0_0']
    sigma1_1 = ana.data_file['Analysis']['SSRO_Fidelity'].attrs['sigma1_1']
    sigma0_1 = ana.data_file['Analysis']['SSRO_Fidelity'].attrs['sigma0_1']
    sigma1_0 = ana.data_file['Analysis']['SSRO_Fidelity'].attrs['sigma1_0']
    frac1_0 = ana.data_file['Analysis']['SSRO_Fidelity'].attrs['frac1_0']
    frac1_1 = ana.data_file['Analysis']['SSRO_Fidelity'].attrs['frac1_1']
    V_opt = ana.data_file['Analysis']['SSRO_Fidelity'].attrs['V_th_a']
    V_opt_d = ana.data_file['Analysis']['SSRO_Fidelity'].attrs['V_th_d']

    SNR_q0 = ana.data_file['Analysis']['SSRO_Fidelity'].attrs['SNR']

    n, bins0, patches = plt.hist(namespace['w0_data_00'],
                                 bins=int(min_len_all / 50),
                                 label='input state {}'.format(state),
                                 histtype='step',
                                 color='red',
                                 normed=True,
                                 visible=False)
    n, bins1, patches = plt.hist(namespace['w0_data_01'],
                                 bins=int(min_len_all / 50),
                                 label='input state {}'.format(state),
                                 histtype='step',
                                 color='red',
                                 normed=True,
                                 visible=False)
    fig, ax = plt.subplots(figsize=(8, 5))
    colors = ['blue', 'red', 'grey', 'magenta']
    markers = ['o', 'o', 'o', 'v']

    for marker, color, state in zip(markers, colors, states):

        n, bins, patches = ax.hist(namespace['w0_data_{}'.format(state)],
                                   bins=int(min_len_all / 50),
                                   histtype='step',
                                   normed=True,
                                   visible=False)
        ax.plot(bins[:-1] + 0.5 * (bins[1] - bins[0]),
                n,
                color=color,
                linestyle='None',
                marker=marker,
                label='|{}>'.format(state))

    y0 = (1-frac1_0)*pylab.normpdf(bins0, mu0_0, sigma0_0) + \
        frac1_0*pylab.normpdf(bins0, mu1_0, sigma1_0)
    # y1_0 = frac1_0*pylab.normpdf(bins0, mu1_0, sigma1_0)
    # y0_0 = (1-frac1_0)*pylab.normpdf(bins0, mu0_0, sigma0_0)

    # building up the histogram fits for on measurements
    y1 = (1-frac1_1)*pylab.normpdf(bins1, mu0_1, sigma0_1) + \
        frac1_1*pylab.normpdf(bins1, mu1_1, sigma1_1)
    # y1_1 = frac1_1*pylab.normpdf(bins1, mu1_1, sigma1_1)
    # y0_1 = (1-frac1_1)*pylab.normpdf(bins1, mu0_1, sigma0_1)

    ax.semilogy(bins0, y0, 'b', linewidth=1.5, label='fit |00>')
    ax.semilogy(bins1, y1, 'r', linewidth=1.5, label='fit |01>')
    ax.set_ylim(0.2e-6, 1e-3)

    pdf_max = (max(max(y0), max(y1)))
    ax.set_ylim(pdf_max / 100, 2 * pdf_max)

    ax.set_title('Histograms for {}'.format(qubit_labels[0]))
    ax.set_xlabel('Integration result {} (a.u.)'.format(qubit_labels[0]))
    ax.set_ylabel('Fraction of counts')
    ax.axvline(V_opt,
               ls='--',
               linewidth=2,
               color='grey',
               label='SNR={0:.2f}\n $F_a$={1:.5f}\n $F_d$={2:.5f}'.format(
                   SNR_q0, Fa_q0, Fd_q0))
    ax.axvline(V_opt_d, ls='--', linewidth=2, color='black')
    ax.legend(frameon=False, loc='upper right')
    a = ax.get_xlim()
    ax.set_xlim(a[0], a[0] + (a[1] - a[0]) * 1.2)
    plt.savefig(join(ana.folder, 'histogram_w0.' + fig_format),
                format=fig_format)
    plt.close()

    V_th = np.zeros(len(qubit_labels))
    V_th_d = np.zeros(len(qubit_labels))

    V_th[0] = V_opt
    V_th_d[0] = V_opt_d

    ###########################################################################
    # Extracting and plotting the results for q1 (second weight function)
    ###########################################################################

    ma.SSRO_Analysis(label=label,
                     auto=True,
                     channels=[data.value_names[1]],
                     sample_0=0,
                     sample_1=2,
                     nr_samples=4,
                     rotate=False)
    ana = ma.MeasurementAnalysis(label=label, auto=False)
    ana.load_hdf5data()
    Fa_q1 = ana.data_file['Analysis']['SSRO_Fidelity'].attrs['F_a']
    Fd_q1 = ana.data_file['Analysis']['SSRO_Fidelity'].attrs['F_d']
    mu0_0 = ana.data_file['Analysis']['SSRO_Fidelity'].attrs['mu0_0']
    mu1_0 = ana.data_file['Analysis']['SSRO_Fidelity'].attrs['mu1_0']
    mu0_1 = ana.data_file['Analysis']['SSRO_Fidelity'].attrs['mu0_1']
    mu1_1 = ana.data_file['Analysis']['SSRO_Fidelity'].attrs['mu1_1']

    sigma0_0 = ana.data_file['Analysis']['SSRO_Fidelity'].attrs['sigma0_0']
    sigma1_1 = ana.data_file['Analysis']['SSRO_Fidelity'].attrs['sigma1_1']
    sigma0_1 = ana.data_file['Analysis']['SSRO_Fidelity'].attrs['sigma0_1']
    sigma1_0 = ana.data_file['Analysis']['SSRO_Fidelity'].attrs['sigma1_0']
    frac1_0 = ana.data_file['Analysis']['SSRO_Fidelity'].attrs['frac1_0']
    frac1_1 = ana.data_file['Analysis']['SSRO_Fidelity'].attrs['frac1_1']
    V_opt = ana.data_file['Analysis']['SSRO_Fidelity'].attrs['V_th_a']
    V_opt_d = ana.data_file['Analysis']['SSRO_Fidelity'].attrs['V_th_d']
    SNR_q1 = ana.data_file['Analysis']['SSRO_Fidelity'].attrs['SNR']

    n, bins0, patches = plt.hist(namespace['w1_data_00'],
                                 bins=int(min_len_all / 50),
                                 label='input state {}'.format(state),
                                 histtype='step',
                                 color='red',
                                 normed=True,
                                 visible=False)
    n, bins1, patches = plt.hist(namespace['w1_data_10'],
                                 bins=int(min_len_all / 50),
                                 label='input state {}'.format(state),
                                 histtype='step',
                                 color='red',
                                 normed=True,
                                 visible=False)
    fig, axes = plt.subplots(figsize=(8, 5))
    colors = ['blue', 'red', 'grey', 'magenta']
    markers = ['o', 'o', 'o', 'v']
    for marker, color, state in zip(markers, colors, states):

        n, bins, patches = plt.hist(namespace['w1_data_{}'.format(state)],
                                    bins=int(min_len_all / 50),
                                    histtype='step',
                                    normed=True,
                                    visible=False)
        pylab.plot(bins[:-1] + 0.5 * (bins[1] - bins[0]),
                   n,
                   color=color,
                   linestyle='None',
                   marker=marker)

    y0 = (1-frac1_0)*pylab.normpdf(bins0, mu0_0, sigma0_0) + \
        frac1_0*pylab.normpdf(bins0, mu1_0, sigma1_0)

    # building up the histogram fits for on measurements
    y1 = (1-frac1_1)*pylab.normpdf(bins1, mu0_1, sigma0_1) + \
        frac1_1*pylab.normpdf(bins1, mu1_1, sigma1_1)
    # y1_1 = frac1_1*pylab.normpdf(bins1, mu1_1, sigma1_1)
    # y0_1 = (1-frac1_1)*pylab.normpdf(bins1, mu0_1, sigma0_1)

    plt.semilogy(bins0, y0, 'b', linewidth=1.5, label='fit |00>')

    plt.semilogy(bins1, y1, 'r', linewidth=1.5, label='fit |10>')
    (pylab.gca()).set_ylim(0.2e-6, 1e-3)
    pdf_max = (max(max(y0), max(y1)))
    (pylab.gca()).set_ylim(pdf_max / 100, 2 * pdf_max)

    axes.set_title('Histograms for {}'.format(qubit_labels[1]))
    plt.xlabel('Integration result {} (a.u.)'.format(qubit_labels[1]))
    plt.ylabel('Fraction of counts')
    plt.axvline(V_opt,
                ls='--',
                linewidth=2,
                color='grey',
                label='SNR={0:.2f}\n $F_a$={1:.5f}\n $F_d$={2:.5f}'.format(
                    SNR_q1, Fa_q1, Fd_q1))
    plt.axvline(V_opt_d, ls='--', linewidth=2, color='black')
    plt.legend(frameon=False, loc='upper right')
    a = plt.xlim()
    plt.xlim(a[0], a[0] + (a[1] - a[0]) * 1.2)
    plt.savefig(join(ana.folder, 'histogram_w1.' + fig_format),
                format=fig_format)
    plt.close()
    V_th[1] = V_opt
    V_th_d[1] = V_opt_d

    # calculating cross-talk matrix and inverting
    ground_state = '00'
    weights = [0, 1]
    cal_states = ['01', '10']
    ground_state = '00'
    mu_0_vec = np.zeros(len(weights))
    for j, weight in enumerate(weights):
        mu_0_vec[j] = np.average(namespace['w{}_data_{}'.format(
            weight, ground_state)])

    mu_matrix = np.zeros((len(cal_states), len(weights)))
    for i, state in enumerate(cal_states):
        for j, weight in enumerate(weights):
            mu_matrix[i, j] = np.average(namespace['w{}_data_{}'.format(
                weight, state)]) - mu_0_vec[j]

    mu_matrix_inv = inv(mu_matrix)
    V_th_cor = np.dot(mu_matrix_inv, V_th)
    V_offset_cor = np.dot(mu_matrix_inv, mu_0_vec)
    res_dict = {
        'mu_matrix': mu_matrix,
        'V_th': V_th,
        'V_th_d': V_th_d,
        'mu_matrix_inv': mu_matrix_inv,
        'V_th_cor': V_th_cor,
        'V_offset_cor': V_offset_cor,
        'Fa_q0': Fa_q0,
        'Fa_q1': Fa_q1,
        'Fd_q0': Fd_q0,
        'Fd_q1': Fd_q1,
        'SNR_q0': SNR_q0,
        'SNR_q1': SNR_q1
    }
    return res_dict
Example #45
def beam_histogram(n_part, variable, string):

	fig 		= figure()
	ax 			= fig.add_subplot(1, 1, 1)

	mu 			= np.mean(variable)
	sigma 		= np.std(variable)

	# To put it in units of sigma 
	# --------------------------------------------------------------------------------------------------------------
	var_sigma	= variable/sigma
	var_max 	= max(var_sigma)
	var_min 	= min(var_sigma)

	# --------------------------------------------------------------------------------------------------------------
	# Compute the best number of bins 
	# --------------------------------------------------------------------------------------------------------------

	# Interquartile range (IQR)
	# --------------------------------------------------------------------------------------------------------------
	IQR = np.percentile(var_sigma, 75) - np.percentile(var_sigma, 25)  # np.percentile takes percentages in 0-100

	# Bin size following the Freedman Diaconis rule
	# --------------------------------------------------------------------------------------------------------------
	bin_size 	= 2 * IQR * n_part**(-1.0/3)
	print('Bin size = %s' % bin_size)

	# Number of bins
	# --------------------------------------------------------------------------------------------------------------
	nbins = int((var_max - var_min) / bin_size)
	print('Number of bins = %s' % nbins)

	# --------------------------------------------------------------------------------------------------------------
	# Plot Histogram of Samples
	# --------------------------------------------------------------------------------------------------------------
	n, bins, patches = P.hist(var_sigma, bins=nbins - nbins // 3, normed=True, histtype='stepfilled', label='Histogram of the samples')
	P.setp(patches, 'facecolor', 'g', 'alpha', 0.75)

	# --------------------------------------------------------------------------------------------------------------
	# Plot the Probability Density Function
	# --------------------------------------------------------------------------------------------------------------
	mu_new			= np.mean(var_sigma)
	sigma_new		= np.std(var_sigma)
	y 				= P.normpdf(bins, mu_new, sigma_new)
	P.plot(bins, y, 'k--', linewidth=1.5, label = 'Probability Density Function')

	density = gaussian_kde(var_sigma)  # kernel density estimate of the samples
	xgrid = np.linspace(var_min, var_max, n_part)
	plt.plot(xgrid, density(xgrid), 'r--', linewidth=1.5, label = 'Kernel Density Estimation Method')

	# Horizontal axis
	# --------------------------------------------------------------------------------------------------------------
	plt.xlabel(r'%s [$\sigma$]'%string)
	plt.ticklabel_format(style = 'sci', axis = 'x', scilimits = (0,0))
	plt.xlim(var_min, var_max)
	# plt.grid(b = True, which = 'both', axis = 'both', linestyle = '--')

	# Vertical axis
	# --------------------------------------------------------------------------------------------------------------
	plt.ylabel(r'Normalized beam profile')
	# ax.set_yscale('log')
	# plt.ylim(0, max(pdf_fitted))
	# plt.grid(b = True, which = 'minor', axis = 'both', linestyle = '--')
	
	# Title, legends and annotations
	# --------------------------------------------------------------------------------------------------------------
	# title 		= 'Distribution in : particles = %.0f, $\mu$ = %E, $\sigma$ = %E'%(n_part,mu,sigma)
	# plt.title(title)
	plt.legend(loc = 'upper left')
	plt.text((var_max/2) + (var_max/14), max(y)/2 + max(y)/3.0, r'Particles = %.0f'%n_part)
	plt.text((var_max/2) + (var_max/14), max(y)/2 + max(y)/3.7 , r'$\mu$ = %E'%mu)
	plt.text((var_max/2) + (var_max/14), max(y)/2 + max(y)/4.5, r'$\sigma$ = %E'%sigma)   
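A quick way to exercise beam_histogram on synthetic data; the particle count and Gaussian spread below are arbitrary placeholders, not values from the original beam.

# Hypothetical smoke test: 10000 Gaussian 'particles' for the x coordinate.
import numpy as np
n_part = 10000
x = np.random.normal(0.0, 2.5e-3, n_part)  # assumed beam coordinates
beam_histogram(n_part, x, 'x')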
Ejemplo n.º 46
0
def comb_plots(directory, out_directory, skip):
    #    open(directory, 'w').close()
    air_con = []
    paste_air = []
    avg_ch = []
    spac_fac = []
    freq = []

    print directory

    if directory[-1] != '\\':  # ensure the directory path ends in a backslash
        directory = directory + '\\'

    if out_directory[-1] != '\\':
        out_directory = out_directory + '\\'

    files = glob(directory + '*.txt')  ##Only uses files ending in '.txt'
    if not (files):
        files = glob(directory + '.*.txt')
    files = files[::skip]

    #Delete Files in Output Directory
    for fn in os.listdir(out_directory):
        if fn != 'Thumbs.db':
            os.remove(out_directory + fn)

    for file_name in files:
        file_output = out_directory + file_name[start_ind:-4] + '.out'
        #        os.remove(out_directory)
        # Names the output files '.out'.

        a, p, ch, s, f, v, count_i = Main(
            file_name, file_output, start_row,
            skip_row)  #Calling variables from Bubble_Code.py
        air_con += a
        paste_air += p
        avg_ch += ch
        spac_fac += s
        freq += f

#    mu, sigma=mean(a_con), std(a_con)

#Making the histograms
    plt.subplot(1, 1, 1)

    file_output = out_directory + 'Combined Air Content' + '.png'
    normal_hist(air_con, 50, range=(0, airxa), facecolor='g')
    mu_ac = np.mean(air_con)
    sigma_ac = np.std(air_con)
    plt.title(name_plot + '\nCombined Air Content')
    plt.text(50, .1, r'$\mu=%.3f,\ \sigma=%.3f$' % (mu_ac, sigma_ac))
    plt.xlabel('Air Content')
    plt.ylabel('Frequency')
    plt.axis([0, airxa, 0, airya])
    j = np.linspace(0, airxa, num=50)
    y = P.normpdf(j, mu_ac, sigma_ac) * airxa / 50
    P.plot(j, y, 'k', linewidth=1.5)
    plt.axvline(mu_ac, color='b', linestyle='dashed', linewidth=2)
    plt.savefig(file_output)
    plt.cla()

    file_output = out_directory + 'Combined Average Chord Length' + '.png'
    normal_hist(avg_ch, 50, range=(0, aclxa), facecolor='violet')
    mu_avg = np.mean(avg_ch)
    sigma_avg = np.std(avg_ch)
    plt.title(name_plot + '\nCombined Average Chord Length')
    plt.text(60, .15, r'$\mu=%.3f,\ \sigma=%.3f$' % (mu_avg, sigma_avg))
    plt.xlabel('Average Chord Length')
    plt.ylabel('Frequency')
    plt.axis([0, aclxa, 0, aclya])
    x = np.linspace(0, aclxa, num=50)
    w = P.normpdf(x, mu_avg, sigma_avg) * aclxa / 50
    P.plot(x, w, 'k', linewidth=1.5)
    plt.axvline(mu_avg, color='b', linestyle='dashed', linewidth=2)
    plt.savefig(file_output)
    plt.cla()

    file_output = out_directory + 'Combined Paste Air Ratio' + '.png'
    normal_hist(paste_air, 50, range=(0, paxa), facecolor='y')
    mu_pa = np.mean(paste_air)
    sigma_pa = np.std(paste_air)
    plt.title(name_plot + '\nCombined Paste Air Ratio')
    plt.text(20, .3, r'$\mu=%.3f,\ \sigma=%.3f$' % (mu_pa, sigma_pa))
    plt.xlabel('Paste Air Ratio')
    plt.ylabel('Frequency')
    plt.axis([0, paxa, 0, paya])
    x = np.linspace(0, paxa, num=50)
    #    y = P.gamma(mu_pa, sigma_pa, x)*paxa/20
    y = P.normpdf(x, mu_pa, sigma_pa) * paxa / 50
    P.plot(x, y, 'k', linewidth=1.5)
    plt.axvline(mu_pa, color='b', linestyle='dashed', linewidth=2)
    plt.savefig(file_output)
    plt.cla()

    file_output = out_directory + 'Combined Spacing Factor' + '.png'
    normal_hist(spac_fac, 50, range=(0, spacxa), facecolor='orange')
    mu_spac = np.mean(spac_fac)
    sigma_spac = np.std(spac_fac)
    plt.title(name_plot + '\nCombined Spacing Factor')
    plt.text(20, .1, r'$\mu=%.3f,\ \sigma=%.3f$' % (mu_spac, sigma_spac))
    plt.xlabel('Spacing Factor')
    plt.ylabel('Frequency')
    plt.axis([0, spacxa, 0, spacya])
    x = np.linspace(0, spacxa, num=50)
    h = P.normpdf(x, mu_spac, sigma_spac) * spacxa / 50
    P.plot(x, h, 'k', linewidth=1.5)
    plt.axvline(mu_spac, color='b', linestyle='dashed', linewidth=2)
    plt.savefig(file_output)
    plt.cla()

    file_output = out_directory + 'Combined Void Frequency' + '.png'
    normal_hist(freq, 50, range=(0, voidxa), facecolor='red')
    mu_freq = np.mean(freq)
    sigma_freq = np.std(freq)
    plt.title(name_plot + '\nCombined Void Frequency')
    plt.text(.025, .2, r'$\mu=%.3f,\ \sigma=%.3f$' % (mu_freq, sigma_freq))
    plt.xlabel('Void Frequency')
    plt.ylabel('Frequency')
    plt.axis([0, voidxa, 0, voidya])
    x = np.linspace(0, voidxa, num=50)
    g = P.normpdf(x, mu_freq, sigma_freq) * voidxa / 50
    P.plot(x, g, 'k', linewidth=1.5)
    plt.axvline(mu_freq, color='b', linestyle='dashed', linewidth=2)
    plt.savefig(file_output)
    plt.cla()

    file_count = len(files)

    #Calling the programs from Calibration.py and Excel.py
    calibration(voxel, out_directory, v, file_count, skip_row, count_i)
    excel(out_directory, air_con, paste_air, avg_ch, spac_fac, freq, name_plot)
Ejemplo n.º 47
0
            requestor.start_job(workers)
        worker_rep.append(worker_avg)
        requestor_rep.append(requestor_avg)
        time.append(x+1)
#        Add new workers
        for y in xrange(random.randint(1,20)):
            workers.append(Worker(x+1,getAverageRep(workers)))
        for y in xrange(random.randint(1,5)):
            requestors.append(Requestor(x+1,getAverageRep(requestors)))

        
    #Create lines for plot of average overall worker and requestor reputation over time
    work_line.set_xdata(time)
    work_line.set_ydata(worker_rep)
    req_line.set_ydata(requestor_rep)
    req_line.set_xdata(time)
    ax.relim()
    ax.autoscale_view()
    plt.draw()
    plt.show()
    
    #Plot histogram of all worker reputations
    P.figure()
    all_reputations = [x.get_reputation() for x in workers]
    sigma = P.std(all_reputations)
    mu = P.average(all_reputations)
    n, bins, patches = P.hist(all_reputations, 20, normed=1, histtype='step',cumulative=True)
    # overlay the expected CDF: accumulate the pdf and normalise it to end at 1
    y = P.normpdf(bins, mu, sigma).cumsum()
    y /= y[-1]
    P.plot(bins, y)
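For reference, a self-contained version of this cumulative overlay, assuming only numpy and pylab:

import numpy as np
import pylab as P

data = np.random.normal(0.0, 1.0, 5000)   # stand-in for the reputation values
n, bins, patches = P.hist(data, 20, normed=1, histtype='step', cumulative=True)
y = P.normpdf(bins, np.mean(data), np.std(data)).cumsum()
y /= y[-1]                                 # normalise so the CDF ends at 1
P.plot(bins, y, 'k--')
P.show()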
    
    
Ejemplo n.º 48
0
import os, time
import numpy as np
import pylab as P
megPath = '/scr/kuba2/Dohorap/Main/Data/MEG/motionCorrected/'
files = dict()
times = []
for dirname, dirnames, filenames in os.walk(megPath):
    for filename in filenames:
        if '_mc_hp004_ica.fif' in filename:
            f = os.path.join(dirname, filename)
            t = os.path.getmtime(f)
            if t > 1402680000.0:  # keep only files modified after ~2014-06-13
                times.append(t)
                files[t] = f
times.sort()
timeDifferences = []
for t in range(len(times) - 1):
    difference = times[t + 1] - times[t]
    print times[t], files[times[t]]
    if difference < 10000:
        timeDifferences.append(difference)
n, bins, patches = P.hist(timeDifferences, 10, normed=1, histtype='stepfilled')
P.setp(patches, 'facecolor', 'g', 'alpha', 0.75)
y = P.normpdf(bins, np.median(timeDifferences), np.std(timeDifferences))
l = P.plot(bins, y, 'k--', linewidth=1.5)
P.show()
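The pairwise-difference loop above can also be written with np.diff; a sketch assuming the same sorted times list:

# Equivalent to the explicit loop, minus the per-file printout.
diffs = np.diff(times)
timeDifferences = [d for d in diffs if d < 10000]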
Ejemplo n.º 49
0
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 25 09:26:40 2010

@author: -
"""

import datetime
import matplotlib.finance as f
import numpy as N
import pylab as P

if __name__ == '__main__':

    d1, d2 = datetime.date(2001, 1, 1), datetime.date(2010, 1, 1)
    sp = f.quotes_historical_yahoo('^GSPC', d1, d2, asobject=True, adjusted=True)
    returns = (sp.open[1:] - sp.open[:-1])/sp.open[1:]
    n, bins, patches = P.hist(returns, 100, normed=1)
    mu = N.mean(returns)
    sigma = N.std(returns)
    y = P.normpdf(bins, mu, sigma)
    P.plot(bins, y, color='red', lw=2)

    print returns
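If the histogram is left unnormalised (raw counts), the Gaussian must instead be scaled by the sample size times the bin width; a sketch under that assumption:

# Count-scale overlay: pdf * N * binwidth matches a count histogram.
n, bins, patches = P.hist(returns, 100)
binwidth = bins[1] - bins[0]
y = P.normpdf(bins, mu, sigma) * len(returns) * binwidth
P.plot(bins, y, color='red', lw=2)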
Ejemplo n.º 50
0
pi_ = np.array([pi_1, 1 - pi_1])  # mixture parameter

gamma = np.zeros((2, s.size))
N_ = np.zeros(2)
p_new = p0

# EM we start here
delta = 0.000001
improvement = float('inf')

counter = 0

while (improvement > delta):
    # Compute the responsibility func. and new parameters
    for k in [0, 1]:
        gamma[k, :] = pi_[k] * py.normpdf(s, mu[k], sig[k]) / pdf_model(
            s, p_new)  # responsibility

        N_[k] = 1. * gamma[k].sum()  # effective number of objects in category k
        mu[k] = sum(gamma[k] * s) / N_[k]  # new sample mean of category k
        sig[k] = np.sqrt(sum(gamma[k] * (s - mu[k])**2) /
                         N_[k])  # new sample std of category k
        pi_[k] = N_[k] / s.size  # new mixture weight of category k

    # updated parameters are passed to the next iteration
    p_old = p_new
    p_new = [mu[0], sig[0], mu[1], sig[1], pi_[0]]
    # check convergence
    improvement = max(abs(p_old[0] - p_new[0]), abs(p_old[1] - p_new[1]))
    counter += 1
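The fragment assumes the data s, initial parameters, and the pdf_model helper (Ejemplo n.º 53) already exist; a minimal, hypothetical setup would be:

# Synthetic two-Gaussian data and initial guesses (all values made up).
import numpy as np
import pylab as py

s = np.concatenate([np.random.normal(-1.0, 0.5, 500),
                    np.random.normal(2.0, 1.0, 500)])
mu = np.array([-0.5, 1.5])   # initial means
sig = np.array([1.0, 1.0])   # initial standard deviations
pi_1 = 0.5                   # initial mixture weight of component 0
p0 = [mu[0], sig[0], mu[1], sig[1], pi_1]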
Ejemplo n.º 51
0
import numpy as np
import pylab as P
mu, sigma = 200, 25
x = mu + sigma*P.randn(10000)
n, bins, patches = P.hist(x, 50, normed=1, histtype='stepfilled')
P.setp(patches, 'facecolor', 'g', 'alpha', 0.75)
y = P.normpdf( bins, mu, sigma)
l = P.plot(bins, y, 'k--', linewidth=1.5)
P.figure()
bins = [100,125,150,160,170,180,190,200,210,220,230,240,250,275,300]
n, bins, patches = P.hist(x, bins, normed=1, histtype='bar', rwidth=0.8)
P.figure()
n, bins, patches = P.hist(x, 50, normed=1, histtype='step', cumulative=True)
y = P.normpdf( bins, mu, sigma).cumsum()
y /= y[-1]
l = P.plot(bins, y, 'k--', linewidth=1.5)
sigma2 = 15.
x = mu + sigma2*P.randn(10000)
n, bins, patches = P.hist(x, bins=bins, normed=1, histtype='step', cumulative=True)
y = P.normpdf( bins, mu, sigma2).cumsum()
y /= y[-1]
l = P.plot(bins, y, 'r--', linewidth=1.5)
n, bins, patches = P.hist(x, bins=bins, normed=1,
    histtype='step', cumulative=-1)
P.grid(True)
P.ylim(0, 1.05)
P.figure()
x = mu + sigma*P.randn(1000,3)
n, bins, patches = P.hist(x, 10, normed=1, histtype='bar',
                            color=['crimson', 'burlywood', 'chartreuse'],
                            label=['Crimson', 'Burlywood', 'Chartreuse'])
P.legend()
Ejemplo n.º 52
0
#
# The hist() function now has a lot more options
#

#
# first create a single histogram
#
mu, sigma = 200, 25
x = mu + sigma*P.randn(10000)

# the histogram of the data with histtype='step'
n, bins, patches = P.hist(x, 50, normed=1, histtype='stepfilled')
P.setp(patches, 'facecolor', 'g', 'alpha', 0.75)

# add a line showing the expected distribution
y = P.normpdf( bins, mu, sigma)
l = P.plot(bins, y, 'k--', linewidth=1.5)


#
# create a histogram by providing the bin edges (unequally spaced)
#
P.figure()

bins = [100,125,150,160,170,180,190,200,210,220,230,240,250,275,300]
# the histogram of the data with histtype='step'
n, bins, patches = P.hist(x, bins, normed=1, histtype='bar', rwidth=0.8)

#
# now we create a cumulative histogram of the data
#
Ejemplo n.º 53
0
import pylab as py

def pdf_model(x, p):
    mu1, sig1, mu2, sig2, pi_1 = p
    return pi_1 * py.normpdf(x, mu1, sig1) + (1 - pi_1) * py.normpdf(
        x, mu2, sig2)
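For example, an equal mixture of N(0, 1) and N(3, 1) evaluated at x = 0 gives roughly 0.5*0.3989 + 0.5*0.0044, i.e. about 0.202:

print pdf_model(0.0, (0.0, 1.0, 3.0, 1.0, 0.5))  # ~0.2017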
Ejemplo n.º 54
0
    def callback(self, x):
        if self.callback_counter[0] % self.print_interval == 0:
            opt_params = self.unpack(x)
            params = extend(self.inputs, self.fixed_params, opt_params)

            if self.use_exact_A:
                opt_A_mean = self.vssgp.f['opt_A_mean'](**params)
                opt_A_cov = self.vssgp.f['opt_A_cov'](**params)
                if 'm' in self.fixed_params:
                    self.fixed_params['m'] = opt_A_mean
                    self.fixed_params['ls'] = opt_A_cov
                else:
                    opt_params['m'] = opt_A_mean
                    opt_params['ls'] = opt_A_cov

            pylab.clf()
            pylab.subplot(3, 1, 1)
            self.plot_func(params['X'], params['Y'], False)
            self.plot_predict(self.inputs['X'], opt_params, False)
            if 'X' in self.test_set:
                self.plot_func(self.test_set['X'], self.test_set['Y'], True)
                self.plot_predict(self.test_set['X'], opt_params, True)
            for c in xrange(self.components):
                pylab.scatter(params['Z'][0, :, c],
                              0 * params['Z'][0, :, c],
                              c=self.colours[c],
                              zorder=3,
                              edgecolors='none')

            hyp = np.exp(params['lhyp'].copy())
            sf2s = hyp[0]
            lss = hyp[1:1 + self.Q]
            ps = hyp[1 + self.Q:]
            mean_p, std_p = ps**-1, (2 * np.pi * lss)**-1  # Q x comp
            mu, Sigma = params['mu'].copy(), np.exp(params['lSigma'].copy())
            min_mean = (std_p[None, :] * mu[0, :, :] + mean_p[None, :]).min()
            max_mean = (std_p[None, :] * mu[0, :, :] + mean_p[None, :]).max()
            min_std = (std_p[None, :] * Sigma[0, :, :]).min()**0.5
            max_std = (std_p[None, :] * Sigma[0, :, :]).max()**0.5
            linspace = np.linspace(min_mean - 2 * min_std,
                                   max_mean + 2 * max_std, 1000)

            pylab.subplot(3, 1, 2)
            for c in xrange(self.components):
                pdf = pylab.normpdf(linspace, mean_p[:, c],
                                    np.maximum(std_p[:, c], 1e-5))  # floor the std so normpdf stays finite
                pylab.plot(linspace, pdf, c=self.colours[c], linewidth=1.0)
            pylab.ylim(0, 100)

            pylab.subplot(3, 1, 3)
            for c in xrange(self.components):
                for (mean, std) in zip(mu[0, :, c], Sigma[0, :, c]**0.5):
                    pdf = pylab.normpdf(linspace,
                                        std_p[:, c] * mean + mean_p[:, c],
                                        np.maximum(std_p[:, c] * std, 1e-5))  # same std floor as above
                    pylab.plot(linspace, pdf, c=self.colours[c], linewidth=1.0)
            pylab.ylim(0, 100)
            pylab.draw()

            print 'sf2 = ' + str(sf2s.squeeze())
            print 'l = ' + str(lss.squeeze())
            print 'p = ' + str(ps.squeeze())
            print 'tau = ' + str(np.exp(params['ltau']))
            print 'mu = '
            print params['mu'][:, :5, :]
            print 'Sigma = '
            print np.exp(params['lSigma'][:, :5, :])
            print 'm = '
            print params['m'][:5, :].T
            print 's = '
            print np.exp(params['ls'][:5, :].T)
            print 'a = ' + str(params['a']) + ', b = ' + str(params['b'])
            print 'EPhi = '
            EPhi = self.vssgp.f['EPhi'](**params)
            print EPhi[:5, :5]
            LL = self.vssgp.f['LL'](**params)
            KL = self.vssgp.f['KL'](**params)
            print LL - KL
        self.callback_counter[0] += 1
Ejemplo n.º 55
0
def tailfit(filename,
            display=None,
            start_point=None,
            direction='down',
            output_jpegs=False,
            plotlengths=False,
            tail_startpoint=None,
            scale=0.5):
    # avi, 1st time:   fittedtail,startpoint,  direction, FPS, numframes  = tailfit(videopath,(first or not displayonlyfirst) and display ,startpoints)
    # fittedtail,startpoint,  direction, FPS, numframes  = tailfit(videopath,(first or not displayonlyfirst) and display ,startpoints[i])
    # Question. Keep eyes on how does start_point work?
    '''
    Takes an avi filepath, fits the tail of the fish
    Display sets if the fit is shown as it is processed (slower)
    Start point is where fitting begins, if None the user is queried
    Direction is which direction the fit happens
    '''
    '''1ST PART. INITIATE THE PARAMETERS AND READ THE FRAME'''
    directions = {
        "up": [0, -1],
        "down": [0, 1],
        "left": [-1, 0],
        "right": [1, 0]
    }
    # Question. up and down are inversed?
    fitted_tail = []

    ##  print filename, os.path.exists(filename)
    cap = cv2.VideoCapture(filename)  ########DT error here...tag
    if not cap.isOpened():
        print "Error with video or path!"
        raise Exception('Issues opening video file!')

    frame = cap.read()[1]
    frame = cv2.resize(
        frame, (0, 0), fx=scale, fy=scale,
        interpolation=cv2.INTER_CUBIC)  #[ADJUST] resize-tag #resize the frame!

    # Question. abt the grammar? why not start from [0]
    # Answer. cv2.VideoCapture.read([image]) returns tuple (retval, image), so frame only takes the returned image
    cv2.destroyAllWindows()

    max_points = 200  # mostly in case it somehow gets stuck in a loop, and to preallocate the result array
    # Question. meaning of this max_points?

    frame_fit = np.zeros((max_points, 2))
    # frame_fit is a 2d array, tuple inside the np.zeros() defines the shape of frame

    first_frame = True
    # first_frame just told the program if it is processing the first frame
    widths, convolveresults = [], []
    test, slices = [], []
    '''2ND PART. ANALYSIS FRAME ONE BY ONE'''
    while type(frame) != type(None):
        # LOOP NO.1: the outermost while loop; it continues until every frame has been read.
        # Question. how to break this loop? Answer. At the end, when next read of frame returns None

        if display:
            # display in main-function is boolean
            frame_display = frame.copy()
        if direction:
            guess_vector = np.array(directions[direction])
            # guess_vector represent which direction the fit happens
        else:
            raise Exception('Need to define a direction!')  #could ask here
        '''2-1. IF FIRST FRAME'''
        '''This 2-1. session is only implemented one time during the 1st frame'''
        if first_frame:
            # Note, first_frame=True, this is defined outside the while loops

            #TODO try to ID the head?
            #predict tail pos, and dir?
            #then query user for ok
            #detect things aligned the same as before, by looking for big shifts between first frames of video?
            '''2-1.1 SET THE STARTPOINT'''
            #SET THE STARTPOINT. if we don't have a start point, query user for one
            if type(start_point) == type(np.array(
                [])) or type(start_point) is list:
                current = np.array(start_point)
                point = current
            elif str(type(tail_startpoint)) != "<type 'NoneType'>":
                start_point = tail_startpoint
                point = start_point
                current = start_point
            else:
                handlec = handleclick
                cv2.namedWindow('first')
                cv2.imshow("first", frame)
                cv2.moveWindow('first', 0, 0)
                #would be nice to raise window, since it doesn't always spawn top

                cv2.waitKey(10)
                point = np.array([-1, -1])
                cv2.setMouseCallback("first", handlec, point)
                # cv2.setMouseCallback(windowName, onMouse[, param])
                print "Click on start of the fish's tail"
                cv2.waitKey(
                    10)  # Question. difference between 0 and 10?    #tag
                while (point == np.array([-1, -1])).all(
                ):  # Question. this all() is strange ... Answer. point is a list/array
                    cv2.waitKey(10)
                current = point
                start_point = current
                print 'start point is ', start_point
                # the start_point is set here.... it seems to be correlated to threshold as well?
                cv2.destroyWindow('first')
                # TODO Simplify the code, too redundant

            # NOTE. CONFIRMED. current can be accessed outside the if & else
            # DT-Code: print 'current can be accessed outside the if & else, like: ', current
            '''2-1.2 ILLUMINATION ANALYSIS FOR BG & FISH'''
            # BUILD THE HISTOGRAM, frame is np.ndarray, 2D-gray scale, 3D-RGB
            if frame.ndim == 2:
                hist = np.histogram(frame[:, :], 10, (0, 255))
                # Question. 255 can't be divided by 10?   A. 10 means the number of bars, not the interval
                # hist/returned value of np.histogram is a tuple, the first item is occurrence in each bin, the second item is bin
                # numpy.histogram(a, bins=10, range=None, normed=False, weights=None, density=None)
                # If bins is an int, it defines the number of equal-width bins in the given range (10, by default). If bins is a sequence, it defines the bin edges, including the rightmost edge, allowing for non-uniform bin widths.
            elif frame.ndim == 3:
                # Task. maybe this branch is for RGB videos, try this later
                hist = np.histogram(frame[:, :, 0], 10, (0, 255))
                # Question. the meaning of this sentence?
                # Question. if frame is 3D then it should be converted into gray scale??? Can't just take R to construct histogram
            else:
                raise Exception('Unknown video format!')

            # find background - 10 bin hist of frame, use most common as background
            background = hist[1][hist[0].argmax()] / 2 + hist[1][min(
                hist[0].argmax() + 1, len(hist[0]))] / 2
            # DT-CODE. print 'hist: ', hist
            # Note. CONFIRMED. hist[1] should have one more item than hist[0].
            # Note. CONFIRMED. this histogram is only calculated one time for 1st frame
            # Q. why add len(hist[0]) A. to avoid argmax()+1 bigger than len
            # Q. why divided by 2?  A. they take the middle value of the most frequent bar
            # np.argmax(), Returns the indices of the maximum values along an axis.

            # find fish luminosity - area around point
            if frame.ndim == 2:
                fish = frame[point[1] - 2:point[1] + 2,
                             point[0] - 2:point[0] + 2].mean()
                # TASK-DONE. I want to draw and see how big is this chosen area?
                # A. The drawn area is quite small and is within the contour of the fish
                # Note. CONFIRMED. point is the start point set by user
                # Question. why point[1] is the first dimension? Why reverse the x and y axis?
                # fish is like the average grayscale/brightness of the fish image
                # numpy.ndarray.mean(), Returns the average of the array elements along given axis.
            elif frame.ndim == 3:
                fish = frame[point[1] - 2:point[1] + 2,
                             point[0] - 2:point[0] + 2, 0].mean()
            '''2-1.3 BUILD THE GAUSSIAN KERNEL & SET DISPLAY '''
            print "Starting tailfit on:  ", filename
            FPS = cap.get(cv2.cv.CV_CAP_PROP_FPS)
            # CV_CAP_PROP_FPS represent Frame rate
            numframes = int(cap.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT))
            # total frame number

            guess_line_width = 51  ###PARAMETERS
            # Question. the meaning of guess_line_width?
            # Question. why bother to divide 4?

            # gaussian kernel, used to find middle of tail  #tag
            normpdf = pylab.normpdf(
                np.arange((-guess_line_width + 1) / 4 + 1,
                          (guess_line_width - 1) / 4), 0, 8)  ###PARAMETERS
            # JULIE-Question. what if I change the mu and sigma? What is the function of this? Why can it be used to find the middle of the tail?
            # [ADJUST] !!! I think the parameters related to this Gaussian has some relationship with
            # return a 1-D numpy.array with 24 items, delineate the contour of the Gaussian
            # "Return the normal pdf evaluated at *x*; args provides *mu*, *sigma*"
            # return 1./(np.sqrt(2*np.pi)*sigma)*np.exp(-0.5 * (1./sigma*(x - mu))**2)
            # numpy.arange([start, ]stop, [step, ]dtype=None), Return a numpy.ndarray, evenly spaced values within a given interval. The default step size is 1. If step is specified, start must also be given.

            # Display sets if the fit is shown as it is processed (slower)
            if display:
                cv2.namedWindow("frame_display")
                cv2.moveWindow("frame_display", 0, 0)

            starttime = time.time()
            # Return the current time in seconds since the Epoch.

        else:  #task. temporarily skip
            # Question. the if above did so many thing while this else only did this????
            current = fitted_tail[-1][0, :]
            # Question. why there could be [-1]?
            # Question. when is fitted_tail filled? what does this means?
        '''2-2. SET SPACING'''
        # change this multiplier to change the point spacing
        tailpoint_spacing = 5
        '''2-3.FIT THE TAIL WITH CIRCLES IN THIS FRAME (BIG FOR-COUNT LOOP)'''
        # Question. so the whole for loops simply just analysis one tail in one frame?
        # Question. Come on! the meaning of guess?
        # A. should be the point that delineate the contour of tail

        for count in range(max_points):
            # Note. this big for loop breaks when count meets the tail_length, which is the total circles drawn to fit the tail.
            # Question. difficult. But when is tail_length defined?
            '''2-3.1 SET THE GUESS POINT/GUESS_LINE'''
            '''2-3.1.1 GUESS IS THE NEXT FITTED POINTS'''
            if count == 0:
                guess = current  ###tag
                # Question. what is current? A. should be the startpoint
                # task. temp skip the following case
            elif count == 1:
                # DT-CODE:
                # print 'count: ', count
                # print 'guess_vector: ', guess_vector
                # print 'current: ', current
                # print 'before calculation guess is: ', guess
                guess = current + guess_vector * tailpoint_spacing  #can't advance guess vector, since we didn't move from our previous point
                # DT-CODE: print 'after calculation guess is: ', guess
                # Question. what is the difference between guess and new_point?
                # A. it's like guess just confirm the location of guess_slice, guess can be close to new_point, but may not be the same
                # A. new_point is the accurate fit based on the estimation of illumination
            else:
                guess_vector = guess_vector / (
                    ((guess_vector**2).sum())**.5)  #normalize guess vector
                # Question. what if you did not normalize it?
                # A. you will get only two point with large interval
                guess = current + guess_vector * tailpoint_spacing
            '''2-3.1.2 DRAW THE START AND END'''
            # TASK-DIFFICULT TO UNDERSTAND THE SEMANTIC
            # NOTE. start and end is a line vertical to the direction of tail with length of guess_line_width
            guess_line_start = guess + np.array([
                -guess_vector[1], guess_vector[0]
            ]) * guess_line_width / 2  #####tag
            # QUESTION. DIDN'T GET IT... WHEN DID WE GET THIS?
            # directions={"up":[0,-1],"down":[0,1],"left":[-1,0],"right":[1,0]}
            # guess here for the very first time is just start point
            guess_line_end = guess + np.array(
                [guess_vector[1], -guess_vector[0]]) * guess_line_width / 2
            # Question. I understand the grammar above, but not the sematic

            x_indices = np.int_(
                np.linspace(guess_line_start[0], guess_line_end[0],
                            guess_line_width))
            y_indices = np.int_(
                np.linspace(guess_line_start[1], guess_line_end[1],
                            guess_line_width))
            # default interval for np.linspace should be 1
            # 51 = guess_line_width items in x_indices
            # returned x_indices & y_indices are type 'numpy.ndarray'
            # numpy.linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None)
            # np.linspace, Return evenly spaced numbers over a specified interval.
            # num, Number of samples to generate. Default is 50. Must be non-negative.
            # NumPy knows that int refers to np.int_
            '''2-3.1.3 JUDGE IF THE CLIP IS PROPER'''
            # TASK. I am not sure how useful this session would be. wait and see
            if max(y_indices) >= frame.shape[0] or min(y_indices) < 0 or max(
                    x_indices) >= frame.shape[1] or min(x_indices) < 0:
                # Question. I don't understand why is the x and y reversed? x should correspond to frame[0]
                # Answer. IT IS NOT REVERSED! THE INNER DIMENSION CORRESPONDS TO X!
                y_indices = np.clip(y_indices, 0, frame.shape[0] - 1)
                x_indices = np.clip(x_indices, 0, frame.shape[1] - 1)
                print "Tail got too close to the edge of the frame, clipping search area!"
                #TODO if too many values are clipped, break?
            '''2-3.1.4 DRAW THE GUESS_SLICE'''
            # y_indices and x_indices are np.ndarray!!!!!!
            guess_slice = frame[y_indices, x_indices]
            # DT-code.
            # print 'frame: ', frame
            # print 'shape of frame: ', frame.shape
            # print 'x_indices: ', x_indices
            # print 'y_indices: ', y_indices
            # print 'shape of x_indices: ', x_indices.shape
            # print 'shape of y_indices: ', y_indices.shape
            # print 'guess_slice: ', guess_slice
            # print 'shape of guess_slice: ', guess_slice.shape
            # DT-CODE: frame_display[y_indices, x_indices] = 255  # right way to modify the frame

            # guess_slice is a line that is vertical to the direction of the tail with centre as start_point with two ends as start and end
            # guess_slice is the line with each point coordinate as x & y represented by y_indices & x_indices
            # TASK DIFFICULT! the frame is transposed compared to what might be expected Question. how does this transposition work?
            # JULIE-Question. y and x always inverse?
            # Answer. IT IS NOT REVERSED! THE INNER DIMENSION CORRESPONDS TO X!

            # S-Question. what is the meaning of this if session?
            if guess_slice.ndim == 2:
                guess_slice = guess_slice[:, 0]
                # Question. why only keep the first row in guess_slice
            else:
                guess_slice = guess_slice[:]
                # enter else for the 1st frame in avi situation
            '''2-3.2 BASELINE SUBSTRACTION'''
            if fish < background:
                # fish is like the average grayscale/brightness of the fish image
                guess_slice = (background - guess_slice)
            else:
                guess_slice = (guess_slice - background)
                # Julie-Question. why do the substraction???

            slices += [guess_slice]  ######tag
            # Question. real meaning?
            # Note. Question. difficult. they create lot of 's' to collect variables, how are they going to be used?

            hist = np.histogram(guess_slice, 10)
            # numpy.histogram(a, bins=10, range=None, normed=False, weights=None, density=None)
            # range is naturally set

            #DT-CODE
            #print 'guess_slice, before: ', guess_slice
            #print 'hist: ', hist
            #print '(hist[1][hist[0].argmax()] <= guess_slice): ', hist[1][hist[0].argmax()] <= guess_slice
            #print 'guess_slice<hist[1][hist[0].argmax()+1]: ', guess_slice<hist[1][hist[0].argmax()+1]
            #print 'guess_slice[((hist[1][hist[0].argmax()] <= guess_slice)&(guess_slice<hist[1][hist[0].argmax()+1]))]: ', guess_slice[((hist[1][hist[0].argmax()] <= guess_slice)&(guess_slice<hist[1][hist[0].argmax()+1]))]

            #DT-CODE
            #plt.plot(guess_slice)
            #plt.ylabel('before')
            #plt.show()
            guess_slice = guess_slice - guess_slice[(
                (hist[1][hist[0].argmax()] <= guess_slice) &
                (guess_slice < hist[1][hist[0].argmax() + 1]))].mean()  #tag
            # QUESTION. WHAT IS THE MEANING OF THIS PROCESSING?
            # Note. CONFIRMED. Baseline substraction, build a histogram and substract the the mean value of smallest bin
            # Question. principles underlying this ?
            # A. baseline substraction. draw a histogram based on guess_slice and choose the interval with most items and get the mean of these items and substracted it
            # & means only keep the items that is both true in either formula
            # only the item corresponds to true could be selected out to remain
            # Question. <=, hist[1][index] is integer, can't be comparied with guess_slice, a  right????
            # A. return value is going to be a matrix containing boolean, same shape of guess_slice
            '''2-3.3 FILTER! SMOOTH THE GUESS_SLICE '''
            # Note. this seems to do a nice job of smoothing out while not moving edges too much
            sguess = scipy.ndimage.filters.percentile_filter(
                guess_slice, 50, 5)
            # QUESTION. does this have to be length 51?

            # WHAT DOES THIS sguess actually do?
            # QUESTION. WILL THIS NUMBER 50 OR 5 EFFECTS THE FITTING?
            # Task. DIFFICULT! JULIE-Question. I want to know the principle of this filter and exactly how this sguess is calculated. why you choose this one
            # Note. CONFIRMED. type of sguess is <type 'numpy.ndarray'>, with 51 items, this 'filter' is not constant, change every frame?
            # scipy.ndimage.filters.percentile_filter(input, percentile, size=None, footprint=None, output=None, mode='reflect', cval=0.0, origin=0)

            # DT-CODE
            # plt.plot(guess_slice)
            # plt.plot(sguess)
            # plt.ylabel('before')
            # plt.show()
            '''2-3.4, 1ST FRAME-1, DELINEATE ALL THE NEWPOINT'''
            if first_frame:
                # first time through, profile the tail

                # DT-CODE
                # print 'sguess>(sguess.max()*.25: ', sguess>(sguess.max()*.25)
                # print 'np.diff(sguess>(sguess.max()*.25)): ', np.diff(sguess>(sguess.max()*.25))
                # print 'np.where(np.diff(sguess>(sguess.max()*.25))): ', np.where(np.diff(sguess>(sguess.max()*.25)))
                '''2-3.4.1 DEFINE THE EDGE OF TAIL AND FIND THE MID-POINT'''
                tailedges_threshold = 0.45
                # the smaller the value, the tighter the tailfitting would be, less dots
                tailedges = np.where(
                    np.diff(sguess > (sguess.max() * tailedges_threshold)))[0]
                # [ADJUSTMENT] The original value is .25 ....
                # QUESTION. WHY SOMETIMES TAILEDGES COULD HAVE MULTIPLE COMPONENTS?
                # Note. Use 1/4 of max to define the edge of tail!
                # Note. returned tailedges is (array([17, 32])) for one frame, item correspond to the index in list
                # numpy.where(condition[, x, y]), Return elements, either from x or y, depending on condition.
                # numpy.diff(a, n=1, axis=-1), Calculate the n-th order discrete difference along given axis.
                # numpy.diff would return the value correspond to True, if the two items are Boolean and different

                if len(tailedges) >= 2:
                    # Question. Normally, we won't see this len(tailedges) bigger than 2, right?
                    # DT-CODE: print 'tailedges: ', tailedges
                    # DT-CODE: print 'tailedges-len(sguess)/2.0: ', tailedges-len(sguess)/2.0

                    tailedges = tailedges - len(sguess) / 2.0
                    # S-Question. why minus this one, len(sguess)/2.0?
                    # DT-CODE: print 'np.argsort(np.abs(tailedges)): ', np.argsort(np.abs(tailedges))
                    # DT-CODE: print 'tailedges[np.argsort(np.abs(tailedges))[0:2]]: ', tailedges[np.argsort(np.abs(tailedges))[0:2]]

                    tailindexes = tailedges[np.argsort(np.abs(tailedges))[0:2]]
                    # so this operation just sorted the tailedges, and only keep the biggest two items
                    # np.argsort, Returns the indices that would sort an array.
                    # DT-CODE: print '(tailindexes).mean()+len(sguess)/2.0: ', (tailindexes).mean()+len(sguess)/2.0

                    result_index_new = (tailindexes).mean() + len(sguess) / 2.0
                    # QUESTION. WHY ADD len(sguess)?
                    # Note. result_index_new is the mid point of tailedges
                    # Note. this complex calculation can be replaced with: result_index_new = tailedges.mean()

                    widths += [abs(tailindexes[0] - tailindexes[1])]
                    # Note. widths of the tail
                else:
                    result_index_new = None
                    tail_length = count
                    # Note. means the stop of guessing!
                    break
                '''2-3.4.2 CONVOLUTION & NEWPOINT'''
                results = np.convolve(normpdf, guess_slice, "valid")
                # Note. so normpdf here acts like a kernel to process the guess_slice?
                # Note. results looks like Gaussian after processing
                # 'valid', Mode 'valid' returns output of length max(M, N) - min(M, N)+1. The convolution product is only given for points where the signals overlap completely. Values outside the signal boundary have no effect.

                convolveresults += [results]
                ##              test+=[guess_slice.mean()]

                result_index = results.argmax(
                ) - results.size / 2 + guess_slice.size / 2
                #Q. why is result_index calculated this way?
                #A. yeap, result_index is the peak, here they want find the orginal position of the peak in guess_slice
                #Q. the result_index caculated for the first frame, is it going to be used?
                #A. I think it is going to be accessed in else part, in other frame.

                newpoint = np.array([
                    x_indices[int(result_index_new)],
                    y_indices[int(result_index_new)]
                ])  #DT-modification: add int() here
                # QUESTION. WHEN DOES THE NEWPOINT ACTUALLY GOES ONE STEP MORE???
                #!!! NOTES... newpoint is actually the final output of this tailfitting
                #QUESTION. what is the function for result_index/convolveresults/result?

                #####################################################################################################################
                # DT CODE: print newpoint, ' is the newpoint of ', count, 'count'
                # Note. CONFIRMED: each time only one newpoint is produced.
                # Question. what is the meaning of newpoint?
                # Answer. IT iS the new point growing along the axis?

            else:  ############task. temp omit#################### ##########must come back and figure this out!#############
                results = np.convolve(tailfuncs[count], guess_slice, "valid")
                result_index = results.argmax(
                ) - results.size / 2 + guess_slice.size / 2
                newpoint = np.array(
                    [x_indices[result_index], y_indices[result_index]])
            '''2-3.5, 1ST FRAME-2, FUNCTION UNKNOWN'''
            if first_frame:
                '''2-3.5.1 CHECK FITTING SESSION, BREAK IF NECCESSARY'''
                # task. don't really understand the principles of this session but... let it be
                if count > 10:
                    #@ SCALE FIT GOODNESS WITH CONTRAST
                    trapz = [
                        pylab.trapz(result - result.mean())
                        for result in convolveresults
                    ]  #tag
                    # Integrate along the given axis using the composite trapezoidal rule. Integrate y(x) along given axis.
                    # numpy.trapz(y, x=None, dx=1.0, axis=-1)
                    # x: array_like, optional. The sample points corresponding to the y values. If x is None, the sample points are assumed to be evenly spaced dx apart. The default is None.
                    # dx: scalar, optional. The spacing between sample points when x is None. The default is 1.
                    # Question. what is the real meaning of this...?
                    # Answer. trapz should be a list storing the 'area under curve' of convolveresults
                    # Question. why calculate sth like this?

                    slicesnp = np.vstack(slices)
                    # slices, a list storing guess_slice
                    # np.vstack, Stack arrays in sequence vertically (row wise). Just put the arrays together without changing the shape or any items.

                    if np.array(trapz[-3:]).mean() < .2:  #tag
                        # Question. what is the meaning of this array?
                        ##                        pdb.set_trace()
                        tail_length = count
                        break
                    elif slicesnp[-1, result_index - 2:result_index +
                                  2].mean() < 5:
                        # slicenp is a 2-d array, using -1 to access the last row, then using result_index-2:result+2 to access the corresponding part of array
                        ##                    elif -np.diff(sliding_average(slicesnp.mean(1)),4).min()<0:
                        ##                    elif np.diff(scipy.ndimage.filters.percentile_filter(trapz,50,4)).min()<-20:
                        ##                        print np.abs(np.diff(trapz))
                        ##                        pdb.set_trace()
                        tail_length = count
                        break
##            elif count > 1 and pylab.trapz(results-results.mean())<.3: #lower means higher contrast threshold
            elif count > tail_length * .8 and np.power(
                    newpoint - current, 2).sum()**.5 > tailpoint_spacing * 1.5:
                # Question. what does the above condition actually mean???
                # let's assume newpoint is the new growing point from the recognized tail?
                # semantically, current should be the current-point (start point when first time)
                # np.power, First array elements raised to powers from second array, element-wise.
                ##                print count, ' Point Distance Break', np.power(newpoint-current,2).sum()**.5
                break

            elif count == tail_length:
                # Question. difficult. when is tail_length defined?
                break  #should be end of the tail
#threshold changes with tail speed?
#also could try overfit, and seeing where the elbow is
            '''2-3.6 DRAW THE CIRCLES ALONG THE TAIL, UPDATE VECTORS AND THEN CURRENT'''
            if display:
                cv2.circle(frame_display, (int(newpoint[0]), int(newpoint[1])),
                           2, (0, 0, 0))  #tag
                # DT CODE: print 'newpoint: ', newpoint
                # Note. CONFIRMED: circle is drawed one by one, newpoint is simple list consists of two items
                # cv2.circle(img, center, radius, color[, thickness[, lineType[, shift]]]), returns img
                # frame_display is defined by this: frame_display=frame.copy()
##                frame_display[y_indices,x_indices]=0

            frame_fit[count, :] = newpoint
            # frame_fit=np.zeros((max_points,2))
            # put the newpoint into the frame_fit array, a 2D array

            if count > 0:
                guess_vector = newpoint - current
            # Question. function of this if block?
            # A. guess_vector gives the direction of guess, current is old point

            current = newpoint  #####################################################################tag################################################
            # update the current with the newpoint

            #@ autoscale guess line width and then regen normpdf
##        trapz = [pylab.trapz(result-result.mean()) for result in convolveresults]
##        pylab.scatter(range(len(trapz)),trapz)
##        pylab.figure()
##        td = sliding_average(np.abs(np.diff(trapz)),5)
##
##        pylab.scatter(range(len(td)),td,c='r');pylab.show()
##        pylab.axhline()

##        pylab.plot(trapz)
####        pylab.plot(np.abs(np.diff(scipy.ndimage.filters.percentile_filter(trapz,50,4))))
##        pylab.plot(scipy.ndimage.filters.percentile_filter(trapz,50,4))
##        pylab.plot(test)
####        pylab.plot(slices
##        slices = np.vstack(slices)
##        pylab.show()
##
##        pylab.plot(sliding_average(slices.mean(1)));
##        pylab.plot(np.abs(np.diff(sliding_average(slices.mean(1),8))));
##        pylab.plot(-np.diff(sliding_average(slicesnp.mean(1)))[:45]);pylab.show()
##        pdb.set_trace()
        '''2-4. STRANGE SWIDTHS, FINALLY! JUMP OUT OF FOR-COUNT'''
        if first_frame:
            # first_frame just told the program if it is processing the first frame

            swidths = scipy.ndimage.filters.percentile_filter(
                widths, 50, 8)  #task...temporarily just keep it
            # Julie-Question. meaning of this? and width, please
            # DT code

            swidths = np.lib.pad(swidths, [0, 5], mode='edge')  #tag bug
            # Note. Bug. IndexError: index -1 is out of bounds for axis 0 with size 0
            # np.lib.pad, choose the last item of swidths and add
            # Question. why pads the fish?
            # numpy.pad(array, pad_width, mode, **kwargs), Pads an array

            tailfuncs = [
                tail_func2(
                    np.arange((-guess_line_width + 1) / 4 + 1,
                              (guess_line_width - 1) / 4), 0, swidth, 1, 0)
                for swidth in swidths
            ]  #tag
            # Note. guess_line_width = 51
            # Note. def tail_func2(x, mu, sigma, scale, offset)
            # Question. so swidth is going to be sigma? why is that????
        '''2-5. APPEND FITTED_TAIL'''
        fitted_tail.append(np.copy(frame_fit[:count]))
        # DT-CODE: print 'fitted_tail looks like this: ', fitted_tail
        # NOTE. CONFIRMED. fitted_tail is the final result of the whole program.
        # NOTE. CONFIRMED. it is a list, storing arrays with the total number of total frame analyzed/read
        # NOTE. CONFIRMED. each correspond to one frame, storing x number of points (x=tail_lengh/count)
        # NOTE. CONFIRMED. points is the fitted result_point(mid point of tail edge), it is the coordinate in each frame
        # NOTE. CONFIRMED. count would be the final number of total circles.
        '''2-6. DISPLAY THE FRAME!'''
        if display:
            cv2.putText(frame_display, str(count), (340, 25),
                        cv2.FONT_HERSHEY_SIMPLEX, 1.0, (225, 10, 20))
            cv2.putText(frame_display, str(len(fitted_tail) - 1), (15, 25),
                        cv2.FONT_HERSHEY_SIMPLEX, 1.0, (25, 10, 20))
            #-1 because the current frame has already been appended
            cv2.imshow("frame_display", frame_display)
            # cv2.waitKey(0)  #DT-CODE: Manual control to analyze the frame one by one
            if first_frame:
                delaytime = 1
                # Question. unit ms?
            else:
                minlen = min(
                    [fitted_tail[-2].shape[0], fitted_tail[-1].shape[0]]) - 1
                delaytime = int(
                    min(
                        max((np.abs(
                            (fitted_tail[-2][minlen, :] -
                             fitted_tail[-1][minlen, :])**2).sum()**.5)**1.2 *
                            3 - 1, 1), 500))


##              # Question. why calculate delay time in a such trouble way?
            cv2.waitKey(delaytime)
        '''2-7. OUTPUT JPEG'''
        #task. temp omit
        if output_jpegs:
            if first_frame:
                jpegs_dir = pickdir()
                if not os.path.exists(jpegs_dir):
                    os.makedirs(jpegs_dir)
            jpg_out = Image.fromarray(frame_display)
            jpg_out.save(
                os.path.normpath(jpegs_dir + '\\' + str(len(fitted_tail) - 1) +
                                 '.jpg'))
        '''2-8. FALSE 1ST FRAME AND READ NEXT FRAME'''
        first_frame = False
        # cap.set(cv2.cv.CV_CAP_PROP_POS_FRAMES,float(len(fitted_tail)) );  #workaround for raw videos crash, but massively (ie 5x) slower
        s, frame = cap.read()
        if s:  # Only process valid image frames
            frame = cv2.resize(
                frame, (0, 0),
                fx=scale,
                fy=scale,
                interpolation=cv2.INTER_CUBIC)  #resize-tag #resize the frame!

        # turn off the first_frame and update the frame with next frame in video
        # cv2.VideoCapture.read([image]) returns tuple (retval, image), so frame only takes the returned image
    print "Done in %.2f seconds" % (time.time() - starttime)
    '''3RD PART. WARNING SYSTEM'''
    'DT-NOTE FOLLOWING IS LIKE WARNING SYSTEM, TEMPORARILLY SKIP THEM'
    fit_lengths = np.array([len(i) for i in fitted_tail])  ########tag########
    if np.std(fit_lengths) > 3 or plotlengths:
        print 'Abnormal variances in tail length detected, check results: ', filename
        pylab.plot(range(0, len(fitted_tail)), fit_lengths)
        pylab.ylim((0, 5 + max(fit_lengths)))
        pylab.xlabel("Frame")
        pylab.ylabel('Tail points')
        pylab.title('Tail fit lengths')
        print 'Close graph to continue!'
        pylab.show()

    if any(fit_lengths < 25):
        print "Warning - short tail detected in some frames - min: ", min(
            fit_lengths)

    if len(fitted_tail) != int(cap.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT)):
        print "Warning - number of frames processed doesn't match number of video frames - can happen with videos over 2gb!"
        print "Frames processed: ", len(fitted_tail)
        print "Actual frames according to video header: ", int(
            cap.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT))
    '''4TH PART. DESTROYWINDOW AND RETURN!'''
    cv2.destroyAllWindows()
    return fitted_tail, start_point, direction, FPS, numframes, tailedges_threshold
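A hedged usage sketch; the video path and start point below are placeholders, not real data:

# Hypothetical call (requires cv2, pylab, scipy and the helpers used above).
fitted, start, direction, FPS, numframes, threshold = tailfit(
    'fish_video.avi', display=True, start_point=[100, 50], direction='down')
print '%d frames fitted at %.1f FPS' % (numframes, FPS)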
Ejemplo n.º 56
0
#!/usr/bin/env python

# python script to test cool normal-distribution thing in shogun
# this code is under the "All Ur Base R Belong To Us" license
# what that entails exactly i'm not sure...
# so if you figure it out let me know ok?
# --serialhex

# must have the Shogun toolbox (http://www.shogun-toolbox.org/)
# && Matplotlib (http://matplotlib.sourceforge.net/) installed to use
from shogun.Library import Math
import pylab


def fill_list(x):
    a = 0
    b = []
    while a < x:
        b.append(Math.normal_random())
        a += 1
    return b


# how many values do you want to test this with?
nums = fill_list(1000000)

# thanks to Sergey Lisitsyn for (most of) the following...
# i still need to wake up!
n, bins, patches = pylab.hist(nums, 100, normed=1)
pylab.plot(bins, pylab.normpdf(bins, 0, 1), 'k--')
pylab.show()
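The same sanity check can be run against numpy's generator for comparison (a sketch, not part of the shogun example):

import numpy as np
nums_np = np.random.normal(0.0, 1.0, 1000000)
n, bins, patches = pylab.hist(nums_np, 100, normed=1)
pylab.plot(bins, pylab.normpdf(bins, 0, 1), 'k--')
pylab.show()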
Ejemplo n.º 57
0
    def estimate(self, guess=None, k=2):
        """

        :param list guess: a list to provide the initial guess. Order is mu1, sigma1,
            pi1, mu2, ...
        :param int k: number of models to be used.
        """
        #print("EM estimation")
        self.k = k
        # Initial guess of parameters and initializations
        if guess is None:
            # estimate the mu/sigma/pis from the data
            guess = self.get_guess()

        mu = np.array(guess[0::3])
        sig = np.array(guess[1::3])
        pi_ = np.array(guess[2::3])
        N_ = len(pi_)

        gamma = np.zeros((N_, int(self.size)))
        N_ = np.zeros(N_)
        p_new = guess

        # EM loop
        counter = 0
        converged = False

        self.mus = []

        while not converged:
            # Compute the responsibility func. and new parameters
            for k in range(0, self.k):
                # unstable if self.model.pdf is made of zeros

                #self.model.pdf(self.data, p_new,normalise=False).sum()!=0:
                gamma[k, :] = pi_[k] * pylab.normpdf(self.data, mu[k], sig[k])
                gamma[k, :] /= (self.model.pdf(self.data,
                                               p_new,
                                               normalise=False))
                """else:
                    gamma[k, :] = pi_[k]*pylab.normpdf(self.data, mu[k],
                        sig[k])/(self.model.pdf(self.data, p_new,
                            normalise=False)+1e-6)
                """
                N_[k] = gamma[k].sum()
                mu[k] = np.sum(gamma[k] * self.data) / N_[k]
                sig[k] = pylab.sqrt(
                    np.sum(gamma[k] * (self.data - mu[k])**2) / N_[k])
                pi_[k] = N_[k] / self.size

            self.results = {'x': p_new, 'nfev': counter, 'success': converged}

            p_new = []
            for this in range(self.k):
                p_new.extend([mu[this], sig[this], pi_[this]])

            #p_new = [(mu[x], sig[x], pi_[x]) for x in range(0, self.k)]
            #p_new = list(pylab.flatten(p_new))

            self.status = True
            try:
                assert abs(N_.sum() - self.size) / self.size < 1e-6
                assert abs(pi_.sum() - 1) < 1e-6
            except AssertionError:
                print("issue arose at iteration %s" % counter)
                self.debug = {'N': N_, 'pis': pi_}
                self.status = False
                break

            self.mus.append(mu)

            # Convergence check (this implementation simply stops after max_iter iterations)
            counter += 1
            converged = counter >= self.max_iter

        self.gamma = gamma

        if self.status is True:
            self.results = {'x': p_new, 'nfev': counter, 'success': converged}

        self.results = AttrDict(**self.results)
        self.results.mus = self.results.x[0::3]
        self.results.sigmas = self.results.x[1::3]
        self.results.pis = self.results.x[2::3]

        log_likelihood = self.model.log_likelihood(self.results.x, self.data)
        self.results.log_likelihood = log_likelihood
        self.results.AIC = criteria.AIC(log_likelihood, self.k, logL=True)
        self.results.AICc = criteria.AICc(log_likelihood,
                                          self.k,
                                          self.data.size,
                                          logL=True)
        self.results.BIC = criteria.BIC(log_likelihood,
                                        self.k,
                                        self.data.size,
                                        logL=True)
Ejemplo n.º 58
0
        average[n] = np.mean(ra[((number_terms_averaged)*n):(number_terms_averaged*(n+1))])      
    return average
 
fig = plt.figure(1)
fig.suptitle('Roundabout Distribution - Trend towards a normal distribution', fontweight = 'bold')


average = get_average(5)

plt.text(75,0.45, "n=5", fontsize = 16)
mu = np.mean(average)
sigma = np.std(average)
print mu
print sigma
print math.sqrt((sigma**2)*1)

r = np.arange(np.min(average),np.max(average), 0.2  )
r_detailed = (np.arange(60,85,0.005))
y = P.normpdf(r_detailed, mu, sigma)

plt.hist(average, bins=r, normed = 1)


plt.plot(r_detailed, y, linewidth=5.0)

plt.axis([(mu-3.5*sigma),(mu+3.5*sigma),0,P.normpdf(mu, mu, sigma)*1.1])

plt.grid(True)

plt.show()
plt.close()
Ejemplo n.º 59
0

# generate an empty vector with rep entries
rep = 20
a = np.zeros(rep)

# write numbers into the vector
for i in range(0,rep):
	a[i] = i
	
# print vector
print(a)

# generate random numbers
mu = 0.0
sigma = 1.0
nsample = 10000
aux = np.random.normal(mu,sigma,nsample)
#aux = np.random.uniform(-1.0,1.0,nsample)

# plot histogram and a Gaussian on top
bins = 30
n,x,p = plt.hist(aux,bins,normed=1)
y = plt.normpdf(x,mu,sigma)
plt.plot(x,y,'g--',linewidth=1.5)

# make plot nice
plt.xlabel('random number $x$')
plt.ylabel('probability $p(x)$')

plt.show()