Example 1
def csnormalize(image,f=0.75):
    """Center and size-normalize an image."""
    bimage = 1*(image>mean([amax(image),amin(image)]))
    w,h = bimage.shape
    [xs,ys] = mgrid[0:w,0:h]
    s = sum(bimage)
    if s<1e-4: return image
    s = 1.0/s
    cx = sum(xs*bimage)*s
    cy = sum(ys*bimage)*s
    sxx = sum((xs-cx)**2*bimage)*s
    sxy = sum((xs-cx)*(ys-cy)*bimage)*s
    syy = sum((ys-cy)**2*bimage)*s
    w,v = eigh(array([[sxx,sxy],[sxy,syy]]))
    l = sqrt(amax(w))
    if l>0.01:
        scale = f*max(image.shape)/(4.0*l)
    else:
        scale = 1.0
    m = array([[1.0/scale,0],[0.0,1.0/scale]])
    w,h = image.shape
    c = array([cx,cy])
    d = c-dot(m,array([w/2,h/2]))
    image = interpolation.affine_transform(image,m,offset=d,order=1)
    return image
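A minimal usage sketch for the example above. It assumes the same environment as the original module (pylab star imports plus scipy.ndimage's interpolation); the synthetic glyph is made up for illustration:

# usage sketch (hypothetical input)
img = zeros((48, 48), 'f')
img[10:30, 18:26] = 1.0          # off-center blob standing in for a glyph
normed = csnormalize(img)        # recentered, size-normalized copy
print(normed.shape)              # same shape as the input, (48, 48)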
Example 2
def test1():
    vector_full = NP.array([1.0, 2.5, 2.8, 4.1, 5.1, 5.9, 6.9, 8.1])
    vector = vector_full[:-2]
    t =  NP.arange(vector.shape[0])
    showArray('t', t) 
    showArray('vector', vector)    
    
    mask = [True] * vector.shape[0]
    mask[2] = False
    print 'mask', len(mask), mask
   
    masked_vector = applyMask1D(vector, mask)
    masked_t = applyMask1D(t, mask)
    trend = getTrend(t, vector)
    print trend
    for i in range(masked_t.shape[0]):
        v_pred = trend[0] + masked_t[i] * trend[1]
        print i, masked_vector[i], v_pred, v_pred - masked_vector[i]
    
    predicted = NP.array([trend[0] + masked_t[i] * trend[1] for i in range(masked_vector.shape[0])])
    corrected = NP.array([masked_vector[i] - predicted[i] for i in range(masked_vector.shape[0])])
    masked_s = NP.transpose(NP.vstack([masked_vector, predicted, corrected]))
    
    showArray('masked_t', masked_t) 
    showArray('masked_s', masked_s)    
    # the main axes is subplot(111) by default
    PL.plot(masked_t, masked_s)
    s_range = PL.amax(masked_s) - PL.amin(masked_s)
    PL.axis([PL.amin(masked_t), PL.amax(masked_t), PL.amin(masked_s) - s_range*0.1, PL.amax(masked_s) + s_range*0.1 ])
    PL.xlabel('time (days)')
    PL.ylabel('downloads')
    PL.title('Downloads over time')
    PL.show()
Example 3
def classifier_normalize(image,size=32):
    """Normalize characters for classification."""
    if amax(image)<1e-3: return zeros((size,size))
    cimage = array(image*1.0/amax(image),'f')
    cimage = isotropic_rescale(cimage,size)
    cimage = csnormalize(cimage)
    return cimage
Example 4
def csnormalize(image, f=0.75):
    """Center and size-normalize an image."""
    bimage = 1 * (image > mean([amax(image), amin(image)]))
    w, h = bimage.shape
    [xs, ys] = mgrid[0:w, 0:h]
    s = sum(bimage)
    if s < 1e-4: return image
    s = 1.0 / s
    cx = sum(xs * bimage) * s
    cy = sum(ys * bimage) * s
    sxx = sum((xs - cx)**2 * bimage) * s
    sxy = sum((xs - cx) * (ys - cy) * bimage) * s
    syy = sum((ys - cy)**2 * bimage) * s
    w, v = eigh(array([[sxx, sxy], [sxy, syy]]))
    l = sqrt(amax(w))
    if l > 0.01:
        scale = f * max(image.shape) / (4.0 * l)
    else:
        scale = 1.0
    m = array([[1.0 / scale, 0], [0.0, 1.0 / scale]])
    w, h = image.shape
    c = array([cx, cy])
    d = c - dot(m, array([w / 2, h / 2]))
    image = interpolation.affine_transform(image, m, offset=d, order=1)
    return image
Example 5
def classifier_normalize(image, size=32):
    """Normalize characters for classification."""
    if amax(image) < 1e-3: return zeros((size, size))
    cimage = array(image * 1.0 / amax(image), 'f')
    cimage = isotropic_rescale(cimage, size)
    cimage = csnormalize(cimage)
    return cimage
Example 6
def prepare_line(line, pad=16):
    line = line * 1.0 / amax(line)
    line = amax(line) - line
    line = line.T
    if pad > 0:
        w = line.shape[1]
        line = vstack([zeros((pad, w)), line, zeros((pad, w))])
    return line
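A quick, hedged illustration of what prepare_line does to a line image (again assuming the module's pylab star imports):

# usage sketch (hypothetical input)
line = zeros((8, 100), 'f')
line[2:6, 10:90] = 1.0
prepared = prepare_line(line, pad=16)
print(prepared.shape)            # (132, 8): inverted, transposed, 16 zero rows of padding on each end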
Example 7
def translate_back0(outputs, threshold=0.25):
    ms = amax(outputs, axis=1)
    cs = argmax(outputs, axis=1)
    cs[ms < threshold * amax(outputs)] = 0
    result = []
    for i in range(1, len(cs)):
        if cs[i] != cs[i - 1]:
            if cs[i] != 0:
                result.append(cs[i])
    return result
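A hedged toy decoding run for translate_back0: outputs is a (time, classes) score matrix, and the function keeps one label per run of identical argmax values while dropping the blank class 0 (assumes numpy's array/amax/argmax are in scope via the module's star imports):

# usage sketch (made-up scores)
outputs = array([[0.9, 0.1, 0.0],
                 [0.1, 0.8, 0.1],
                 [0.2, 0.7, 0.1],
                 [0.9, 0.0, 0.1],
                 [0.1, 0.1, 0.8]], 'f')
print(translate_back0(outputs, threshold=0.25))   # [1, 2]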
Example 8
def fwhm_2gauss(x, y, dx=0.001):
	'''
	Finds the FWHM for the profile y(x), with accuracy dx=0.001
	Uses a 2-Gauss 1D fit.
	'''
	popt, pcov = curve_fit(gauss2, x, y);
	xx = pl.arange(pl.amin(x), pl.amax(x)+dx, dx);
	ym = gauss2(xx, popt[0], popt[1], popt[2], popt[3], popt[4], popt[5])
	hm = pl.amax(ym/2.0);
	y_diff = pl.absolute(ym-hm);
	y_diff_sorted = pl.sort(y_diff);
	i1 = pl.where(y_diff==y_diff_sorted[0]);
	i2 = pl.where(y_diff==y_diff_sorted[1]);
	fwhm = pl.absolute(xx[i1]-xx[i2]);
	return hm, fwhm, xx, ym
Example 9
def average_size(img, minimum_area=10, maximum_area=40):
    "Estimate the average size of the connected components on the page; this works presumably because the glyphs in manga are all roughly the same size."
    
    components = get_connected_components(img)
    
    
    sorted_components = sorted(components,key=area_bb)
    #sorted_components = sorted(components,key=lambda x:area_nz(x,binary))
    areas = zeros(img.shape)
    
    for component in sorted_components:
        #As the input components are sorted, we don't overwrite
        #a given area again (it will already have our max value)
        if amax(areas[component])>0: continue
        #take the sqrt of the area of the bounding box
        areas[component] = area_bb(component)**0.5
        #alternate implementation where we just use area of black pixels in cc
        #areas[component]=area_nz(component,binary)
    #we lastly take the median (middle value of sorted array) within the region of interest
    #region of interest is defaulted to those ccs between 3 and 100 pixels on a side (text sized)
    aoi = areas[(areas>minimum_area)&(areas<maximum_area)]
    if len(aoi)==0:
        return 0
    print 'np.median(aoi) = ',np.median(aoi)
    
    return np.median(aoi)
Example 10
def SpherePlot( theta, phi, data, 
                title='', xlabel='x', ylabel='y', zlabel='z', color='b',
                fig=py.figure(), ax=None, pos=111 ):
    """Plots data in terms of polar angles THETA and PHI.
    
    theta and phi must be meshgrids:
    the polar angle THETA and the azimuthal angle PHI.
    Note: Sphere plots require higher resolution than mesh plots. 300 is good."""
    X = py.sin(theta)*py.cos(phi)*data
    Y = py.sin(theta)*py.sin(phi)*data
    Z = py.cos(theta)*data
    if ax is None:
        ax = fig.add_subplot( pos, projection='3d' )
        ax.ticklabel_format(style='sci',scilimits=(-1,2),axis='both')      #
        ax.xaxis.major.formatter._useMathText = True                       # 
        ax.yaxis.major.formatter._useMathText = True                       # 
        ax.zaxis.major.formatter._useMathText = True                       # Sets scientific notation
        ax.plot_surface( X, Y, Z, color=color )
        oldlim = 0
    else:
        ax.plot_surface( X, Y, Z, color=color )
        oldlim = ax.get_xlim()[1]
    lim = py.amax(data)
    lim = max(lim,oldlim)
    ax.set_xlim(-lim,lim)
    ax.set_ylim(-lim,lim)
    ax.set_zlim(-lim,lim)
    return fig, ax   
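A minimal call sketch under the example's own assumptions (pylab imported as py, mpl_toolkits available for the 3d projection); the grids below are hypothetical:

# usage sketch (hypothetical data on a theta/phi grid)
theta, phi = py.meshgrid(py.linspace(0, py.pi, 60),
                         py.linspace(0, 2 * py.pi, 60))
data = 1.0 + 0.3 * py.cos(theta)**2
fig, ax = SpherePlot(theta, phi, data)
py.show()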
Example 11
    def update_xsec(self):

        # Generate coordinates at which to find image values
        n = 1000

        x0 = self.line.get_data()[0][0]
        x1 = self.line.get_data()[0][1]
        y0 = self.line.get_data()[1][0]
        y1 = self.line.get_data()[1][1]

        L = pl.sqrt((x1 - x0)**2 + (y1 - y0)**2)
        print "length: %0.2f" % L
        print "angle: %0.2f" % (L * bfp_scaling_factor / 2)

        xs, ys = GetLinePoints(n, x0, x1, y0, y1)

        L = L * bfp_scaling_factor

        xsec_xs = pl.linspace(-L / 2., L / 2, n)
        xsec_ys = map_coordinates(self.imdata, [ys, xs])

        # Set new cross section plot axes limits
        xmin = -L / 2.  #pl.amin(xsec_xs)
        xmax = L / 2.  #pl.amax(xsec_xs)
        ymin = 0
        ymax = pl.amax(self.imdata)
        xsec_bounds = [xmin, xmax, ymin, ymax]

        # Redraw cross section plot

        self.xsec_line.set_data(xsec_xs, xsec_ys)
        self.xsec_axes.axis(xsec_bounds)
        self.xsection.canvas.draw()

        self.DrawscatrLine(xmin=xmin)
Example 12
    def update_result(self):

        # Generate coordinates at which to find image values
        n = 10000

        x0 = self.line.get_data()[0][0]
        x1 = self.line.get_data()[0][1]
        y0 = self.line.get_data()[1][0]
        y1 = self.line.get_data()[1][1]

        L = pl.sqrt((x1 - x0)**2 + (y1 - y0)**2)

        xs, ys = GetLinePoints(n, x0, x1, y0, y1)

        L = L * bfp_scaling_factor

        res_xs = pl.linspace(-L / 2., L / 2, n)
        res_ys = map_coordinates(self.unNoise(), [ys, xs], order=1)

        # Set new cross section plot axes limits
        xmin = -L / 2.  #pl.amin(xsec_xs)
        xmax = L / 2.  #pl.amax(xsec_xs)
        ymin = 0
        ymax = pl.amax(self.imdata)
        res_bounds = [xmin, xmax, ymin, ymax]

        # Redraw cross section plot

        self.result_line.set_data(res_xs, res_ys)
        self.result_axes.axis(res_bounds)
        self.result_fig.canvas.draw()
Example 13
def calc_max_field_one(d, subkeys):
	
	# Load fields
	fnbase = jfdfdUtil.generate_filename(d, subkeys=subkeys)
	sname = 'data/raw/max - ' + fnbase + '.dat'
	
	#if os.path.exists(os.path.join(os.getcwd(), sname)):
		#absval = numpy.loadtxt('data/raw/max - ' + fnbase + '.dat')
		#if absval[0]!=0 and absval[1]!=0:
			#print 'Analyzed data already exists - ', fnbase
			#return
			
	tot_name = 'fields/tot-' + fnbase + '.h5'
	print fnbase
	if os.path.exists(os.path.join(os.getcwd(), tot_name)):
		
		d = putil.LoadFields(tot_name)
		max = pylab.amax(abs(d['Fz']))
		
	else:
		print 'Simulation files do not exist!'
		max = 0
	
	# Save data
	xsections = pylab.zeros(1, 'd')
	xsections[0] = max
	
	print "Max Field - ", max
	
	numpy.savetxt(sname, xsections)
	
	return max
Example 14
	def __init__(self, X, c):
		self.n, self.N = X.shape
		self.X = X
		self.c = c

		# Calculate max value of the 2d array
		self.max = amax(X)
Example 15
def findFWHM(vector, maxPos=None, amplitude=None):
    """ 
    Find FWHM of vector peak (width at value at maxPos - amplitude /2).
    If maxPos is None, will find maximum in vector.
    If amplitude is None, will calculate amplitude from maximum to minimum of vector.
    """
    if maxPos == None:
        maxPos = vector.argmax()
    if amplitude == None:
        maxVal = pl.amax(vector)
        minVal = pl.amin(vector)
        amplitude = float(maxVal - minVal)

    maxSign = pl.sign(vector[maxPos])
    for pos, val in enumerate(vector[maxPos:]):
        if pl.sign(val) != maxSign:
            # we passed 0
            break
    halfAbove = pos - abs(vector[maxPos + pos]) / abs(vector[maxPos + pos] -
                                                      vector[maxPos + pos - 1])

    for pos, val in enumerate(vector[maxPos:0:-1]):
        if pl.sign(val) != maxSign:
            # we passed 0
            break
    halfBelow = pos - abs(vector[maxPos - pos]) / abs(vector[maxPos - pos] -
                                                      vector[maxPos - pos + 1])

    FWHM = halfBelow + halfAbove

    return FWHM, maxPos, amplitude
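A hedged sketch of calling findFWHM (assumes pylab imported as pl, matching the function body). Note that the width comes back in samples and is measured between the sign changes on either side of the peak:

# usage sketch (synthetic profile that crosses zero around the peak)
x = pl.linspace(-4, 4, 801)
vec = pl.exp(-x**2 / 2.0) - 0.5          # peak at index 400, zero crossings near x = +/- 1.18
width, peak_pos, amp = findFWHM(vec)
print(peak_pos, width)                   # 400, roughly 235 samples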
Example 16
def plot_tuneshifts_2(ax, spectrum, scan_values, Qs=1, fitrange=None, fittype=None):

    (spectral_lines, spectral_intensity) = spectrum

    # Normalize power.
    normalized_intensity = spectral_intensity / plt.amax(spectral_intensity)

    # Prepare plot environment.
    palette    = _create_cropped_cmap()

    x_grid = plt.ones(spectral_lines.shape) * plt.array(scan_values, dtype='float64')
    for file_i in xrange(len(scan_values)):
        x, y, z = x_grid[:,file_i], spectral_lines[:,file_i], normalized_intensity[:,file_i]
        tuneshift_plot = ax[0].scatter(x, y, s=192*plt.log(1+z), c=z, cmap=palette, edgecolors='None')

    # Colorbar
    cb = plt.colorbar(tuneshift_plot, ax[1], orientation='vertical')
    cb.set_label('Power [normalised]')

    if fitrange:
        if not fittype or fittype=='full':
            x, y, z, p = fit_modes_full(spectral_lines, spectral_intensity, scan_values, fitrange)
        elif fittype=="0":
            x, y, z, p = fit_modes_0(spectral_lines, spectral_intensity, scan_values, fitrange)
        else:
            raise ValueError("Wrong argument "+fittype+"! Use \"0\" or \"full\"")

        ax[0].plot(x, y, 'o', ms=8, mfc='none', mew=2, mec='limegreen')
        ax[0].plot(scan_values, z, '-', lw=2, color='limegreen')

        ax[0].text(0.95, 0.95, '$\Delta Q \sim $ {:1.2e}'.format(p[0]*1e11*Qs[0]), fontsize=36, color='w', horizontalalignment='right', verticalalignment='top', transform=ax[0].transAxes)
Example 17
    def addDataVectorAccessor(self, data_vector_accessor):
        self.__data_vectors_accessors__.append(data_vector_accessor)

        _sum = pl.sum(data_vector_accessor.signal)
        _min = pl.amin(data_vector_accessor.signal)
        _max = pl.amax(data_vector_accessor.signal)

        if self.__minimal_signal__ == None:
            self.__minimal_signal__ = _sum
            self.__minimal_data_vector_accessor__ = data_vector_accessor

            self.__min_signal__ = _min
            self.__max_signal__ = _max

        if _sum < self.__minimal_signal__:
            self.__minimal_data_vector_accessor__ = data_vector_accessor
            self.__minimal_signal__ = _sum

        if _min < self.__min_signal__:
            self.__min_signal__ = _min

        if _max > self.__max_signal__:
            self.__max_signal__ = _max

        #collects unique annotations (>0) as a set
        if not data_vector_accessor.annotation == None:
            unique_annotations = pl.unique(data_vector_accessor.annotation[
                                pl.where(data_vector_accessor.annotation > 0)])
            if len(unique_annotations) > 0:
                #union of sets
                self.__unique_annotations__ |= set(unique_annotations)
Example 18
def fwhm(x, y):
	hm = pl.amax(y/2.0);
	y_diff = pl.absolute(y-hm);
	y_diff_sorted = pl.sort(y_diff);
	i1 = pl.where(y_diff==y_diff_sorted[0]);
	i2 = pl.where(y_diff==y_diff_sorted[1]);
	fwhm = pl.absolute(x[i1]-x[i2]);
	return hm, fwhm
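A minimal usage sketch for fwhm (assumes pylab imported as pl). The function returns the half-maximum level and the spacing between the two samples whose values lie closest to it:

# usage sketch (unit-sigma Gaussian profile)
x = pl.linspace(-4.9, 5.1, 1000)
y = pl.exp(-x**2 / 2.0)
hm, width = fwhm(x, y)
print(hm, width)                  # hm ~ 0.5, width ~ 2.35 (returned as a length-1 array)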
Example 19
def _calculate_spectra_sussix(sx, sy, Q_x, Q_y, Q_s, n_lines):

    n_turns, n_files = sx.shape

    # Allocate memory for output.        
    oxx, axx = plt.zeros((n_lines, n_files)), plt.zeros((n_lines, n_files))
    oyy, ayy = plt.zeros((n_lines, n_files)), plt.zeros((n_lines, n_files))

    # Initialise Sussix object.
    SX = PySussix.Sussix()
    
    x, xp, y, yp = sx.real, sx.imag, sy.real, sy.imag
    for file_i in xrange(n_files):
        SX.sussix_inp(nt1=1, nt2=n_turns, idam=2, ir=0, tunex=Q_x[file_i] % 1, tuney=Q_y[file_i] % 1)
        SX.sussix(x[:,file_i], xp[:,file_i], y[:,file_i], yp[:,file_i], sx[:,file_i], sx[:,file_i])

        # Amplitude normalisation
        SX.ax /= plt.amax(SX.ax)
        SX.ay /= plt.amax(SX.ay)

        # Tunes
        SX.ox = plt.absolute(SX.ox)
        SX.oy = plt.absolute(SX.oy)
        if file_i==0:
            tunexsx = SX.ox[plt.argmax(SX.ax)]
            tuneysx = SX.oy[plt.argmax(SX.ay)]
            print "\n*** Tunes from Sussix"
            print "    tunex", tunexsx, ", tuney", tuneysx, "\n"

        # Tune normalisation
        SX.ox = (SX.ox - (Q_x[file_i] % 1)) / Q_s[file_i]
        SX.oy = (SX.oy - (Q_y[file_i] % 1)) / Q_s[file_i]
    
        # Sort
        CX = plt.rec.fromarrays([SX.ox, SX.ax], names='ox, ax')
        CX.sort(order='ax')
        CY = plt.rec.fromarrays([SX.oy, SX.ay], names='oy, ay')
        CY.sort(order='ay')
        ox, ax, oy, ay = CX.ox, CX.ax, CY.oy, CY.ay
        oxx[:,file_i], axx[:,file_i], oyy[:,file_i], ayy[:,file_i] = ox, ax, oy, ay

    spectra = {}
    spectra['horizontal'] = (oxx, axx)
    spectra['vertical']   = (oyy, ayy)
        
    return spectra
Example 20
def translate_back(outputs,threshold=0.7,pos=0):
    """Translate back. Thresholds on class 0, then assigns
    the maximum class to each region."""
    # print outputs
    labels,n = measurements.label(outputs[:,0]<threshold)
    mask = tile(labels.reshape(-1,1), (1,outputs.shape[1]))
    maxima = measurements.maximum_position(outputs,mask,arange(1,amax(mask)+1))
    if pos: return maxima
    return [c for (r,c) in maxima]
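A hedged toy run of translate_back (assumes the module's usual star imports plus scipy.ndimage's measurements). Rows whose class-0 score stays above the threshold act as separators; each remaining region contributes the class of its overall maximum:

# usage sketch (made-up scores, class 0 = blank)
outputs = array([[0.90, 0.05, 0.05],
                 [0.10, 0.80, 0.10],
                 [0.20, 0.70, 0.10],
                 [0.95, 0.02, 0.03],
                 [0.10, 0.15, 0.75]], 'f')
print(translate_back(outputs))            # [1, 2]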
Example 21
def remove_noise(line,minsize=8):
    """Remove small pixels from an image."""
    if minsize==0: return line
    bin = (line>0.5*amax(line))
    labels,n = morph.label(bin)
    sums = measurements.sum(bin,labels,range(n+1))
    sums = sums[labels]
    good = minimum(bin,1-(sums>0)*(sums<minsize))
    return good
Example 22
def translate_back(outputs,threshold=0.7,pos=0):
    """Translate back. Thresholds on class 0, then assigns
    the maximum class to each region."""
    # print outputs
    labels,n = measurements.label(outputs[:,0]<threshold)
    mask = tile(labels.reshape(-1,1), (1,outputs.shape[1]))
    maxima = measurements.maximum_position(outputs,mask,arange(1,amax(mask)+1))
    if pos: return maxima
    return [c for (r,c) in maxima]
Example 23
def findHighOD(data):
    '''Find the highest OD value to set for plots'''
    hi = 0
    for c, wDict in data.items():
        for w, curve in wDict.items():
            max = py.amax(curve)
            if max > hi:
                hi = max
    return hi
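A small hedged example of the nested layout findHighOD expects (condition -> well -> OD curve), assuming pylab is imported as py:

# usage sketch (made-up growth curves)
data = {'control': {'A1': py.array([0.1, 0.4, 0.9]),
                    'A2': py.array([0.2, 0.5, 1.1])},
        'treated': {'B1': py.array([0.1, 0.3, 0.7])}}
print(findHighOD(data))           # 1.1, the largest OD value across all curves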
Example 24
def remove_noise(line, minsize=8):
    """Remove small pixels from an image."""
    if minsize == 0: return line
    bin = (line > 0.5 * amax(line))
    labels, n = morph.label(bin)
    sums = measurements.sum(bin, labels, range(n + 1))
    sums = sums[labels]
    good = minimum(bin, 1 - (sums > 0) * (sums < minsize))
    return good
Example 25
def LoadFITS(fname):
    hdulist = pyfits.open(fname, ignore_missing_end=True)
    image = hdulist[0].data

    print 'Loading -', fname
    print 'Image Dimensions:', pl.shape(image)
    print 'Maximum Counts:', pl.amax(image)
    print ''

    return image
Example 26
def QuickHull(points):
    """Randomized divide and conquer convex hull.
    
    Args:
        points: NxD matrix of points in dimension D.
    """
    N, D = points.shape
    dim = random.randint(0, D - 1)
    min_dim = p.amin(points.T, dim)
    max_dim = p.amax(points.T, dim)
Example 27
def QuickHull(points):
    """Randomized divide and conquer convex hull.
    
    Args:
        points: NxD matrix of points in dimension D.
    """
    N, D = points.shape
    dim = random.randint(0, D-1)
    min_dim = p.amin(points.T, dim)
    max_dim = p.amax(points.T, dim)
Example 28
def plot_risetimes(a, b, **kwargs):

    # plt.ion()
    # if kwargs is not None:
    #     for key, value in kwargs.iteritems():
    #         if key == 'file_list':
    #             file_list = value
    #         if key == 'scan_line':
    #             scan_line = value
    # varray = plt.array(get_value_from_cfg(file_list, scan_line))

    n_files = a.shape[-1]
    cmap = plt.get_cmap('jet')
    c = [cmap(i) for i in plt.linspace(0, 1, n_files)]

    fig1, (ax1, ax2) = plt.subplots(1, 2, figsize=(20, 10))
    [ax.set_color_cycle(c) for ax in (ax1, ax2)]

    r = []
    for i in xrange(n_files):
        x, y = a[:,i], b[:,i]
        # xo, yo = x, y #, get_envelope(x, y)
        xo, yo = get_envelope(x, y)
        p = plt.polyfit(xo, np.log(yo), 1)

        # Right way to fit... a la Nicolas - the fit expert!
        l = ax1.plot(x, plt.log(plt.absolute(y)))
        lcolor = l[-1].get_color()
        ax1.plot(xo, plt.log(yo), color=lcolor, marker='o', mec=None)
        ax1.plot(x, p[1] + x * p[0], color=lcolor, ls='--', lw=3)

        l = ax2.plot(x, y)
        lcolor = l[-1].get_color()
        ax2.plot(xo, yo, 'o', color=lcolor)
        xi = plt.linspace(plt.amin(x), plt.amax(x))
        yi = plt.exp(p[1] + p[0] * xi)
        ax2.plot(xi, yi, color=lcolor, ls='--', lw=3)

        print p[1], p[0], 1 / p[0]
        # plt.draw()
        # ax1.cla()
        # ax2.cla()

        r.append(1/p[0])

    ax2.set_ylim(0, 1000)
    plt.figure(2)
    plt.plot(r, lw=3, c='purple')
    # plt.gca().set_ylim(0, 10000)

    # ax3 = plt.subplot(111)
    # ax3.semilogy(x, y)
    # ax3.semilogy(xo, yo)

    return r
Example 29
def translate_back(outputs, threshold=0.7, pos=0):
    labels, n = measurements.label(outputs[:, 0] < threshold)
    mask = tile(labels.reshape(-1, 1), (1, outputs.shape[1]))
    maxima = measurements.maximum_position(outputs, mask,
                                           arange(1,
                                                  amax(mask) + 1))
    if pos == 1:
        return maxima
    if pos == 2:
        return [(c, outputs[r, c]) for (r, c) in maxima]
    return [c for (r, c) in maxima]
Example 30
def mean_height(img, minimum=3, maximum=100):
  components = get_connected_components(img)
  sorted_components = sorted(components,key=area_bb)
  heights = zeros(img.shape)
  for c in sorted_components:
    if amax(heights[c])>0: continue
    heights[c]=height_bb(c)
  aoi = heights[(heights>minimum)&(heights<maximum)]
  if len(aoi)>0:
    return np.mean(aoi)
  return 0 
Example 31
def mean_height(img, minimum=3, maximum=100):
  components = get_connected_components(img)
  sorted_components = sorted(components,key=area_bb)
  heights = zeros(img.shape)
  for c in sorted_components:
    if amax(heights[c])>0: continue
    heights[c]=height_bb(c)
  aoi = heights[(heights>minimum)&(heights<maximum)]
  if len(aoi)>0:
    return np.mean(aoi)
  return 0 
Example 32
def mean_width(img, minimum=3, maximum=100):
  components = get_connected_components(img)
  sorted_components = sorted(components,key=area_bb)
  widths = zeros(img.shape)
  for c in sorted_components:
    if amax(widths[c])>0: continue
    widths[c]=width_bb(c)
  aoi = widths[(widths>minimum)&(widths<maximum)]
  if len(aoi)>0:
    return np.mean(aoi)
  return 0 
Example 33
def mean_width(img, minimum=3, maximum=100):
  components = get_connected_components(img)
  sorted_components = sorted(components,key=area_bb)
  widths = zeros(img.shape)
  for c in sorted_components:
    if amax(widths[c])>0: continue
    widths[c]=width_bb(c)
  aoi = widths[(widths>minimum)&(widths<maximum)]
  if len(aoi)>0:
    return np.mean(aoi)
  return 0 
Example 34
def _calculate_spectra_fft(sx, sy, Q_x, Q_y, Q_s, n_lines):

    n_turns, n_files = sx.shape
        
    # Allocate memory for output.
    oxx, axx = plt.zeros((n_lines, n_files)), plt.zeros((n_lines, n_files))
    oyy, ayy = plt.zeros((n_lines, n_files)), plt.zeros((n_lines, n_files))

    for file_i in xrange(n_files):
        t = plt.linspace(0, 1, n_turns)
        ax = plt.absolute(plt.fft(sx[:, file_i]))
        ay = plt.absolute(plt.fft(sy[:, file_i]))

        # Amplitude normalisation
        ax /= plt.amax(ax, axis=0)
        ay /= plt.amax(ay, axis=0)
    
        # Tunes
        if file_i==0:
            tunexfft = t[plt.argmax(ax[:n_turns/2], axis=0)]
            tuneyfft = t[plt.argmax(ay[:n_turns/2], axis=0)]
            print "\n*** Tunes from FFT"
            print "    tunex:", tunexfft, ", tuney:", tuneyfft, "\n"

        # Tune normalisation
        ox = (t - (Q_x[file_i] % 1)) / Q_s[file_i]
        oy = (t - (Q_y[file_i] % 1)) / Q_s[file_i]
    
        # Sort
        CX = plt.rec.fromarrays([ox, ax], names='ox, ax')
        CX.sort(order='ax')
        CY = plt.rec.fromarrays([oy, ay], names='oy, ay')
        CY.sort(order='ay')
        ox, ax, oy, ay = CX.ox[-n_lines:], CX.ax[-n_lines:], CY.oy[-n_lines:], CY.ay[-n_lines:]
        oxx[:,file_i], axx[:,file_i], oyy[:,file_i], ayy[:,file_i] = ox, ax, oy, ay

    spectra = {}
    spectra['horizontal'] = (oxx, axx)
    spectra['vertical']   = (oyy, ayy)
        
    return spectra
Example 35
def test2():
    number_samples = 300
    days_to_keep = [2,3,4,5,6]
    vector_full = NP.array([2.0 + i * 10.0/number_samples  + random.uniform(-.5, .5) for i in range(number_samples)])
    mask_full = getDaysOfWeekMask(days_to_keep, vector_full.shape[0])

    vector = vector_full[:int(vector_full.shape[0]*0.8)]
    t = NP.arange(vector.shape[0])
    showArray('t', t) 
    showArray('vector', vector)    
    
    mask = getDaysOfWeekMask(days_to_keep, vector.shape[0])
    print 'mask', len(mask), mask
   
    masked_t = applyMask1D(t, mask)
    masked_vector = applyMask1D(vector, mask)
    showArray('masked_t', masked_t) 
    showArray('masked_vector', masked_vector) 
    
    trend = getTrend(t, vector)
    print trend
    for i in range(masked_t.shape[0]):
        v_pred = trend[0] + masked_t[i] * trend[1]
        print masked_t[i], masked_vector[i], v_pred, v_pred - masked_vector[i]
    
    predicted = NP.array([trend[0] + masked_t[i] * trend[1] for i in range(masked_vector.shape[0])])
    corrected = NP.array([masked_vector[i] - predicted[i] for i in range(masked_vector.shape[0])])
    masked_s = NP.transpose(NP.vstack([masked_vector, predicted, corrected]))
    
    showArray('masked_t', masked_t) 
    showArray('masked_s', masked_s)    
    # the main axes is subplot(111) by default
    PL.plot(masked_t, masked_s)
    s_range = PL.amax(masked_s) - PL.amin(masked_s)
    PL.axis([PL.amin(masked_t), PL.amax(masked_t), PL.amin(masked_s) - s_range*0.1, PL.amax(masked_s) + s_range*0.1 ])
    PL.xlabel('Time (days)')
    PL.ylabel('Downloads')
    PL.title('Downloads over time')
    PL.show()   
Example 36
def main():
    Ns = py.array([20, 40, 80, 160, 320])
    infos = []
    max_error = []
    for name in ["jaccon", "gscon"]:
        infos.append([])
        max_error.append([])
        for N in Ns:
            filename = "DATA/%s_N%04d.dat" % (name, N)
            info, data = read_data(filename, 9)
            dx = 2 / (N - 1)
            x = py.arange(N) * dx - 1
            x_grid, y_grid = py.meshgrid(x, x)
            x_grid = x_grid.transpose()
            y_grid = y_grid.transpose()
            ref_data = py.sin(py.pi * x_grid) * py.sin(py.pi * y_grid)

            max_error[-1].append(py.amax(py.amax(abs(data - ref_data))))
            infos[-1].append(info)
    max_error = py.array(max_error)

    fig, ax = py.subplots(1, 1, figsize=(6, 3))
    # Fitting
    for ii, err in enumerate(max_error):
        a, b = py.polyfit(py.log(Ns), py.log(err), 1)
        infos[ii][0]["a"] = a
        ax.loglog(Ns,
                  err,
                  '.-',
                  label=infos[ii][0]["solver_type"] +
                  " (Fit $\propto N^{%2.2f}$)" % a)
        #ax.loglog(Ns,py.log(b)*Ns**a,label=r"Fit: Err $\propto N^{%2.2f}$"%a)
    ax.grid('on')
    ax.set_xlabel("N (grid size)")
    ax.set_ylabel("max|$u_{solver}-u_{ref}$| (max error)")
    ax.legend()
    py.tight_layout()
    fig.savefig(fig_path + "con.png")
    py.show()
Example 37
def translate_back0(outputs,threshold=0.7):
    """Simple code for translating output from a classifier
    back into a list of classes. TODO/ATTENTION: this can
    probably be improved."""
    ms = amax(outputs,axis=1)
    cs = argmax(outputs,axis=1)
    cs[ms<threshold] = 0
    result = []
    for i in range(1,len(cs)):
        if cs[i]!=cs[i-1]:
            if cs[i]!=0:
                result.append(cs[i])
    return result
Example 38
def translate_back0(outputs, threshold=0.7):
    """Simple code for translating output from a classifier
    back into a list of classes. TODO/ATTENTION: this can
    probably be improved."""
    ms = amax(outputs, axis=1)
    cs = argmax(outputs, axis=1)
    cs[ms < threshold] = 0
    result = []
    for i in range(1, len(cs)):
        if cs[i] != cs[i - 1]:
            if cs[i] != 0:
                result.append(cs[i])
    return result
Example 39
 def isCorrectSignalRange(self, _signal):
     _min = pl.amin(_signal)
     if _min >= self.__filter__.min_value and \
         _min <= self.__filter__.max_value:
         return True
     _max = pl.amax(_signal)
     if _max >= self.__filter__.min_value and \
         _max <= self.__filter__.max_value:
         return True
     if _min <= self.__filter__.min_value and \
         _max >= self.__filter__.max_value:
         return True
     InformationWindow(message="Signal data out of range !")
     return False
Example 40
 def isCorrectSignalRange(self, _signal):
     _min = pl.amin(_signal)
     if _min >= self.__filter__.min_value and \
         _min <= self.__filter__.max_value:
         return True
     _max = pl.amax(_signal)
     if _max >= self.__filter__.min_value and \
         _max <= self.__filter__.max_value:
         return True
     if _min <= self.__filter__.min_value and \
         _max >= self.__filter__.max_value:
         return True
     InformationWindow(message="Signal data out of range !")
     return False
Example 41
def readDatDirectory(key, directory):
    global stats
    #Don't read data in if it's already read
    if not key in DATA["mean"]:
        data = defaultdict(array)

        #Process the dat files
        for datfile in glob.glob(directory + "/*.dat"):
            fileHandle = open(datfile, 'rb')
            keys, dataDict = csvExtractAllCols(fileHandle)
            stats = union(stats, keys)
            for aKey in keys:
                if not aKey in data:
                    data[aKey] = reshape(array(dataDict[aKey]),
                                         (1, len(dataDict[aKey])))
                else:
                    data[aKey] = append(data[aKey],
                                        reshape(array(dataDict[aKey]),
                                                (1, len(dataDict[aKey]))),
                                        axis=0)

        #Process the div files
        for datfile in glob.glob(directory + "/*.div"):
            fileHandle = open(datfile, 'rb')
            keys, dataDict = csvExtractAllCols(fileHandle)
            stats = union(stats, keys)
            for aKey in keys:
                if not aKey in data:
                    data[aKey] = reshape(array(dataDict[aKey]),
                                         (1, len(dataDict[aKey])))
                else:
                    data[aKey] = append(data[aKey],
                                        reshape(array(dataDict[aKey]),
                                                (1, len(dataDict[aKey]))),
                                        axis=0)

        #Iterate through the stats and calculate mean/standard deviation
        for aKey in stats:
            if aKey in data:
                DATA["mean"][key][aKey] = mean(data[aKey], axis=0)
                DATA["median"][key][aKey] = median(data[aKey], axis=0)
                DATA["std"][key][aKey] = std(data[aKey], axis=0)
                DATA["ste"][key][aKey] = std(data[aKey], axis=0) / sqrt(
                    len(data[aKey]))
                DATA["min"][key][aKey] = mean(data[aKey], axis=0) - amin(
                    data[aKey], axis=0)
                DATA["max"][key][aKey] = amax(data[aKey], axis=0) - mean(
                    data[aKey], axis=0)
                DATA["actual"][key][aKey] = data[aKey]
Example 42
def leftover_phc_single(ds, attr="p_filt_value_phc", feature="CuKAlpha", ax=None):
    cal = ds.calibration[attr]
    pulse_timing.choose_laser_dataset(ds, "not_laser")
    if ax is None:
        plt.figure()
        ax = plt.gca()
    ax.plot(ds.p_promptness[ds.cuts.good()], getattr(ds, attr)[ds.cuts.good()],'.')
    # ax.set_xlabel("promptness")
    ax.set_ylabel(attr)
    ax.set_title("chan %d %s"%(ds.channum, feature))
    ax.set_ylim(np.array([.995, 1.005])*cal.name2ph(feature))
    index = np.logical_and(getattr(ds, attr)[ds.cuts.good()]>ax.get_ylim()[0], getattr(ds, attr)[ds.cuts.good()]<ax.get_ylim()[1])
    xmin = plt.amin(ds.p_promptness[ds.cuts.good()][index])
    xmax = plt.amax(ds.p_promptness[ds.cuts.good()][index])
    ax.set_xlim(xmin, xmax)
Example 43
def readDatDirectory(key, directory):
    global stats
    #Don't read data in if it's already read
    if not key in DATA["mean"]:
        data = defaultdict(array)

        #Process the dat files
        for datfile in glob.glob(directory + "/*.dat"):
            fileHandle = open(datfile, 'rb')
            keys, dataDict = csvExtractAllCols(fileHandle)
            stats = union(stats, keys)
            for aKey in keys:
                if not aKey in data:
                    data[aKey] = reshape(array(dataDict[aKey]),
                                         (1, len(dataDict[aKey])))
                else:
                    data[aKey] = append(data[aKey],
                                        reshape(array(dataDict[aKey]),
                                                (1, len(dataDict[aKey]))),
                                        axis=0)

        #Process the div files
        for datfile in glob.glob(directory + "/*.div"):
            fileHandle = open(datfile, 'rb')
            keys, dataDict = csvExtractAllCols(fileHandle)
            stats = union(stats, keys)
            for aKey in keys:
                if not aKey in data:
                    data[aKey] = reshape(array(dataDict[aKey]),
                                         (1, len(dataDict[aKey])))
                else:
                    data[aKey] = append(data[aKey],
                                        reshape(array(dataDict[aKey]),
                                                (1, len(dataDict[aKey]))),
                                        axis=0)

        #Iterate through the stats and calculate mean/standard deviation
        for aKey in stats:
            if aKey in data:
                DATA["mean"][key][aKey] = mean(data[aKey], axis=0)
                DATA["median"][key][aKey] = median(data[aKey], axis=0)
                DATA["std"][key][aKey] = std(data[aKey], axis=0)
                DATA["ste"][key][aKey] = std(data[aKey], axis=0)/ sqrt(len(data[aKey]))
                DATA["min"][key][aKey] = mean(data[aKey], axis=0)-amin(data[aKey], axis=0)
                DATA["max"][key][aKey] = amax(data[aKey], axis=0)-mean(data[aKey], axis=0)
                DATA["actual"][key][aKey] = data[aKey]
Example 44
    def __initiate_movie__(self):
        FFMpegWriter = manimation.writers["ffmpeg"]
        metadata = dict(title="Poincare plot movie", artist="HRV", comment="Movie support!")
        self.writer = FFMpegWriter(fps=self.movie_parameters.fps, metadata=metadata)

        self.fig = plt.figure()
        # l, = plt.plot([], [], 'k-o')
        self.movie_plot, = plt.plot([], [], "bo")

        margin = 50
        signal = self.data_vector_accessor_list[0].signal
        _max = pl.amax(signal)
        _min = pl.amin(signal)
        plt.xlim(_min - margin, _max + margin)
        plt.ylim(_min - margin, _max + margin)
        movie_filename = "/tmp/movie.mp4"
        return self.writer.saving(self.fig, movie_filename, 150)
Example 45
def extract_centered_scaled_barred(image,shape,center,scale,bar=None):
    """Extracts a patch of size `shape` centered on `center`
    and scaled by `scale`. Optionally adds a "bar" to the left side
    of the image, usually used to indicate the baseline and x-line
    of a text line."""
    scale = 1.0/scale
    rshape = scale*array(shape)
    center = array(center)-rshape/2.0
    result = interpolation.affine_transform(1.0*image,diag([scale,scale]),offset=center,
                                            output_shape=shape,order=1)
    if bar is not None:
        bar = array(bar,'f')
        bar -= center[0]
        bar /= scale
        result[int(bar[0]):int(bar[1]),0] = amax(result)

    return result
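A hedged usage sketch for extract_centered_scaled_barred (assumes the original module's environment: pylab star imports plus scipy.ndimage's interpolation); the image and coordinates are made up:

# usage sketch (hypothetical page image)
page = zeros((200, 300), 'f')
page[90:110, 140:160] = 1.0
patch = extract_centered_scaled_barred(page, (32, 32), center=(100, 150), scale=1.0)
print(patch.shape)                # (32, 32)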
Example 46
def spectrum(wav_file,mi,mx,har,start,end,posX,posY,layer,origin,gap=0,arrange="",radius=30,sinheight=6.1):
    spect = []
    frame_rate, snd = wavfile.read(wav_file)
    sound_info = snd[:,0]
    spectrum, freqs, t, im = plt.specgram(sound_info,NFFT=1024,Fs=frame_rate,noverlap=5,mode='magnitude')
    n = 0
    rotation = 6.2831
    sinpos = {}
    cirpos = {}
    if arrange == "sinus":
        sinpos = sinus(har,radius,sinheight)
        for i in range(har):
            cirpos[i] = 0
    elif arrange == "circle":
        gap = 0
        sinpos, cirpos = circle(har,radius)
        rotation /= har
    else:
        for i in range(har):
            sinpos[i] = 0
        for i in range(har):
            cirpos[i] = 0
    maximum = plt.amax(spectrum)
    minimum = plt.amin(spectrum)
    position = 0
    while n < har:
        lastval = ((spectrum[n][0]-minimum)/(maximum - minimum))*(mx-mi)+mi
        lastval = math.ceil(lastval*1000)/1000
        lasttime = int(round(t[0]*1000))
        spect.append(osbject("bar.png",layer,origin,posX+position*gap+int(round(float(cirpos[n]))),posY+int(round(float(sinpos[n])))))
        position += 1
        if arrange == "circle":
            spect[n].rotate(0,start,start,math.ceil((1.5707+n*rotation)*1000)/1000,math.ceil((1.5707+n*rotation)*1000)/1000)
        for index,power in enumerate(spectrum[n]):
            power = ((power-minimum)/(maximum - minimum))*(mx-mi)+mi
            power = math.ceil(power*1000)/1000
            if power == lastval or int(round(t[index]*1000)) < start or int(round(t[index]*1000)) > end or index % 2 != 0:
                lasttime = int(round(t[index]*1000))
                continue
            else:
                spect[n].vecscale(0,lasttime,int(round(t[index]*1000)),1,lastval,1,power)
                lastval = power
                lasttime = int(round(t[index]*1000))
        n += 1
    return spect
Example 47
def spectrum(wav_file,mi,mx,har,start,end,posX,posY,layer,origin,gap=0,arrange="",radius=30,sinheight=6.1):
    spect = []
    frame_rate, snd = wavfile.read(wav_file)
    sound_info = snd[:,0]
    spectrum, freqs, t, im = plt.specgram(sound_info,NFFT=1024,Fs=frame_rate,noverlap=5,mode='magnitude')
    n = 0
    rotation = 6.2831
    sinpos = {}
    cirpos = {}
    if arrange == "sinus":
        sinpos = sinus(har,radius,sinheight)
        for i in range(har):
            cirpos[i] = 0
    elif arrange == "circle":
        gap = 0
        sinpos, cirpos = circle(har,radius)
        rotation /= har
    else:
        for i in range(har):
            sinpos[i] = 0
        for i in range(har):
            cirpos[i] = 0
    maximum = plt.amax(spectrum)
    minimum = plt.amin(spectrum)
    position = 0
    while n < har:
        lastval = ((spectrum[n][0]-minimum)/(maximum - minimum))*(mx-mi)+mi
        lastval = math.ceil(lastval*1000)/1000
        lasttime = int(round(t[0]*1000))
        spect.append(osbject("bar.png",layer,origin,posX+position*gap+int(round(float(cirpos[n]))),posY+int(round(float(sinpos[n])))))
        position += 1
        if arrange == "circle":
            spect[n].rotate(0,start,start,math.ceil((1.5707+n*rotation)*1000)/1000,math.ceil((1.5707+n*rotation)*1000)/1000)
        for index,power in enumerate(spectrum[n]):
            power = ((power-minimum)/(maximum - minimum))*(mx-mi)+mi
            power = math.ceil(power*1000)/1000
            if power == lastval or int(round(t[index]*1000)) < start or int(round(t[index]*1000)) > end or index % 2 != 0:
                lasttime = int(round(t[index]*1000))
                continue
            else:
                spect[n].vecscale(0,lasttime,int(round(t[index]*1000)),1,lastval,1,power)
                lastval = power
                lasttime = int(round(t[index]*1000))
        n += 1
    return spect
Example 48
def ctc_align_targets(outputs,
                      targets,
                      threshold=100.0,
                      verbose=0,
                      debug=0,
                      lo=1e-5):
    outputs = maximum(lo, outputs)
    outputs = outputs * 1.0 / sum(outputs, axis=1)[:, newaxis]
    match = dot(outputs, targets.T)
    lmatch = log(match)
    assert not isnan(lmatch).any()
    both = forwardbackward(lmatch)
    epath = exp(both - amax(both))
    l = sum(epath, axis=0)[newaxis, :]
    epath /= where(l == 0.0, 1e-9, l)
    aligned = maximum(lo, dot(epath, targets))
    l = sum(aligned, axis=1)[:, newaxis]
    aligned /= where(l == 0.0, 1e-9, l)
    return aligned
Example 49
def average_size(img, minimum_area=3, maximum_area=100):
  components = get_connected_components(img)
  sorted_components = sorted(components,key=area_bb)
  #sorted_components = sorted(components,key=lambda x:area_nz(x,binary))
  areas = zeros(img.shape)
  for component in sorted_components:
    #As the input components are sorted, we don't overwrite
    #a given area again (it will already have our max value)
    if amax(areas[component])>0: continue
    #take the sqrt of the area of the bounding box
    areas[component] = area_bb(component)**0.5
    #alternate implementation where we just use area of black pixels in cc
    #areas[component]=area_nz(component,binary)
  #we lastly take the median (middle value of sorted array) within the region of interest
  #region of interest is defaulted to those ccs between 3 and 100 pixels on a side (text sized)
  aoi = areas[(areas>minimum_area)&(areas<maximum_area)]
  if len(aoi)==0:
    return 0
  return np.median(aoi)
Example 50
def center_maxsize(image,r):
    """Center the image and fit it into an r x r output image.
    If the input is larger in any dimension than r, it is
    scaled down."""
    from pylab import amin,amax,array,zeros
    assert amin(image)>=0 and amax(image)<=1
    image = array(image,'f')
    w,h = image.shape
    s = max(w,h)
    # zoom down, but don't zoom up
    if s>r:
        image = interpolation.zoom(image,(r+0.5)/float(s))
        image[image<0] = 0
        image[image>1] = 1
        w,h = image.shape
    output = zeros((r,r),image.dtype)
    dx = (r-w)/2
    dy = (r-h)/2
    output[dx:dx+w,dy:dy+h] = image
    return output
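A short usage sketch for center_maxsize, following the Python 2 conventions of these examples (scipy.ndimage's interpolation is assumed to be importable in the module; the function pulls the rest from pylab itself):

# usage sketch (small image, no downscaling needed since max(20, 28) < 32)
from pylab import zeros
img = zeros((20, 28), 'f')
img[5:15, 4:24] = 1.0
out = center_maxsize(img, 32)
print(out.shape)                  # (32, 32), with the input centered inside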
Example 51
    def __initiate_movie__(self):
        FFMpegWriter = manimation.writers['ffmpeg']
        metadata = dict(title='Poincare plot movie',
                        artist='HRV',
                        comment='Movie support!')
        self.writer = FFMpegWriter(fps=self.movie_parameters.fps,
                                   metadata=metadata)

        self.fig = plt.figure()
        #l, = plt.plot([], [], 'k-o')
        self.movie_plot, = plt.plot([], [], 'bo')

        margin = 50
        signal = self.data_vector_accessor_list[0].signal
        _max = pl.amax(signal)
        _min = pl.amin(signal)
        plt.xlim(_min - margin, _max + margin)
        plt.ylim(_min - margin, _max + margin)
        movie_filename = '/tmp/movie.mp4'
        return self.writer.saving(self.fig, movie_filename, 150)
Example 52
def extract_centered_scaled_barred(image, shape, center, scale, bar=None):
    """Extracts a patch of size `shape` centered on `center`
    and scaled by `scale`. Optionally adds a "bar" to the left side
    of the image, usually used to indicate the baseline and x-line
    of a text line."""
    scale = 1.0 / scale
    rshape = scale * array(shape)
    center = array(center) - rshape / 2.0
    result = interpolation.affine_transform(1.0 * image,
                                            diag([scale, scale]),
                                            offset=center,
                                            output_shape=shape,
                                            order=1)
    if bar is not None:
        bar = array(bar, 'f')
        bar -= center[0]
        bar /= scale
        result[int(bar[0]):int(bar[1]), 0] = amax(result)

    return result
Example 53
def center_maxsize(image, r):
    """Center the image and fit it into an r x r output image.
    If the input is larger in any dimension than r, it is
    scaled down."""
    from pylab import amin, amax, array, zeros
    assert amin(image) >= 0 and amax(image) <= 1
    image = array(image, 'f')
    w, h = image.shape
    s = max(w, h)
    # zoom down, but don't zoom up
    if s > r:
        image = interpolation.zoom(image, (r + 0.5) / float(s))
        image[image < 0] = 0
        image[image > 1] = 1
        w, h = image.shape
    output = zeros((r, r), image.dtype)
    dx = (r - w) / 2
    dy = (r - h) / 2
    output[dx:dx + w, dy:dy + h] = image
    return output
Example 54
def plot_tuneshifts(spectrum, scan_values, xlabel='intensity [particles]', ylabel='mode number',
                    xlimits=((0.,7.1e11)), ylimits=((-4,2))):

    (spectral_lines, spectral_intensity) = spectrum

    # Normalize power.
    normalized_intensity = spectral_intensity / plt.amax(spectral_intensity)

    # Prepare plot environment.
    ax11, ax13 = _create_axes(xlabel, ylabel, xlimits, ylimits)
    palette    = _create_cropped_cmap()

    x_grid = plt.ones(spectral_lines.shape) * plt.array(scan_values, dtype='float64')
    for file_i in xrange(len(scan_values)):
        x, y, z = x_grid[:,file_i], spectral_lines[:,file_i], normalized_intensity[:,file_i]
        tuneshift_plot = ax11.scatter(x, y, s=192*plt.log(1+z), c=z, cmap=palette, edgecolors='None')

    # Colorbar
    cb = plt.colorbar(tuneshift_plot, ax13, orientation='vertical')
    cb.set_label('Power [normalised]')

    plt.tight_layout()
Example 55
#---- First subplot
ax= fig.add_subplot(121,projection='3d')
surf = ax.plot_surface(X,Y,Z,cmap=pl.cm.jet,linewidth=0,rstride=5, )
fig.colorbar(surf)
pl.xlim([0.0,1000.])
pl.ylim([0,pop])
ax.set_zlim(-70.,-40.)
ax.view_init(azim=0, elev=90) # set view angle normal to X-Y plane
ax.set_xlabel('ms')
ax.set_ylabel('neuron id ')
ax.set_zlabel('V_m')
pl.title('V_m')

#---- Second subplot
ax2 = fig.add_subplot(122, projection='3d')
Z_conv = (Z_conv-pl.mean(Z_conv))/(pl.amax(Z_conv)-pl.amin(Z_conv)) *(pl.amax(Z)-pl.amin(Z)) + E_L
surf = ax2.plot_surface(X,Y,Z_conv,cmap=pl.cm.jet,linewidth=0,rstride=5, )
fig.colorbar(surf)
pl.xlim([0.0,1000.])
pl.ylim([0,pop])
ax2.set_zlim(-70.,-40.)
ax2.view_init(azim=0, elev=90)
ax2.set_xlabel('ms')
ax2.set_ylabel('neuron id ')
ax2.set_zlabel('V_m')
pl.title('Convolved')
pl.show()



Example 56
 def reset(self):
     self.setSquareFilterParams(SquareFilterParams(
                                 int(pl.amin(self.data_accessor.signal)),
                                 int(pl.amax(self.data_accessor.signal)),
                                 True))