Example #1
    def plot_plasma(self):

        P.tricontourf(self.rzt[:, 0], self.rzt[:, 1],
                      self.tris, self.beta, 1001, zorder=0)
        cticks = P.linspace(0.0, 0.2, 5)
        P.colorbar(ticks=cticks, format='%.2f')
        P.jet()
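
The snippet above depends on class state (self.rzt, self.tris, self.beta). A minimal, self-contained sketch of the same tricontourf-plus-jet() idiom, with an invented point set and scalar field standing in for the plasma data:

import numpy as np
import pylab as P

rng = np.random.default_rng(0)
pts = rng.random((50, 2))                          # stand-in for the (r, z) nodes
vals = np.hypot(pts[:, 0] - 0.5, pts[:, 1] - 0.5)  # stand-in scalar field
P.tricontourf(pts[:, 0], pts[:, 1], vals, 64)      # triangulation is computed implicitly
P.colorbar(ticks=P.linspace(0.0, 0.7, 5), format='%.2f')
P.jet()                                            # switch the current image to the jet colormap
P.show()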
Example #2
File: beadDensityPlot.py Project: alecw/TS
def makeContourPlot(score, scores, average, HEIGHT, WIDTH, outputId, maskId, outputdir, barcodeId=-1, vmaxVal=100):
    pylab.bone()
    #majorFormatter = FormatStrFormatter('%.f %%')
    #ax = pylab.gca()
    #ax.xaxis.set_major_formatter(majorFormatter)
    
    pylab.figure()
    ax = pylab.gca()
    ax.set_xlabel('<--- Width = '+str(WIDTH)+' wells --->')
    ax.set_ylabel('<--- Height = '+str(HEIGHT)+' wells --->')
    ax.set_yticks([0,HEIGHT/INCREMENT])
    ax.set_xticks([0,WIDTH/INCREMENT])
    ax.autoscale_view()
    pylab.jet()
    #pylab.contourf(scores, 40,origin="lower")
    
    if vmaxVal=='auto':
        vmaxVal = autoGetVmaxFromAverage(average)
    
    pylab.imshow(scores,vmin=0, vmax=vmaxVal, origin='lower')
    pylab.vmin = 0.0
    pylab.vmax = 100.0
    ticksVal = getTicksForMaxVal(vmaxVal)
    pylab.colorbar(format='%.0f %%',ticks=ticksVal)
    
    string_value1 = getFormatForVal(average) % average
    if(barcodeId!=-1):
        if(barcodeId==0): maskId = "No Barcode Match,"
        else:             maskId = "Barcode Id %d," % barcodeId
    pylab.title(maskId+' Loading Density (Avg ~ '+string_value1+'%)')
    pylab.axis('scaled')
    pylab.axis([0,WIDTH/INCREMENT-1,0,HEIGHT/INCREMENT-1])
    pngFn = outputdir+'/'+outputId+'_density_contour.png'
    pylab.savefig(pngFn)
    print "Plot saved to", pngFn;
Example #3
def makeContourPlot(scores, average, HEIGHT, WIDTH, outputId, maskId, plt_title, outputdir, barcodeId=-1, vmaxVal=100):
    pylab.bone()
    #majorFormatter = FormatStrFormatter('%.f %%')
    #ax = pylab.gca()
    #ax.xaxis.set_major_formatter(majorFormatter)
    
    pylab.figure()
    ax = pylab.gca()
    ax.set_xlabel(str(WIDTH) + ' wells')
    ax.set_ylabel(str(HEIGHT) + ' wells')
    ax.autoscale_view()
    pylab.jet()
    
    pylab.imshow(scores,vmin=0, vmax=vmaxVal, origin='lower')
    pylab.vmin = 0.0
    pylab.vmax = 100.0
    ticksVal = getTicksForMaxVal(vmaxVal)
    pylab.colorbar(format='%.0f %%',ticks=ticksVal)
    print "'%s'" % average
    if(barcodeId!=-1):
        if(barcodeId==0): maskId = "No Barcode Match,"
        else:             maskId = "Barcode Id %d," % barcodeId
    if plt_title != '': maskId = '%s\n%s' % (plt_title,maskId)
    print "Checkpoint A"
    pylab.title('%s Loading Density (Avg ~ %0.f%%)' % (maskId, average))
    pylab.axis('scaled')
    print "Checkpoint B"
    pngFn = outputdir+'/'+outputId+'_density_contour.png'
    print "Try save to", pngFn;
    pylab.savefig(pngFn, bbox_inches='tight')
    print "Plot saved to", pngFn;
Example #4
def main():
    """Show simple use cases for functionality provided by this module."""
    from mpl_toolkits.mplot3d.axes3d import Axes3D
    import pylab
    argv = sys.argv
    if len(argv) != 3:
        print >> sys.stderr, 'usage: python -m pim.sp.gauss size sigma'
        sys.exit(2)
    size = int(argv[1])
    sigma = float(argv[2])
    x, y = numpy.mgrid[-size // 2 + 1:size // 2 + 1,
                       -size // 2 + 1:size // 2 + 1]

    fig = pylab.figure()
    fig.suptitle('Some 2-D Gauss Functions')
    ax = fig.add_subplot(2, 1, 1, projection='3d')
    ax.plot_surface(x,
                    y,
                    fspecial_gauss(size, sigma),
                    rstride=1,
                    cstride=1,
                    linewidth=0,
                    antialiased=False,
                    cmap=pylab.jet())
    ax = fig.add_subplot(2, 1, 2, projection='3d')
    ax.plot_surface(x,
                    y,
                    gaussian2(size, sigma),
                    rstride=1,
                    cstride=1,
                    linewidth=0,
                    antialiased=False,
                    cmap=pylab.jet())
    pylab.show()
    return 0
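
A caveat worth noting: pylab.jet() changes the current colormap and returns None, so cmap=pylab.jet() in the plot_surface calls above actually passes cmap=None, with the colormap switch happening only as a side effect. Calls like imshow(..., cmap=pylab.jet()) elsewhere on this page still render in jet only because of that side effect. To request jet explicitly, pass the colormap itself:

ax.plot_surface(x, y, fspecial_gauss(size, sigma),
                rstride=1, cstride=1, linewidth=0,
                antialiased=False, cmap=pylab.cm.jet)  # or cmap='jet'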
Example #5
File: NISTASD.py Project: atronchi/NISTASD
    def plot(self):
        specs = pl.array( list(set([ l['spec'] for l in self.lines ])) )
        specs.sort()
        self.specs = specs

        pl.figure()
        pl.hold('on')
        pl.grid('on')
        pl.jet()

        lines = [] 
        lines_spec = list(pl.zeros(len(specs)))
        for i in range(0,len(self.lines)):
            ispc = pl.find( specs == self.lines[i]['spec'] )
            self.colr = pl.cm.get_cmap()( float(ispc)/len(specs) )
            wl = self.lines[i]['wave']
            ri = float(self.lines[i]['rel_int'])
            lines.append( pl.plot( [wl, wl], [0., ri if not isnan(ri) else 0.], '.-', color=self.colr )[0] )
            lines_spec[ispc] = lines[-1]
        datacursor(lines,formatter='x={x:8.3f}\ny={y:8.3f}'.format)

        pl.rc('text',usetex=True)
        pl.xlabel(r'$\lambda ~ [\AA]$')
        pl.ylabel('relative intensity [arb]')
        pl.title('Spectrum for '+self.spec+' from NIST ASD')

        if len(specs) > 1:
            pl.legend( lines_spec,specs )

        pl.show()
Example #6
def filter3(url):
    im=image_read(url)
    pylab.imshow(im)
    dnaf = ndimage.gaussian_filter(im, 1)
    T = mahotas.thresholding.otsu(dnaf)
    labeled,nr_objects = ndimage.label(dnaf > T)
    pylab.imshow(labeled)
    pylab.jet()
#     pylab.save("out_filter3.png")
    pylab.show()
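
image_read is project code and mahotas may not be installed; an equivalent sketch of the same blur, Otsu-threshold, label, and display pipeline using only scipy and scikit-image (with a bundled sample image):

import pylab
from scipy import ndimage
from skimage import data
from skimage.filters import threshold_otsu

im = data.coins()                        # bundled grayscale sample image
dnaf = ndimage.gaussian_filter(im, 1)    # smooth to suppress noise
T = threshold_otsu(dnaf)                 # global Otsu threshold
labeled, nr_objects = ndimage.label(dnaf > T)
pylab.imshow(labeled)
pylab.jet()
pylab.show()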
Example #7
File: FDTD.py Project: xj361685640/EMpy
 def viz2D(self, filename, directory_="./", const_dir="z", logplot=False):
     """Visualize a slice."""
     directory = fixdir(directory_)
     data = load_fortran_unformatted(directory + filename)
     if self.param is None:
         self.load_param(directory)
     x = numpy.linspace(
         self.param.dx / 2.0,
         self.param.dx * self.param.mx - self.param.dx / 2.0,
         self.param.mx,
     )
     y = numpy.linspace(
         self.param.dy / 2.0,
         self.param.dy * self.param.my - self.param.dy / 2.0,
         self.param.my,
     )
     z = numpy.linspace(
         self.param.dz / 2.0,
         self.param.dz * self.param.mz - self.param.dz / 2.0,
         self.param.mz,
     )
     if const_dir == "x":
         n1 = self.param.my
         n2 = self.param.mz
         x1 = y
         x2 = z
         x1label = "y"
         x2label = "z"
     elif const_dir == "y":
         n1 = self.param.mx
         n2 = self.param.mz
         x1 = x
         x2 = z
         x1label = "x"
         x2label = "z"
     else:
         n1 = self.param.mx
         n2 = self.param.my
         x1 = x
         x2 = y
         x1label = "x"
         x2label = "y"
     data = data.reshape((n2, n1))
     pylab.clf()
     if logplot:
         data = 20 * numpy.log10(numpy.abs(data).clip(1e-30, 1e30))
         pylab.jet()
     else:
         pylab.hot()
     pylab.contour(x1, x2, data, 64)
     pylab.colorbar()
     pylab.axis("image")
     pylab.xlabel(x1label + " /um")
     pylab.ylabel(x2label + " /um")
     pylab.show()
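
The logplot branch converts field amplitude to decibels, clipping |data| so that log10 stays finite on zeros; a standalone check of that one line:

import numpy as np
data = np.array([0.0, 1e-3, 1.0, 1e3])
print(20 * np.log10(np.abs(data).clip(1e-30, 1e30)))   # [-600.  -60.    0.   60.]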
Example #8
    def plot_plasma(self):

        P.tricontourf(self.rzt[:, 0],
                      self.rzt[:, 1],
                      self.tris,
                      self.beta,
                      1001,
                      zorder=0)
        cticks = P.linspace(0.0, 0.2, 5)
        P.colorbar(ticks=cticks, format='%.2f')
        P.jet()
Example #9
    def plot(self):
        """generate the plot formatting"""
        if self.data is None:
            print("Must load and parse data first")
            sys.exit()
            
        for k,v in self.data.items():
            for type, data in v.items():
                pylab.clf()
                height = int(self.height)
                width = int(self.width)
                pylab.figure()
                ax = pylab.gca()
                ax.set_xlabel('<--- Width = %s wells --->' % str(width))
                ax.set_ylabel('<--- Height = %s wells --->' % str(height))
                ax.set_yticks([0,height/10])
                ax.set_xticks([0,width/10])
                ax.set_yticklabels([0,height])
                ax.set_xticklabels([0,width])
                ax.autoscale_view()
                pylab.jet()
            #color = self.makeColorMap()
            #remove zeros for calculation of average
                flattened = []
                for i in data:
                    for j in i:
                        flattened.append(j)
                flattened = [x for x in flattened if x != 0.0]
                Aver = pylab.average(flattened)
                name = type.replace(" ", "_")
                fave = "%.2f" % Aver
                pylab.title(k.strip().split(" ")[-1] + " Heat Map (Average = "+fave+"%)")
                ticks = None
                vmax = None
                if type == "Region DR":
                    ticks = [0.0,0.2,0.4,0.6,0.8,1.0]
                    vmax = 1.0
                else:
                    ticks = [0.0,0.4,0.8,1.2,1.6,2.0]
                    vmax = 2.0
                    
                pylab.imshow(data, vmin=0, vmax=vmax, origin='lower')
                pylab.colorbar(format='%.2f %%',ticks=ticks)
                pylab.vmin = 0.0
                pylab.vmax = 2.0
            #pylab.colorbar()     

                if self.savePath is None:
                    save = "%s_heat_map_%s.png" % (name,k)
                else:
                    save = path.join(self.savePath,"%s_heat_map_%s.png" % (name,k))
                pylab.savefig(save)
                pylab.clf()
Example #10
def makeRawDataPlot(scores, outputId, outputdir):
    # Writes a png file containing only the data, i.e. no axes, labels, gridlines, ticks, etc.
    pylab.jet()
    fig = pylab.figure()
    ax1 = fig.add_subplot(111)
    ax1.xaxis.set_ticklabels([None])
    ax1.yaxis.set_ticklabels([None])
    ax1.xaxis.set_ticks([None])
    ax1.yaxis.set_ticks([None])
    ax1.imshow(scores,vmin=0, vmax=100, origin='lower')
    pylab.savefig(outputdir+'/'+outputId+'_density_raw.png', dpi=20, transparent=True, bbox_inches='tight', pad_inches=0)
    print "Plot saved to %s" % outputId+'_density_raw.png'
Example #11
def graphScatter(X,
                 Y,
                 Names=['X', 'Y'],
                 TestName='',
                 Legend=[],
                 Caption='',
                 LogX=0,
                 RunningAvg=0,
                 Lines=None,
                 Colors=[None]):
    if not graph: return
    Fig = pylab.figure()
    if len(X) == 6: Fmts = Formats
    else: Fmts = [
        'k>',
        'go',
        'b^',
        'rs',
        'y<',
    ]
    if Colors[0]: pylab.jet()
    else: Colors = [f[0] for f in Fmts]
    #	LegendKey = []
    if Legend:
        for fmt in Fmts:
            pylab.plot([0], [0], fmt)
    if Lines:
        for x1, y1, x2, y2 in Lines:
            pylab.plot([x1, x2], [y1, y2], '-')
    dot = 0.1 * min(max([max(y) for y in Y]), max([max(x) for x in X]))
    for (x, y, color, fmt) in zip(X, Y, Colors, Fmts):
        pylab.scatter(x, y, c=color, marker=fmt[1])
        #		pylab.plot(x, y, fmt)
        if RunningAvg:
            pylab.plot(runningAverage(x, 10), runningAverage(y, 10), '.y-')
            pylab.plot(runningAverage(x, 20), runningAverage(y, 20), '.g-')
        #LegendKey.append(pylab.plot(10, 10, Fmt[0]))
    Axes = pylab.gca()
    if LogX:
        for i in [
                1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 16, 26, 36, 51, 76, 101,
                151, 201
        ]:
            pylab.text(math.log(i), -1.2, str(i - 1))
    adornGraph(
        Title='Scatter: ' + TestName,
        Filename='Scatter_' + TestName,
        Figure=Fig,
        XLabel=Names[0],
        YLabel=Names[1],
        Legend=Legend,
    )
Example #12
def vector_flow(output_file, i_factor=1):
	# DPI resolution of the image to be saved
	dpi = 200

	file_path_x = "example/vector_field_x_0.csv"
	file_path_y = "example/vector_field_y_0.csv"

	vector_field_x = np.loadtxt(file_path_x, delimiter=",")
	vector_field_y = np.loadtxt(file_path_y, delimiter=",")

	x_steps, y_steps = vector_field_x.shape
	
	# Interpolation factor; a value of 1 means no interpolation.
	if i_factor > 1:
		vector_field_x = scipy.ndimage.zoom(vector_field_x, i_factor)
		vector_field_y = scipy.ndimage.zoom(vector_field_y, i_factor)

		x_steps *= i_factor
		y_steps *= i_factor


	# Putting data in the expected format
	vectors = np.zeros((x_steps, y_steps, 2), dtype=np.float32)

	vectors[...,0] += vector_field_y
	vectors[...,1] += vector_field_x
	
	texture = np.random.rand(x_steps,y_steps).astype(np.float32)

	kernellen=20
	kernel = np.sin(np.arange(kernellen)*np.pi/kernellen)
	kernel = kernel.astype(np.float32)

	image = lic_internal.line_integral_convolution(vectors, texture, kernel)
	mag = np.hypot(vector_field_x, vector_field_y)

	plt.jet()
	plt.figure()
	plt.axis('off')
	plt.imshow(texture, interpolation='nearest')
	plt.savefig(output_file+"-texture.png",dpi=dpi)

	
	plt.figure()
	fig = plt.quiver(vector_field_y, vector_field_x, mag)
	plt.colorbar()
	plt.savefig(output_file+".png",dpi=dpi)

	plt.bone()
	fig = plt.imshow(image, interpolation='nearest')
	# plt.colorbar()
	plt.savefig(output_file+"-flow.png",dpi=dpi)
Example #13
def showFit(c, x, y):
    mins = x.min(axis=0)
    maxs = x.max(axis=0)
    mx, my = np.meshgrid(np.linspace(mins[0], maxs[0]), np.linspace(mins[1], maxs[1]))
    z = c.decision_function(np.c_[mx.ravel(), my.ravel()])
    pl.contour(mx, my, z.reshape(mx.shape), [-1, 0, 1])
    
    # plot data
    pos = y == 1
    neg = y == -1
    pl.plot(x[pos,0], x[pos,1], 'r+')
    pl.plot(x[neg, 0], x[neg, 1], 'b.')
    pl.jet()
Example #14
 def showNeuronCorrelations(self):
     l = len(self.neuronDFFs)
     corrMat = np.zeros((l,l))
     for i in range(l):
         for j in range(l):
             mx = np.mean(self.neuronDFFs[i])
             my = np.mean(self.neuronDFFs[j])
             cov = np.sum((self.neuronDFFs[i] - mx)*(self.neuronDFFs[j] - my))
             corr = cov/(sqrt(np.sum((self.neuronDFFs[i] - mx)**2))*sqrt(np.sum((self.neuronDFFs[j] - my)**2)))
             corrMat[i,j] = corr
     plt.imshow(corrMat,interpolation='none')
     plt.jet()
     plt.colorbar()
     plt.show()
Example #15
File: metrics.py Project: ozwin/mpri_labs
def show_confusion_matrix(y_true, y_predicted, title=''):

    # compute confusion matrix
    cm = confusion_matrix(y_true, y_predicted)
    print(cm)
    # configure window
    pl.matshow(cm)
    pl.title(title)
    pl.colorbar()
    pl.ylabel('True label')
    pl.xlabel('Predicted label')
    pl.jet()
    # show confusion matrix plot
    pl.show()
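
A toy invocation, assuming the function above is in scope along with its confusion_matrix dependency (presumably sklearn.metrics.confusion_matrix) and pl (pylab); the labels are invented purely for illustration:

y_true      = [0, 1, 2, 2, 1, 0]
y_predicted = [0, 2, 2, 2, 1, 0]
show_confusion_matrix(y_true, y_predicted, title='toy 3-class example')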
Example #16
File: FDTD.py Project: seghil/EMpy
 def viz2D(self, filename, directory_='./', const_dir='z', logplot=False):
     """Visualize a slice."""
     directory = fixdir(directory_)
     data = load_fortran_unformatted(directory + filename)
     if self.param is None:
         self.load_param(directory)
     x = numpy.linspace(self.param.dx / 2.,
                        self.param.dx * self.param.mx - self.param.dx / 2.,
                        self.param.mx)
     y = numpy.linspace(self.param.dy / 2.,
                        self.param.dy * self.param.my - self.param.dy / 2.,
                        self.param.my)
     z = numpy.linspace(self.param.dz / 2.,
                        self.param.dz * self.param.mz - self.param.dz / 2.,
                        self.param.mz)
     if const_dir == 'x':
         n1 = self.param.my
         n2 = self.param.mz
         x1 = y
         x2 = z
         x1label = 'y'
         x2label = 'z'
     elif const_dir == 'y':
         n1 = self.param.mx
         n2 = self.param.mz
         x1 = x
         x2 = z
         x1label = 'x'
         x2label = 'z'
     else:
         n1 = self.param.mx
         n2 = self.param.my
         x1 = x
         x2 = y
         x1label = 'x'
         x2label = 'y'
     data = data.reshape((n2, n1))
     pylab.clf()
     if logplot:
         data = 20 * numpy.log10(numpy.abs(data).clip(1e-30, 1e30))
         pylab.jet()
     else:
         pylab.hot()
     pylab.contour(x1, x2, data, 64)
     pylab.colorbar()
     pylab.axis('image')
     pylab.xlabel(x1label + ' /um')
     pylab.ylabel(x2label + ' /um')
     pylab.show()
Example #17
def makeFullBleed(scores, outputId, outputdir):
    # Writes a png file containing only the data, i.e. no axes, labels, gridlines, ticks, etc.
    pylab.jet()
    fig = pylab.figure(figsize=(5,5))
    ax1 = fig.add_subplot(111)
    ax1.xaxis.set_ticklabels([None])
    ax1.yaxis.set_ticklabels([None])
    ax1.xaxis.set_ticks([None])
    ax1.yaxis.set_ticks([None])
    ax1.set_frame_on(False)
    ax1.imshow(scores,vmin=0, vmax=100, origin='lower')
    
    for size in (20, 70, 200, 1000):
        fig_path = os.path.join(outputdir, '%s_density_%d.png' % 
            (outputId, size))
        fig.savefig(fig_path, dpi=size/3.875, transparent=True,
            bbox_inches='tight', pad_inches=0)
        print("Full bleed plot saved to %s" % fig_path)
Example #18
def counter(directory, sub_directory, pic_list, results_directory):
    blue_images = dict()
    red_images = dict()

    for pic in pic_list:
        try:
            current_image = str(directory) + '\\' + sub_directory + '\\' + str(pic)
            sky = mh.imread(current_image)
        except IOError:
            current_image = str(directory) + '/' + sub_directory + '/' + str(pic)
            sky = mh.imread(current_image)

        finally:

            t = mh.thresholding.otsu(sky.astype('uint8'))
            labeled, stars = mh.label(sky > t)

            if args.display:
                pylab.imshow(sky)
                pylab.imshow(labeled)
                pylab.jet()
                pylab.show()

            if args.save:
                pylab.imshow(sky)
                pylab.imshow(labeled)
                pylab.jet()
                pylab.savefig("{0}\\{1}_{2}.png".format(results_directory,
                                                        str(sub_directory),
                                                        str(pic)),
                              format='png'
                              )

            color = color_is_red(current_image)
            if color is True:
                red_images[pic] = int(stars)
            elif color is False:
                blue_images[pic] = int(stars)
            else:
                pass

            pylab.close()

    return blue_images, red_images
Example #19
def plotarr2(mat,title):

    #if arr.ndim != 2:
    #    print('arr can only be 2d')
    #    return

    plt.figure()
    plt.imshow(mat,origin='lower',cmap=plt.jet())
    plt.title(title)
    plt.show()
Example #20
File: FDTD.py Project: rshegde/EMpy
 def viz2D(self, filename, directory_="./", const_dir="z", logplot=False):
     """Visualize a slice."""
     directory = fixdir(directory_)
     data = load_fortran_unformatted(directory + filename)
     if self.param is None:
         self.load_param(directory)
     x = numpy.linspace(self.param.dx / 2.0, self.param.dx * self.param.mx - self.param.dx / 2.0, self.param.mx)
     y = numpy.linspace(self.param.dy / 2.0, self.param.dy * self.param.my - self.param.dy / 2.0, self.param.my)
     z = numpy.linspace(self.param.dz / 2.0, self.param.dz * self.param.mz - self.param.dz / 2.0, self.param.mz)
     if const_dir == "x":
         n1 = self.param.my
         n2 = self.param.mz
         x1 = y
         x2 = z
         x1label = "y"
         x2label = "z"
     elif const_dir == "y":
         n1 = self.param.mx
         n2 = self.param.mz
         x1 = x
         x2 = z
         x1label = "x"
         x2label = "z"
     else:
         n1 = self.param.mx
         n2 = self.param.my
         x1 = x
         x2 = y
         x1label = "x"
         x2label = "y"
     data = data.reshape((n2, n1))
     pylab.clf()
     if logplot:
         data = 20 * numpy.log10(numpy.abs(data).clip(1e-30, 1e30))
         pylab.jet()
     else:
         pylab.hot()
     pylab.contour(x1, x2, data, 64)
     pylab.colorbar()
     pylab.axis("image")
     pylab.xlabel(x1label + " /um")
     pylab.ylabel(x2label + " /um")
     pylab.show()
Example #21
def draw_workspace_graph(obs_map, goal, connect_8=False, road_rules=True):
    """Draws the individual policies and cost-to-go field for an
    environment

    obs_map - description of environment.  Matrix with 0 indicating free
              cell, 1 indicating an obstacle
    goal    - [[x1, y1], [x2, y2], ...] joint goal configuration
    """
    graph = workspace_graph.Workspace_Graph(obs_map,
                                            goal,
                                            connect_8=connect_8,
                                            road_rules=road_rules)
    cost_map = [[graph.get_cost((i, j)) for j in range(len(obs_map[0]))]
                for i in range(len(obs_map))]
    temp = []
    for i in cost_map:
        temp.extend(filter(lambda x: x < workspace_graph.MAX_COST, i))
    max_val = max(temp) * 1.05
    for i in cost_map:
        for j in range(len(i)):
            i[j] = min(i[j], max_val)
    pylab.matshow(matrix(cost_map).T)
    pylab.hold(True)
    X = [[0 for i in range(len(obs_map[0]))] for j in range(len(obs_map))]
    Y = [[0 for i in range(len(obs_map[0]))] for j in range(len(obs_map))]
    for i in range(len(obs_map)):
        for j in range(len(obs_map[0])):
            if cost_map[i][j] < max_val:
                pos = graph.get_step((i, j))
                if pos is None:
                    continue
                X[j][i] = (pos[1] - j)
                Y[j][i] = (pos[0] - i)
    pylab.quiver(Y, X)
    sizex = len(obs_map)
    sizey = len(obs_map[0])
    pylab.xlim([-.5, sizex - .5])
    pylab.ylim([-.5, sizey - .5])
    pylab.xticks([])
    pylab.yticks([])
    pylab.jet()
    pylab.hold(False)
    pylab.show()
Example #22
File: gauss.py Project: AlexBoro/python
def main():
    """Show simple use cases for functionality provided by this module."""
    from mpl_toolkits.mplot3d.axes3d import Axes3D
    import pylab
    argv = sys.argv
    if len(argv) != 3:
        print >>sys.stderr, 'usage: python -m pim.sp.gauss size sigma'
        sys.exit(2)
    size = int(argv[1])
    sigma = float(argv[2])
    x, y = numpy.mgrid[-size//2 + 1:size//2 + 1, -size//2 + 1:size//2 + 1]

    fig = pylab.figure()
    fig.suptitle('Some 2-D Gauss Functions')
    ax = fig.add_subplot(2, 1, 1, projection='3d')
    ax.plot_surface(x, y, fspecial_gauss(size, sigma), rstride=1, cstride=1, 
                    linewidth=0, antialiased=False, cmap=pylab.jet())
    ax = fig.add_subplot(2, 1, 2, projection='3d')
    ax.plot_surface(x, y, gaussian2(size, sigma), rstride=1, cstride=1, 
                    linewidth=0, antialiased=False, cmap=pylab.jet())
    pylab.show()
    return 0
Example #23
    def _plot(self):
##         pylab.clf()
        
##         ## Added garbage collection since matplotlib objects seem to hang
##         ## around and accumulate.
##         import gc
##         gc.collect()

        mesh = self.vars[0].getMesh()
        shape = mesh.getShape()
        X, Y = mesh.getCellCenters()
        Z = self.vars[0].getValue()
        X, Y, Z = [v.reshape(shape, order="FORTRAN") for v in (X, Y, Z)]

        zmin, zmax = self._autoscale(vars=self.vars,
                                     datamin=self._getLimit(('datamin', 'zmin')),
                                     datamax=self._getLimit(('datamax', 'zmax')))

        numberOfContours = 10
        smallNumber = 1e-7
        diff = zmax - zmin
        
        if diff < smallNumber:            
            V = numerix.arange(numberOfContours + 1) * smallNumber / numberOfContours + zmin
        else:
            V = numerix.arange(numberOfContours + 1) * diff / numberOfContours + zmin

        import pylab
        pylab.jet()

        pylab.contourf(X, Y, Z, V)

        pylab.xlim(xmin=self._getLimit('xmin'),
                   xmax=self._getLimit('xmax'))

        pylab.ylim(ymin=self._getLimit('ymin'),
                   ymax=self._getLimit('ymax'))
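
The V computation above builds numberOfContours + 1 evenly spaced levels, substituting a tiny artificial span when the field is nearly flat so contourf still receives distinct levels. An illustrative standalone version (plain numpy rather than the numerix module used above):

import numpy as np

def contour_levels(zmin, zmax, n=10, eps=1e-7):
    # pad the span when the field is (nearly) constant, as the code above does
    span = zmax - zmin
    if span < eps:
        span = eps
    return zmin + np.arange(n + 1) * span / n

print(contour_levels(0.0, 1.0))   # regular case: 0.0, 0.1, ..., 1.0
print(contour_levels(0.5, 0.5))   # flat field: levels span a tiny artificial range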
Example #24
def main():
    arps_mo = ARPSModelObsFile("/caps1/tsupinie/3km-fixed-radar/KCYSan014400", (2, 12))
    python_mo = pickle.load(open("3km-fixed-radar/eomean.pkl014400", 'rb'))

    grid = goshen_3km_grid()#bounds=(slice(242, 327), slice(199, 284)))
    radar_x, radar_y = grid(arps_mo._radar_lon, arps_mo._radar_lat)
    print(radar_x, arps_mo._radar_x)
    print(radar_y, arps_mo._radar_y)
    xs, ys = grid.getXY()
    bounds = grid.getBounds()

    Re = 6371000.
    range = np.hypot(xs - radar_x, ys - radar_y)
    slant_range = Re * np.tan(range / Re)
    range_mask = (range < arps_mo._range_max) & (range > arps_mo._range_min)

    height_error = arps_mo._height[0][bounds] - coneHeight(range, arps_mo._elev_angles[0] + 0.06, 1867.)
    height_error_masked = np.where(range < arps_mo._range_max - 12000, height_error, np.nan)
    print(np.nanmin(height_error_masked), np.nanmax(height_error_masked))

    pylab.figure(figsize=(12,6))

    pylab.subplot(121)
    pylab.pcolormesh(xs, ys, arps_mo['Z'][0][bounds], vmin=10, vmax=80, cmap=pylab.jet())
#   pylab.pcolormesh(xs, ys, height_error_masked, vmin=-200, vmax=200, cmap=matplotlib.cm.get_cmap('RdBu'))
    pylab.colorbar()
    pylab.title("ARPS")
    grid.drawPolitical()

    pylab.subplot(122)
    pylab.pcolormesh(xs, ys, python_mo['Z'][0, 0][bounds], vmin=10, vmax=80, cmap=matplotlib.cm.get_cmap('jet'))
    pylab.title("Python")
    grid.drawPolitical()

    pylab.savefig("model_obs_comparison.png")
    pylab.close()

    pickle.dump(arps_mo._height, open("beam_height.pkl", 'wb'), -1)

    return
Example #25
    # construct individual nodes
    reservoir = ReservoirNode_lyapunov(input_size, 100, lyapunov_skip=100)
    readout = Oger.nodes.RidgeRegressionNode()

    # build network with MDP framework
    flow = mdp.Flow([reservoir, readout])

    # Nested dictionary
    gridsearch_parameters = {reservoir:{'input_scaling': mdp.numx.arange(0.1, 2.2, .7), 'spectral_radius':mdp.numx.arange(0.1, 2.2, .7)}}

    # Instantiate an optimizer
    opt = Oger.evaluation.Optimizer(gridsearch_parameters, Oger.utils.nrmse)

    # Do the grid search
    opt.grid_search(data, flow, cross_validate_function=Oger.evaluation.train_test_only, training_fraction=.9)

    # Plot the maximal LLE for each parameter setting
    lle_max = mdp.numx.zeros(opt.paramspace_dimensions)
    for i in range(opt.paramspace_dimensions[0]):
        for j in range(opt.paramspace_dimensions[1]):
            lle_max[i, j] = mdp.numx.amax(mdp.numx.mean(mdp.numx.array(opt.probe_data[i, j][reservoir]), 0))

    pylab.figure()
    pylab.imshow(mdp.numx.flipud(lle_max), cmap=pylab.jet(), interpolation='nearest', aspect="auto", extent=opt.get_extent(opt.parameters))
    pylab.ylabel('Spectral Radius')
    pylab.xlabel('Input scaling')
    pylab.suptitle('Max LLE')
    pylab.colorbar()
    pylab.show()
Example #26
    }

    # Instantiate an optimizer
    opt = Oger.evaluation.Optimizer(gridsearch_parameters, Oger.utils.nrmse)

    # Do the grid search
    opt.grid_search(data,
                    flow,
                    cross_validate_function=Oger.evaluation.train_test_only,
                    training_fraction=.9)

    # Plot the maximal LLE for each parameter setting
    lle_max = mdp.numx.zeros(opt.paramspace_dimensions)
    for i in range(opt.paramspace_dimensions[0]):
        for j in range(opt.paramspace_dimensions[1]):
            lle_max[i, j] = mdp.numx.amax(
                mdp.numx.mean(mdp.numx.array(opt.probe_data[i, j][reservoir]),
                              0))

    pylab.figure()
    pylab.imshow(mdp.numx.flipud(lle_max),
                 cmap=pylab.jet(),
                 interpolation='nearest',
                 aspect="auto",
                 extent=opt.get_extent(opt.parameters))
    pylab.ylabel('Spectral Radius')
    pylab.xlabel('Input scaling')
    pylab.suptitle('Max LLE')
    pylab.colorbar()
    pylab.show()
Example #27
def main(path, marked_path=None):
    # images multiscale
    imgs_mscale = try_pickle_load(path)
    n_scales = len(imgs_mscale)
    imgs_s0 = imgs_mscale[0]  # scale 1
    image_shape = (imgs_s0.shape[2], imgs_s0.shape[3])

    images_to_show = min(IMAGES_TO_SHOW, len(imgs_s0))

    print "Images shape", imgs_s0.shape
    print "Number of images to show", images_to_show
    print "Number of scales", n_scales
    print "Requested image shape will be", image_shape
    n_rows = (1 + n_scales) * 2

    perturbed_imgs = [
        np.empty((images_to_show, imgs.shape[1], imgs.shape[2], imgs.shape[3]))
        for imgs in imgs_mscale
    ]
    perturbed_marks = None
    if marked_path is not None:
        marked_imgs = try_pickle_load(marked_path)
        perturbed_marks = np.empty(
            (images_to_show, marked_imgs.shape[1], marked_imgs.shape[2]))

    for i in range(images_to_show):
        imgs_to_perturb = [img[i] for img in imgs_mscale]
        # if we loaded markings, add marked image to list of imgs to perturb
        if perturbed_marks is not None:
            imgs_to_perturb.append(marked_imgs[i])

        ret_list = perturb_image(imgs_to_perturb, image_shape)
        for n_scale in range(n_scales):
            perturbed_imgs[n_scale][i] = ret_list[n_scale]

        if perturbed_marks is not None:
            perturbed_marks[i] = ret_list[n_scales]

    for i, imgs in enumerate(imgs_mscale):
        for j in range(images_to_show):
            pylab.subplot(n_rows, images_to_show, i * images_to_show + j + 1)
            pylab.axis('off')
            pylab.imshow(imgs[j, CHANNEL, :, :])
            pylab.gray()  # set colormap

    for ind, imgs in enumerate(perturbed_imgs):
        i = n_scales + ind
        for j in range(images_to_show):
            pylab.subplot(n_rows, images_to_show, i * images_to_show + j + 1)
            pylab.axis('off')
            pylab.imshow(imgs[j, CHANNEL, :, :])
            pylab.gray()

    if perturbed_marks is not None:
        for j in range(images_to_show):
            pylab.subplot(n_rows, images_to_show,
                          (2 * n_scales + 0) * images_to_show + j + 1)
            pylab.axis('off')
            pylab.imshow(marked_imgs[j, :, :])
            pylab.jet()

            pylab.subplot(n_rows, images_to_show,
                          (2 * n_scales + 1) * images_to_show + j + 1)
            pylab.axis('off')
            pylab.imshow(perturbed_marks[j, :, :])
            pylab.jet()

    pylab.show()
Example #28
def schlieren_entry():
    """
    console script to apply schlieren routine to xyp files
    """
    usage = """ %prog [options] initial.xyp final.xyp
            for 'xz' images
        %prog [options] timeseries.xyp
            for 'tz' images"""
    parser = OptionParser(usage=usage, 
             version = "%prog (igwtools " + __version__ + ")")
    parser.add_option("-d", action = "store_true", dest = "display",
                      help = "displays images for debugging purposes")
    parser.add_option("-v", "--verbose", 
                      action = "store_true", dest = "verbose",
                      help = "be verbose and output settings used")
    parser.add_option("-T", "--mintol", 
                      dest="mintol", type="float", default = 10,
                      help = "Minimum intensity difference between pixels for computation [default: %default]")
    parser.add_option("--fast", action="store_true", default=False,
                      help = "use experimental 'fast' getdelz calculation")
    parser.add_option("-t", "--dt", type="float",
                      help = "Time difference between frames")
    parser.add_option("-n", "--ndt", type="int", default = 2,
                      help = "Take time difference between how many pixels [default : %default]")
    parser.add_option("-s", "--sigma", dest="sigma", 
                      type="float", default=0.3,
                      help = "Distance over which to fill in uncomputed points [default: %default]")
    parser.add_option("-M", "--mode", default="dn2t",
                      help = "Schlieren mode (dz, dzt, dn2, dn2t) [default: %default]")
    parser.add_option("-o", "--outputfile",
                      help = "Name of output file (output saved only if supplied)")
    parser.add_option("-f", "--force", action="store_true",
                      help = "Force overwriting of anyexisting view")

    parser.add_option("--nofill", action = "store_true",
                      help = "do not fill-in values that were not computed by interpolation by Gaussian average")
    parser.add_option("--nosmooth", action = "store_true",
                      help = "do not smooth with uniform average filter")

    parser.add_option("--Ls", type="float", default = 24.0,
            help = "Distance from back of tank to screen [default: %default]")
    parser.add_option("--Lt", type="float", default = 17.5,
            help = "Width of tank [default: %default]")
    parser.add_option("--Lp", type="float", default = 1.0,
            help = "Width of tank walls [default: %default]")
    parser.add_option("--Lc", type="float", default = 330.0,
            help = "Distance from fron of tank to camera [default: %default]")


    # look up defaults from environment
    default_database = os.getenv("IGWDB")
    default_experiment = os.getenv("IGWEXPT")

    #OptionGroup here
    parser.add_option("-D", "--database", default= default_database,
                       help = "Database name [default: %default]")
    parser.add_option("-e", "--experiment", default=default_experiment,
                       help = "Experiment name [default: %default]")
    parser.add_option("-L", "--load",
                      help = "load input from view")
    parser.add_option("-S", "--save",
                      help = "save results as view")

    (options, args) = parser.parse_args()

    # Infer whether input is xz or tz images based on number of arguments
    if len(args) == 1: # timeseries.xyp
        options.imagetype = "tz"
    elif len(args) == 2: # initial.xyp final.xyp
        options.imagetype = "xz"
    elif options.load != None:
        # load experiment
        if options.experiment[0] != '/':
            options.experiment = '/' + options.experiment
        h5file = tables.openFile(options.database, mode="r")
        valid_expt = h5file.__contains__(options.experiment)
        h5file.close()
        if valid_expt:
            expt = tools.Experiment(options.database, options.experiment)
        else:
            print(options.experiment, "not found in", options.database)
            return
        field = expt.load_view(options.load)

        if field is None:
            print("View", options.load, "not found!")

        field = schlieren_field(field, options=options)

        if options.save != None:
            if options.verbose:
                print("saving view...")
            expt.save_view(field, options.save, force=options.force)
        else:
            print("use --save option to store result!")

        expt.close()

        return
    else:
        parser.error("incorrect number of arguments")

    if options.mode not in ['dz', 'dzt', 'dn2', 'dn2t', 'qualitative']:
        parser.error("invalid schlieren mode: %s" % options.mode)
 
    import xplot
    # read in data from xyp files
    if options.verbose:
        print('reading in XYP files...')
    if options.imagetype == "xz":
        reference_image = xplot.readXYplot(args[0], orientation='xz')
        image = xplot.readXYplot(args[1], orientation='xz')
        options.dxt = image.dx
    else: # tz
        image = xplot.readXYplot(args[0], orientation='tz')
        options.dxt = image.dt
        print(image.shape)

    options.xmin = image.xmin
    options.xmax = image.xmax
    options.zmin = image.zmin
    options.zmax = image.zmax
    options.tmin = image.tmin
    options.tmax = image.tmax

    if ((options.mode == 'dzt') or (options.mode == 'dn2t')):
        if options.imagetype == 'x':
            if options.dt == None:
                parser.error("mode %s and 'xz' images requires --dt option" % \
                             options.mode)
        else:
            options.dt = options.ndt * image.dt

    if options.imagetype == "xz":
        output = schlieren(reference_image = reference_image, 
                           image = image, options = options)
    else:
        output = schlieren(image=image, options=options)

    # save output
    if(options.outputfile != None):
        if options.verbose:
            print('saving results in xyp file')
        output.save(options.outputfile)

    if options.display:
        # show results
        vmax = 2.5*numpy.std(output)
        if options.verbose:
            print('plotting result')
            print('    vmin = %.3f' % -vmax, 'vmax = %.3f' % vmax)
        pylab.figure()
        output.plot(interpolation = 'bicubic', vmin=-vmax, vmax=vmax)
        pylab.colorbar()
        pylab.jet()
        pylab.title(options.mode)

        pylab.show()
Example #29
def clickScat(array2d,
              array3d,
              xScat=None,
              xerror3d=None,
              yerror3d=None,
              array3d2=None,
              xerror3d2=None,
              yerror3d2=None,
              fn=None,
              xMap=None,
              yMap=None,
              modelError=False,
              ylimScat=None):
    """
    figureHandles=clickScat(array2d, array3d, xScat=None, xerror3d=None, yerror3d=None, array3d2=None, xerror3d2=None, yerror3d2=None, fn=None, xMap=None, yMap=None):
    xScat: x-axis variable for the scatter plot. Must have the same length as the last dimension of array3d (array3d.shape[2]).
    xerror3d: two-sided error bars for the x-axis.
    fn:'annual'
    """
    import insar
    dateaxis = False
    if xScat is None:
        xScat = np.r_[0:array3d.shape[2]]
    elif isinstance(xScat[0], P.matplotlib.dates.datetime.date):
        xScat = P.matplotlib.dates.date2num(xScat)
        dateaxis = True

    def onclick(event):
        P.figure(fh.number)
        P.clf()
        #ax = P.gca()
        #inv = ax.transData.inverted()
        #A=inv.transform((event.x,  event.y))
        #A[1]=np.int(np.round((1-A[1])*array2d.shape[1]))
        #A[0]=np.int(np.round((A[0])*array2d.shape[0]))
        try:
            y = np.round(event.xdata)
        except:
            return
        x = np.round(event.ydata)
        #ARRAY MAPPING IS first axis y(rows) and second axis is cols (x)
        if all(np.isnan(array3d[x, y, :])):
            #if there are no points to plot (all nan) then return
            return

        #Plot second scatter data.
        if array3d2 is not None:
            if isinstance(array3d2, list):
                if yerror3d is None:
                    w = np.ones(array3d[x, y, :].shape)
                else:
                    w = basic.rescale(1. / yerror3d[x, y, :], [1, 2])
                markers = ['*', '+', 's', 'd', 'x', 'v', '<', '>', '^']
                m = 0
                for arr in array3d2:
                    print("%d, %d, %d" % (x, y, m))
                    P.scatter(xScat, arr[x, y, :], marker=markers[m])
                    idx = ~(np.isnan(arr[x, y, :])
                            | np.isnan(array3d[x, y, :]))
                    #c=insar.crosscorrelate(basic.nonan(w[idx]*arr[x, y,idx]),basic.nonan(w[idx]*array3d[x, y,idx]))
                    slope, intercept, r_value, p_value, std_err = scipy.stats.linregress(
                        basic.nonan(w[idx] * arr[x, y, idx]),
                        basic.nonan(w[idx] * array3d[x, y, idx]))
                    P.annotate(str("r2[%s]: %0.2f" % (markers[m], r_value)),
                               (0, 0.9 - m * 0.05),
                               xycoords='axes fraction')
                    m = m + 1
            else:
                if xerror3d2 is None:
                    xerr = None
                else:
                    xerr = xerror3d2[x, y, :]
                if yerror3d2 is None:
                    yerr = None
                else:
                    yerr = yerror3d2[x, y, :]
                P.errorbar(xScat,
                           array3d2[x, y, :],
                           xerr=xerr,
                           yerr=yerr,
                           marker='*',
                           fmt='o')

        #Plot function result as scatter data.
        p = None
        if fn is not None:
            if fn == 'linear_amplitude_annual':
                dataMask = ~np.isnan(array3d[x, y, :])
                p0 = np.array([1, 0, 0, basic.nonan(array3d[x, y, :]).mean()])
                fitfun = lambda p: (p[0] + p[1] * xScat[dataMask] / 365.
                                    ) * np.cos(2 * np.pi * xScat[dataMask] /
                                               365. + p[2]) + p[3]
                xScat2 = np.linspace(xScat.min(), xScat.max())
                fitfun2 = lambda p: (p[0] + p[1] * xScat2 / 365.) * np.cos(
                    2 * np.pi * xScat2 / 365. + p[2]) + p[3]
                #errfun=lambda p: sum(abs(basic.nonan(array3d[x, y,:])-fitfun(p)));
                if yerror3d is None:
                    w = np.ones(array3d[x, y, :].shape)
                else:
                    w = basic.rescale(1. / yerror3d[x, y, :], [1, 2])
                errfun = lambda p: basic.nonan(w * array3d[x, y, :]) - w[
                    dataMask] * fitfun(p)
                #p=scipy.optimize.fmin_powell(errfun, p0)
                p = scipy.optimize.leastsq(errfun, p0)
                p = p[0]
                P.scatter(xScat[dataMask], fitfun(p), marker='^')
                sortedxy = np.squeeze(np.dstack([xScat2, fitfun2(p)]))
                sortedxy = sortedxy[sortedxy[:, 0].argsort(), :]
                P.plot(sortedxy[:, 0], sortedxy[:, 1])
                slope, intercept, r_value, p_value, std_err = scipy.stats.linregress(
                    basic.nonan(w * array3d[x, y, :]), w[dataMask] * fitfun(p))
                P.annotate(
                    str("a0:%0.2f\na1:%0.2f\npha:%0.2f\nbias:%0.2f\nr2:%0.2f" %
                        (p[0], p[1], p[2], p[3], r_value**2.)), (0.8, 0.8),
                    xycoords='axes fraction')
            elif fn == 'quadratic_amplitude_annual':
                dataMask = ~np.isnan(array3d[x, y, :])
                p0 = np.array(
                    [1, 0, 0, 0,
                     basic.nonan(array3d[x, y, :]).mean()])
                fitfun = lambda p: (p[0] + p[1] * xScat[dataMask] / 365. + p[
                    2] * (xScat[dataMask] / 365.)**2.) * np.cos(
                        2 * np.pi * xScat[dataMask] / 365. + p[3]) + p[4]
                xScat2 = np.linspace(xScat.min(), xScat.max())
                fitfun2 = lambda p: (p[0] + p[1] * xScat2 / 365. + p[2] * (
                    xScat2 / 365.)**2.) * np.cos(2 * np.pi * xScat2 / 365. + p[
                        3]) + p[4]
                #errfun=lambda p: sum(abs(basic.nonan(array3d[x, y,:])-fitfun(p)));
                if yerror3d is None:
                    w = np.ones(array3d[x, y, :].shape)
                else:
                    w = basic.rescale(1. / yerror3d[x, y, :], [1, 2])
                errfun = lambda p: basic.nonan(w * array3d[x, y, :]) - w[
                    dataMask] * fitfun(p)
                #p=scipy.optimize.fmin_powell(errfun, p0)
                p = scipy.optimize.leastsq(errfun, p0)
                p = p[0]
                P.scatter(xScat[dataMask], fitfun(p), marker='^')
                sortedxy = np.squeeze(np.dstack([xScat2, fitfun2(p)]))
                sortedxy = sortedxy[sortedxy[:, 0].argsort(), :]
                P.plot(sortedxy[:, 0], sortedxy[:, 1])
                slope, intercept, r_value, p_value, std_err = scipy.stats.linregress(
                    basic.nonan(w * array3d[x, y, :]), w[dataMask] * fitfun(p))
                P.annotate(str(
                    "a0:%0.2f\na1:%0.2f\na2:%0.2f\npha:%0.2f\nbias:%0.2f\nr2:%0.2f"
                    % (p[0], p[1], p[2], p[3], p[4], r_value**2.)), (0.8, 0.8),
                           xycoords='axes fraction')

            elif fn == 'annual':
                dataMask = ~np.isnan(array3d[x, y, :])
                p0 = np.array([1, 1, basic.nonan(array3d[x, y, :]).mean()])
                fitfun = lambda p: p[0] * np.cos(2 * np.pi * xScat[dataMask] /
                                                 365. + p[1]) + p[2]
                xScat2 = np.linspace(xScat.min(), xScat.max())
                fitfun2 = lambda p: p[0] * np.cos(2 * np.pi * xScat2 / 365. +
                                                  p[1]) + p[2]
                #errfun=lambda p: sum(abs(basic.nonan(array3d[x, y,:])-fitfun(p)));
                if yerror3d is None:
                    w = np.ones(array3d[x, y, :].shape)
                else:
                    w = basic.rescale(1. / yerror3d[x, y, :], [1, 2])
                errfun = lambda p: basic.nonan(w * array3d[x, y, :]) - w[
                    dataMask] * fitfun(p)
                #p=scipy.optimize.fmin_powell(errfun, p0)
                p = scipy.optimize.leastsq(errfun, p0)
                p = p[0]
                P.scatter(xScat[dataMask], fitfun(p), marker='^')
                sortedxy = np.squeeze(np.dstack([xScat2, fitfun2(p)]))
                sortedxy = sortedxy[sortedxy[:, 0].argsort(), :]
                P.plot(sortedxy[:, 0], sortedxy[:, 1])
                slope, intercept, r_value, p_value, std_err = scipy.stats.linregress(
                    basic.nonan(w * array3d[x, y, :]), w[dataMask] * fitfun(p))
                P.annotate(str("amp:%0.2f\npha:%0.2f\nbias:%0.2f\nr2:%0.2f" %
                               (p[0], p[1], p[2], r_value**2.)), (0.8, 0.8),
                           xycoords='axes fraction')
            else:
                p = None
                P.scatter(xScat, fn(xScat), marker='^')
        #convert axis to date...
        if dateaxis:
            try:
                P.figure(fh.number).axes[0].xaxis_date(tz=None)
                P.figure(fh.number).autofmt_xdate()
            except:
                pass
        #change x y to xMap, yMap
        if yMap is not None:
            xM = ya * x + yb
        else:
            xM = x
        if xMap is not None:
            yM = xa * (y) + xb
        else:
            yM = y
        #x and y are flipped in the try/except block above. So Flip again.
        #if p is not None:
        #    P.title("x,y,[]: " + str(yM) + ", " + str(xM) + ', ' + str(p) )
        #else:
        P.title("x,y,z,z.std: " + str(yM) + ", " + str(xM) + ', ' +
                str(array2d[x, y]) + ', ' +
                str(np.std(basic.nonan(array3d[x, y, :]))))

        # rotate and align the tick labels so they look better
        #P.figure(fh.number).autofmt_xdate()
        # use a more precise date string for the x axis locations in the
        # toolbar
        #P.gca().fmt_xdata = mdates.DateFormatter('%Y-%m-%d')

        if xerror3d is None:
            xerr = None
        else:
            xerr = xerror3d[x, y, :]
        if yerror3d is None:
            yerr = None
        else:
            yerr = yerror3d[x, y, :]
        if modelError:
            yerr = yerror3d[x, y, :]
            yerr[dataMask] = errfun(p)

        P.errorbar(xScat, array3d[x, y, :], xerr=xerr, yerr=yerr, fmt='ro')
        if ylimScat is not None:
            P.ylim(ylimScat)
        ##################################
        ## END OF PLOTTING
        ##################################

    s = array2d[~np.isnan(array2d)].std()
    m = array2d[~np.isnan(array2d)].mean()
    fig = P.figure()
    ax = fig.add_subplot(111)
    ax.matshow(array2d, vmin=m - s, vmax=m + s)
    #fig=P.figure();ax=fig.add_subplot(111);ax.matshow(basic.wrapToInt(array2d, s), vmin=-s, vmax=s);
    if xMap is not None:
        ticks = ax.get_xticks()
        (xa, xb) = np.polyfit(np.r_[0:len(xMap)], xMap, 1)
        ax.set_xticklabels(np.around(xa * ticks + xb, 4))
    if yMap is not None:
        ticks = ax.get_yticks()
        (ya, yb) = np.polyfit(np.r_[len(yMap):0:-1], yMap, 1)
        ax.set_yticklabels(np.around(ya * ticks + yb, 4))

    #P.colorbar();
    cax, kw = P.matplotlib.colorbar.make_axes(ax, orientation='vertical')
    P.matplotlib.colorbar.ColorbarBase(cax,
                                       cmap=P.jet(),
                                       norm=P.normalize(vmin=m - s,
                                                        vmax=m + s),
                                       orientation='vertical')
    fh = P.figure()
    #should be accessible in child function?
    fig.canvas.mpl_connect('button_press_event', onclick)
    return (fig, fh)
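
Stripped of the fitting machinery, the interactive core of clickScat is matplotlib's event API: draw a matrix, register a button_press_event callback, and read event.xdata/event.ydata inside it. A minimal sketch on synthetic data:

import numpy as np
import pylab as P

arr = np.random.rand(20, 20)
fig = P.figure()
ax = fig.add_subplot(111)
ax.matshow(arr)

def onclick(event):
    # xdata/ydata are None when the click falls outside the axes
    if event.xdata is not None:
        print('clicked at x=%.1f, y=%.1f' % (event.xdata, event.ydata))

fig.canvas.mpl_connect('button_press_event', onclick)
P.show()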
Example #30
import numpy as np
import pylab
import sys
from skimage import io
from skimage import color
from scipy import ndimage
from matplotlib import pyplot
import project_helpers as ph
from sklearn.multiclass import OneVsRestClassifier
from sklearn.svm import SVC
from sklearn import svm
from sklearn.preprocessing import LabelBinarizer
from sklearn.ensemble import RandomForestClassifier
#############
# This tutorial walks through various functions used for the project
# and some of the concepts covered in lecture.
##############
# Flag that indicates whether to show images.
VIEW = False
# Set the path to the data files.
data_path = "crchistophenotypes_2016_04_28/CRCHistoPhenotypes_2016_04_28/"
#############
#Basic image I/O
##############
#Load the ground truth labels
#Detections are stored as [x,y] and labels are positive for detected nuclei centres
img, detections, labels = ph.load_data_set(data_path,"img7")
pylab.imshow(img)
if(VIEW):
    pyplot.show()
Example #31
import numpy as np
import pylab
import sys
from skimage import io
from skimage import color
from scipy import ndimage
from matplotlib import pyplot
import project_helpers as ph
from sklearn.multiclass import OneVsRestClassifier
from sklearn.svm import SVC
from sklearn import svm
from sklearn.preprocessing import LabelBinarizer
from sklearn.ensemble import RandomForestClassifier
#############
# This tutorial walks through various functions used for the project
# and some of the concepts covered in lecture.
##############
# Flag that indicates whether to show images.
# When True you will need to close each window after you've looked at the output to continue
VIEW = False

# Set the path to the data files.
data_path = "crchistophenotypes_2016_04_28/CRCHistoPhenotypes_2016_04_28/"

#############
#Basic image I/O
##############

#Load the ground truth labels
#Detections are stored as [x,y] and labels are positive for detected nuclei centres
Example #32
    def disp(self,sampler,interp_type_during_visualization):
        level=sampler.level    
        theta=sampler.theta_current
        tw=self.tw
#        interval=self.interval
#        interval_dense=self.interval_dense
        markersize = 5
        fontsize=30
        cpa_space = tw.ms.L_cpa_space[level]            
        plt.subplot(231)
        sampler.plot_ll()
        plt.title('ll',fontsize=fontsize)
        sampler.plot_wlp()
        sampler.plot_wlp_plus_ll()
        if sampler.lp_func:         
            plt.legend(['ll','wlp','ll+wlp'])
        
        plt.subplot(232)
        sampler.plot_ar()
        plt.title('accept ratio',fontsize=fontsize)
         
#        print theta
        cpa_space.theta2As(theta=theta)
        tw.update_pat_from_Avees(level=level)          
        tw.calc_v(level=level)    
        tw.v_dense.gpu2cpu()     
    
        src = self.src
#        dst = self.dst
        transformed = self.transformed
        
#        src_dense=self.src_dense
#        transformed_dense=self.transformed_dense
#        tw.calc_T(src_dense, transformed_dense, mysign=1, level=level, 
#        
#        transformed_dense.gpu2cpu()

        tw.calc_T_inv(src, transformed,  level=level, 
                  int_quality=+1)            
        transformed.gpu2cpu()
        
        if interp_type_during_visualization=='gpu_linear':
            my_dtype = np.float64
        else:
            my_dtype = np.float32 # For opencv
        
        img_src = self.signal.src.cpu.reshape(tw.nRows,tw.nCols)
        img_src = CpuGpuArray(img_src.astype(my_dtype))  
        img_wrapped = CpuGpuArray.zeros_like(img_src)

        img_dst = self.signal.dst.cpu.reshape(tw.nRows,tw.nCols)
        img_dst = CpuGpuArray(img_dst)         
        
                
        if interp_type_during_visualization=='gpu_linear':
            tw.remap_fwd(transformed,img_src,img_wrapped)
        else:
            tw.remap_fwd_opencv(transformed,img_src,img_wrapped,interp_type_during_visualization)
        img_wrapped.gpu2cpu()
             
        plt.subplot(233)   
        plt.imshow(img_src.cpu,interpolation="None")
        plt.gray()
        cpa_space.plot_cells('r')
        tw.config_plt(axis_on_or_off='on')
        plt.title(r'$I_{\mathrm{src}}$')

        
                
        
        plt.subplot(234)   
        plt.imshow(img_wrapped.cpu,interpolation="None")
        plt.gray()
#        cpa_space.plot_cells('w')
        tw.config_plt(axis_on_or_off='on')
        plt.title(r'$I_{\mathrm{src}}\circ T^{\theta}$')
        
        plt.subplot(235)   
        plt.imshow(img_dst.cpu,interpolation="None")
        plt.gray()
        plt.title(r'$I_{\mathrm{dst}}$')
        
#        cpa_space.plot_cells('w')
        tw.config_plt(axis_on_or_off='on')
        
        plt.subplot(2,6,11)
        self.tw.imshow_vx()
        pylab.jet()
        tw.config_plt(axis_on_or_off='on')
        plt.title(r'$v_x$')
        plt.subplot(2,6,12)
        self.tw.imshow_vy()
        pylab.jet()
        tw.config_plt(axis_on_or_off='on')
        plt.title(r'$v_y$')
Example #33
def schlieren_entry():
    """
    console script to apply schlieren routine to xyp files
    """
    usage = """ %prog [options] initial.xyp final.xyp
            for 'xz' images
        %prog [options] timeseries.xyp
            for 'tz' images"""
    parser = OptionParser(usage=usage, 
             version = "%prog (igwtools " + __version__ + ")")
    parser.add_option("-d", action = "store_true", dest = "display",
                      help = "displays images for debugging purposes")
    parser.add_option("-v", "--verbose", 
                      action = "store_true", dest = "verbose",
                      help = "be verbose and output settings used")
    parser.add_option("-T", "--mintol", 
                      dest="mintol", type="float", default = 10,
                      help = "Minimum intensity difference between pixels for computation [default: %default]")
    parser.add_option("--fast", action="store_true", default=False,
                      help = "use experimental 'fast' getdelz calculation")
    parser.add_option("-t", "--dt", type="float",
                      help = "Time difference between frames")
    parser.add_option("-n", "--ndt", type="int", default = 2,
                      help = "Take time difference between how many pixels [default : %default]")
    parser.add_option("-s", "--sigma", dest="sigma", 
                      type="float", default=0.3,
                      help = "Distance over which to fill in uncomputed points [default: %default]")
    parser.add_option("-M", "--mode", default="dn2t",
                      help = "Schlieren mode (dz, dzt, dn2, dn2t) [default: %default]")
    parser.add_option("-o", "--outputfile",
                      help = "Name of output file (output saved only if supplied)")
    parser.add_option("-f", "--force", action="store_true",
                      help = "Force overwriting of anyexisting view")

    parser.add_option("--nofill", action = "store_true",
                      help = "do not fill-in values that were not computed by interpolation by Gaussian average")
    parser.add_option("--nosmooth", action = "store_true",
                      help = "do not smooth with uniform average filter")

    parser.add_option("--Ls", type="float", default = 24.0,
            help = "Distance from back of tank to screen [default: %default]")
    parser.add_option("--Lt", type="float", default = 17.5,
            help = "Width of tank [default: %default]")
    parser.add_option("--Lp", type="float", default = 1.0,
            help = "Width of tank walls [default: %default]")
    parser.add_option("--Lc", type="float", default = 330.0,
            help = "Distance from fron of tank to camera [default: %default]")


    # look up defaults from environment
    default_database = os.getenv("IGWDB")
    default_experiment = os.getenv("IGWEXPT")

    #OptionGroup here
    parser.add_option("-D", "--database", default= default_database,
                       help = "Database name [default: %default]")
    parser.add_option("-e", "--experiment", default=default_experiment,
                       help = "Experiment name [default: %default]")
    parser.add_option("-L", "--load",
                      help = "load input from view")
    parser.add_option("-S", "--save",
                      help = "save results as view")

    (options, args) = parser.parse_args()

    # Infer whether input is xz or tz images based on number of arguments
    if len(args) == 1: # timeseries.xyp
        options.imagetype = "tz"
    elif len(args) == 2: # initial.xyp final.xyp
        options.imagetype = "xz"
    elif options.load != None:
        # load experiment
        if options.experiment[0] != '/':
            options.experiment = '/' + options.experiment
        h5file = tables.openFile(options.database, mode="r")
        valid_expt = h5file.__contains__(options.experiment)
        h5file.close()
        if valid_expt:
            expt = tools.Experiment(options.database, options.experiment)
        else:
            print options.experiment, "not found in", options.database
            return
        field = expt.load_view(options.load)

        if field is None:
            print "View", options.load, "not found!"
            return

        field = schlieren_field(field, options=options)

        if options.save != None:
            if options.verbose:
                print "saving view..."
            expt.save_view(field, options.save, force=options.force)
        else:
            print "use --save option to store result!"

        expt.close()

        return
    else:
        parser.error("incorrect number of arguments")

    if options.mode not in ['dz', 'dzt', 'dn2', 'dn2t', 'qualitative']:
        parser.error("invalid schlieren mode: %s" % options.mode)
 
    import xplot
    # read in data from xyp files
    if options.verbose:
        print 'reading in XYP files...'
    if options.imagetype == "xz":
        reference_image = xplot.readXYplot(args[0], orientation='xz')
        image = xplot.readXYplot(args[1], orientation='xz')
        options.dxt = image.dx
    else: # tz
        image = xplot.readXYplot(args[0], orientation='tz')
        options.dxt = image.dt
        print image.shape

    options.xmin = image.xmin
    options.xmax = image.xmax
    options.zmin = image.zmin
    options.zmax = image.zmax
    options.tmin = image.tmin
    options.tmax = image.tmax

    if ((options.mode == 'dzt') or (options.mode == 'dn2t')):
        if options.imagetype == 'xz':
            if options.dt == None:
                parser.error("mode %s and 'xz' images requires --dt option" % \
                             options.mode)
        else:
            options.dt = options.ndt * image.dt

    if options.imagetype == "xz":
        output = schlieren(reference_image = reference_image, 
                           image = image, options = options)
    else:
        output = schlieren(image=image, options=options)

    # save output
    if(options.outputfile != None):
        if options.verbose:
            print ('saving results in xyp file')
        output.save(options.outputfile)

    if options.display:
        # show results
        vmax = 2.5*numpy.std(output)
        if options.verbose:
            print 'plotting result'
            print '    vmin = %.3f' % -vmax, 'vmax = %.3f' % vmax
        pylab.figure()
        output.plot(interpolation = 'bicubic', vmin=-vmax, vmax=vmax)
        pylab.colorbar()
        pylab.jet()
        pylab.title(options.mode)

        pylab.show()
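
The two invocation forms in the usage string differ only in how many positional arguments are left after option parsing, which is exactly what the len(args) dispatch above keys on. A minimal self-contained optparse sketch of that pattern (hypothetical argument values):

from optparse import OptionParser

parser = OptionParser(usage="%prog [options] initial.xyp final.xyp")
parser.add_option("-M", "--mode", default="dn2t")
options, args = parser.parse_args(["-M", "dz", "initial.xyp", "final.xyp"])
print options.mode, len(args)   # -> dz 2, i.e. the 'xz' image-pair form
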
Example #34
0
SW = np.random.permutation(X.shape[1])
X = X[:, SW]  # permute pixels

#D = np.array([np.concatenate((pixel, pixel[i%2:-i%2 or None], pixel[(i+1)%2:(i+1)%2 or None])) for i,pixel in enumerate(X.T)])
D = X.T

for metric in [
        'abs_correlation', 'braycurtis', 'canberra', 'correlation', 'cosine',
        'minkowski', 'seuclidean'
]:
    for n_neighbors in [4, 5, 6]:
        pl.figure()
        results = isomap(D, n_neighbors, metric=metric)
        x, y = results.real[:, 0], results.real[:, 1]
        pl.scatter(x, y, c=np.arange(X.shape[1])[SW], cmap=pl.cm.jet)
        pl.title("%s n_neighbors %d" % (metric, n_neighbors))
        #pl.savefig("%s.n_neighbors.%d.png" % (metric, n_neighbors))
        pl.show()

exit()
print "Saving restored frames..."

name = "movie"
if not os.path.exists(name): os.mkdir(name)
for i in range(len(X)):
    pl.figure(figsize=(2, 2))
    pl.hexbin(x, y, gridsize=50, C=X[i], cmap=pl.cm.gray)
    pl.savefig("%s/frame.%04d.png" % (name, i))
    pl.close()
    print ".",
Example #35
0
T = mahotas.thresholding.otsu(dna)
pylab.imshow(dna > T)
pylab.show()
# Results in numpy array of bools -> b/w image

# Smooth image using gaussian filter
dnaf = ndimage.gaussian_filter(dna, 8) # -- Image and stdev of image
T = mahotas.thresholding.otsu(dnaf)
pylab.imshow(dnaf > T)
pylab.show()

# Deal with merged/touching nuclei
labeled,nr_objects = ndimage.label(dnaf > T)
print nr_objects # prints 18
pylab.imshow(labeled)
pylab.jet()	# resets to jet from gray-scale
pylab.show()

############ STEP TWO -- Segmenting Image/Finding seeds
# Smooth image->find regional maxima->use maxima as seeds for watershed

# First try:
dnaf = ndimage.gaussian_filter(dna, 8)
rmax = pymorph.regmax(dnaf)
pylab.imshow(pymorph.overlay(dna, rmax)) # Overlay returns a color image with gray level component in first argument, second arg is red
pylab.show()

# Second try - Increase sigma:
dnaf = ndimage.gaussian_filter(dna, 16)
rmax = pymorph.regmax(dnaf)
pylab.imshow(pymorph.overlay(dna, rmax))
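
The watershed step that the STEP TWO comment anticipates can be sketched as follows, reusing the names above (a hedged completion, not the original file's continuation): label the maxima to get seeds, build a surface whose basins sit on the nuclei, and flood it:

seeds, nr_nuclei = ndimage.label(rmax)            # one integer label per regional maximum
dist = ndimage.distance_transform_edt(dnaf > T)   # distance to the background
dist = 255 - mahotas.stretch(dist)                # invert: nuclei centres become basins
nuclei = mahotas.cwatershed(dist, seeds)          # seeded watershed separates touching nuclei
pylab.imshow(nuclei)
pylab.show()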
Example #36
0
            i += 1
    ltrs_uly.sort()

    if '-v' in sys.argv:
        print voca_ham
        print voca_uly

    if '--norm' in sys.argv:
        voca_ham = voca_ham / voca_ham.max()
        voca_uly = voca_uly / voca_uly.max()

    import pylab
        
    fig_cnt += 1
    pylab.figure(fig_cnt)
    pylab.jet()
    pylab.imshow(voca_ham, interpolation='none')
    pylab.yticks(numpy.arange(len(ltrs_ham)), ltrs_ham)
    pylab.xlabel(r'Radix Tree Depth')
    pylab.ylabel(r'Root letter of Radix Tree')
    pylab.title(r'Radix Tree Breadth of HAMLET')
    pylab.colorbar()
    pylab.savefig('./plots/radix_tree_hamlet.jpg')
    pylab.savefig('./plots/radix_tree_hamlet.pdf')
    
    fig_cnt += 1
    pylab.figure(fig_cnt)
    pylab.jet()
    pylab.imshow(voca_uly, interpolation='none')
    pylab.yticks(numpy.arange(len(ltrs_uly)), ltrs_uly)
    pylab.xlabel(r'Radix Tree Depth')
Example #37
0
def update(testdir=None, seed=2):
    '''
    Update positioner to fiber number mapping from DocDB

    Options:
        testdir: if not None, write files here instead of
            $DESIMODEL/data/focalplane/fiberpos*
        seed: integer random number seed for randomization within a cartridge

    Writes testdir/fiberpos* or $DESIMODEL/data/focalplane/fiberpos*
    '''
    from desiutil.log import get_logger
    log = get_logger()

    #- Download input files from DocDB
    cassette_file = docdb.download(2721, 2, 'cassette_order.txt')
    xls_fp_layout = docdb.download(530, 11, 'DESI-0530-v11 (Focal Plane Layout).xlsx')
    platescale_file = docdb.download(329, 15, 'Echo22Platescale.txt')

    #- Pick filenames in output directory
    if testdir is None:
        outdir = os.path.join(datadir(), 'focalplane')
    else:
        outdir = testdir

    if not os.path.isdir(outdir):
        raise ValueError("Missing directory {}".format(outdir))

    #- copy platescale file
    outpsfile = os.path.join(outdir, 'platescale.txt')
    shutil.copy(platescale_file, outpsfile)
    log.info('Wrote {}'.format(outpsfile))

    #- Random but reproducible
    np.random.seed(seed)

    #- DESI-0530 file name (fn) and sheet name (sn) shortcuts
    fn = xls_fp_layout
    sn = 'PositionerAndFiducialLocations'

    #- Sanity check that columns are still in the same place
    rowmin, rowmax = 48, 590
    headers = docdb.xls_read_row(fn, sn, rowmin-1, 'B', 'S')

    assert headers[0] == 'device_location_id'
    assert headers[1] == 'device_type'
    assert headers[2] == 'X'
    assert headers[3] == 'Y'
    assert headers[4] == 'Z'
    assert headers[8] == 'cassetteID'
    assert headers[15] == 'Q'
    assert headers[17] == 'S'

    #- Read Excel table with device locations
    posloc = Table()
    posloc['DEVICE'] = docdb.xls_read_col(fn, sn, 'B', rowmin, rowmax, dtype=int)
    posloc['DEVICE_TYPE'] = docdb.xls_read_col(fn, sn, 'C', rowmin, rowmax, dtype=str)
    posloc['X'] = docdb.xls_read_col(fn, sn, 'D', rowmin, rowmax, dtype=float)
    posloc['Y'] = docdb.xls_read_col(fn, sn, 'E', rowmin, rowmax, dtype=float)
    posloc['Z'] = docdb.xls_read_col(fn, sn, 'F', rowmin, rowmax, dtype=float)
    posloc['Q'] = docdb.xls_read_col(fn, sn, 'Q', rowmin, rowmax, dtype=float)
    posloc['S'] = docdb.xls_read_col(fn, sn, 'S', rowmin, rowmax, dtype=float)

    #- Cassette N/A -> -1, and parse string -> float -> int
    c = docdb.xls_read_col(fn, sn, 'J', rowmin, rowmax)
    not_spectro_fiber = (c == 'N/A')
    c[not_spectro_fiber] = '-1'
    posloc['CASSETTE'] = np.array(c, dtype=float).astype(int)

    #- Sanity check on values
    ndevice = len(posloc)
    assert ndevice == 543  #- 543 holes have been drilled
    assert len(np.unique(posloc['DEVICE'])) == len(posloc['DEVICE'])
    assert set(posloc['DEVICE_TYPE']) == set(['POS', 'FIF', 'GIF', 'NON', 'OPT', 'ETC'])
    assert 0 < np.min(posloc['X']) and np.max(posloc['X']) < 410
    assert 0 <= np.min(posloc['Q']) and np.max(posloc['Q']) < 36.0
    assert 0 <= np.min(posloc['S']) and np.max(posloc['S']) < 412.3
    assert np.all(posloc['S']**2 > posloc['X']**2 + posloc['Y']**2 + posloc['Z']**2)
    assert np.min(posloc['CASSETTE']) == -1
    assert np.max(posloc['CASSETTE']) == 11
    assert set(posloc['DEVICE_TYPE'][posloc['CASSETTE']==11]) == set(['ETC', 'OPT'])
    assert set(posloc['DEVICE_TYPE'][posloc['CASSETTE']==-1]) == set(['FIF', 'GIF', 'NON'])
    assert 0 not in posloc['CASSETTE']

    #- Read mapping of cassettes on focal plane to fibers on slithead
    colnames = ['fibermin', 'fibermax', 'sp0', 'sp1', 'sp2', 'sp3', 'sp4', 'sp5', 'sp6', 'sp7', 'sp8', 'sp9']
    cassettes = Table.read(cassette_file, format='ascii', names=colnames)

    #- Randomize fibers within a cassette
    petals = list()
    for p in range(10):
        fiberpos = posloc.copy(copy_data=True)
        fiberpos['FIBER'] = -1
        fiberpos['PETAL'] = p
        fiberpos['SLIT'] = p
        fiberpos['SPECTRO'] = p
        iipos = (fiberpos['DEVICE_TYPE'] == 'POS')
        ### fiberpos['device'] += p*len(fiberpos)
        for c in range(1,11):
            ii = (cassettes['sp'+str(p)] == c)
            assert np.count_nonzero(ii) == 1
            fibermin = p*500 + cassettes['fibermin'][ii][0]
            fibermax = p*500 + cassettes['fibermax'][ii][0]

            jj = iipos & (fiberpos['CASSETTE'] == c)
            assert np.count_nonzero(jj) == 50
            fiber = list(range(fibermin, fibermax+1))
            np.random.shuffle(fiber)
            fiberpos['FIBER'][jj] = fiber

        #- Additional columns
        fiberpos['SLITBLOCK'] = (fiberpos['FIBER'] % 500) // 25
        fiberpos['BLOCKFIBER'] = (fiberpos['FIBER'] % 500) % 25
        fiberpos['LOCATION'] = p*1000 + fiberpos['DEVICE']

        #- Petal 0 is at the "bottom"; See DESI-0530
        phi = np.radians((7*36 + 36*p)%360)
        x = np.cos(phi)*fiberpos['X'] - np.sin(phi)*fiberpos['Y']
        y = np.sin(phi)*fiberpos['X'] + np.cos(phi)*fiberpos['Y']
        fiberpos['X'] = x
        fiberpos['Y'] = y

        petals.append(fiberpos)

    fiberpos = vstack(petals)
    fiberpos.sort('FIBER')
    POS = (fiberpos['DEVICE_TYPE'] == 'POS')

    #- devices that don't go to spectrographs don't have slitblock, blockfiber
    fiberpos['SLITBLOCK'][~POS] = -1
    fiberpos['BLOCKFIBER'][~POS] = -1

    #- More sanity checks before writing output
    fp = fiberpos[POS]
    assert len(fp) == 5000
    assert len(np.unique(fp['FIBER'])) == 5000
    assert min(fp['FIBER']) == 0
    assert max(fp['FIBER']) == 4999
    assert len(set(fp['SPECTRO'])) == 10
    assert min(fp['SPECTRO']) == 0
    assert max(fp['SPECTRO']) == 9
    assert len(np.unique(fiberpos['DEVICE'])) == ndevice
    assert len(np.unique(fiberpos['LOCATION'])) == len(fiberpos)

    #- Drop some columns we don't need
    fiberpos.remove_column('CASSETTE')

    #- Update i8 -> i4 for integer columns
    for colname in ['FIBER', 'DEVICE', 'SPECTRO', 'PETAL', 'SLIT']:
        fiberpos.replace_column(colname, fiberpos[colname].astype('i4'))

    #- Reorder columns
    assert set(fiberpos.colnames) == set('DEVICE DEVICE_TYPE X Y Z Q S FIBER PETAL SLIT SPECTRO SLITBLOCK BLOCKFIBER LOCATION'.split())
    colnames = 'PETAL DEVICE DEVICE_TYPE LOCATION FIBER X Y Z Q S  SPECTRO SLIT SLITBLOCK BLOCKFIBER'.split()
    fiberpos = fiberpos[colnames]
    assert fiberpos.colnames == colnames

    #- Set units and descriptions; see DESI-2724
    fiberpos['X'].unit = 'mm'
    fiberpos['Y'].unit = 'mm'
    fiberpos['Z'].unit = 'mm'
    fiberpos['Q'].unit = 'deg'
    fiberpos['S'].unit = 'mm'
    fiberpos['X'].description = 'focal surface location [mm]'
    fiberpos['Y'].description = 'focal surface location [mm]'
    fiberpos['Z'].description = 'focal surface location [mm]'
    fiberpos['Q'].description = 'azimuthal angle on focal surface [deg]'
    fiberpos['S'].description = 'radial distance along focal surface [mm]'
    fiberpos['FIBER'].description = 'fiber number [0-4999]'
    fiberpos['DEVICE'].description = 'focal plane device_loc number [0-542]'
    fiberpos['SPECTRO'].description = 'spectrograph number [0-9]'
    fiberpos['PETAL'].description = 'focal plane petal_loc number [0-9]'
    fiberpos['SLIT'].description = 'spectrograph slit number [0-9]'
    fiberpos['SLITBLOCK'].description = 'id of the slitblock on the slit [0-19]'
    fiberpos['BLOCKFIBER'].description = 'id of the fiber on the slitblock [0-24]'
    fiberpos['LOCATION'].description = 'global location id across entire focal plane [0-9543]; has gaps in sequence'
    fiberpos.meta['comments'] = [
        "Coordinates at zenith: +x = East = +RA; +y = South = -dec",
        "PETAL and DEVICE refer to locations, not hardware serial numbers",
        "Differences from DESI-2724 naming:",
        '  - Drops "_ID" from column names',
        '  - Drops "_LOC" from "DEVICE_LOC" and "PETAL_LOC"',
        "  - SLITBLOCK as int [0-19] instead of string [B0-B19]",
        "  - BLOCKFIBER as int [0-24] instead of string [F0-F24]",
        "Convenience columns:",
        "  - FIBER = PETAL*500 + SLITBLOCK*25 + BLOCKFIBER",
        "  - LOCATION = PETAL*1000 + DEVICE",
    ]

    ecsvout = os.path.join(outdir, 'fiberpos.ecsv')
    textout = os.path.join(outdir, 'fiberpos.txt')
    fitsout = os.path.join(outdir, 'fiberpos.fits')
    pngout  = os.path.join(outdir, 'fiberpos.png')

    #- Write old text format with just fiber, device, spectro, x, y, z
    write_text_fiberpos(textout, fiberpos[POS])
    log.info('Wrote {}'.format(textout))

    #- Write all columns but only for positioners with fibers
    fiberpos[POS].write(ecsvout, format='ascii.ecsv')
    log.info('Wrote {}'.format(ecsvout))

    fiberpos[POS].write(fitsout, format='fits', overwrite=True)
    log.info('Wrote {}'.format(fitsout))

    #- Write all columns and all rows, including
    #- fiducials (device_type='FIF') and sky monitor (device_type='ETC')
    fiberpos.sort('LOCATION')
    fitsallout = fitsout.replace('.fits', '-all.fits')
    ecsvallout = textout.replace('.txt', '-all.ecsv')
    fiberpos.write(fitsallout, format='fits', overwrite=True)
    fiberpos.write(ecsvallout, format='ascii.ecsv')
    log.info('Wrote {}'.format(fitsallout))
    log.info('Wrote {}'.format(ecsvallout))

    #- Visualize mapping
    POS = (fiberpos['DEVICE_TYPE'] == 'POS')
    FIF = (fiberpos['DEVICE_TYPE'] == 'FIF')
    ETC = (fiberpos['DEVICE_TYPE'] == 'ETC')
    import pylab as P
    P.jet()     #- With apologies to viridis
    P.figure(figsize=(7,7))
    P.scatter(fiberpos['X'][POS], fiberpos['Y'][POS], c=fiberpos['FIBER'][POS]%500, edgecolor='none', s=20)
    # P.scatter(fiberpos['x'][FIF], fiberpos['y'][FIF], s=5, color='k')
    # P.plot(fiberpos['x'][ETC], fiberpos['y'][ETC], 'kx', ms=3)
    P.grid(alpha=0.2, color='k')
    P.xlim(-420,420)
    P.ylim(-420,420)
    P.xlabel('x [mm]')
    P.ylabel('y [mm]')
    P.title('Focal plane color coded by fiber location on slithead')
    P.savefig(pngout, dpi=80)
    log.info('Wrote {}'.format(pngout))
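
As a standalone cross-check of the convenience relation recorded in the file comments above (FIBER = PETAL*500 + SLITBLOCK*25 + BLOCKFIBER), independent of any DocDB inputs:

import numpy as np

fiber = np.arange(5000)
petal = fiber // 500
slitblock = (fiber % 500) // 25
blockfiber = (fiber % 500) % 25
assert np.all(fiber == petal*500 + slitblock*25 + blockfiber)
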
Example #38
0
File: optimizer.py Project: rcrowder/Oger
    def plot_results(self,
                     node_param_list=None,
                     vmin=None,
                     vmax=None,
                     cmap=None,
                     log_x=False,
                     axes=None,
                     title=None,
                     plot_variance=True):
        ''' Plot the results of the optimization. 
            
            Works for 1D and 2D linear sweeps, yielding a 2D or 3D plot, respectively, of the parameter(s) vs. the error.
            Arguments:
                - node_param_list: a list of (node, param_string) tuples. Before plotting, the mean will be taken over all these node.param_string combinations, which is useful to plot/reduce multi-dimensional parameter sweeps.
                - vmin/vmax: can be used to truncate the errors between lower and upper bounds before plotting.
                - cmap: passed as a matplotlib colormap when plotting 2D images.
                - log_x: boolean to indicate if a 1D plot should use a log scale for the x-axis.
                - axes: optional Axes object to use for plotting
                - title: optional title for the plot
                - plot_variance: should variance be plotted in case of taking the mean over certain parameters. Default True. 
        '''

        try:
            import pylab
        except ImportError:
            print "It looks like matplotlib isn't installed. Plotting is impossible."
            return

        if axes is None:
            axes = pylab.axes()

        errors_to_plot, var_errors, parameters = self.mean_and_var(
            node_param_list)
        if vmin != None:
            errors_to_plot[errors_to_plot < vmin] = vmin
        if vmax != None:
            errors_to_plot[errors_to_plot > vmax] = vmax

        # If we have ranged over only one parameter
        if len(parameters) == 1:
            # Get the index of the remaining parameter to plot using the correct
            # parameter ranges
            param_index = self.parameters.index(parameters[0])
            if var_errors is not None and plot_variance:
                pylab.errorbar(self.parameter_ranges[param_index],
                               errors_to_plot,
                               var_errors,
                               axes=axes)
            else:
                if log_x:
                    pylab.semilogx(self.parameter_ranges[param_index],
                                   errors_to_plot,
                                   axes=axes)
                else:
                    pylab.plot(self.parameter_ranges[param_index],
                               errors_to_plot,
                               axes=axes)

            pylab.xlabel(str(parameters[0][0]) + '.' + parameters[0][1])
            pylab.ylabel(self.loss_function.__name__)
            if title is not None:
                pylab.title(title)
            pylab.show()
        elif len(parameters) == 2:
            # Get the extreme values of the parameter values
            p1 = self.parameters.index(parameters[0])
            p2 = self.parameters.index(parameters[1])

            xm = mdp.numx.amin(self.parameter_ranges[p1])
            ym = mdp.numx.amin(self.parameter_ranges[p2])

            xM = mdp.numx.amax(self.parameter_ranges[p1])
            yM = mdp.numx.amax(self.parameter_ranges[p2])

            # For optimization algorithms which have non-uniform sampling of the parameter space, we interpolate here
            # This has no effect on the plot for optimizations using gridsearch
            xi = mdp.numx.linspace(xm, xM, len(self.parameter_ranges[p1]))
            yi = mdp.numx.linspace(ym, yM, len(self.parameter_ranges[p2]))
            (x, y) = mdp.numx.meshgrid(self.parameter_ranges[p1],
                                       self.parameter_ranges[p2])

            # Create an interpolation grid
            zi = mdp.numx.fliplr(
                pylab.griddata(x.flatten(), y.flatten(),
                               errors_to_plot.flatten('F'), xi, yi)).T

            pylab.imshow(zi,
                         cmap=cmap if cmap is not None else pylab.cm.jet,
                         interpolation='nearest',
                         extent=self.get_extent(parameters),
                         aspect="auto",
                         axes=axes)
            pylab.xlabel(str(parameters[1][0]) + '.' + parameters[1][1])
            pylab.ylabel(str(parameters[0][0]) + '.' + parameters[0][1])
            if title is not None:
                pylab.suptitle(title)
            pylab.colorbar()

            if var_errors is not None and plot_variance:
                pylab.figure()
                pylab.imshow(mdp.numx.flipud(var_errors),
                             cmap=cmap,
                             interpolation='nearest',
                             extent=self.get_extent(parameters),
                             aspect="auto",
                             vmin=vmin,
                             vmax=vmax)
                pylab.xlabel(str(parameters[1][0]) + '.' + parameters[1][1])
                pylab.ylabel(str(parameters[0][0]) + '.' + parameters[0][1])
                pylab.suptitle('variance')
                pylab.colorbar()

            pylab.show()
        else:
            raise Exception("Too many parameter dimensions to plot: " +
                            str(errors_to_plot.ndim))
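
A side note on the interpolation step: pylab.griddata comes from matplotlib.mlab, which dropped it in matplotlib 3; scipy.interpolate.griddata is a rough stand-in (a hedged sketch with toy data, not a drop-in replacement):

import numpy as np
from scipy.interpolate import griddata

rng = np.random.RandomState(0)
x, y = rng.rand(50), rng.rand(50)
z = x**2 + y**2
xi = np.linspace(0.0, 1.0, 20)
yi = np.linspace(0.0, 1.0, 20)
zi = griddata((x, y), z, (xi[None, :], yi[:, None]), method='linear')
print zi.shape   # (20, 20); NaN outside the convex hull of the samples
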
Example #39
0
pylab.imshow(dna > T)
pylab.show()

# apply a gaussian filter to smooth the image
dnaf = mh.gaussian_filter(dna, 8)
dnat = dnaf > T
pylab.gray()
nuclei1 = dnat
pylab.imshow(dnat)
pylab.show()

# labelling the thresholded image
labeled, nr_objects = mh.label(dnat)
print nr_objects  # output number of objects
pylab.imshow(labeled)
pylab.jet()  # makes image colourful
pylab.show()

dnaf = mh.gaussian_filter(dna, 8)
rmax = mh.regmax(dnaf)
pylab.imshow(mh.overlay(
    dna, rmax))  # print dna and rmax (with second channel in red)
pylab.show()  # seeds only show when image is zoomed in

dnaf = mh.gaussian_filter(dna, 16)  # a wider filter yields cleaner regional maxima
rmax = mh.regmax(dnaf)
pylab.imshow(mh.overlay(dna, rmax))
pylab.show()

seeds, nr_nuclei = mh.label(rmax)  # nuclei count
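
One hedged way to tighten the count above is to drop seed regions that touch the image border before reporting (assumes mh is mahotas, as in the snippet; uses the mahotas.labeled helpers):

whole = mh.labeled.remove_bordering(seeds)   # zero out labels touching the border
whole, n_whole = mh.labeled.relabel(whole)   # compact the remaining label ids
print n_whole                                # nuclei fully inside the field of view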
Example #40
0
				# component = sta_small_visual_lin[:,:,b] #STA[:,:,b]
				# pl.figure()
				# im = pl.pcolormesh( component,vmin = 0,vmax = 255, cmap=cm.jet )
				# #pl.contour(component)
				# pl.colorbar(im)
				# ax = pl.axes()
				# ax.set_yticklabels([])
				# ax.set_xticklabels([])
				# #ax.annotate(str(a+1), (.1, 1.2), bbox=dict(boxstyle="round, pad=0.3", fc="w"), size=52 )
				# pl.savefig(finalfolder_lin+"/Small_STA-"+str(neurontag)+"-"+str(b)+"-g_.png",format='png', bbox_inches='tight')
				# del component

		print 'Saving mean image in linear scale...'
		pl.figure()
		im = pl.pcolormesh(MEANSTA_lin,vmin = 0,vmax = 255, cmap=cm.jet)
		pl.jet()
		pl.colorbar(im)
		ax = pl.axes()
		ax.set_yticklabels([])
		ax.set_xticklabels([])
		#ax.annotate(str(a+1), (.1, 1.2), bbox=dict(boxstyle="round, pad=0.3", fc="w"), size=52 )
		pl.savefig(finalfolder_lin+"/MEANSTA-g_"+str(neurontag)+".png",format='png', bbox_inches='tight')
		# del ax

		print 'CELL ' + timestampName + ' FINISHED!!!'
		print '-----------------------END---------------------------'
		
		del STA_desp 
		del STA 
		del stavisual_lin 
		del spikeframe_matrix
Example #41
0
def main(infile):
    img = mahotas.imread(infile)
    infile = os.path.splitext(
        os.path.basename(infile))[0]
    blue_component = img[:,:,B]

    pylab.gray()
    k = 3
    f = ndimage.gaussian_filter(blue_component, 12)
    if DEBUG:
        print 'Processing image %s using the blue channel' % (infile)
        mahotas.imsave('00%s-input.jpg' % infile, f)

    clustered = segment_kmeans(f, k)
    clustered[clustered == k-1] = 0
    mask = ndimage.binary_fill_holes(clustered)

    if DEBUG:
        print 'Initial k-means segmentation with k=%d' % k
        mahotas.imsave('01kmeans1%s.jpg' % infile, mask)

    masked = f * mask

    if DEBUG:
        mahotas.imsave('02masked%s.jpg' % infile, masked)

    k = 4
    clustered2 = segment_kmeans(masked, k)

    if DEBUG:
        print 'K-means re-segmentation with k=%d' % k
        mahotas.imsave('03kmeans2%s.jpg' % infile, clustered2)

    clustered2[(clustered2 != k-2)] = 0
    clustered2[(clustered2 == k-2)] = 1

    if DEBUG:
        mahotas.imsave('04kmeans2binary%s.jpg' % infile, clustered2)

    clustered2 = ndimage.binary_fill_holes(clustered2)

    labeled, _  = mahotas.label(mask)
    labeled1 = np.zeros_like(labeled)  # fallback so `combined` below is defined when DEBUG is off (assumption)
    if DEBUG:
        print 'Labelling the initial k-means segmentation image'
        mahotas.imsave('05kmeans2noholes%s.jpg' % infile, clustered2)
        mahotas.imsave('06kmeans2labeled%s.jpg' % infile, labeled)
        while True:
            min_max = raw_input('label1 min,max? ')
            try:
                min_max = min_max.strip().split(',')
                min_ = int(min_max[0])
                max_ = int(min_max[1])
            except:
                break
            labeled1 = remove_by_size(labeled, min_, max_)
            mahotas.imsave('07labeled1f%d,%d%s.jpg' % (min_, max_, infile), labeled1)

    labeled, _  = mahotas.label(clustered2)
    labeled2 = remove_by_size(labeled, 1600, 23000)

    if DEBUG:
        print 'Labelling the k-means re-segmentation image'
        mahotas.imsave('08labeled2f%s.jpg' % infile, labeled)
        mahotas.imsave('09labeled2f%s.jpg' % infile, labeled2)

    combined = labeled1 + labeled2

    labeled_to_binary(combined)
    if DEBUG:
        mahotas.imsave('10combined%s.jpg' % infile, combined)

    combined = ndimage.binary_fill_holes(combined)

    if DEBUG:
        mahotas.imsave('11combined_noholes%s.jpg' % infile, combined)

    borders = mahotas.labeled.borders(mahotas.label(combined)[0])

    cells = f * combined
    cells[cells == 0] = 255

    if DEBUG:
        mahotas.imsave('12maskedcellsw%s.jpg' % infile, cells)

    rmin = mahotas.regmin(cells)
    seeds, nr_nuclei = mahotas.label(rmin)

    if DEBUG:
        mahotas.imsave(
            '13gscale-final%s.jpg' % infile,
            pymorph.overlay(blue_component, rmin,
                        borders)
        )

    img2 = np.copy(img)
    img2[borders] = [0,0,0]
    img2[rmin] = [5,250,42]
    mahotas.imsave('14%s-outputcells.jpg' % (infile), img2)

    #watershed
    gradient = ndimage.morphology.morphological_gradient(combined, size=(3,3))
    gradient = gradient.astype(np.uint8)
    if DEBUG:
        print 'Watershed'
        mahotas.imsave('15%s-gradient.jpg' % infile, gradient)
    wshed, lines = mahotas.cwatershed(gradient, seeds, return_lines=True)


    pylab.jet()

    if DEBUG:
        mahotas.imsave('16wshed.jpg', wshed)

    ncells =  len(np.unique(wshed)) - 1
    print '%d cells.' % ncells
    borders = mahotas.labeled.borders(wshed)

    img[borders] = [0,0,0]
    img[rmin] = [5,250,42]
    mahotas.imsave('17%s-output-%dcells.jpg' % (infile, ncells), img)
Example #42
0
def calculaSTA(args):
	start, finish = args
	if finish > endUnit:
		finish = endUnit

	for kunit in range(start,finish):
		timestampName = per_row[kunit]
		if characterization[kunit] > 0:
			print 'Analysing Unit ',timestampName #, ' loop :', c ,' unit n ', c + startUnit
			#--------------------------------------------------------
			# get spike time stamps from file 
			#--------------------------------------------------------
			neurontag = timestampName # tag or number of cell
			rastercelulatxtfile = timefolder + timestampName +'.txt'
			timestamps = npy.loadtxt(rastercelulatxtfile) # text file containing time spikes in datapoints
			neuronresultfolder_lin = str(neurontag)+'_lineal'
			try:
			  os.mkdir( outputFolder+neuronresultfolder_lin ) # create the folder
			except OSError:
			  pass		
			finalfolder_lin = outputFolder+neuronresultfolder_lin
			#print 'size time stamps vector: ', len(timestamps) #, 'x',len(timestamps[0])
			#--------------------------------------------------------
			# get time spikes depending of the stimulus start (frame do not start in time=0)
			#--------------------------------------------------------
			#--------------------------------------------------------
			# Conversion of spike times from seconds to POINTS:
			#--------------------------------------------------------
			vector_spikes = timestamps[:]*samplingRate # without first id zero column (1 COLUMN)
			#vector_spikes = timestamps[:] # without first id zero column (1 COLUMN)
			stimei = []  # initialize time spike index depending of image time
			spikeframe_matrix = npy.zeros( (len(vector_spikes), 4) ) # [spike time, frame id, ini time frame, end time frame]
			#--------------------------------------------------------
			# convert stimes (SPIKE TIMES) to frame indexes (image index):
			#--------------------------------------------------------
			primer_frame = 0
			frame_ant = 0
			#print 'Get the spike triggered stimuli indices: \n'
			contator = 0
			contator2 = 0
			totalcont = len(vector_spikes) * len(range(primer_frame, lenSyncFile))
			for punto_spike in vector_spikes:
				# WTF is this?
				condicion = 1			
				for i in range(primer_frame, lenSyncFile):
					if (vector_inicio_frame[i] < punto_spike) & (punto_spike <= vector_fin_frame[i]):
						# if the spike time is into a frame time points (start and ends)
						spikeframe_matrix[contator,0] = punto_spike
						spikeframe_matrix[contator,1] = vector_fin_frame[i]
						spikeframe_matrix[contator,2] = inicio_fin_frame[i,0]
						spikeframe_matrix[contator,3] = inicio_fin_frame[i,1]
						stimei.append(i)
						frame_ant = i
						break
				contator += 1
				# WTF is this?  Idiotic comment
				contator2 = contator * 100 // ( 1.0 * len(vector_spikes) )
				sys.stdout.write("\r%d%%" %contator2)
				sys.stdout.flush()
				primer_frame = frame_ant
			#print '\n'	
			# WTF?
			limite3 = len(stimei)
			print "Nro de frames: ", limite3
			#print 'length frames times vector', lenSyncFile
			#print "length time stamps vector: ", len(timestamps)
			#print "length spike triggered stimuli time i vector: ", len(stimei)
			#--------------------------------------------------------
			# STA Algorithm
			#--------------------------------------------------------
	
			#------------------- ALGORITHM TYPE 1----------------------
			if(tipoalgoritmo == 1):
				sta_1()	
	
			#------------------- ALGORITHM TYPE 2----------------------
			if(tipoalgoritmo == 2): # sequentially algorithm
				sta_2()
			dosmall = 0
	
			#------------------- ALGORITHM TYPE 3----------------------
			if(tipoalgoritmo == 3): # LOAD CHUNKS OF FRAMES AND CALCULATES THE STA SEQUENTIALLY
				sta_3()
				
			#===============================================================================
			#------------------- ALGORITHM TYPE 4----------------------
			if(tipoalgoritmo == 4): # LOAD entire matrix stimuli AND CALCULATES THE STA SEQUENTIALLY
				STA , stavisual_lin , MEANSTA_lin, STA_desp, acumula = sta_4(stimei)
				
			#----------------------------------------------------
			# save spike time stamp and frame index
			#----------------------------------------------------
			spikeframe_matrix_array =  npy.array(spikeframe_matrix)
			spikeframe_filename = "spikeframe_matrix"+str(neurontag)
			#print "Save spike frame matrix as mat file: ",spikeframe_filename
			scipy.io.savemat(finalfolder_lin+'/'+spikeframe_filename+'.mat',mdict={'spikeframe_matrix':spikeframe_matrix_array},oned_as='column')
	
			#----------------------------------------------------
			# save true STA matrix (NON SCALED for visual plot)
			#----------------------------------------------------
			STA_array = npy.array(STA)
			cadena_texto = "sta_array_"+str(neurontag)
			#print "Saving NON rescaled STA as mat file: ",cadena_texto
			scipy.io.savemat(finalfolder_lin+'/'+cadena_texto+'.mat',mdict={'STA_array':STA_array},oned_as='column')
			
			#----------------------------------------------------
			# save visual STA matrix ( RE SCALED for visual plot)
			#----------------------------------------------------
			stavisual_lin_array = npy.array(stavisual_lin)
			cadena_texto = "stavisual_lin_array_"+str(neurontag)
			#print "Saving visual STA (lineal) as mat file: ",cadena_texto
			scipy.io.savemat(finalfolder_lin+'/'+cadena_texto+'.mat',mdict={'STAarray_lin':stavisual_lin_array},oned_as='column')
	
			#print 'Saving images in lineal scale...'
			
			plt.clf()
			fig = plt.figure(1, figsize=(12,10))
			
			ax = fig.add_subplot(3,6,1)
			component = stavisual_lin[:,:,0]
			ax.pcolormesh( component,vmin = 0,vmax = 255, cmap=cm.jet )
			ax.set_yticklabels([])
			ax.set_xticklabels([])
			ax.set_aspect(1)
			
			kcontador = 2
			#casep: changed 17 to framesNumber-1 to prevent "out of bounds" when processing fewer than 18+2 frames
			for ksubplot in range(framesNumber-1):
				ax = fig.add_subplot(3,6,kcontador)
				component = stavisual_lin[:,:,kcontador-1]
				ax.pcolormesh( component,vmin = 0,vmax = 255, cmap=cm.jet )
				ax.set_aspect(1)
				ax.set_yticklabels([])
				ax.set_xticklabels([])
				kcontador+=1
			
			plt.savefig(finalfolder_lin+"/STA-"+str(neurontag)+"_.png",format='png', bbox_inches='tight')
			plt.savefig(outputFolder+"STA-"+str(neurontag)+"_.png",format='png', bbox_inches='tight')
			plt.show()        
			plt.clf()
			plt.close()
			#------------------------------------------------------
	
			#print 'Saving mean image in lineal scale...'
			pl.figure()
			im = pl.pcolormesh(MEANSTA_lin,vmin = 0,vmax = 255, cmap=cm.jet)
			pl.jet()
			pl.colorbar(im)
			ax = pl.axes()
			ax.set_yticklabels([])
			ax.set_xticklabels([])
			pl.savefig(finalfolder_lin+"/MEANSTA-g_"+str(neurontag)+".png",format='png', bbox_inches='tight')
			pl.close()
			print 'CELL ' + timestampName + ' FINISHED!!!'

			del STA_desp 
			del STA 
			del stavisual_lin 
			del spikeframe_matrix
			del acumula
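
The spike-to-frame loop above walks the frame list once per spike, resuming from primer_frame. Assuming the frame start/end vectors are sorted and non-overlapping, numpy.searchsorted computes the same mapping in one vectorized call; a small self-contained sketch with toy data:

import numpy as npy

vector_inicio_frame = npy.array([0., 10., 20., 30.])   # frame start times
vector_fin_frame = npy.array([10., 20., 30., 40.])     # frame end times
spikes = npy.array([3., 12., 33.])

# index of the first frame whose end time is >= each spike time
idx = npy.searchsorted(vector_fin_frame, spikes, side='left')
safe = npy.minimum(idx, len(vector_fin_frame) - 1)
inside = (idx < len(vector_fin_frame)) & (spikes > vector_inicio_frame[safe])
print idx[inside]   # -> [0 1 3]
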
Example #43
0
#print img_bin,stats.mode(img_bin,axis=None)
#print img_bin,np.max(img_bin)

# binary again
#img_bin = filters.maximum_filter(img_bin,7)
#img_bin = filter.threshold_adaptive(img_bin,7)

#img_bin[img_bin>0]=255
Image.fromarray(uint8(img_bin)).save('feature_points.png')

figure(); gray(); # don't use colors 

# show the images in separate figures (the subplot layout is commented out)
#subplot(1,3,1)   
imshow(img_gray)
#subplot(1,3,2)
figure(); gray(); # don't use colors 
imshow(img_bin)
figure(); gray(); # don't use colors 
imshow(img_bin_words)
#subplot(1,3,3)
#imshow(labeled_array)
#ob = labeled_array[obj_list[100]]
figure(); gray(); # don't use colors 
imshow(labeled_array)

# starts the figure GUI and raises the figure windows

jet()
show()
Example #44
0
def clickScat(array2d, array3d, xScat=None, xerror3d=None, yerror3d=None, array3d2=None, xerror3d2=None, yerror3d2=None, fn=None, xMap=None, yMap=None, 
    modelError=False, ylimScat=None):
    """
    figureHandles=clickScat(array2d, array3d, xScat=None, xerror3d=None, yerror3d=None, array3d2=None, xerror3d2=None, yerror3d2=None, fn=None, xMap=None, yMap=None):
    xScat: x-axis variables for the scatter plot. Must have the same length as the last dimension of array3d (array3d.shape[2]).
    xerror3d: errorbars for x-axis. two sided. 
    fn:'annual'
    """
    import insar
    dateaxis=False;
    if xScat is None:
        xScat=np.r_[0:array3d.shape[2]];
    elif isinstance(xScat[0], P.matplotlib.dates.datetime.date):
        xScat=P.matplotlib.dates.date2num(xScat);
        dateaxis=True;

    def onclick(event):
        P.figure(fh.number);
        P.clf();
        #ax = P.gca()
        #inv = ax.transData.inverted()
        #A=inv.transform((event.x,  event.y))
        #A[1]=np.int(np.round((1-A[1])*array2d.shape[1])) 
        #A[0]=np.int(np.round((A[0])*array2d.shape[0]))
        try:
            y=np.round(event.xdata);
        except:
            return
        x=np.round(event.ydata);        
        #ARRAY MAPPING IS first axis y(rows) and second axis is cols (x)
        if all(np.isnan(array3d[x, y,:])):
            #if there are no points to plot (all nan) then return
            return
        
        #Plot second scatter data.
        if array3d2 is not None:        
            if isinstance(array3d2, list):
                if yerror3d is None:
                    w=np.ones(array3d[x, y,:].shape);
                else:
                    w=basic.rescale(1./yerror3d[x,y,:], [1,2])
                markers=['*','+','s','d','x','v','<','>','^']
                m=0;
                for arr in array3d2:  
                    print ("%d, %d, %d" % (x,y,m))                  
                    P.scatter(xScat, arr[x, y,:], marker=markers[m]);
                    idx=~( np.isnan(arr[x, y,:]) | np.isnan(array3d[x, y,:]))
                    #c=insar.crosscorrelate(basic.nonan(w[idx]*arr[x, y,idx]),basic.nonan(w[idx]*array3d[x, y,idx]))
                    slope, intercept, r_value, p_value, std_err = scipy.stats.linregress(basic.nonan(w[idx]*arr[x, y,idx]), basic.nonan(w[idx]*array3d[x, y,idx]))
                    P.annotate(str("r2[%s]: %0.2f" % (markers[m],r_value)), (0,0.9-m*0.05), xycoords='axes fraction')                    
                    m=m+1;
            else:            
                if xerror3d2 is None:
                    xerr=None;
                else:
                    xerr=xerror3d2[x,y,:]
                if yerror3d2 is None:
                    yerr=None;
                else:
                    yerr=yerror3d2[x, y,:]
                P.errorbar(xScat,array3d2[x, y,:], xerr=xerr, yerr=yerr, marker='*', fmt='o');

        #Plot function result as scatter data.
        p=None            
        if fn is not None:
            if fn=='linear_amplitude_annual':
                dataMask=~np.isnan(array3d[x, y,:])
                p0=np.array([1,0,0,basic.nonan(array3d[x, y,:]).mean() ])
                fitfun=lambda p: (p[0]+p[1]*xScat[dataMask]/365. )* np.cos(2*np.pi*xScat[dataMask]/365.+p[2]) + p[3]
                xScat2=np.linspace(xScat.min(),xScat.max())
                fitfun2=lambda p: (p[0]+p[1]*xScat2/365.) * np.cos(2*np.pi*xScat2/365.+p[2]) + p[3]
                #errfun=lambda p: sum(abs(basic.nonan(array3d[x, y,:])-fitfun(p)));
                if yerror3d is None:
                    w=np.ones(array3d[x, y,:].shape);
                else:
                    w=basic.rescale(1./yerror3d[x,y,:], [1,2])
                errfun=lambda p: basic.nonan(w*array3d[x, y,:])-w[dataMask]*fitfun(p);
                #p=scipy.optimize.fmin_powell(errfun, p0)
                p=scipy.optimize.leastsq(errfun, p0);
                p=p[0];
                P.scatter(xScat[dataMask], fitfun(p), marker='^');                
                sortedxy=  np.squeeze(np.dstack([xScat2, fitfun2(p)]));
                sortedxy=sortedxy[sortedxy[:,0].argsort(),:]
                P.plot(sortedxy[:,0], sortedxy[:,1]);
                slope, intercept, r_value, p_value, std_err = scipy.stats.linregress(basic.nonan(w*array3d[x, y,:]),w[dataMask]*fitfun(p))
                P.annotate(str("a0:%0.2f\na1:%0.2f\npha:%0.2f\nbias:%0.2f\nr2:%0.2f" % (p[0], p[1], p[2], p[3], r_value**2.)), (0.8,0.8), xycoords='axes fraction')
            elif fn=='quadratic_amplitude_annual':
                dataMask=~np.isnan(array3d[x, y,:])
                p0=np.array([1,0,0,0,basic.nonan(array3d[x, y,:]).mean() ])
                fitfun=lambda p: (p[0]+p[1]*xScat[dataMask]/365.+p[2]*(xScat[dataMask]/365.)**2. )* np.cos(2*np.pi*xScat[dataMask]/365.+p[3]) + p[4]
                xScat2=np.linspace(xScat.min(),xScat.max())
                fitfun2=lambda p: (p[0]+p[1]*xScat2/365.+p[2]*(xScat2/365.)**2.) * np.cos(2*np.pi*xScat2/365.+p[3]) + p[4]
                #errfun=lambda p: sum(abs(basic.nonan(array3d[x, y,:])-fitfun(p)));
                if yerror3d is None:
                    w=np.ones(array3d[x, y,:].shape);
                else:
                    w=basic.rescale(1./yerror3d[x,y,:], [1,2])
                errfun=lambda p: basic.nonan(w*array3d[x, y,:])-w[dataMask]*fitfun(p);
                #p=scipy.optimize.fmin_powell(errfun, p0)
                p=scipy.optimize.leastsq(errfun, p0);
                p=p[0];
                P.scatter(xScat[dataMask], fitfun(p), marker='^');                
                sortedxy=  np.squeeze(np.dstack([xScat2, fitfun2(p)]));
                sortedxy=sortedxy[sortedxy[:,0].argsort(),:]
                P.plot(sortedxy[:,0], sortedxy[:,1]);
                slope, intercept, r_value, p_value, std_err = scipy.stats.linregress(basic.nonan(w*array3d[x, y,:]),w[dataMask]*fitfun(p))
                P.annotate(str("a0:%0.2f\na1:%0.2f\na2:%0.2f\npha:%0.2f\nbias:%0.2f\nr2:%0.2f" % (p[0], p[1], p[2], p[3], p[4], r_value**2.)), (0.8,0.8), xycoords='axes fraction')


            elif fn=='annual':
                dataMask=~np.isnan(array3d[x, y,:])
                p0=np.array([1,1,basic.nonan(array3d[x, y,:]).mean() ])
                fitfun=lambda p: p[0]* np.cos(2*np.pi*xScat[dataMask]/365.+p[1]) + p[2]
                xScat2=np.linspace(xScat.min(),xScat.max())
                fitfun2=lambda p: p[0]* np.cos(2*np.pi*xScat2/365.+p[1]) + p[2]
                #errfun=lambda p: sum(abs(basic.nonan(array3d[x, y,:])-fitfun(p)));
                if yerror3d is None:
                    w=np.ones(array3d[x, y,:].shape);
                else:
                    w=basic.rescale(1./yerror3d[x,y,:], [1,2])
                errfun=lambda p: basic.nonan(w*array3d[x, y,:])-w[dataMask]*fitfun(p);
                #p=scipy.optimize.fmin_powell(errfun, p0)
                p=scipy.optimize.leastsq(errfun, p0);
                p=p[0];
                P.scatter(xScat[dataMask], fitfun(p), marker='^');                
                sortedxy=  np.squeeze(np.dstack([xScat2, fitfun2(p)]));
                sortedxy=sortedxy[sortedxy[:,0].argsort(),:]
                P.plot(sortedxy[:,0], sortedxy[:,1]);
                slope, intercept, r_value, p_value, std_err = scipy.stats.linregress(basic.nonan(w*array3d[x, y,:]),w[dataMask]*fitfun(p))
                P.annotate(str("amp:%0.2f\npha:%0.2f\nbias:%0.2f\nr2:%0.2f" % (p[0], p[1], p[2], r_value**2.)), (0.8,0.8), xycoords='axes fraction')
            else:      
                p=None             
                P.scatter(xScat, fn(xScat), marker='^');       
        #convert axis to date...             
        if dateaxis:
            try:
                P.figure(fh.number).axes[0].xaxis_date(tz=None)
                P.figure(fh.number).autofmt_xdate()
            except:
                pass
        #change x y to xMap, yMap
        if yMap is not None:
            xM=ya*x+yb;
        else:
            xM=x;
        if xMap is not None:
            yM=xa*(y)+xb;
        else:
            yM=y;
        #x and y are flipped in the try/except block above. So Flip again.
        #if p is not None:
        #    P.title("x,y,[]: " + str(yM) + ", " + str(xM) + ', ' + str(p) )
        #else:
        P.title("x,y,z,z.std: " + str(yM) + ", " + str(xM) + ', ' + str(array2d[x,y]) +', ' + str(np.std(basic.nonan(array3d[x, y,:]))) )
        
        # rotate and align the tick labels so they look better
        #P.figure(fh.number).autofmt_xdate()
        # use a more precise date string for the x axis locations in the
        # toolbar
        #P.gca().fmt_xdata = mdates.DateFormatter('%Y-%m-%d')

        if xerror3d is None:
            xerr=None;
        else:
            xerr=xerror3d[x,y,:]
        if yerror3d is None:
            yerr=None;
        else:
            yerr=yerror3d[x, y,:]        
        if modelError:
            yerr=yerror3d[x, y,:] 
            yerr[dataMask]=errfun(p)
            
        P.errorbar(xScat,array3d[x, y,:], xerr=xerr, yerr=yerr, fmt='ro');
        if ylimScat is not None:
            P.ylim(ylimScat);
        ##################################
        ## END OF PLOTTING
        ##################################
        
    s=array2d[~np.isnan(array2d)].std();
    m=array2d[~np.isnan(array2d)].mean();
    fig=P.figure();ax=fig.add_subplot(111);ax.matshow(array2d, vmin=m-s, vmax=m+s);
    #fig=P.figure();ax=fig.add_subplot(111);ax.matshow(basic.wrapToInt(array2d, s), vmin=-s, vmax=s);
    if xMap is not None:
        ticks=ax.get_xticks();
        (xa,xb)=np.polyfit(np.r_[0:len(xMap)],xMap,1)
        ax.set_xticklabels(np.around(xa*ticks+xb,4));
    if yMap is not None:
        ticks=ax.get_yticks();
        (ya,yb)=np.polyfit(np.r_[len(yMap):0:-1],yMap,1)
        ax.set_yticklabels(np.around(ya*ticks+yb,4));
    
    #P.colorbar();
    cax,kw=P.matplotlib.colorbar.make_axes(ax,orientation='vertical')
    P.matplotlib.colorbar.ColorbarBase(cax, cmap=P.cm.jet,
                                       norm=P.Normalize(vmin=m-s, vmax=m+s),
                                       orientation='vertical')
    fh=P.figure(); #should be accessible in child function?
    fig.canvas.mpl_connect('button_press_event', onclick);
    return (fig,fh)
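
The 'annual' branch above fits y = a*cos(2*pi*t/365 + phi) + b by least squares; the same pattern on synthetic data, as a self-contained sketch:

import numpy as np
import scipy.optimize

t = np.arange(0., 730., 10.)                    # two years, 10-day sampling
y = 2.0*np.cos(2*np.pi*t/365. + 0.5) + 1.0      # amp=2.0, phase=0.5, bias=1.0
p0 = np.array([1., 1., y.mean()])
fitfun = lambda p: p[0]*np.cos(2*np.pi*t/365. + p[1]) + p[2]
errfun = lambda p: y - fitfun(p)
p, ier = scipy.optimize.leastsq(errfun, p0)
print p   # ~ [2.0, 0.5, 1.0]
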
Example #45
0
data = np.loadtxt('velocities.txt')

# some constants
n = 1000
a = 0
b = 100
h = (b - a) / float(n)  # float() guards against integer division under Python 2


def f(t):
    # find our appropriate y values for our x's
    t = int(t)
    return data[t, 1]


bum = (f(a) + f(b)) / 2
glum = 0

for k in range(1, n):  # sum over the interior points of the composite trapezoid rule
    glum += f(a + k * h)

integral = (glum + bum) * h
print("integral =", integral)

for i in range(0, 100):
    py.plot(data[i, 0], data[i, 1], 'ko')

py.jet()
py.show()
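
As a cross-check, numpy implements the same composite trapezoid rule as numpy.trapz; a self-contained toy (velocities.txt replaced by a synthetic velocity with a known integral):

import numpy as np

t = np.linspace(0, 100, 101)
v = 3 * t**2                       # exact integral over [0, 100] is 100**3 = 1,000,000
print("trapz =", np.trapz(v, t))   # ~1,000,050: the rule slightly overestimates convex integrands
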
Example #46
0
#---------------------------------------
# Matrix of Frequencies
#---------------------------------------
npos = len(target_abego)
posfreq = np.zeros((npos, 20))
#print seq_pos

for pos in range(npos):
    for j, aa in enumerate(aa_list):
        posfreq[pos, j] = seq_pos[pos][aa]

import matplotlib.colors as colors

dim1, dim2 = posfreq.shape
#print posfreq
plt.figure()
plt.pcolor(posfreq, cmap=plt.cm.jet)
plt.xticks(np.arange(0.5, dim2 + 0.5, 1))
plt.yticks(np.arange(0.5, dim1 + 0.5, 1))
plt.gca().set_xticklabels(aa_list)

ylabel = []
for i in range(dim1):
    ylabel.append('%i.%s' % (i, target_abego[i]))

plt.gca().set_yticklabels(ylabel)
plt.colorbar()

plt.show()
Example #47
0
for i in range(row):
    for k in range(col):

        # distances to point charges
        # r1 coords are (4.5, 5.0)
        # r2 coords are (5.5, 5.0)
        r1 = sqrt((4.5 - i)**2 + (5.0 - k)**2)
        r2 = sqrt((5.5 - i)**2 + (5.0 - k)**2)
        field[i, k] = (q1 / r1) + (q2 / r2)  # superposed 1/r potentials (constant prefactor omitted)

part = input("Enter which part (a, b, or c): ")

if part in ['a']:
    clf()
    imshow(field, origin='lower')
    jet()
    colorbar()
    show()

if part in ['b']:
    h = 1e-3

    pdiff_x = zeros((row, col))
    pdiff_y = zeros((row, col))

    for i in range(2, 9):
        # five-point central difference in x:
        # f'(x) ~ [f(x-2h) - 8 f(x-h) + 8 f(x+h) - f(x+2h)] / (12 h)
        pdiff_x[:, i] = ((field[:, i - 2] - field[:, i + 2]) / 12 +
                         (field[:, i + 1] - field[:, i - 1]) * 2 / 3) / h

    # central difference derivative
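
The stencil above is the five-point central difference; a quick self-contained accuracy check against a function with a known derivative:

import numpy as np

h = 0.01
x = np.arange(0., 1., h)
f = np.sin(x)
i = np.arange(2, len(x) - 2)
df = (f[i - 2] - 8*f[i - 1] + 8*f[i + 1] - f[i + 2]) / (12*h)
print(np.abs(df - np.cos(x[i])).max())   # ~1e-10: the stencil is fourth-order accurate
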
Example #48
0
    # construct individual nodes
    reservoir = ReservoirNode_lyapunov(input_size, 100, lyapunov_skip=100)
    readout = Oger.nodes.RidgeRegressionNode()

    # build network with MDP framework
    flow = mdp.Flow([reservoir, readout])

    # Nested dictionary
    gridsearch_parameters = {reservoir:{'input_scaling': mdp.numx.arange(0.1, 2.2, .7), 'spectral_radius':mdp.numx.arange(0.1, 2.2, .7)}}

    # Instantiate an optimizer
    opt = Oger.evaluation.Optimizer(gridsearch_parameters, Oger.utils.nrmse)

    # Do the grid search
    opt.grid_search(data, flow, cross_validate_function=Oger.evaluation.train_test_only, training_fraction=.9)

    # Plot the maximal LLE for each parameter setting
    lle_max = mdp.numx.zeros(opt.paramspace_dimensions)
    for i in range(opt.paramspace_dimensions[0]):
        for j in range(opt.paramspace_dimensions[1]):
            lle_max[i, j] = mdp.numx.amax(mdp.numx.mean(mdp.numx.array(opt.probe_data[i, j][reservoir]), 0))

    pylab.figure()
    pylab.imshow(mdp.numx.flipud(lle_max), cmap=pylab.cm.jet, interpolation='nearest', aspect="auto", extent=opt.get_extent(opt.parameters))
    pylab.ylabel('Spectral Radius')
    pylab.xlabel('Input scaling')
    pylab.suptitle('Max LLE')
    pylab.colorbar()
    pylab.show()
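
The nested gridsearch_parameters dictionary maps each node to the parameter ranges being swept; conceptually the optimizer walks the cartesian product of those ranges. A rough standalone sketch of that expansion (plain Python, not Oger's API):

import itertools

grid = {'input_scaling': [0.1, 0.8, 1.5], 'spectral_radius': [0.1, 0.8, 1.5]}
names = sorted(grid)
for combo in itertools.product(*(grid[n] for n in names)):
    params = dict(zip(names, combo))
    # ... build the reservoir with **params and record the validation error ...
    print params
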
Example #49
0
def sta_each( getimagenames,openimagesandwrite,calculatemeanrf,tipoalgoritmo,timestampName ,stafolder ,imageruta ,imagefolder ,imagefiltro ,timefolder ,samplingRate ,numberframes ,numberframespost , synchronyfile ,sizex ,sizey, dolog):
	#=============================================
	# GET INPUTS: initial options
	#=============================================

	# # LOAD DE IMAGE NAME LIST WITH THE STIMULUS ENSEMBLE
	# getimagenames = int(sys.argv[1]) #0 

	# # DO TESTS FOR READ AND WRITE IMAGES
	# openimagesandwrite = int(sys.argv[2]) #0 

	# # LOAD ALL THE IMAGES FROM THE STIMULOS ENSEMBLE AND CALCULATE THE MEAN STIMULUS
	# calculatemeanrf = int(sys.argv[3]) #0 

	# # defines how to do the STA process: 
	# # 1 for load all spike triggered stimuli, 2 for sequentially load
	# tipoalgoritmo = int(sys.argv[4]) #2

	# # DEFINE THE NAME OF THE TXT FILE WITH TIME STAMPS FOR LOAD:
	# timestampName = (sys.argv[5]) #C1a 

	# =============================================
	# SET OTHERS OPTIONS
	# =============================================

	# FOLDER NAME TO SAVE EACH FOLDER RESULTS
	# stafolder = 'STA_datos0005'

	# # FOLDER NAME TO LOAD STIMULUS ENSEMBLE: IMAGE STIMULUS FOLDER
	# imageruta = 'D:/'
	# imagefolder = 'checkImages'
	# imagefiltro = '*.png'

	# # SPIKE TIME STAMPS FOLDER FOR LOAD SPIKE TRAINS
	# timefolder = 'TS_datos0005/'

	# # SET THE ADQUISITION SAMPLING RATE OF THE RECORDS
	# samplingRate = 20000 # Hz

	# # SET THE NUMBER OF FRAMES BEFORE AND AFTER A SPIKE TO ANALIZE:
	# # number of frames previous to each spike for STA windows
	# numberframes = 13 
	# # number of frames posterior to each spike for STA windows
	# numberframespost = 5 

	# # SET THE NAME OF THE STIMULUS SYNCHRONY ANALYSIS FILE
	# # IT CONTAINS THE INITIAL AND FINAL TIME STAMPS OF EACH FRAME
	# synchronyfile = 'inicio_fin_frame_datos0005.txt'
	inicio_fin_frame = np.loadtxt(synchronyfile)

	# # SET THE SIZE OF EACH FRAME IN PIXELS
	# sizex = 380 #500 #750
	# sizey = 380 #500 #700

	# # set if do logarithm analysis for plot:
	# dolog = 0

	print '---------------------BEGIN---------------------------'

	#--------------------------------------------------------
	# creates STA result folder
	# if folder "STA" don't exist, create the folder
	#--------------------------------------------------------
	try:
	  os.mkdir( stafolder ) 
	except OSError:
	  pass

	#============================================================
	# Get the image names (optional)
	# This stage is performed the first time of analysis only.
	# For each different stimuli ensemble files, this list has 
	# been created the first time of analysis once.
	#============================================================
	#getimagenames = 0
	if(getimagenames == 1):
		#--------------------------------------------------------
		# get image file names from folder
		#--------------------------------------------------------
		
		#imageruta = '../../VIRTUALWIN/cinv_datos_05-06-2013/'
		#imageruta = '../cinv_datos23-4-2013/'
		#imageruta = '/media/Datos/STA_RVM/Datos 13-11-2013/' #Se aplica el mismo estimulo de la semana anterior
		#imageruta = 'D:\Users\ALIEN3\Desktop/'
		# imageruta = 'D:/'

		# #imagefolder = 'random blanco y negro, cuadrados de 40x40 pixeles'
		# imagefolder = 'checkImages'
		
		# imagefiltro = '*.png'

		globstring     =  imageruta + imagefolder +'/'+ imagefiltro
		
		imagefilenames = glob.glob(globstring) # get file names from folder

		imagefilenames.sort()

		print "\t length imagefilenames: ", len(imagefilenames)
		print "\t last imagefilenames : ",imagefilenames[len(imagefilenames)-1]

		#--------------------------------------------------------
		# save image name list to matlab file
		#--------------------------------------------------------
		ifn = np.array(imagefilenames)
		cadena_texto = "image_filenames"
		print "\t Saving image names as mat file: ",cadena_texto
		scipy.io.savemat(stafolder+'/'+cadena_texto+'.mat',mdict={'ifn':ifn},oned_as='column')
		
		#--------------------------------------------------------
		# save image name list to txt file
		#--------------------------------------------------------
		try:
			print "\t Saving image names as mat txt: ",cadena_texto
			configFile = open(stafolder+'/'+cadena_texto+'.txt', 'w')
			
			for parameter in ifn:
				configFile.write( ''+str(parameter)+' \n' ) 
				
			configFile.close()
	  
		except OSError:
			pass	


	#--------------------------------------------------------
	# load image file names list:
	# (the file should exist before)
	#--------------------------------------------------------
	cadena_texto = "image_filenames"
	#print "\t Loading image names from mat file: ",cadena_texto
	contenedor = scipy.io.loadmat(stafolder+'/'+cadena_texto+'.mat')
	ifn2 = contenedor['ifn']
	del contenedor

	# print "\t length imagefilenames: ", len(ifn2 )
	# print "\t last imagefilenames : ",ifn2 [len(ifn2 )-1]
	# print "\n"

	#============================================================
	# Load images and write each image as a vector:
	# This stage is only a test for load and save images from 
	# original stimuli images.
	# The important issue is to load correctly images for the 
	# use of all information of each frame, using grey scale or 
	# RGB or others channels.
	#============================================================
	#openimagesandwrite = 0
	if(openimagesandwrite == 1):

	   stimuli =[]  # initialize stimuli matrix to keep images as vectors
	   
	   contador = 0 # to get a defined number of images (abac)
	   
	   limite = 5   # define the number of images to load using the image file name list
	   
	   imagefilenames = ifn2

	   for line in imagefilenames:
		print '\t Loading stimuli: '+ line # show inline image name
		#--------------------------------------------------------
		# load as gray scale image:
		#--------------------------------------------------------
		imagen = scim.imread(line, flatten = True) # read one image as gray scale

		tamanox = len(imagen)
		tamanoy = len(imagen[0])
		tamanomin = np.min([tamanox,tamanoy])
		
		print '\t size imagen : ', tamanox, 'x',tamanoy
		
		imagenvector = np.zeros( ( 1,tamanomin*tamanomin ) ) # initialize vector for keep image
			
		k = 0
		for i in range(tamanomin): # over image x,y coordinates, for keep image matrix as row vector
		   for j in range(tamanomin):
			  imagenvector[0,k] = imagen[i,j] # keep one image pixel as one vector element
			  k = k+1 	# for next element
			  
		stimuli.append( imagenvector ) # add image row vector and a group in a big matrix [MxN]
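		# note: the crop-and-flatten double loop above is equivalent to the
		# vectorized form imagenvector = imagen[:tamanomin, :tamanomin].reshape(1, -1)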

		saveplot = 1
		
		if saveplot:
			
			pl.figure()
			im = pl.imshow(imagen,interpolation = 'none')
			pl.gray() #pl.hot()
			#pl.clim(mini,maxi)
			pl.contour(imagen)
			pl.colorbar(im)
			ax = pl.axes()
			ax.set_yticklabels([])
			ax.set_xticklabels([])
			#ax.annotate(str(a+1), (.1, 1.2), bbox=dict(boxstyle="round, pad=0.3", fc="w"), size=52 )
			pl.savefig(stafolder+"/gray_frame-"+str(contador)+"-g.png",format='png', bbox_inches='tight')

			contador = contador + 1 # counting images of the ensemble
			
		if contador==limite: 
				break


	#============================================================
	# Calculate the mean frame image for the entire stimuli 
	# ensemble. This calculation can be performed in one big load
	# or sequentially.
	#============================================================
	#calculatemeanrf = 0
	if(calculatemeanrf==1):
		#---------------------------------------------------
		# get all images and calculate the mean frame image
		#---------------------------------------------------
		
		limite2 = len(ifn2)-1 #100000
		
		contador = 0
		
		#tamanomin = 700
		
		imagenvector = np.zeros( ( 1 ,tamanomin*tamanomin ) )

		vectoracumula = imagenvector
		
		#tamanox = 700
		#tamanoy = 750
		
		imagenacumula = np.zeros( ( tamanox , tamanoy ) )

		for line in ifn2: # ifn2 holds the image file name list loaded above
			print 'load id: ', contador #,' Loading stimuli: '+line # show inline image name
			#--------------------------------------------------------
			# load as gray scale image:
			#--------------------------------------------------------
			imagen = scim.imread(line, flatten=True) # read one image as grayscale

			imagenacumula = imagenacumula + imagen
			
			#stimuli.append( imagenvector ) # add image row vector and add to bigmatrix [MxN]

			contador = contador + 1 # counting images of the ensemble
			if contador==limite2: 
					break

		meanimage = imagenacumula / (limite2*1.0)

		meanimagearray = np.array(meanimage)
		
		cadena_texto = "mean_image"
		print "\t Saving mean image as mat file: ",cadena_texto
		scipy.io.savemat(stafolder+'/'+cadena_texto+'.mat',mdict ={'meanimagearray':meanimagearray},oned_as='column')

		#--------------------------------------------------------
		# save mean frame as image:
		#--------------------------------------------------------
		pl.figure()
		#im = pl.imshow(component,interpolation='bicubic')
		im = pl.imshow(meanimage,interpolation = 'none')
		pl.gray() #pl.hot()
		#pl.clim(mini,maxi)
		pl.contour(meanimage)
		pl.colorbar(im)
		ax = pl.gca() # current axes
		ax.set_yticklabels([])
		ax.set_xticklabels([])
		#ax.annotate(str(a+1), (.1, 1.2), bbox=dict(boxstyle="round, pad=0.3", fc="w"), size=52 )
		pl.savefig(stafolder+"/mean_gray_frame-g.png",format='png', bbox_inches='tight')


	#============================================================
	# DO STA
	#============================================================

	#--------------------------------------------------------
	# get spike time stamps from file 
	#--------------------------------------------------------

	#timefolder = 'ss_dMCD_fs20_nb1218_f2_CINV-25-06-2013-RANDOM-1/'
	# timefolder = 'TS_datos0001/'

	# nb = 1218 # number of blocks
	# e  = 15 # 10 #9 #8 # number of electrode
	# nc = 0 # 2 #0 # number of cluster

	#timestampName = 'M10a'

	#neurontag = 'e_'+timestampName # tag or number of cell
	neurontag = timestampName # tag or number of cell

	rastercelulatxtfile = timefolder + timestampName +'.txt'
	#rastercelulatxtfile = timestampName
	
	timestamps = np.loadtxt(rastercelulatxtfile) # text file containing time spikes in datapoints

	#limite3 = 820 #37928 #218 #800 # limit number of spikes to analyze

	# neuronresultfolder_lin = 'cell_'+str(neurontag)+'_lineal'
	# neuronresultfolder_log = 'cell_'+str(neurontag)+'_log'
	neuronresultfolder_lin = str(neurontag)+'_lineal'
	neuronresultfolder_log = str(neurontag)+'_log'

	# CREATE SUBFOLDERS
	try:
	  os.mkdir( stafolder+'/'+neuronresultfolder_lin ) # create the folder
	except OSError:
	  pass
	  
	if dolog ==1:
		try:
		  os.mkdir( stafolder+'/'+neuronresultfolder_log ) # create the folder
		except OSError:
		  pass
	  
	finalfolder_lin = stafolder+'/'+neuronresultfolder_lin
	finalfolder_log = stafolder+'/'+neuronresultfolder_log

	#rastercelulatxtfile = timefolder+'isi_spktimes_nb'+str(nb)+'_e'+str(e)+'_nc'+str(nc)+'.txt'
	#rastercelulatxtfile = timefolder+'isi_spktimes_nb90_e17_nc0.txt'
	#rastercelulatxtfile = timefolder+'F1a.txt'
	#timestamps = np.loadtxt(rastercelulatxtfile) # text file containing time spikes in datapoints

	print 'size time stamps vector: ', len(timestamps) #, 'x',len(timestamps[0])

	#--------------------------------------------------------
	# set useful parameters
	#--------------------------------------------------------
	# samplingRate = 20000 #20000 Hz

	# #frameinicio = 31199 #31855-671+2 #1.00385e6 # points, start of the frame presentation
	# #frameinicioms = 5.01925e4 # ms

	# #frameduracion = 334*2 #671 #668 #350 # points = (1.00455-1.0042)*10^6
	# #frameduracionms = 17.5 # ms 

	# numberframes = 13 # number of frames previous to each spike for STA windows
	# numberframespost = 5 # number of frames posterior to each spike for STA windows

	# inicio_fin_frame = np.loadtxt('inicio_fin_frame_datos0001.txt')
	#print (inicio_fin_frame)

	#--------------------------------------------------------
	# get spike times relative to the stimulus start (frames do not start at time = 0)
	#--------------------------------------------------------
	#timestamps2 = timestamps[timestamps[:,1]>frameinicio*2,1]
	#print 'size timestamps2: ', len(timestamps2)
	#stimes = (timestamps[:,1]-timeoffset)**(samplingRate/(1000*1.0)) # time spikes (second column of data) as vector

	#--------------------------------------------------------
	# Conversion from microseconds to seconds
	#--------------------------------------------------------
	#timestamps2 = timestamps[:,1]/1000000
	#print timestamps2[100,1]

	#--------------------------------------------------------
	# Conversion of spike times from seconds to POINTS:
	#--------------------------------------------------------
	#vector_spikes = timestamps[:,1]*samplingRate
	vector_spikes = timestamps[:]*samplingRate # convert spike times (single-column file) from seconds to sample points
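	# e.g. with samplingRate = 20000 Hz (the value suggested in the commented
	# parameters above), a spike at t = 1.5 s maps to sample point 30000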
	#vector_spikes = timestamps[:,1]

	#print vector_spikes

	#punto_spike = timestamps[:,1]

	#print inicio_fin_frame[:,0]

	vector_fin_frame = inicio_fin_frame[:,1]
	vector_inicio_frame = inicio_fin_frame[:,0]

	# indices of the frames that triggered each spike (filled below)
	stimei = [] 

	spikeframe_matrix = np.zeros( (len(vector_spikes), 4) ) # [spike time, frame id, ini time frame, end time frame]

	#--------------------------------------------------------
	# convert stimes (SPIKE TIMES) to frame indexes (image index):
	#--------------------------------------------------------
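	# A vectorized alternative to the nested loop below (a sketch; it assumes
	# the frame intervals are sorted and non-overlapping, and the name
	# stimei_vec is illustrative, not used afterwards):
	#
	#   idx = np.searchsorted(vector_fin_frame, vector_spikes) # first frame with end >= spike
	#   idx = np.clip(idx, 0, len(vector_fin_frame) - 1)
	#   inside = vector_inicio_frame[idx] < vector_spikes      # spike also after frame start
	#   stimei_vec = idx[inside]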

	primer_frame = 0
	frame_ant = 0

	print 'first spike pos: ',vector_spikes[0],' last spike pos: ',vector_spikes[-1]
	print 'first frame begin at: ',inicio_fin_frame[0,0],' last frame ends at: ',inicio_fin_frame[-1,1]
	#print 'final: ',len(vector_spikes),' x ',len(vector_fin_frame),' = ',len(vector_spikes) * len(vector_fin_frame) 

	print 'Get the spike triggered stimuli indices: \n'

	contator = 0
	contator2 = 0
	totalcont = len(vector_spikes) * len(range(primer_frame, len(vector_fin_frame)))
	for punto_spike in vector_spikes:
		for i in range(primer_frame, len(vector_fin_frame)):
			if (vector_inicio_frame[i] < punto_spike) and (punto_spike <= vector_fin_frame[i]):
				# the spike time falls inside this frame's interval (start, end]
				
				#print '\nframe start point:\t',inicio_fin_frame[i,0]
				#print 'spike point:\t\t\t',punto_spike
				#print 'frame end point:\t',inicio_fin_frame[i,1]
				spikeframe_matrix[contator,0] = punto_spike
				spikeframe_matrix[contator,1] = vector_fin_frame[i]
				spikeframe_matrix[contator,2] = inicio_fin_frame[i,0]
				spikeframe_matrix[contator,3] = inicio_fin_frame[i,1]
				
				stimei.append(i)
				frame_ant = i
				
				sys.stdout.write("\r%d%%" %contator2)
				sys.stdout.flush()
				break # frames do not overlap: stop scanning once the matching frame is found
				
		contator = contator + 1 #
		#contator2 = contator * 100 // (1.0 * len(vector_spikes) * len(vector_fin_frame) )/47*100
		contator2 = contator * 100 // ( 1.0 * len(vector_spikes) )
			
				
		primer_frame = frame_ant
		#print primer_frame

	print '\n'	
	limite3 = len(stimei)
	print 'length of frame-times vector: ', len(vector_fin_frame)
	print "length time stamps vector: ", len(timestamps)
	print "length spike triggered stimuli time i vector: ", len(stimei)

	#puntos = 10
	#print " timestamps: ", (timestamps[0:puntos,1])
	#print " stimei: ", (stimei[0:puntos])

	#--------------------------------------------------------
	# load mean frame from mat file:
	#--------------------------------------------------------
	cadena_texto = "mean_image"
	#print "\t Loading mean frame from mat file: ",cadena_texto
	contenedor = scipy.io.loadmat(stafolder+'/'+cadena_texto+'.mat')
	meanimagearray = contenedor['meanimagearray']
	del contenedor

	#--------------------------------------------------------
	# STA Algorithm
	#--------------------------------------------------------
	#tipoalgoritmo = 2

	#------------------- ALGORITHM TYPE 1----------------------
	# LOAD ALL THE FRAMES ACCORDING TO THE TIME STAMPS OF THE CELL
	# NOT FUNCTIONAL ANYMORE
	if(tipoalgoritmo == 1):
		limite3 = len(stimei)
		kframe = 0
		spk = np.zeros((500,500,numberframes,limite3))

		for kiter in range(limite3):
			kframe = stimei[kiter]
		
			for b in range(numberframes):
				print ' kiter: ',kiter, ' kframe: ',kframe, ' b: ',b
				line = ifn2[kframe-(numberframes-1)+ b ]
				imagen = scim.imread(line, flatten=True)
				spk[:,:,b,kiter] = imagen - meanimagearray

		N = len(stimei)
		STA = ( np.add.reduce(spk,axis=3) / (1.0 * N) ) 
		#sta = sum(spk, axis=1 (over column) ) / (1*Number_Spikes)
		MEANSTA = ( np.add.reduce(STA,axis=2) / (1.0 * numberframes) )

	#------------------- ALGORITHM TYPE 2----------------------
	# LOAD EACH FRAME AND CALCULATES THE STA SEQUENTIALLY
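	# What this computes, for the N = len(stimei) spike-triggering frame
	# indices t_k:
	#
	#   STA[x, y, b] = (1/N) * sum_k ( frame[t_k - (numberframes-1) + b][x, y] - meanimage[x, y] )
	#
	# i.e. the average mean-subtracted stimulus over a window spanning
	# numberframes frames before and numberframespost frames after each spike.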
	if(tipoalgoritmo == 2): # sequentially algorithm

		kframe = 0
		
		# sizex = 380 #500 #750
		# sizey = 380 #500 #700
		
		acumula = np.zeros((sizex,sizey,numberframes+numberframespost))

		#timeProcessIni = time.time()

		print 'Get the spike triggered stimuli: \n '
		for kiter in range(limite3):
			timeProcessIni = time.time()
			
			kframe = stimei[kiter]
			
			for b in range(numberframes+numberframespost):
				#print 'b ',b
				line = ifn2[kframe-(numberframes-1)+ b]
				imagen = scim.imread( line, flatten=True )
				#spk[:,:,b,kiter] = imagen
				acumula[:,:,b] = acumula[:,:,b] + (imagen - meanimagearray)
				
				
			timeProcessFin = time.time() 
			tiempoDiferencia = timeProcessFin - timeProcessIni
			#print '\r kiter: ',kiter, ' kframe: ',kframe, '  ',"%.2f" % ((kiter+1)*100.0/limite3, ), ' % ' , (limite3 -(kiter+0)) * tiempoDiferencia/60, 'min'

			#sys.stdout.write("\r%d%%" %contator2)   
			#sys.stdout.write("\r%d%% %d%%" %((kiter+1)*100.0/limite3) %(limite3 -(kiter+0)) * tiempoDiferencia/60)    
			sys.stdout.write("\r%d%%" %((kiter+1)*100.0/limite3, ) ) 
			#sys.stdout.write("\r%d%%" %((limite3 -(kiter+0)) * tiempoDiferencia/60 ) ) 
			sys.stdout.flush()
			
			
		N = limite3 # len(stimei)

		STA = acumula / (1.0 * N) # average the accumulated frames (float division; // would floor)
		
		print ' \n '

		minimosta = np.min(np.min(np.min(STA)))	
		maximosta = np.max(np.max(np.max(STA)))
		#print '\nmin sta ', minimosta, ' max sta ', maximosta
		STA_desp = STA - minimosta # linear shift so that the minimum becomes zero
		minimosta_desp = np.min(np.min(np.min(STA_desp)))
		maximosta_desp = np.max(np.max(np.max(STA_desp)))
		#print 'min sta with bias', minimosta_desp
		#print 'max sta with bias', maximosta_desp
		
		if dolog ==1:
			STA_log = np.log10(STA_desp + 1) # logarithmic normalization; the +1 avoids log(0)
			minimosta_log = np.min(np.min(np.min(STA_log)))
			maximosta_log = np.max(np.max(np.max(STA_log)))
			print 'min sta log ', minimosta_log
			print 'max sta log ', maximosta_log
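			# Note on the mapping applied further below: values end up displayed
			# as 255 * log10(1 + v) / log10(1 + v_max), which compresses the
			# dynamic range so that dim structure near zero stays visible.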
			
		stavisual_lin = STA_desp*255 # visualized on a linear scale
		stavisual_lin = stavisual_lin / (maximosta_desp *1.0) # normalized to [0, 255] (float division; // would floor)
		#print 'min sta visual linear', np.min(np.min(np.min(stavisual_lin)))
		#print 'max sta visual linear', np.max(np.max(np.max(stavisual_lin)))

		if dolog ==1:
			stavisual_log = STA_log*255 # visualized on a logarithmic scale
			stavisual_log = stavisual_log / (maximosta_log *1.0) # normalized to [0, 255] (float division)
			print 'min sta visual log', np.min(np.min(np.min(stavisual_log)))
			print 'max sta visual log', np.max(np.max(np.max(stavisual_log)))
		   
		# FINAL NORMALIZATION FOR THE MEAN STA
		MEANSTA_lin = ( np.add.reduce(stavisual_lin,axis=2) / (1.0 * (numberframes+numberframespost) ) )
		if dolog ==1:
			MEANSTA_log = ( np.add.reduce(stavisual_log,axis=2) / (1.0 * (numberframes+numberframespost) ) )
		
	#============================================================
	#============================================================

	print '\nsize STA: ',len(STA),'x',len(STA[0]),'x',len(STA[0][0])

	#----------------------------------------------------
	# save results
	#----------------------------------------------------

	spikeframe_matrix_array =  np.array(spikeframe_matrix)
	spikeframe_filename = "spikeframe_matrix"+str(neurontag)
	print "\t spikeframe_matrix as mat file: ",spikeframe_filename
	scipy.io.savemat(finalfolder_lin+'/'+spikeframe_filename+'.mat',mdict={'spikeframe_matrix':spikeframe_matrix_array},oned_as='column')


	stavisual_lin_array = np.array(stavisual_lin)
	cadena_texto = "stavisual_lin_array_"+str(neurontag)
	print "\t Saving visual STA (lineal) as mat file: ",cadena_texto
	scipy.io.savemat(finalfolder_lin+'/'+cadena_texto+'.mat',mdict={'STAarray_lin':stavisual_lin_array},oned_as='column')

	if dolog ==1:
		stavisual_log_array = np.array(stavisual_log)
		cadena_texto = "stavisual_log_array_"+str(neurontag)
		print "\t Saving visual STA (logarithmic) as mat file: ",cadena_texto
		scipy.io.savemat(finalfolder_log+'/'+cadena_texto+'.mat',mdict={'STAarray_log':stavisual_log_array},oned_as='column')

	# MEANSTAarray_lin = np.array(MEANSTA_lin)
	# cadena_texto = "MEANSTA_lin"+str(neurontag)
	# print "\t Saving STA as mat file: ",cadena_texto
	# scipy.io.savemat(finalfolder_lin+'/'+cadena_texto+'.mat',mdict={'MEANSTAarray_lin':MEANSTAarray_lin},oned_as='column')

	# MEANSTAarray_log = np.array(MEANSTA_log)
	# cadena_texto = "MEANSTA_log"+str(neurontag)
	# print "\t Saving STA as mat file: ",cadena_texto
	# scipy.io.savemat(finalfolder_log+'/'+cadena_texto+'.mat',mdict={'MEANSTAarray_log':MEANSTAarray_log},oned_as='column')

	print '\nSaving images in linear scale...'
	for b in range(numberframes+numberframespost):
		#print 'Image ', b
		sys.stdout.write("\r Image %d" %(b ) ) 
		sys.stdout.flush()
		component = stavisual_lin[:,:,b] #STA[:,:,b]
		pl.figure()
		#im = pl.imshow(component,interpolation = 'none')
		#im = pl.pcolor( component,vmin = 0,vmax = 255, cmap=cm.jet )
		im = pl.pcolormesh( component,vmin = 0,vmax = 255, cmap=cm.jet )
		#pl.contour(component)
		pl.colorbar(im)
		ax = pl.gca() # use the current axes (pl.axes() would create a new, empty axes on top)
		ax.set_yticklabels([])
		ax.set_xticklabels([])
		#ax.annotate(str(a+1), (.1, 1.2), bbox=dict(boxstyle="round, pad=0.3", fc="w"), size=52 )
		pl.savefig(finalfolder_lin+"/STA-"+str(neurontag)+"-"+str(b)+"-g_.png",format='png', bbox_inches='tight')
		#pl.savefig(stafolder+"/STA-"+str(neurontag)+"-"+str(b)+"-g_.jpg",format='jpg', bbox_inches='tight')
		#del pl
		del component

	if dolog ==1:
		print '\nSaving images in logarithmic scale...'
		for b in range(numberframes+numberframespost):
			#print 'Image ', b
			sys.stdout.write("\r Image %d" %(b ) ) 
			sys.stdout.flush()
			component = stavisual_log[:,:,b] #STA[:,:,b]
			pl.figure()
			#im = pl.imshow(component,interpolation = 'none')
			#im = pl.pcolor(component,vmin = 0,vmax = 255, cmap=cm.jet)
			im = pl.pcolormesh( component,vmin = 0,vmax = 255, cmap=cm.jet )
			#pl.contour(component)
			pl.colorbar(im)
			ax = pl.gca() # current axes
			ax.set_yticklabels([])
			ax.set_xticklabels([])
			####ax.annotate(str(a+1), (.1, 1.2), bbox=dict(boxstyle="round, pad=0.3", fc="w"), size=52 )
			pl.savefig(finalfolder_log+"/STA-"+str(neurontag)+"-"+str(b)+"-g_.png",format='png', bbox_inches='tight')
			#pl.savefig(stafolder+"/STA-"+str(neurontag)+"-"+str(b)+"-g_.jpg",format='jpg', bbox_inches='tight')
			#del pl
			del component

	#for b in range(numberframes+numberframespost):
	#	print 'Image ', b
	#	component = stavisual_lin[:,:,b]
	#	#im = pl.pcolor(component,vmin = 0,vmax = 255, cmap=cm.jet)	
	#	misc.imsave(finalfolder_lin+"/STA-"+str(neurontag)+"-"+str(b)+"-g_.png", component)
	#	#plt.imshow(component)
	#	#plt.show()
	#	myImC = Image.open(finalfolder_lin+"/STA-"+str(neurontag)+"-"+str(b)+"-g_.png")
	#	out = ImageChops.invert(myImC)
	#	out.save(finalfolder_lin+"/(inv)_STA-"+str(neurontag)+"-"+str(b)+"-g_.png")

	print '\nSaving mean image in linear scale...'
	pl.figure()
	#im = pl.imshow(MEANSTA,interpolation = 'none')
	im = pl.pcolormesh(MEANSTA_lin,vmin = 0,vmax = 255, cmap=cm.jet)
	#pl.gray() 
	pl.jet()
	#pl.contour(MEANSTA)
	pl.colorbar(im)
	ax = pl.gca() # current axes
	ax.set_yticklabels([])
	ax.set_xticklabels([])
	#ax.annotate(str(a+1), (.1, 1.2), bbox=dict(boxstyle="round, pad=0.3", fc="w"), size=52 )
	pl.savefig(finalfolder_lin+"/MEANSTA-g_"+str(neurontag)+".png",format='png', bbox_inches='tight')

	if dolog ==1:
		print 'Saving mean image in logarithmic scale...'
		pl.figure()
		#im = pl.imshow(MEANSTA,interpolation = 'none')
		im = pl.pcolormesh(MEANSTA_log,vmin = 0,vmax = 255, cmap=cm.jet)
		#pl.gray() 
		pl.jet()
		#pl.contour(MEANSTA)
		pl.colorbar(im)
		ax = pl.gca() # current axes
		ax.set_yticklabels([])
		ax.set_xticklabels([])
		#ax.annotate(str(a+1), (.1, 1.2), bbox=dict(boxstyle="round, pad=0.3", fc="w"), size=52 )
		pl.savefig(finalfolder_log+"/MEANSTA-g_"+str(neurontag)+".png",format='png', bbox_inches='tight')


	# minim = np.min(np.min(MEANSTA))
	# if minim < 0:
		# MEANSTA2 = MEANSTA + np.abs(minim) # linear shift
	# elif minim > 0:
		# MEANSTA2 = MEANSTA - np.abs(minim) # linear shift
	# else:
		# MEANSTA2 = MEANSTA
	# minim2 = np.min(np.min(MEANSTA2))
	# maxim = np.max(np.max(MEANSTA2))
	# print 'min ', minim2
	# print 'max ', maxim
	# meanvisual = MEANSTA2*255
	# meanvisual = meanvisual / (maxim*1.0)
	# #print meanvisual
	# minim3 = np.min(np.min(meanvisual))
	# maxim3 = np.max(np.max(meanvisual))
	# print 'min visual', minim3
	# print 'max visual', maxim3

	# pl.figure()
	# #im = pl.imshow(meanvisual,interpolation = 'none')
	# im = pl.pcolor(meanvisual,vmin = 0,vmax = 255, cmap=cm.jet)
	# #pl.gray() 
	# pl.jet()
	# #pl.contour(meanvisual)
	# pl.colorbar(im)
	# ax = pl.gca()
	# ax.set_yticklabels([])
	# ax.set_xticklabels([])
	# #ax.annotate(str(a+1), (.1, 1.2), bbox=dict(boxstyle="round, pad=0.3", fc="w"), size=52 )
	# pl.savefig(finalfolder_lin+"/MEANSTA2-g_"+str(neurontag)+".png",format='png', bbox_inches='tight')

	print 'CELL ' + timestampName + ' FINISHED!!!'
	print '-----------------------END---------------------------'
	del STA_desp 
	del STA 
	del stavisual_lin 
	del spikeframe_matrix
	del imagen
	del acumula
예제 #50
0
#          levels=levels, colors='gray')

P.plot(r_floops, z_floops, 'ro')
#P.plot(-r_floops, z_floops, 'ro')

if 'tris' not in dir():
    rzt, tris, pt = t3dinp('hitpops.05.t3d')

#do_conf = False
do_conf = True

if do_conf:
    P.tricontourf(rzt[:, 0], rzt[:, 1], tris, beta, 1001, zorder=0)
    cticks = P.linspace(0.0, 0.2, 5)
    P.colorbar(ticks=cticks, format='%.2f')
    P.jet()

#no_text = True
no_text = False
# whether to annotate in MA or MW
show_MA = True

if not no_text:
    # annotate coil powers
    for ii in xrange(n_b_coils):
        if i_floops[ii] >= 0:
            signum = '+'
        else:
            signum = '-'
        if show_MA:
            tdata = 1e-6 * i_floops[ii]
예제 #51
0
def main(path, marked_path=None):
    # images multiscale
    imgs_mscale = try_pickle_load(path)
    n_scales = len(imgs_mscale)
    imgs_s0 = imgs_mscale[0]  # scale 1
    image_shape = (imgs_s0.shape[2], imgs_s0.shape[3])

    images_to_show = min(IMAGES_TO_SHOW, len(imgs_s0))

    print "Images shape", imgs_s0.shape
    print "Number of images to show", images_to_show
    print "Number of scales", n_scales
    print "Requested image shape will be", image_shape
    n_rows = (1 + n_scales) * 2

    perturbed_imgs = [np.empty((images_to_show, imgs.shape[1],
                                imgs.shape[2], imgs.shape[3]))
                      for imgs in imgs_mscale]
    perturbed_marks = None
    if marked_path is not None:
        marked_imgs = try_pickle_load(marked_path)
        perturbed_marks = np.empty((images_to_show, marked_imgs.shape[1],
                                    marked_imgs.shape[2]))

    for i in xrange(images_to_show):
        imgs_to_perturb = [img[i] for img in imgs_mscale]
        # if we loaded markings, add marked image to list of imgs to perturb
        if perturbed_marks is not None:
            imgs_to_perturb.append(marked_imgs[i])

        ret_list = perturb_image(imgs_to_perturb, image_shape)
        for n_scale in range(n_scales):
            perturbed_imgs[n_scale][i] = ret_list[n_scale]

        if perturbed_marks is not None:
            perturbed_marks[i] = ret_list[n_scales]

    for i, imgs in enumerate(imgs_mscale):
        for j in xrange(images_to_show):
            pylab.subplot(n_rows, images_to_show, i * images_to_show + j + 1)
            pylab.axis('off')
            pylab.imshow(imgs[j, CHANNEL, :, :])
            pylab.gray()  # set colormap

    for ind, imgs in enumerate(perturbed_imgs):
        i = n_scales + ind
        for j in xrange(images_to_show):
            pylab.subplot(n_rows, images_to_show, i * images_to_show + j + 1)
            pylab.axis('off')
            pylab.imshow(imgs[j, CHANNEL, :, :])
            pylab.gray()

    if perturbed_marks is not None:
        for j in xrange(images_to_show):
            pylab.subplot(n_rows, images_to_show, (2*n_scales+0) * images_to_show + j + 1)
            pylab.axis('off')
            pylab.imshow(marked_imgs[j, :, :])
            pylab.jet()

            pylab.subplot(n_rows, images_to_show, (2*n_scales+1) * images_to_show + j + 1)
            pylab.axis('off')
            pylab.imshow(perturbed_marks[j, :, :])
            pylab.jet()

    pylab.show()
예제 #52
0
#! populations recorded.
vmn = [-80, -80, -80, -80]
vmx = [-50, -50, -50, -50]

nest.Simulate(Params['sim_interval'])

#! loop over simulation intervals
for t in pylab.arange(Params['sim_interval'], Params['simtime'],
                      Params['sim_interval']):

    # do the simulation
    nest.Simulate(Params['sim_interval'])

    # clear figure and choose colormap
    pylab.clf()
    pylab.jet()

    # now plot data from each recorder in turn, assume four recorders
    for name, r in recorders.iteritems():
        rec = r[0]
        sp = r[1]
        pylab.subplot(2, 2, sp)
        d = nest.GetStatus(rec)[0]['events']['V_m']

        if len(d) != Params['N']**2:
            # cortical layer with two neurons in each location, take average
            d = 0.5 * (d[::2] + d[1::2])

        # clear data from multimeter
        nest.SetStatus(rec, {'n_events': 0})
        pylab.imshow(pylab.reshape(d, (Params['N'], Params['N'])),
예제 #53
0
파일: project8.py 프로젝트: mgaitan/tickets
def main():
    # Load and show original images
    pylab.gray()  # set gray scale mode
    print
    print "0. Reading and formatting images..."
    images = {f: loadAndFormat(f) for f in IMAGE_FILES}
    for f in IMAGE_FILES:
        mShow(images[f])

    ###########################
    # -----> Thresholding
    print
    print "1. Thresholding images..."
    thresholdedImages = {f: getThresholdedImage(images[f]) for f in IMAGE_FILES}
    for name in IMAGE_FILES:
        mShow(thresholdedImages[name])

    ###########################
    # -----> Count objects
    # 1st attempt: label the thresholded image from task 1
    print
    print "2. Object counting"
    pylab.jet()  # back to color mode

    print "\t1st approach: Label thresholded images"
    for name in IMAGE_FILES:
        labeled, nrRegions = ndimage.label(thresholdedImages[name])
        print "\t" + name + ": " + str(nrRegions)
        mShow(labeled)

    # 2nd attempt: Changing threshold level
    print
    print "\t2nd approach: Tuned thresholds"
    # For 'objects.png' some objects are very small (e.g.: screw) or
    # have many shades (e.g.: spoon) which makes them disappear or appear
    # fragmented after thresholding.
    # The advantage of this image is that the background is very dark,
    # so we can try using a lower threshold to make all shapes more definite

    objImage = images['objects.png']
    T = mahotas.thresholding.otsu(objImage)
    thresholdedImage = objImage > T * 0.7

    # Looks better, but...
    labeled, nrRegions = ndimage.label(thresholdedImage)
    print '\tobjects.png' + ": " + str(nrRegions)
    # it returns 18!

    # 3rd attempt: Smoothing before thresholding
    print
    print "\t3rd approach: Smoothing + Tuned threshold"
    # Let's apply some Gaussian smoothing AND a lower threshold
    smoothImage = ndimage.gaussian_filter(objImage, 3)
    T = mahotas.thresholding.otsu(smoothImage)
    thresholdedImage = smoothImage > T * 0.7
    labeled, nrRegions = ndimage.label(thresholdedImage)
    print '\tobjects.png' + ": " + str(nrRegions)

    # it worked! Let's save the labeled images for later
    # (we will use them for center calculation)
    labeledImages = {}
    labeledImages['objects.png'] = (labeled, nrRegions)
    mShow(labeled)

    # Let's see if this approach works on the other images
    for name in ['circles.png', 'peppers.png']:
        img = images[name]
        smoothImage = ndimage.gaussian_filter(img, 3)
        T = mahotas.thresholding.otsu(smoothImage)
        thresholdedImage = smoothImage > T * 0.7
        labeled, nrRegions = ndimage.label(thresholdedImage)
        print '\t' + name + ": " + str(nrRegions)

    # Again no luck with the circles!
    # (We will take a closer look at the peppers later)
    # 4th attempt:
    # 'circles.png': The problem is that some circles appear "glued together".
    # Let's try another technique:
    #    - smoothing the picture with a Gaussian filter
    #    - then searching for local maxima and counting regions
    #        (smoothing avoids having many scatter maxima and a higher level
    #         must be used than in the previous attempt)
    #    - use watershed with the maxima as seeds over the thresholded image
    #       to complete the labelling of circles
    print
    print "\t4th approach: Smoothing + Local maxima + Watershed"

    smoothImage = ndimage.gaussian_filter(images['circles.png'], 10)
    localmaxImage = pymorph.regmax(smoothImage)

    # A distance transform must be applied before doing the watershed
    dist = ndimage.distance_transform_edt(thresholdedImages['circles.png'])
    dist = dist.max() - dist
    dist -= dist.min()
    dist = dist / float(dist.ptp()) * 255
    dist = dist.astype(np.uint8)
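    # (the distance map is inverted and rescaled to 0-255 uint8 so that the
    # circle centers become basins for the watershed flooding below)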

    seeds, nrRegions = ndimage.label(localmaxImage)
    labeled = pymorph.cwatershed(dist, seeds)
    print "\t" + 'circles.png' + ": " + str(nrRegions)

    # this counted correctly, but only for 'circles.png'!
    labeledImages['circles.png'] = (labeled, nrRegions)
    mShow(labeled)

    print
    print "\t5th approach: Smoothing + Multi-threshold +" +\
            " Morphology labeling + Size filtering"
    # 5th approach (only peppers left!)
    imagePeppers = images['peppers.png']
    # Problems with peppers are:
    #  - very different colours, they cause thresholding to work poorly
    #  - each pepper has some brighter parts which are detected as local maxima
    # We propose to address those issues as follows:
    #  - gaussian filter to smooth regions of light or shadow within each pepper
    smoothImage = ndimage.gaussian_filter(imagePeppers, 2)

    #  - instead of thresholding to create a binary image,
    #    create multiple thresholds to separate the most frequent colors.
    #     In this case, 3 thresholds will be enough
    mthrImagePeppers = multiThreshold(smoothImage, 3)
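    # A minimal sketch of what such a helper could look like (an assumption;
    # the actual multiThreshold is defined elsewhere in project8.py):
    #
    #   def multiThreshold(img, n):
    #       # n evenly spaced interior thresholds -> n+1 intensity bands
    #       levels = np.linspace(img.min(), img.max(), n + 2)[1:-1]
    #       return np.digitize(img.ravel(), levels).reshape(img.shape)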

    #  - ndimage.label didn't give good results, we try another
    #     labelling algorithm
    from skimage import morphology

    labeled = morphology.label(mthrImagePeppers)

    nrRegions = np.max(labeled) + 1
    print "\t\tTotal number of regions"
    print "\t\t\t" + 'peppers.png' + ": " + str(nrRegions)
    #	- after counting regions, filter to keep only the sufficiently big ones

    filtered, nrRegions = filterRegions(labeled, 0.05)
    print "\t\tBig enough regions"
    print "\t\t\t" + 'peppers.png' + ": " + str(nrRegions)
    labeledImages['peppers.png'] = (filtered, nrRegions)

    mShow(filtered)

    ###########################
    # -----> Find center points
    print
    print "3. Centers for objects"
    for img in IMAGE_FILES:
        labeledImage, nr_objects = labeledImages[img]
        CenterOfMass = ndimage.measurements.center_of_mass
        labels = range(1, nr_objects + 1)
        centers = CenterOfMass(labeledImage, labeledImage, labels)
        centers = [(int(round(x)), int(round(y))) for (x, y) in centers]
        print '\t' + img + ": " + str(centers)