def main():
    img = img_as_float(imread("HJoceanSmall.png"))
    img_seam_v = img_as_float(imread("HJoceanSmall.png"))
    img_transformed_v = img_as_float(imread("HJoceanSmall.png"))
    iterations = 20
    img_seam_v, img_transformed_v = seam_carve(iterations, img_seam_v, img_transformed_v)

    figure()

    subplot(221)
    imshow(img)
    title("1. Original")

    subplot(222)
    imshow(img_seam_v)
    title("2. Seam carved vertical")

    # Transposed Image

    img_seam_hv = img_transformed_v.transpose(1, 0, 2)
    img_transformed_hv = img_transformed_v.transpose(1, 0, 2)
    iterations = 20

    img_seam_hv, img_transformed_hv = seam_carve(iterations, img_seam_hv, img_transformed_hv)

    subplot(223)
    imshow(img_seam_hv.transpose(1, 0, 2))
    title("3. Seam carved horizontal")

    subplot(224)
    imshow(img_transformed_hv.transpose(1, 0, 2))
    title("4. Transformed Image")

    show()
Example #2
    def AnalyseNSS(self):
        if self.Mode=="Manual":
            files=QFileDialog(self)
            files.setWindowTitle('Non-Synchronised Segment Stripes')
            self.CurrentImages=files.getOpenFileNames(self,caption='Non-Synchronised Segment Stripes')

        SSSDlg1=SSSDlg.SSSWidget(self)
        SSSDlg1.Img1=DCMReader.ReadDCMFile(str(self.CurrentImages[0]))
        SSSDlg1.SSS1.axes.imshow(SSSDlg1.Img1,cmap='gray')

        SSSDlg1.Img2=DCMReader.ReadDCMFile(str(self.CurrentImages[1]))
        SSSDlg1.SSS2.axes.imshow(SSSDlg1.Img2,cmap='gray')

        SSSDlg1.Img3=DCMReader.ReadDCMFile(str(self.CurrentImages[2]))
        SSSDlg1.SSS3.axes.imshow(SSSDlg1.Img3,cmap='gray')

        SSSDlg1.Img4=DCMReader.ReadDCMFile(str(self.CurrentImages[3]))
        SSSDlg1.SSS4.axes.imshow(SSSDlg1.Img4,cmap='gray')

        SSSDlg1.ImgCombi=SSSDlg1.Img1+SSSDlg1.Img2+SSSDlg1.Img3+SSSDlg1.Img4
        SSSDlg1.SSSCombi.axes.imshow(SSSDlg1.ImgCombi,cmap='gray')

        EPIDType=np.shape(SSSDlg1.Img1)

        pl.imsave('NSS.jpg',SSSDlg1.ImgCombi)
        Img1=pl.imread('NSS.jpg')
        if EPIDType[0]==384:
            Img2=pl.imread('NSSOrgRefas500.jpg')
        else:
            Img2=pl.imread('NSSOrgRef.jpg')
        self.MSENSS=np.round(self.mse(Img1,Img2))

        if self.Mode=="Manual":
            SSSDlg1.exec_()
def find_movement():
    # img = imread('shot1.jpg')
    # img2 = imread('shot2.jpg')
    img = imread("frame0.jpg")
    img2 = imread("frame2.jpg")
    img1 = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    img2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
    img1 = img_as_float(img1)
    img2 = img_as_float(img2)
    # print img1
    h1, w1 = img1.shape
    h2, w2 = img2.shape

    img3 = zeros((h1, w1))

    for x in range(0, h1 - 1):
        for y in range(0, w1 - 1):
            if abs(img1[x, y] - img2[x, y]) > 0.01:
                # print img1[x, y], " ", img2[x, y]
                img3[x, y] = 1

    figure()
    # subplot(1, 2, 1), imshow(img)
    # subplot(1, 2, 2), \
    imshow(img3)
    show()
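The per-pixel loop above can be expressed as a single NumPy comparison. A minimal vectorized sketch of the same thresholded frame difference, assuming the two grayscale float images img1 and img2 computed inside find_movement:

import numpy as np

def movement_mask(img1, img2, threshold=0.01):
    """Binary mask marking pixels whose intensity changed by more than threshold."""
    return (np.abs(img1 - img2) > threshold).astype(float)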
Example #4
def computeImageDifferences(numPictures):
  first = rgb2gray(pl.imread("reference/0.png"))
  others = []
  for num in range(1, numPictures + 1):
    others.append(rgb2gray(pl.imread("reference/%d.png" % num)))
    print(num)
  result = np.array(others) - first
  #pickle.dump(result, open("computeImageDifference.pickle", "w"))
  return result
Example #5
def computePhaseFirstThree(folder):
    import math
    #transformation = np.vectorize(math.sqrt)
    #transformation = np.vectorize(lambda x: x ** 2)
    transformation = np.vectorize(lambda x: x)  # identity
    im1 = transformation(rgb2gray(pl.imread("%s/0.png" % folder)))
    im2 = transformation(rgb2gray(pl.imread("%s/1.png" % folder)))
    im3 = transformation(rgb2gray(pl.imread("%s/2.png" % folder)))

    result = images.computePhase(im1, im2, im3)
    pickle.dump(result, open("%s.pickle" % folder, "wb"))
Example #6
def load_img(path='data/rjp_small.png', gray=True):
  try:
    x = pylab.imread(path)
  except Exception:
    x = pylab.imread('../' + path)
  if len(x.shape) > 2 and gray:
    x = x[:, :, 2]
  if len(x.shape) > 2 and x.shape[2] == 4:
    x = x[:, :, :3]
  if x.max() > 1:
    x = x.astype('float') / 257.0
  return x
Example #7
 def load(self, uri):
     filename = self.get(uri)
     if isimg(filename):
         obj = pylab.imread(filename)
     elif ishdf5(filename):
         f = h5py.File(filename, 'r')
         obj = f[self.key(filename)].value  # FIXME: lazy evaluation?              
     else:
         try:
             obj = pylab.imread(filename)
         except:
             raise CacheError('[bobo.cache][ERROR]: unsupported object type for loading key "%s" ' % self.key(uri))
     return obj
Example #8
def test_file_image(fname):
  ext = os.path.splitext(fname)[-1][len(os.path.extsep):]
  kwargs = to_dict_params(fname)

  # Creates the image in memory
  mem = BytesIO()
  fractal_data = call_kw(generate_fractal, kwargs)
  imsave(mem, fractal_data, cmap=kwargs["cmap"], format=ext)
  mem.seek(0) # Return stream position back for reading

  # Comparison pixel-by-pixel
  img_file = imread("images/" + fname)
  img_mem = imread(mem, format=ext)
  assert img_file.tolist() == img_mem.tolist()
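An exact tolist() comparison only holds for lossless formats; for lossy encodings a tolerance-based check is safer. A small sketch, assuming the img_file and img_mem arrays from test_file_image above:

import numpy as np

# tolerate one quantization step per 8-bit channel
assert np.allclose(img_file, img_mem, atol=1.0 / 255.0)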
def load_descs(config, i, norm=1, old_desc=False, invert_mask=False, use_masks2=0):
    fname = config.desc_filename(i)
    mask_name = config.mask_filename(i)
    mask      = pylab.imread(mask_name)
    descs     = load_ndesc_pc(fname, norm=norm, old_desc=old_desc)
    if use_masks2==0:
        descs     = [l for l in descs if bool(mask[l.v,l.u])!=bool(invert_mask)]
    elif use_masks2==1:
        mask2 = pylab.imread(config.mask2_filename(i))
        descs = [l for l in descs if bool(mask2[l.v,l.u])]
    elif use_masks2==2:
        mask2 = pylab.imread(config.mask2_filename(i))
        descs = [l for l in descs if not(mask2[l.v,l.u].astype('bool')) and mask[l.v,l.u].astype('bool')]
    return descs
Example #10
def myimread(imgname,flip=False,resize=None):
    """
        read an image
    """
    img=None
    if imgname.split(".")[-1]=="png":
        img=pylab.imread(imgname)
    else:
        img=numpy.ascontiguousarray(pylab.imread(imgname)[::-1])
    if flip:
        img=numpy.ascontiguousarray(img[:,::-1,:])        
    if resize is not None:
        from scipy.misc import imresize
        img=imresize(img,resize)
    return img
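scipy.misc.imresize has been removed from recent SciPy releases. A minimal replacement sketch using Pillow, assuming resize is a (height, width) tuple; the helper name below is illustrative and not part of the original code:

import numpy
from PIL import Image

def resize_image(img, size):
    """Resize an image array to (height, width); img is uint8 or float in [0, 1]."""
    if img.dtype != numpy.uint8:
        img = numpy.uint8(numpy.clip(img, 0.0, 1.0) * 255)
    # PIL expects (width, height)
    return numpy.asarray(Image.fromarray(img).resize((size[1], size[0])))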
Example #11
    def __init__(self, path, waveaxis=None, regions=None, start=None, end=None):
        """ SpectrumImage(path[, waveaxis[, regions]]) initializes a new
        spectrum image from the specified image path.

        Upon initialization, the image is read in to a numpy ndarray, this
        method currently assumes that 2 of the three color channels are
        redundant and eliminates them. At the time of initialization, the
        wavelength axis (0 - columns, 1 - rows) may be specified as well
        as a tuple of lists representing regions in the form [min, max].
        """
        self.image = pylab.imread(path)
        self.image = self.image[:,:,0]
        self.start = start
        self.end = end
        self.regions = []
        if waveaxis == 0 or waveaxis == 1:
            self.waveaxis = waveaxis
            for region in regions:
                bounds = self._validateregion([region['min'], region['max']])
                try:
                    self.regions.append({'min': bounds[0], 'max': bounds[1],
                                         'group': region['group']})
                except TypeError:
                    pass
        elif waveaxis is not None:
            raise ValueError('If the wavelength axis is specified it must '
                             'be 0 or 1.')
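A minimal usage sketch for the class above; the image path, bounds, and group labels are purely illustrative:

regions = ({'min': 120, 'max': 180, 'group': 0},
           {'min': 300, 'max': 360, 'group': 1})
spec = SpectrumImage('spectrum.png', waveaxis=0, regions=regions)
print(spec.regions)  # validated [min, max] bounds with their group labels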
Example #12
def test_perspective():
    # Defining the points to use. The first 4 are passed to the
    # perspectiveTransform function; the last is only used for drawing the
    # yellow lines and is the same as the first.
    points_x = [147, 100, 300, 392, 147]    
    points_y = [588, 370, 205, 392, 588]
    #points_x = [570, 821, 590, 346, 570]    
    #points_y = [186, 170, 590, 558, 186]

    # Get the image
    a = imread('flyeronground.png')
    a = rgb2gray(a)
    subplot(121)

    # Draw the lines
    plot(points_x, points_y, 'y-')
    imshow(a,vmin=0,vmax=1,cmap="gray")

    # Calculate and show the new image
    subplot(122)
    b = perspectiveTransform(a, points_x[0], points_y[0], 
                                points_x[1], points_y[1], 
                                points_x[2], points_y[2],
                                points_x[3], points_y[3], 300, 200)
    imshow(b,vmin=0,vmax=1,cmap="gray")
    show()
Example #13
def dotdraw(s, direction="RL", **kwargs):
    """
  Make a drawing of an SX and display it.
  
  direction   one of "BT", "LR", "TB", "RL"
  """

    try:  # Check if we have pylab
        from pylab import imread, imshow, show, figure, axes
    except ImportError:
        # We don't have pylab, so just write out to file
        print("casadi.tools.graph.dotdraw: no pylab detected, will not show drawing on screen.")
        dotgraph(s, direction=direction, **kwargs).write_ps("temp.ps")
        return

    if hasattr(show, "__class__") and show.__class__.__name__ == "PylabShow":
        # catch pyreport case, so we have true vector graphics
        figure_name = "%s%d.%s" % (show.basename, len(show.figure_list), show.figure_extension)
        show.figure_list += (figure_name,)
        dotgraph(s, direction=direction, **kwargs).write_pdf(figure_name)
        print "Here goes figure %s (dotdraw)" % figure_name
    else:
        # Matplotlib does not allow to display vector graphics on screen,
        # so we fall back to png
        temp = "_temp.png"
        dotgraph(s, direction=direction, **kwargs).write_png(temp)
        im = imread(temp)
        figure()
        ax = axes([0, 0, 1, 1], frameon=False)
        ax.set_axis_off()
        imshow(im)
        show()
Example #14
 def get_section_image(self, cached=True):
     if cached:
         if self._section_image is None:
             self._section_image = self.get_section_image(cached=False)
         return self._section_image
     else:
         return pylab.imread(self.get_section_image_filename())
Example #15
def main():
    A = pl.imread(IMAGE_FILE)

    i = 1
    pc_values = (1, 5, 10, 20, 30, 40)
    for num_pcs in pc_values:

        # perform (truncated) pca
        egvecs, proj, egvals = pca(A, num_pcs)

        # reconstruct image
        A_rec = np.dot(egvecs, proj).T + np.mean(A, axis=0)
         
        # create subplot
        ax = pl.subplot(2, 3, i, frame_on=False)
        ax.xaxis.set_major_locator(pl.NullLocator())
        ax.yaxis.set_major_locator(pl.NullLocator())

        # draw
        pl.imshow(A_rec)
        pl.title("{} pc's".format(num_pcs))
        pl.gray()

        i += 1

    pl.show()
Example #16
def _test():
    # make a unit circle going counter-clockwise
    radius = 60
    theta = np.linspace(0, 2*np.pi, 10)
    circle_ccw = np.array([radius*np.cos(theta), radius*np.sin(theta)])
    area = ContourArea(circle_ccw)
    assert area > 0
    circle_cw = MakeClockwise(circle_ccw)
    area = ContourArea(circle_cw)
    assert area < 0
    assert (circle_cw == circle_ccw[:,::-1]).all() # it actually got reversed
    p = circle_cw + np.array([[280],[430]])

    plb.ion()
    plb.figure(0)
    plb.gray()
    i = plb.imread('mri2.png')
    i = np.mean(i, axis=2)
    plb.imshow(i)
    global _contour
    _contour, = plb.plot(np.append(p[0,-1], p[0]),np.append(p[1,-1], p[1]))
    plb.draw()
    Snake2D(i, p, iterations=500)
    print('done')
    plb.ioff()
    plb.savefig('mri-result.png')
    plb.show()
	def OnPopupItemGraph(self, event):

		for row in self.ReportGrid.GetSelectedRows():
			label = self.ReportGrid.GetCellValue(row,0)
			id = self.ReportGrid.GetCellValue(row,1)

			### plot the graph
			### TODO link with properties frame
			for fct in ('extTransition','intTransition', 'outputFnc', 'timeAdvance'):
				filename = "%s(%s)_%s.dot"%(label,str(id),fct)
				path = os.path.join(tempfile.gettempdir(), filename)

				### if the path exists
				if os.path.exists(path):
					graph = pydot.graph_from_dot_file(path)
					filename_png = os.path.join(tempfile.gettempdir(),"%s(%s)_%s.png"%(label,str(id),fct))
					graph.write_png(filename_png, prog='dot')

					pylab.figure()
					img = pylab.imread(filename_png)
					pylab.imshow(img)

					fig = pylab.gcf()
					fig.canvas.set_window_title(filename)

					pylab.axis('off')
					pylab.show()
Example #18
def show_fst(fst):
    import pydot,pylab
    graph = pydot.Dot(rankdir="LR")
    isyms = fst.InputSymbols()
    if not isyms: isyms = ASCII
    osyms = fst.OutputSymbols()
    if not osyms: osyms = ASCII
    for s in range(fst.NumStates()):
        if s==fst.Start():
            n = pydot.Node("%d"%s,shape="box")
            graph.add_node(n)
        if fst.IsFinal(s):
            l = '"'
            l += "%d"%s # node id
            if fst.Final(s).Value()!=0.0: # optional non-zero accept cost
                l += "/%s"%fst.Final(s).Value()
            l += '"'
            n = pydot.Node("%d"%s,label=l,penwidth="3")
            graph.add_node(n)
        for t in range(fst.NumArcs(s)):
            a = fst.GetArc(s,t)
            l = '"'
            l += '%s'%isyms.Find(a.ilabel)
            if a.olabel!=a.ilabel: l += ":%s"%osyms.Find(a.olabel)
            v = a.weight.Value()
            if v!=0.0: l += "/%s"%v
            l += '"'
            n = a.nextstate
            e = pydot.Edge("%d"%s,"%d"%n,label=l)
            graph.add_edge(e)
    graph.write_png("/tmp/_test.png")
    pylab.gca().set_xticks([]); pylab.gca().set_yticks([])
    pylab.clf()
    pylab.imshow(pylab.imread("/tmp/_test.png"))   
Example #19
def get_neighbour_factor(path):
    """ get the correct chances in the factor of an image by counting """

    # load img in BW
    im = np.mean(imread(path), axis=2) > 0.5

    # initialize
    x_diff = 0
    y_diff = 0

    # for each row in image starting with second
    for i in range(1, im.shape[0]):

        # for each individual pixel column starting with second
        for j in range(1, im.shape[1]):
            x_diff += abs(int(im[i-1,j]) - int(im[i,j]))
            y_diff += abs(int(im[i,j-1]) - int(im[i,j]))

    # divide by amount of elements with neighbours
    amount_elements_x = (len(im)-1) * len(im[0])
    amount_elements_y = len(im) * (len(im[0])-1)

    x_diff = x_diff / float(amount_elements_x)
    y_diff = y_diff / float(amount_elements_y)

    print (x_diff, y_diff)
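The double loop above can be collapsed into two np.diff calls. A vectorized sketch of the same counting (the function name is illustrative); it includes the first row and column in each sum, which matches the normalization used above:

import numpy as np
from pylab import imread

def get_neighbour_factor_fast(path):
    im = (np.mean(imread(path), axis=2) > 0.5).astype(int)
    x_diff = np.abs(np.diff(im, axis=0)).sum() / float((im.shape[0] - 1) * im.shape[1])
    y_diff = np.abs(np.diff(im, axis=1)).sum() / float(im.shape[0] * (im.shape[1] - 1))
    print(x_diff, y_diff)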
Example #20
def analyze_digit_MLP(img):
    """ Takes in an image matrix, crops out the digits and outputs it to file """

    ocr.delete_files("../pics/cropped/")

    print ("Preprocessing Image, Cropping Digits Into 28 X 28 Image Matrices\n")
    cropped_img_to_show, cropped_thresh_to_Show, cropped_digits = ocr.save_digit_to_binary_img_as_mnist(img, dim = 28, saveToFile = True, imgSize = frame_new_dim)

    print ("Image Preprocessing Done, %d Potential Digits Were Cropped Out\n" % len(cropped_digits))

    print ("Predicting Results\n")
    print ("Image    Digit     probability")

    index = 0
    for input_digit in cropped_digits:
        path = "../pics/cropped/" + str(index) + ".png"
        input_digit = imread(path)
        digit, probability = mlp.predict(input_digit, mlp_classifier)
        print ("%d.png      %d         %f" % (index, digit, probability))
        index += 1

    new_dim = (int(SCALE_FACTOR * img.shape[1] / 2), int(SCALE_FACTOR * img.shape[0] / 2))
    cropped_img_to_show = cv2.resize(cropped_img_to_show, new_dim)
    cropped_thresh_to_Show = cv2.resize(cropped_thresh_to_Show, new_dim)
    cv2.imshow('handWriting Capture Cropped Image', cropped_img_to_show)
    cv2.imshow('handWriting Capture Cropped Thresh', cropped_thresh_to_Show)
Example #21
def main():
    s = 2.0
    img = imread('cameraman.png')

    # Create the images, each with a different order of convolution
    img1 = gD(img, s, 0, 0)
    img2 = gD(img, s, 1, 0)
    img3 = gD(img, s, 0, 1)
    img4 = gD(img, s, 2, 0)
    img5 = gD(img, s, 0, 2)
    img6 = gD(img, s, 1, 1)

    fig = plt.figure()
    ax1 = fig.add_subplot(2, 3, 1)
    ax1.set_title("Fzero")
    ax1.imshow(img1, cmap=cm.gray)
    ax2 = fig.add_subplot(2, 3, 2)
    ax2.set_title("Fx")
    ax2.imshow(img2, cmap=cm.gray)
    ax3 = fig.add_subplot(2, 3, 3)
    ax3.set_title("Fy")
    ax3.imshow(img3, cmap=cm.gray)
    ax4 = fig.add_subplot(2, 3, 4)
    ax4.set_title("Fxx")
    ax4.imshow(img4, cmap=cm.gray)
    ax5 = fig.add_subplot(2, 3, 5)
    ax5.set_title("Fyy")
    ax5.imshow(img5, cmap=cm.gray)
    ax6 = fig.add_subplot(2, 3, 6)
    ax6.set_title("Fxy")
    ax6.imshow(img6, cmap=cm.gray)
    show()
def structure_plot(fig, molids, activations=None):
    """plot molecule structures"""
    if activations is not None:
        assert len(activations[0]) == len(molids[0])
        assert len(activations[1]) == len(molids[1])
    id2name = defaultdict(str, rdl.get_id2name())
    all_molids = molids[0] + molids[1]
    all_activations = np.hstack(activations)
    cr = utils.ceiled_root(len(all_molids))
    for i, molid in enumerate(all_molids):
        ax = fig.add_subplot(cr, cr, i+1)
        try:
            img = plt.imread(os.path.join(structures_path, molid + '.png'))
        except Exception:
            img = np.zeros((1, 1))  # blank placeholder when the structure image is missing
        ax.imshow(img)
        ax.set_xticks([])
        ax.set_yticks([])
        if i >= len(molids[0]):
            for child in ax.get_children():
                if isinstance(child, plt.matplotlib.spines.Spine):
                    child.set_color('#ff0000')
        if activations is not None:
            ax.set_title('{}: {:.2f}'.format(id2name[molid], all_activations[i]),
                         rotation='20')
Example #23
def extract_features(image_path_list):
  feature_list = []
  for image_path in image_path_list:
    features = []
    image_array = imread(image_path)
# Note: Looping through multiple filters for edge detection drastically slows
# Down the feature extraction while only marginally improving performance, thus
# it is left out for the HW submission
#     for ax in [0,1]:
#       for pct in [.01, .02]:
    emat = featureExtractor.getEdgeMatrix(image_array, sigpercent=.01, \
                                          axis=0)
    features.append( featureExtractor.getEdgePercent(image_array, emat) )
    features.append( featureExtractor.getNumMeridialEdges(emat) )
    features.append( featureExtractor.getNumEquatorialEdges(emat) )
    features.append( featureExtractor.getSize(image_array) )
    features.append( featureExtractor.getCentralRatio(image_array) )
    features.append( featureExtractor.getCentralRatio(emat) )
    features.append( featureExtractor.getMeanColorVal(image_array, 0) )
    features.append( featureExtractor.getMeanColorVal(image_array, 1) )
    features.append( featureExtractor.getMeanColorVal(image_array, 2) )
    features.append( featureExtractor.getVariance( image_array, 0 ) )
    features.append( featureExtractor.getVariance( image_array, 1 ) )
    features.append( featureExtractor.getVariance( image_array, 2 ) )
    xr, yr = featureExtractor.getCOM(image_array, 0)
    features.append(xr)
    features.append(yr)
    xg, yg = featureExtractor.getCOM(image_array, 1)
    features.append(xg)
    features.append(yg)
    xb, yb = featureExtractor.getCOM(image_array, 2)
    features.append(xb)
    features.append(yb)
    feature_list.append([image_path, features])
  return feature_list
Example #24
def read_tiff(fname,res_x,res_y,pix_x,pix_y,ext_x,ext_y):
	# get array numbers
	img=pl.imread(fname)
	if len(img.shape)==2:
		img_arraynum=1
	else:
		img_arraynum=img.shape[2]
	#collapse accordingly
	if img_arraynum == 4:
		imgmat=numpy.multiply(img[:,:,0]+img[:,:,1]+img[:,:,2],img[:,:,3])
	elif img_arraynum == 1:
		imgmat = img
	else:
		print("Image has %d arrays, unhandled." % img_arraynum)
		exit(0)

	### convert data to float64
	imgmat = numpy.array(imgmat,dtype=numpy.float64)

	### image parameters
	pix_x = imgmat.shape[1]
	pix_y = imgmat.shape[0]
	ext_x = pix_x * res_x
	ext_y = pix_y * res_y

	### convert to linear
	imgmat = convert2lin(imgmat,latitude,sensitivity)

	### return final image
	return imgmat,res_x,res_y,pix_x,pix_y,ext_x,ext_y
Example #25
def test_with_file(fn):
    im = pylab.imread(fn)
    if im.ndim > 2:
        im = numpy.mean(im[:, :, :3], 2)
    pylab.imsave("intermediate.png", im, vmin=0, vmax=1., cmap=pylab.cm.gray)
    r = test_inline(im)
    return r
def extract_features(image_path_list):
  feature_list = []
  for image_path in image_path_list:
    features = []
    image_array = imread(image_path)
#############################################################################################
# INSERT THE FEATURES THAT YOU EXTRACT FROM THE IMAGE HERE                                  #
#############################################################################################
#    for ax in [0,1]:
#      for pct in [.01, .02]:
#        emat = featureExtractor.getEdgeMatrix(image_array, sigpercent=pct, \
#                                              axis=ax)
#        features.append( featureExtractor.getEdgePercent(image_array, emat) )
#        features.append( featureExtractor.getNumMeridialEdges(emat) )
#        features.append( featureExtractor.getNumEquatorialEdges(emat) )
#    features.append( featureExtractor.getSize(image_array) )
#    features.append( featureExtractor.getCentralRatio(image_array) )
#    features.append( featureExtractor.getCentralRatio(emat) )
#    features.append( featureExtractor.getMeanColorVal(image_array, 0) )
#    features.append( featureExtractor.getMeanColorVal(image_array, 1) )
#    features.append( featureExtractor.getMeanColorVal(image_array, 2) )
#    features.append( featureExtractor.getVariance( image_array, 0 ) )
#    features.append( featureExtractor.getVariance( image_array, 1 ) )
#    features.append( featureExtractor.getVariance( image_array, 2 ) )
#    xr, yr = featureExtractor.getCOM(image_array, 0)
#    features.append(xr)
#    features.append(yr)
#    xg, yg = featureExtractor.getCOM(image_array, 1)
#    features.append(xg)
#    features.append(yg)
#    xb, yb = featureExtractor.getCOM(image_array, 2)
#    features.append(xb)
#    features.append(yb)
    feature_list.append([image_path, features])
  return feature_list
Example #27
def m2screenshot(mayavi_fig=None, mpl_axes=None, autocrop=True):
    """ Capture a screeshot of the Mayavi figure and display it in the
        matplotlib axes.
    """
    import pylab as pl
    # Late import to avoid triggering wx imports before needed.
    try:
        from mayavi import mlab
    except ImportError:
        # Try out old install of Mayavi, with namespace packages
        from enthought.mayavi import mlab

    if mayavi_fig is None:
        mayavi_fig = mlab.gcf()
    else:
        mlab.figure(mayavi_fig)
    if mpl_axes is not None:
        pl.axes(mpl_axes)

    filename = tempfile.mktemp('.png')
    mlab.savefig(filename, figure=mayavi_fig)
    image3d = pl.imread(filename)
    if autocrop:
        bg_color = mayavi_fig.scene.background
        image3d = autocrop_img(image3d, bg_color)
    pl.imshow(image3d)
    pl.axis('off')
    os.unlink(filename)
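A minimal usage sketch for the helper above, assuming Mayavi is installed; mlab.test_plot3d() just provides an example scene:

import pylab as pl
from mayavi import mlab

mlab.test_plot3d()      # build any Mayavi scene
pl.figure()
m2screenshot()          # capture it into the current matplotlib axes
pl.show()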
Example #28
    def showOtherRF(self):
        if self.path1 != '' and self.path2 != '':
            self.calcDistance()
        else:
            print "Nie wybrano punktow"

            # WYSWIETLA INFORMACJE
            img = pylab.imread('img/wally.png', 'rb')
            pylab.imshow(img)
            pylab.plot(0, 0)

            # DAJE CZYSTY OBRAZ BEZ OSI ** PEWNIE MOZNA PROSCIEJ
            frame1 = pylab.gca()
            for xlabel_i in frame1.axes.get_xticklabels():
                xlabel_i.set_visible(False)
                xlabel_i.set_fontsize(0.0)
            for xlabel_i in frame1.axes.get_yticklabels():
                xlabel_i.set_fontsize(0.0)
                xlabel_i.set_visible(False)
            for tick in frame1.axes.get_xticklines():
                tick.set_visible(False)
            for tick in frame1.axes.get_yticklines():
                tick.set_visible(False)

            # SHOWTIME
            pylab.show()
def cropAutoWhitePNG(IMGFileName, padding = (10, 10)):
	assert(os.path.isfile(IMGFileName)),"PNG file does not exist"

	IMG = pylab.imread(IMGFileName)
	
	if IMG.ndim < 3:
		T = (IMG < 1)
	elif IMG.shape[2] == 4:
		T = numpy.squeeze(numpy.take(IMG, [0, 1, 2], axis = 2))
		T = numpy.any(T < 1, axis = 2)
	elif IMG.shape[2] == 3:
		T = numpy.any(IMG < 1, axis = 2)

	I = numpy.where(T)
	
	croppedIMG = numpy.array(IMG)

	for z in range(2):
		croppedIMG = numpy.take(croppedIMG, numpy.arange(numpy.min(I[z]), numpy.max(I[z]) + 1), axis = z)
	
	cornerPadding = numpy.ones((padding[0], padding[1], croppedIMG.shape[2]))
	topBottomPadding = numpy.ones((padding[0], croppedIMG.shape[1], croppedIMG.shape[2]))
	leftRightPadding = numpy.ones((croppedIMG.shape[0], padding[1], croppedIMG.shape[2]))

	T = numpy.concatenate((
	numpy.concatenate((cornerPadding, topBottomPadding, cornerPadding), axis = 1),
	numpy.concatenate((leftRightPadding, croppedIMG, leftRightPadding), axis = 1),
	numpy.concatenate((cornerPadding, topBottomPadding, cornerPadding), axis = 1)), axis = 0)

	scipy.misc.imsave(IMGFileName, T)
def show_xyfm(point_in_0, xyfm):
    x0,y0 = point_in_0
    for i,(x, y, f, m) in enumerate(xyfm):
        pylab.figure(i);
        pylab.imshow(pylab.imread(f))
        xyw = np.dot(m,[x0, y0, 1])
        x1,y1 = xyw[:2] / xyw[2]
        pylab.scatter(x1, y1, c='r', marker='+')
Example #31
import pylab as pl
from scipy.misc import imresize, imfilter
import turtle

# load image
img = pl.flipud(pl.imread(".aar.png"))
a=turtle.Turtle()
s=turtle.Screen()
s.setup(400,400)
s.delay(5000)
# setup turtle
levels = 8
size = 2**levels
turtle.setup(img.shape[1] * 4.2, img.shape[0] * 4.2)
turtle.setworldcoordinates(0, 0, size, -size)
turtle.tracer(1000, 0)

# resize and blur image
img = imfilter(imresize(img, (size, size)), 'blur')

# define recursive hilbert curve
def hilbert(level, angle = 90):
    if level == 0:
        return
    if level == 1 and img[-turtle.pos()[1], turtle.pos()[0]] > 128:
        turtle.forward(2**level - 1)
    else:
        turtle.right(angle)
        hilbert(level - 1, -angle)
        turtle.forward(1)
        turtle.left(angle)
Example #32
    def initialize(self):
        texture0 = np.zeros((100, 100, 4))
        texture1 = imread(get_image_path('_REF.png'))

        self.add_visual(TextureVisual, texture=texture0)
        self.set_data(texture=texture1)
Example #33
        assert not (np.isnan(self._ll))
        if self._verbose:
            print(self._ll)
        return self._ll

    def mixture(self):
        return self._mm


if __name__ == "__main__":
    from ..families import UnivariateGaussian
    from pylab import imread
    import sys

    k = 8
    im = imread("examples/data/ar.ppm")
    data = (im.sum(axis=2) / 3.0).flatten().reshape((-1, 1))
    print(data.shape)
    em = BregmanSoftClustering(data, k, UnivariateGaussian, ())
    mm = em.run()
    mm.savetxt(sys.stdout)

    from matplotlib import pyplot

    pyplot.subplot(2, 1, 1)
    pyplot.hist(data, 256)
    pyplot.xlim(0, 256)

    pyplot.subplot(2, 1, 2)
    x = np.arange(0, 255, 0.1)
    pyplot.plot(x, mm(x))
Example #34
                    patch += J[i, j] * J[i + k, j + l]
            energya = -eta * np.sum(I * J) - zeta * patch
            J[i, j] = -1
            patch = 0
            for k in range(-1, 1):
                for l in range(-1, 1):
                    patch += J[i, j] * J[i + k, j + l]
            energyb = -eta * np.sum(I * J) - zeta * patch
            if energya < energyb:
                J[i, j] = 1
            else:
                J[i, j] = -1
    return J


I = pl.imread('world.png')
N = np.shape(I)[0]
I = I[:, :, 0]
I = np.where(I < 0.1, -1, 1)
pl.imshow(I)
pl.title('Original Image')

noise = np.random.rand(N, N)
J = I.copy()
ind = np.where(noise < 0.1)
J[ind] = -J[ind]
pl.figure()
pl.imshow(J)
pl.title('Noisy image')
newJ = J.copy()
newJ = MRF(I, newJ)
def make_Fig5(exc_connectivity1='random',
              net_state1='asynchronous',
              exc_connectivity2='random',
              net_state2='synchronous',
              exc_connectivity3='clustered',
              net_state3='synchronous',
              node_group='1_random',
              N_E=800,
              N_I=200,
              tau_Em=50,
              tau_Im=25,
              p_exc=0.03,
              p_inh=0.20,
              tau_ref_E=10,
              tau_ref_I=5,
              V_leak_E_min=-70,
              V_leak_I_min=-70,
              V_th=-40,
              V_reset=-59,
              V_peak=0,
              V_syn_E=50,
              V_syn_I=-68,
              C_E=0.4,
              C_I=0.2,
              g_AE1=2.5e-3,
              g_AE2=2.5e-3,
              g_AE3=3.0e-3,
              g_AI=6.0e-3,
              g_GE=30.0e-3,
              g_GI=30.0e-3,
              g_extE=6.0e-3,
              g_extI1=4.0e-3,
              g_extI2=4.0e-3,
              g_extI3=6.0e-3,
              g_leak_E=10.0e-3,
              g_leak_I=5.0e-3,
              tau_Fl=1.5,
              tau_AIl=1.5,
              tau_AIr=0.2,
              tau_AId=1.0,
              tau_AEl=1.5,
              tau_AEr=0.2,
              tau_AEd=1.0,
              tauR_intra=2500.0,
              tauD_intra=15.0,
              tauR_extra=2500.0,
              tauD_extra=15.0,
              rateI=65,
              rateE=65,
              stim_factor=4.5,
              num_nodes_to_save=20,
              num_pairs=40,
              num_LFPs=8,
              windows=[1000.0, 1000.0, 1000.0],
              gaps=[200.0, 0.0, 200.0],
              padding_for_traces=500.0,
              stim_type='linear_increase',
              time_to_max=200,
              inh_connectivity='random',
              exc_syn_weight_dist='beta',
              ext_SD=True,
              int_SD=True,
              generate_LFPs=True,
              save_results=True,
              num_trials=15,
              crit_freq_V=(20.0, 100.0),
              filt_kind_V='band',
              crit_freq_LFP=100.0,
              filt_kind_LFP='low',
              V_mult_factor=10000.0,
              LFP_mult_factor=-2.5,
              run_new_net_sim=False,
              get_new_CC_results_all_pairs=False,
              master_folder_path='E:\\correlated_variability_',
              showfig=True,
              savefig=False):
    '''
    Generate Figure 5 from the manuscript.  Also, calculate and report CC
    results for clustered network with no synaptic depression.
        
    Parameters
    ----------
    exc_connectivity1: string
        type of E --> E and E --> I connectivity in model network used in A
        and B ('clustered' or 'random')
    net_state1: string
        Network 'state' for model used in A and B.
        If 'synchronous', choose inh synaptic time constants that support
        network spike-rate oscillations in response to the stimulus.
        Otherwise, use same time constants for inh and exc synapses.
    exc_connectivity2: string
        exc connectivity in model network used in C and D
    net_state2: string
        Network 'state' for model used in C and D.
    exc_connectivity3: string
        exc connectivity in model network used in E and F
    net_state3: string
        Network 'state' for model used in E and F.
    node_group: float
        group of network nodes from which to select example Vs and LFP
    tau_Em: float
        membrane time constant in ms for exc nodes
    tau_Im: float
        membrane time constant in ms for inh nodes
    N_E: float or int
        number of exc nodes
    N_I: float or int
        number of inh nodes
    p_exc: float
        connection probability for E --> E and E --> I
    p_inh: float
        connection probability for I --> E and I --> I
    tau_ref_E: float
        absolute refractory period for exc nodes in ms
    tau_ref_I: float
        absolute refractory period for inh nodes in ms
    V_leak_I: float or int
        leak reversal potential for inh nodes in mV
    V_leak_E: float or int
        leak reversal potential for exc nodes in mV
    V_th: float or int
        spike threshold for all nodes in mV
    V_reset: float or int
        post-spike reset membrane potential for all nodes in mV
    V_peak: float or int
        peak spike membrane potential for all nodes in mV
    dt: float
        time step in ms
    V_syn_E: float or int
        synaptic reversal potential for exc nodes in mV
    V_syn_I: float or int
        synaptic reversal potential for inh nodes in mV
    g_AE1: float
        synaptic conductance for AMPA channels on exc nodes in microS
        for model network used in A and B
    g_AE2: float
        synaptic conductance for AMPA channels on exc nodes in microS
        for model network used in C and D
    g_AE3: float
        synaptic conductance for AMPA channels on exc nodes in microS
        for model network used in E and F
    g_AI: float 
        synaptic conductance for AMPA channels on inh nodes in microS
    g_GE: float 
        synaptic conductance for GABA channels on exc nodes in microS
    g_GI1: float 
        synaptic conductance for GABA channels on inh nodes in microS
        for model network used in A and B
    g_GI2: float 
        synaptic conductance for GABA channels on inh nodes in microS
        for model network used in C and D
    g_GI3: float 
        synaptic conductance for GABA channels on inh nodes in microS
        for model network used in E and F
    tau_Fl: float or int
        inh to inh delay time constant in ms
    tau_Fr: float
        inh to inh rise time constant in ms
    tau_Fd: float
        inh to inh decay time constant in ms
    tau_AIl: float
        exc to inh AMPA delay time constant in ms
    tau_AIr: float
        exc to inh AMPA rise time constant in ms
    tau_AId: float 
        exc to inh AMPA decay time constant in ms
    tau_AEl: float
        exc to exc AMPA delay time constant in ms
    tau_AEr: float
        exc to exc AMPA rise time constant in ms
    tau_AEd: float 
        exc to exc AMPA decay time constant in ms
    tauR_intra: float
        synaptic weight recovery time constant in ms for intracortical
        synapses
    tauD_intra: float
        synaptic weight decay time constant in ms for intracortical synapses
    tauR_extra: float
        synaptic weight recovery time constant in ms for external inputs
    tauD_extra: float
        synaptic weight decay time constant in ms for external inputs 
    g_extE: float
        synaptic conductance for ext input on exc in microS
    g_extI: float
        synaptic conductance for ext input on inh in microS
    C_E: float 
        capacitance for exc nodes in nF
    C_I: float 
        capacitance for inh nodes in nF
    g_leak_E: float
        leak conductance for exc nodes in microS
    g_leak_I: float
        leak conductance for inh nodes in microS
    rateI: float or int 
        ongoing external input rate for exc nodes in Hz
    rateE: float or int 
        ongoing external input rate for inh nodes in Hz
    stim_factor: float or int
        factor by which external input increases after stim onset 
    num_nodes_to_save: int
        number of nodes for which to save synaptic inputs (for injecting
        into 'test' neurons in a separate function)  
    num_pairs: int
        number of test neuron pairs for which to calculate CCs        
    num_LFPs: float
        number of LFPs to simulate (determines # of nodes in each 'electrode')
    windows: list of floats
        widths of ongoing, transient, and steady-state windows (ms)
    gaps: list of floats
        sizes of gaps between stim onset and end of ongoing, stim onset and
        beginning of transient, end of transient and beginning of 
        steady-state (ms)
    padding: float
        size of window (ms) to be added to the beginning and end of each 
        simulation
    stim_type: string
        if 'linear_increase', the stimulus is mimicked as a gradual (step-wise)
        increase in external input rate.  Otherwise, single step function.
    time_to_max: float or int
        time to reach max input rate for 'linear_increase' stimulus (in ms)
    inh_connectivity: string
        type of I --> E and I --> I connectivity ('clustered' or 'random')
    exc_syn_weight_dist: string
        type of distribution from which to draw E --> E and E --> I nonzero
        weights.  If 'beta', draw from beta distribution.  Otherwise, draw
        from continuous uniform distribution.
    ext_SD: bool
        if True, apply synaptic adaptation to external input synapses after
        stimulus onset
    int_SD: bool
        if True, apply synaptic adaptation to all 'intracortical' synapses 
        after stimulus onset        
    generate_LFPs: bool
        if True, simulate LFPs as sums of synaptic currents to groups of
        exc nodes
    save_results: bool
        if True, save results for each trial
    num_trials: float or int
        number of trials to simulate
    crit_freq_V: float or tuple of floats 
        critical frequency for filtering membrane potentials 
        (e.g., (20.0, 100.0))
    filt_kind_V: string
        kind of filter to use for membrane potentials (e.g., 'band' for 
        bandpass)
    crit_freq_LFP: float or tuple of floats 
        critical frequency for filtering 'LFP' (e.g., 100.0) 
    filt_kind_LFP: string
        kind of filter to use for 'LFP' (e.g., 'low' for lowpass)
    V_mult_factor: int or float
        factor by which to multiply membrane potentials for example plot
        (e.g., 10000.0) 
    LFP_mult_factor: int or float
        factor by which to multiply 'LFP' for example plot (e.g., -2.5) 
    run_new_net_sim: Bool
        if True, run new network simulations (for num_trials trials), even if
        results for these settings already exist
    get_new_CC_results_all_pairs: Bool
        if True, calculate CCs for all pairs starting with traces.
    master_folder_path: string
        full path of directory containing data, code, figures, etc.
    showfig: Bool
        if True (and if savefig == True), plot spike rasters, include
        network schematics, and show the figure.  Otherwise, just calculate
        CCs for each of the three networks (which is relatively fast if 
        simulations already complete).
    savefig: Bool
        if True, save the figure to the specified path


    Returns
    -------
    None
        
    '''

    width_fig = 5.0
    height_fig = 9.0
    padding = 0.5  #space (in inches) b/w subfigs and edges of figure
    interpadding = 0.4  #reference unit for padding b/w subfigs

    fonts = FontProperties()
    fonts.set_weight('bold')
    fonts.set_size(6)
    fontm = FontProperties()
    fontm.set_weight('bold')
    fontm.set_size(12)
    fontl = FontProperties()
    fontl.set_weight('bold')
    fontl.set_size(12)
    mpl.rcParams['mathtext.default'] = 'regular'

    #a1:spike rasters, V1-V2 (20-100 Hz), LFP (for model_version1)
    #a2:stim trace for a1
    #a3:model schematic
    #b:V-V CC trajectories
    #c1:spike rasters, V1-V2 (20-100 Hz), LFP (for model_version2)
    #c2:stim trace for c1
    #c3:model schematic
    #d:V-V CC trajectories
    #e1:spike rasters, V1-V2 (20-100 Hz), LFP (for model_version3)
    #e2:stim trace for e1
    #e3:model schematic
    #f:V-V CC trajectories

    #widths and heights of all subfigures IN INCHES
    width_traj = 1.2
    width_traces = width_fig - 2 * padding - 1.5 * interpadding - width_traj
    height_stim = 0.1
    height_traces = (1. / 3.) * (height_fig - 2 * padding - 3 * interpadding -
                                 3 * height_stim)
    height_traj = (3. / 5.) * (height_traces + height_stim - interpadding)
    height_schematic = (2. / 5.) * (height_traces + height_stim - interpadding)
    width_schematic = width_traj

    #x and y positions of all subfigures and labels IN INCHES
    x_a1 = padding
    y_a1 = height_fig - padding - height_traces
    x_a2 = x_a1
    y_a2 = y_a1 - height_stim
    x_a3 = width_fig - padding - width_schematic
    x_b = width_fig - padding - width_traj
    y_b = y_a2
    y_a3 = y_b + height_traj + 0.35 * interpadding
    x_c1 = x_a1
    y_c1 = y_a2 - 1.5 * interpadding - height_traces
    x_c2 = x_c1
    y_c2 = y_c1 - height_stim
    x_c3 = x_a3
    x_d = x_b
    y_d = y_c2
    y_c3 = y_d + height_traj + 0.35 * interpadding
    x_e1 = x_a1
    y_e1 = padding + height_stim
    x_e2 = x_e1
    y_e2 = padding
    x_e3 = x_a3
    y_e3 = padding + height_traj + 0.35 * interpadding
    x_f = x_e3
    y_f = padding

    x_label_a = x_a1 - 0.3
    y_label_a = y_a1 + height_traces - 0.25 * interpadding
    x_label_b = x_b - 0.4
    y_label_b = y_b + height_traj
    x_label_c = x_c1 - 0.3
    y_label_c = y_c1 + height_traces - 0.25 * interpadding
    x_label_d = x_label_b
    y_label_d = y_d + height_traj
    x_label_e = x_e1 - 0.3
    y_label_e = y_e1 + height_traces - 0.25 * interpadding
    x_label_f = x_label_b
    y_label_f = y_f + height_traj

    #x and y positions of all subfigures and labels IN FRACTIONS OF FIG SIZE
    x_a1 = (x_a1 / width_fig)
    y_a1 = (y_a1 / height_fig)
    x_a2 = (x_a2 / width_fig)
    y_a2 = (y_a2 / height_fig)
    x_a3 = (x_a3 / width_fig)
    y_a3 = (y_a3 / height_fig)
    x_b = (x_b / width_fig)
    y_b = (y_b / height_fig)

    x_c1 = (x_c1 / width_fig)
    y_c1 = (y_c1 / height_fig)
    x_c2 = (x_c2 / width_fig)
    y_c2 = (y_c2 / height_fig)
    x_c3 = (x_c3 / width_fig)
    y_c3 = (y_c3 / height_fig)
    x_d = (x_d / width_fig)
    y_d = (y_d / height_fig)

    x_e1 = (x_e1 / width_fig)
    y_e1 = (y_e1 / height_fig)
    x_e2 = (x_e2 / width_fig)
    y_e2 = (y_e2 / height_fig)
    x_e3 = (x_e3 / width_fig)
    y_e3 = (y_e3 / height_fig)
    x_f = (x_f / width_fig)
    y_f = (y_f / height_fig)

    x_label_a = (x_label_a / width_fig)
    y_label_a = (y_label_a / height_fig)
    x_label_b = (x_label_b / width_fig)
    y_label_b = (y_label_b / height_fig)
    x_label_c = (x_label_c / width_fig)
    y_label_c = (y_label_c / height_fig)
    x_label_d = (x_label_d / width_fig)
    y_label_d = (y_label_d / height_fig)
    x_label_e = (x_label_e / width_fig)
    y_label_e = (y_label_e / height_fig)
    x_label_f = (x_label_f / width_fig)
    y_label_f = (y_label_f / height_fig)

    #widths and heights of all subfigures IN FRACTIONS OF FIG SIZE
    width_traces = (width_traces / width_fig)
    height_traces = (height_traces / height_fig)
    width_traj = (width_traj / width_fig)
    height_traj = (height_traj / height_fig)
    width_schematic = (width_schematic / width_fig)
    height_schematic = (height_schematic / height_fig)
    height_stim = (height_stim / height_fig)

    V_mult_factor = 10000.0
    LFP_mult_factor = -2.5

    fig = plt.figure(figsize=(width_fig, height_fig), dpi=300)

    ###################### Panel A1: spikes, Vs, LFP
    width = width_traces
    height = height_traces
    x_pos = x_a1
    y_pos = y_a1
    rect = (x_pos, y_pos, width, height)
    fig.text(x_label_a, y_label_a, 'A', fontproperties=fontl)
    ax1 = fig.add_axes(rect)
    if showfig == True or savefig == True:
        plot_model_traces(
            ax1, fonts, node_group, N_E, N_I, tau_Em, tau_Im, p_exc, p_inh,
            tau_ref_E, tau_ref_I, V_leak_E_min, V_leak_I_min, V_th, V_reset,
            V_peak, V_syn_E, V_syn_I, C_E, C_I, g_AE1, g_AI, g_GE, g_GI,
            g_extE, g_extI1, g_leak_E, g_leak_I, tau_Fl, tau_AIl, tau_AIr,
            tau_AId, tau_AEl, tau_AEr, tau_AEd, tauR_intra, tauD_intra,
            tauR_extra, tauD_extra, rateI, rateE, stim_factor,
            num_nodes_to_save, num_LFPs, windows, gaps, padding_for_traces,
            stim_type, time_to_max, exc_connectivity1, inh_connectivity,
            exc_syn_weight_dist, net_state1, ext_SD, int_SD, generate_LFPs,
            save_results, num_trials, crit_freq_V, filt_kind_V, crit_freq_LFP,
            filt_kind_LFP, V_mult_factor, LFP_mult_factor, run_new_net_sim,
            master_folder_path)

    ###################### Panel A2: stim trace
    width = width_traces
    height = height_stim
    x_pos = x_a2
    y_pos = y_a2
    rect = (x_pos, y_pos, width, height)
    ax2 = fig.add_axes(rect)
    plot_stim_trace(ax2, windows, gaps, 0)
    ax2.set_xlim(0, sum(windows) + sum(gaps))

    ###################### Panel A3: model_schematic
    width = width_schematic
    height = height_schematic
    x_pos = x_a3
    y_pos = y_a3
    rect = (x_pos, y_pos, width, height)
    ax3 = fig.add_axes(rect)
    if showfig == True or savefig == True:
        image_path = 'file:\\' + master_folder_path + \
            '\\figures\\random_schematic.png'
        image = urllib2.urlopen(image_path)
        array = pylab.imread(image)
        ax3.imshow(array)
        ax3.set_axis_off()
        ax3.text(0.5,
                 1.15,
                 'random asynchronous',
                 fontsize=8,
                 horizontalalignment='center',
                 verticalalignment='bottom',
                 transform=ax3.transAxes)

    ###################### Panel B: CC trajectories
    width = width_traj
    height = height_traj
    x_pos = x_b
    y_pos = y_b
    rect = (x_pos, y_pos, width, height)
    ax4 = fig.add_axes(rect)
    fig.text(x_label_b, y_label_b, 'B', fontproperties=fontl)
    plot_CC(ax4, fonts, node_group, N_E, N_I, tau_Em, tau_Im, p_exc, p_inh,
            tau_ref_E, tau_ref_I, V_leak_E_min, V_leak_I_min, V_th, V_reset,
            V_peak, V_syn_E, V_syn_I, C_E, C_I, g_AE1, g_AI, g_GE, g_GI,
            g_extE, g_extI1, g_leak_E, g_leak_I, tau_Fl, tau_AIl, tau_AIr,
            tau_AId, tau_AEl, tau_AEr, tau_AEd, tauR_intra, tauD_intra,
            tauR_extra, tauD_extra, rateI, rateE, stim_factor,
            num_nodes_to_save, num_pairs, num_LFPs, windows, gaps,
            padding_for_traces, stim_type, time_to_max, exc_connectivity1,
            inh_connectivity, exc_syn_weight_dist, net_state1, ext_SD, int_SD,
            generate_LFPs, save_results, num_trials, crit_freq_V, filt_kind_V,
            run_new_net_sim, get_new_CC_results_all_pairs, master_folder_path)

    ###################### Panel C1: spikes, Vs, LFP
    width = width_traces
    height = height_traces
    x_pos = x_c1
    y_pos = y_c1
    rect = (x_pos, y_pos, width, height)
    fig.text(x_label_c, y_label_c, 'C', fontproperties=fontl)
    ax5 = fig.add_axes(rect)
    if showfig == True or savefig == True:
        plot_model_traces(
            ax5, fonts, node_group, N_E, N_I, tau_Em, tau_Im, p_exc, p_inh,
            tau_ref_E, tau_ref_I, V_leak_E_min, V_leak_I_min, V_th, V_reset,
            V_peak, V_syn_E, V_syn_I, C_E, C_I, g_AE2, g_AI, g_GE, g_GI,
            g_extE, g_extI2, g_leak_E, g_leak_I, tau_Fl, tau_AIl, tau_AIr,
            tau_AId, tau_AEl, tau_AEr, tau_AEd, tauR_intra, tauD_intra,
            tauR_extra, tauD_extra, rateI, rateE, stim_factor,
            num_nodes_to_save, num_LFPs, windows, gaps, padding_for_traces,
            stim_type, time_to_max, exc_connectivity2, inh_connectivity,
            exc_syn_weight_dist, net_state2, ext_SD, int_SD, generate_LFPs,
            save_results, num_trials, crit_freq_V, filt_kind_V, crit_freq_LFP,
            filt_kind_LFP, V_mult_factor, LFP_mult_factor, run_new_net_sim,
            master_folder_path)

    ###################### Panel C2: stim trace
    width = width_traces
    height = height_stim
    x_pos = x_c2
    y_pos = y_c2
    rect = (x_pos, y_pos, width, height)
    ax6 = fig.add_axes(rect)
    plot_stim_trace(ax6, windows, gaps, 0)
    ax6.set_xlim(0, sum(windows) + sum(gaps))

    ###################### Panel C3: model_schematic
    width = width_schematic
    height = height_schematic
    x_pos = x_c3
    y_pos = y_c3
    rect = (x_pos, y_pos, width, height)
    ax7 = fig.add_axes(rect)
    if showfig == True or savefig == True:
        image_path = 'file:\\' + master_folder_path + \
            '\\figures\\random_schematic.png'
        image = urllib2.urlopen(image_path)
        array = pylab.imread(image)
        ax7.imshow(array)
        ax7.set_axis_off()
        ax7.text(0.5,
                 1.15,
                 'random synchronous',
                 fontsize=8,
                 horizontalalignment='center',
                 verticalalignment='bottom',
                 transform=ax7.transAxes)

    ###################### Panel D: CC trajectories
    width = width_traj
    height = height_traj
    x_pos = x_d
    y_pos = y_d
    rect = (x_pos, y_pos, width, height)
    ax8 = fig.add_axes(rect)
    fig.text(x_label_d, y_label_d, 'D', fontproperties=fontl)
    plot_CC(ax8, fonts, node_group, N_E, N_I, tau_Em, tau_Im, p_exc, p_inh,
            tau_ref_E, tau_ref_I, V_leak_E_min, V_leak_I_min, V_th, V_reset,
            V_peak, V_syn_E, V_syn_I, C_E, C_I, g_AE2, g_AI, g_GE, g_GI,
            g_extE, g_extI2, g_leak_E, g_leak_I, tau_Fl, tau_AIl, tau_AIr,
            tau_AId, tau_AEl, tau_AEr, tau_AEd, tauR_intra, tauD_intra,
            tauR_extra, tauD_extra, rateI, rateE, stim_factor,
            num_nodes_to_save, num_pairs, num_LFPs, windows, gaps,
            padding_for_traces, stim_type, time_to_max, exc_connectivity2,
            inh_connectivity, exc_syn_weight_dist, net_state2, ext_SD, int_SD,
            generate_LFPs, save_results, num_trials, crit_freq_V, filt_kind_V,
            run_new_net_sim, get_new_CC_results_all_pairs, master_folder_path)

    ###################### Panel E1: spikes, Vs, LFP
    width = width_traces
    height = height_traces
    x_pos = x_e1
    y_pos = y_e1
    rect = (x_pos, y_pos, width, height)
    fig.text(x_label_e, y_label_e, 'E', fontproperties=fontl)
    ax9 = fig.add_axes(rect)
    rateI = 50
    rateE = 50
    stim_factor = 6
    if showfig == True or savefig == True:
        plot_model_traces(
            ax9, fonts, node_group, N_E, N_I, tau_Em, tau_Im, p_exc, p_inh,
            tau_ref_E, tau_ref_I, V_leak_E_min, V_leak_I_min, V_th, V_reset,
            V_peak, V_syn_E, V_syn_I, C_E, C_I, g_AE3, g_AI, g_GE, g_GI,
            g_extE, g_extI3, g_leak_E, g_leak_I, tau_Fl, tau_AIl, tau_AIr,
            tau_AId, tau_AEl, tau_AEr, tau_AEd, tauR_intra, tauD_intra,
            tauR_extra, tauD_extra, rateI, rateE, stim_factor,
            num_nodes_to_save, num_LFPs, windows, gaps, padding_for_traces,
            stim_type, time_to_max, exc_connectivity3, inh_connectivity,
            exc_syn_weight_dist, net_state3, ext_SD, int_SD, generate_LFPs,
            save_results, num_trials, crit_freq_V, filt_kind_V, crit_freq_LFP,
            filt_kind_LFP, V_mult_factor, LFP_mult_factor, run_new_net_sim,
            master_folder_path)

    ###################### Panel E2: stim trace
    width = width_traces
    height = height_stim
    x_pos = x_e2
    y_pos = y_e2
    rect = (x_pos, y_pos, width, height)
    ax10 = fig.add_axes(rect)
    plot_stim_trace(ax10, windows, gaps, 0)
    ax10.set_xlim(0, sum(windows) + sum(gaps))

    ###################### Panel E3: model_schematic
    width = width_schematic
    height = height_schematic
    x_pos = x_e3
    y_pos = y_e3
    rect = (x_pos, y_pos, width, height)
    ax11 = fig.add_axes(rect)
    if showfig == True or savefig == True:
        image_path = 'file:\\' + master_folder_path + \
            '\\figures\\small_world_schematic.png'
        image = urllib2.urlopen(image_path)
        array = pylab.imread(image)
        ax11.imshow(array)
        ax11.set_axis_off()
        ax11.text(0.5,
                  1.15,
                  'clustered',
                  fontsize=8,
                  horizontalalignment='center',
                  verticalalignment='bottom',
                  transform=ax11.transAxes)

    ###################### Panel F: CC trajectories
    width = width_traj
    height = height_traj
    x_pos = x_f
    y_pos = y_f
    rect = (x_pos, y_pos, width, height)
    ax12 = fig.add_axes(rect)
    fig.text(x_label_f, y_label_f, 'F', fontproperties=fontl)
    plot_CC(ax12, fonts, node_group, N_E, N_I, tau_Em, tau_Im, p_exc, p_inh,
            tau_ref_E, tau_ref_I, V_leak_E_min, V_leak_I_min, V_th, V_reset,
            V_peak, V_syn_E, V_syn_I, C_E, C_I, g_AE3, g_AI, g_GE, g_GI,
            g_extE, g_extI3, g_leak_E, g_leak_I, tau_Fl, tau_AIl, tau_AIr,
            tau_AId, tau_AEl, tau_AEr, tau_AEd, tauR_intra, tauD_intra,
            tauR_extra, tauD_extra, rateI, rateE, stim_factor,
            num_nodes_to_save, num_pairs, num_LFPs, windows, gaps,
            padding_for_traces, stim_type, time_to_max, exc_connectivity3,
            inh_connectivity, exc_syn_weight_dist, net_state3, ext_SD, int_SD,
            generate_LFPs, save_results, num_trials, crit_freq_V, filt_kind_V,
            run_new_net_sim, get_new_CC_results_all_pairs, master_folder_path)

    ###################### Calculate and report CC values for clustered
    ###################### network with no synaptic depression
    ext_SD = False
    int_SD = False

    report_CC(
        node_group, N_E, N_I, tau_Em, tau_Im, p_exc, p_inh, tau_ref_E,
        tau_ref_I, V_leak_E_min, V_leak_I_min, V_th, V_reset, V_peak, V_syn_E,
        V_syn_I, C_E, C_I, g_AE3, g_AI, g_GE, g_GI, g_extE, g_extI3, g_leak_E,
        g_leak_I, tau_Fl, tau_AIl, tau_AIr, tau_AId, tau_AEl, tau_AEr, tau_AEd,
        tauR_intra, tauD_intra, tauR_extra, tauD_extra, rateI, rateE,
        stim_factor, num_nodes_to_save, num_pairs, num_LFPs, windows, gaps,
        padding_for_traces, stim_type, time_to_max, exc_connectivity3,
        inh_connectivity, exc_syn_weight_dist, net_state3, ext_SD, int_SD,
        generate_LFPs, save_results, num_trials, crit_freq_V, filt_kind_V,
        run_new_net_sim, get_new_CC_results_all_pairs, master_folder_path)

    if savefig == True:
        figpath = master_folder_path + '\\figures\\Fig5'
        fig.savefig(figpath + '.png', dpi=300)
        figpath2 = '\\home\\caleb\\Dropbox\\Fig5'
        fig.savefig(figpath2 + '.png', dpi=300)
    if showfig == True:
        pylab.show()
import sys
import pymaxflow
import pylab
import numpy as np

eps = 0.01

im = pylab.imread(sys.argv[1]).astype(np.float32)

indices = np.arange(im.size).reshape(im.shape).astype(np.int32)
g = pymaxflow.PyGraph(im.size, im.size * 3)

g.add_node(im.size)

# adjacent
diffs = np.abs(im[:, 1:] - im[:, :-1]).ravel() + eps
e1 = indices[:, :-1].ravel()
e2 = indices[:, 1:].ravel()
g.add_edge_vectorized(e1, e2, diffs, 0 * diffs)

# adjacent up
diffs = np.abs(im[1:, 1:] - im[:-1, :-1]).ravel() + eps
e1 = indices[1:, :-1].ravel()
e2 = indices[:-1, 1:].ravel()
g.add_edge_vectorized(e1, e2, diffs, 0 * diffs)

# adjacent down
diffs = np.abs(im[:-1, 1:] - im[1:, :-1]).ravel() + eps
e1 = indices[:-1, :-1].flatten()
e2 = indices[1:, 1:].ravel()
g.add_edge_vectorized(e1, e2, diffs, 0 * diffs)
Example #37
              zorder=5,
              clip_on=False,
              transform=fig.transFigure)
for lx, ly in zip(linex, liney):
    ax.plot(lx, ly, **kwargs)
kwargs = dict(fontsize=6,
              va="baseline",
              ha="center",
              color=obscolor["computational"],
              transform=fig.transFigure)
ax.text(l1 + wi / 2, bi + 0.1415, observernames["computational"], **kwargs)

# # #  PLOT OBSERVER MODEL SKETCHES  # # #
if "A" in PLOT:
    ax = axes["observers"]
    ax.imshow(pl.imread(f"./panel/observermodelspictogram_wide.png"))
    ax.set_frame_on(False)
    print_panel_label("A", ax, abs_x=0.025, abs_y=0.97)

# # #  PLOT ALT. OBSERVERS' PERFORMANCES  # # #
if "B" in PLOT:
    print_panel_label("B", ax, abs_x=0.025, abs_y=0.795)
    # PERPARE GENERAL VARS
    pc = perf_level["chance"]
    pp = perf_level["perfect"]
    nObs = len(observers)
    nCond = len(conds)
    nTracker = len(visible_trackers)
    ymin, ymax = 10., 0.
    from scipy.stats import sem
    ax = axes["bars"]
Example #38
import matplotlib.pyplot as plt
import numpy as np
import ocr

from pylab import imread, imshow, figure, show, subplot, plot, scatter
from scipy.cluster.vq import kmeans, vq
from skimage import data, img_as_uint, img_as_float
from skimage.external.tifffile import imsave
from skimage.filters import threshold_otsu, threshold_adaptive, threshold_yen
from skimage.segmentation import clear_border

imageFile = '../pics/1.png'
image = imread(imageFile)
img = data.imread(imageFile, as_grey=True)

global_thresh = threshold_yen(img)
# Boolean mask of the image using global (Yen) thresholding
binary_global = img > global_thresh

block_size = 40

# Boolean mask of the image using adaptive (local) thresholding
binary_adaptive = threshold_adaptive(img, block_size, offset=0)

# Unsigned-integer binary image with objects touching the border removed
img_bin_global = clear_border(img_as_uint(binary_global))

# Unsigned-integer binary image with objects touching the border removed
img_bin_adaptive = clear_border(img_as_uint(binary_adaptive))

savedImg = img_as_float(binary_adaptive)
Beispiel #39
0
    myFiles = []  # list of image files
    for (root, dirs, files) in os.walk(input):
        for file in files:
            # keep files that are not ground-truth masks and have a .jpg extension
            if file.find("_gt") < 0 and file.find('.jpg') >= 0:
                myFiles.append(file)  # add to the file list
        break  # only walk the top level of the directory
    # shuffle the file list
    random.shuffle(myFiles)
    length = len(myFiles) * 4  # four rotations per image
    # Build an index list: shuffle [0, 1, ..., length-1] and use it to number the output files
    lst = build_shuffle_list(length)
    lst = list(lst)
    for i, s in enumerate(myFiles):  # i: running index, s: current file name
        # split off the extension; name is the file name without its suffix
        name = os.path.splitext(s)[0]
        print(i, name)
        imgName = f'{input}/' + name + '.jpg'  # source image
        dataName = f'{input}/' + name + '_gt.jpg'  # ground-truth (annotation) image
        img = plt.imread(imgName)  # read both images
        data = plt.imread(dataName)
        # save the unrotated pair
        saveFiles(img, data, lst)
        for j in range(3):  # rotate three more times, 90 degrees each
            img = RotateClockWise90(img)
            data = RotateClockWise90(data)
            saveFiles(img, data, lst)
    add_log('End time:')
    # for long runs, optionally shut the machine down afterwards
    # os.system('poweroff')
Beispiel #40
0
        print(fn)

        os.makedirs(os.path.dirname(fn) + '/../training_images', exist_ok=True)
        os.makedirs(os.path.dirname(fn) + '/../test_images', exist_ok=True)

        #raw = rawpy.imread(fn)
        #bayer = raw.raw_image
        bayer = pylab.imread(fn)

        pylab.imshow(bayer)

        print(bayer.shape)

        for i in range(xmin, xmax, width):
            for j in range(ymin, ymax, height):

                try:
                    if i > j:
                        train_01 = bayer[j:j + height, i:i + width]
                        if train_01.shape[0] > 0 and train_01.shape[1] > 0:
                            train_01_fn = fn.replace(
                                'raw_images', 'training_images').replace(
                                    '.jpg', '-train_01_%d_%d.jpg' % (i, j))
Beispiel #41
0
    SCALE = 1.5
    if event.button == "up":
        width = (x2 - x1) / SCALE
        height = (y2 - y1) / SCALE
    elif event.button == "down":
        width = (x2 - x1) * SCALE
        height = (y2 - y1) * SCALE
    ax.set_xlim(x - width * xp, x + width * (1 - xp))
    ax.set_ylim(y - height * yp, y + height * (1 - yp))
    pylab.draw()


fig.canvas.mpl_connect('scroll_event', on_scroll)

#Show background image
bg = pylab.imread(configuration["bg_picture"])
pylab.subplots_adjust(left=0.05, right=0.95, top=0.95, bottom=0.05)
ax = pylab.subplot(111)
pylab.imshow(bg, extent=configuration["extent"], origin='lower')
'''
#Make the plots
labels = marks.keys()

import mission.layout.layout as layout
desired_labels = set(layout.places)

removed_labels = set(labels).difference(desired_labels)
if removed_labels:
    print "The following labels no longer appear in layout.places"
    print ", ".join(x for x in removed_labels)
    resp = raw_input("Would you like to remove them from the layout? y/N")
Beispiel #42
0
    fresize.restype = None
    fresize.argtypes = [
        ctypeslib.ndpointer(dtype=datatype, ndim=3), c_int,
        ctypeslib.ndpointer(dtype=datatype, ndim=3), c_int, c_int, c_int
    ]
    ddims = [
        int(round(sdims[0] * scale)),
        int(round(sdims[1] * scale)), sdims[2]
    ]
    mxdst = zeros((ddims), dtype=datatype)
    tmp = zeros((ddims[0], sdims[1], sdims[2]), dtype=datatype)
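    # separable two-pass resize: scale the first axis into tmp, then the second axis into mxdst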
    img1 = img
    t1 = time()
    fresize(img1, sdims[0], tmp, ddims[0], sdims[1], sdims[2])
    fresize(tmp, sdims[1], mxdst, ddims[1], ddims[0], sdims[2])
    t2 = time()
    return mxdst.reshape(ddims[2], ddims[1], ddims[0]).T


if __name__ == "__main__":
    from numpy.random import random_integers
    from time import time
    from pylab import imread, figure, imshow
    from ctypes import c_float, c_double, c_int

    img = imread("test.png").astype(c_double)
    imshow(img)
    img1 = resize(img, 0.25)
    figure()
    imshow(img1)
Beispiel #43
0
### Load the original image ###

import matplotlib.pyplot as plt
import numpy as np
from pylab import imread, imshow

img = imread(r'D:\Esteban\Desktop\Ezatara\EZ\EZ-Images\grim.jpg').astype(np.float32)  # read the file with pylab's imread

image = img / 255
imshow(image)  # display the image with matplotlib's imshow


# Load and read a CSV file

import numpy as np

data = np.genfromtxt('D:/Esteban/Desktop/Ezatara/EZ-U/Aseguramiento/POC/train.csv', delimiter=',', skip_header=1, dtype=None)  # read the file with numpy's genfromtxt
for row in data:
    print(row)  # print each row of the file as an array
    # print(row[0], row[1], row[2])  # print individual columns instead of the whole row


### Load the image and adjust its contrast ###

from scipy import ndimage
import matplotlib.pyplot as plt
import numpy as np
from pylab import imread, imshow, gray, mean

img = imread(r'D:\Esteban\Desktop\Ezatara\EZ\EZ-Images\grim.jpg').astype(np.float32)  # read the file with pylab's imread
Beispiel #44
0
        dimshow(resampled_img)
        plt.subplot(1, 2, 2)
        dimshow(resampled_mask)
        ps.savefig()

        # feed it to "enhance"
        enhance.update(resampled_mask.ravel(), img[Yi, Xi, :])

        plt.clf()
        dimshow(enhance.enhI.reshape((H, W, 3)))
        ps.savefig()

    sys.exit(0)

    # test stretching
    img = plt.imread('demo/apod1.jpg')
    (H, W, B) = img.shape
    print('Image', img.shape, img.dtype)

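    # simple stretch: square root of intensities normalised to [0, 1]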
    imx = np.sqrt(img.astype(np.float32) / 255.)

    enhance = EnhanceImage(H * W, B)
    enhance.update(np.ones((H * W), bool), imx.reshape((-1, B)))

    en = enhance.enhI.reshape((H, W, B))

    plt.clf()
    plt.imshow(en, interpolation='nearest', origin='lower')
    plt.savefig('en.png')

    stretch = enhance.stretch_to_match(img).reshape((H, W, B))
Beispiel #45
0
from __future__ import division, print_function
import glob
import os.path
import numpy as np
import amitgroup as ag
import pylab as plt

from config import SETTINGS

files = glob.glob(os.path.join(SETTINGS['src_dir'], '*.png'))

im = plt.imread(files[32])
imgrey = im[...,:3].mean(axis=2).astype(np.float64)

edges = ag.features.bedges(imgrey, radius=1, firstaxis=True)

edges2 = ag.features.bedges_from_image(im.astype(np.float64), radius=1, firstaxis=True)

num_edges, num_edges2 = edges.sum(), edges2.sum()
print "Edges (grayscale):", num_edges 
print "Edges (RGB):", num_edges2 

print "Increase:", num_edges2/num_edges

ag.plot.images(np.r_[edges, edges2])


Beispiel #46
0
import os
import sys

import numpy as np
import matplotlib.pyplot as plt

sys.path.append('../../')

from Utils.StainNormalization import StainNormalizationWithVector,StainNormalizationWithVector2

h_ref = np.array([[0.24502717, 0.80708244, 0.34289746],[0.63904356, 0.67133489, 0.30034316]])

proot ='/home/zyx31/DATA_CRLM/Patches/Patches_Level0/Patches_1024/All/'
proot_norm = '/home/zyx31/DATA_CRLM/Patches/Patches_Level0/Patches_1024/Norm/'

pindex = 0
plist = os.listdir(proot)

for pindex in range(len(plist)):
    ppath = proot + plist[pindex]
    #ppath = proot + fname_dict['T'][0]
    print("processing %s"%plist[pindex])
    try:
        patch = np.array(plt.imread(ppath))[:, :, :3]
        normp = StainNormalizationWithVector(patch, h_ref=h_ref,
                                             illuminant_ref=[255, 255, 255],
                                             no_channel=2, Df=5, init=[260, 320])

        #plt.imshow(normp)
        plt.imsave(proot_norm+plist[pindex],normp)
    
    except Exception as e:
        print(e)
    
    if pindex %200 ==0:
        print("----------------->",pindex)
    
Beispiel #47
0
        return AttentionModule.aggregate(outputs, gates, last_output,
                                         last_gate), reg_loss


if __name__ == '__main__':
    import time
    t = time.time()
    net = WideResNetAttention(1000,
                              attention_depth=3,
                              nheads=4,
                              has_gates=True,
                              pretrain_path="pretrained/wrn-50-2.t7").eval()
    print(time.time() - t)
    import pylab
    import cv2
    import numpy as np
    im = pylab.imread('./demo/goldfish.jpeg') / 255.
    im = cv2.resize(im, (224, 224))
    im = im.transpose(2, 0, 1)
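    # normalise with the standard ImageNet per-channel mean and standard deviation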
    im -= np.array([0.485, 0.456, 0.406]).reshape((3, 1, 1))
    im /= np.array([0.229, 0.224, 0.225]).reshape((3, 1, 1))
    im = torch.Tensor(im[None, ...])
    t = time.time()
    out = net(Variable(im))
    print(time.time() - t)
    max, ind = torch.max(out, 1)
    # max, ind = torch.max(net.linear(out), 1)
    # print(max)
    print(ind)
from pylab import imread, imshow, figure, show, subplot
import matplotlib.pyplot as plt
from numpy import reshape, uint8, flipud
from sklearn.cluster import KMeans
from copy import deepcopy

img = imread('sample.jpeg')

# flatten the H x W x 3 image into an (H*W) x 3 array of pixel colours
pixel = reshape(img, (img.shape[0] * img.shape[1], 3))
pixel_new = deepcopy(pixel)

print(img.shape)
print(pixel_new.shape)

model = KMeans(n_clusters=5)
labels = model.fit_predict(pixel)
palette = model.cluster_centers_  # the cluster centres form the compressed colour palette

for i in range(len(pixel)):
    # replace each pixel with the colour of its cluster centre
    pixel_new[i, :] = palette[labels[i]]

imshow(reshape(pixel_new, (img.shape[0], img.shape[1], 3)))
plt.show()
Beispiel #49
0
def plotarea(ra, dec, radius, name, prefix, tims=None, rds=[]):
    from astrometry.util.util import Tan
    W, H = 512, 512
    scale = (radius * 60. * 4) / float(W)
    print 'SDSS jpeg scale', scale
    imgfn = 'sdss-mosaic-%s.png' % prefix
    if not os.path.exists(imgfn):
        url = (('http://skyservice.pha.jhu.edu/DR9/ImgCutout/getjpeg.aspx?' +
                'ra=%f&dec=%f&scale=%f&width=%i&height=%i') %
               (ra, dec, scale, W, H))
        f = urllib2.urlopen(url)
        of, tmpfn = tempfile.mkstemp(suffix='.jpg')
        os.close(of)
        of = open(tmpfn, 'wb')
        of.write(f.read())
        of.close()
        cmd = 'jpegtopnm %s | pnmtopng > %s' % (tmpfn, imgfn)
        os.system(cmd)
    # Create WCS header for it
    cd = scale / 3600.
    args = (ra, dec, W / 2. + 0.5, H / 2. + 0.5, -cd, 0., 0., -cd, W, H)
    wcs = Tan(*[float(x) for x in args])

    plt.clf()
    I = plt.imread(imgfn)
    plt.imshow(I, interpolation='nearest', origin='lower')
    x, y = wcs.radec2pixelxy(ra, dec)
    R = radius * 60. / scale
    ax = plt.axis()
    plt.gca().add_artist(
        matplotlib.patches.Circle(xy=(x, y),
                                  radius=R,
                                  color='g',
                                  lw=3,
                                  alpha=0.5,
                                  fc='none'))
    if tims is not None:
        print 'Plotting outlines of', len(tims), 'images'
        for tim in tims:
            H, W = tim.shape
            twcs = tim.getWcs()
            px, py = [], []
            for x, y in [(1, 1), (W, 1), (W, H), (1, H), (1, 1)]:
                rd = twcs.pixelToPosition(x, y)
                xx, yy = wcs.radec2pixelxy(rd.ra, rd.dec)
                print 'x,y', x, y
                x1, y1 = twcs.positionToPixel(rd)
                print '  x1,y1', x1, y1
                print '  r,d', rd.ra, rd.dec,
                print '  xx,yy', xx, yy
                px.append(xx)
                py.append(yy)
            plt.plot(px, py, 'g-', lw=3, alpha=0.5)

            # plot full-frame image outline too
            # px,py = [],[]
            # W,H = 2048,1489
            # for x,y in [(1,1),(W,1),(W,H),(1,H),(1,1)]:
            #     r,d = twcs.pixelToRaDec(x,y)
            #     xx,yy = wcs.radec2pixelxy(r,d)
            #     px.append(xx)
            #     py.append(yy)
            # plt.plot(px, py, 'g-', lw=1, alpha=1.)

    if rds is not None:
        px, py = [], []
        for ra, dec in rds:
            print 'ra,dec', ra, dec
            xx, yy = wcs.radec2pixelxy(ra, dec)
            px.append(xx)
            py.append(yy)
        plt.plot(px, py, 'go')

    plt.axis(ax)
    fn = '%s.png' % prefix
    plt.savefig(fn)
    print 'saved', fn
# 		with slim.arg_scope(vgg.vgg_arg_scope()):
# 			output, _ = vgg.vgg_16(normalized_image, num_classes = 1000, is_training = False)
# 			probabilities = tf.nn.softmax(output)
# 		init_fn = slim.assign_from_checkpoint_fn(os.path.join(checkpoints_dir, 'vgg_16.ckpt'), slim.get_model_variables('vgg_16'))
# 		sess = tf.Session()
# 		init_fn(sess)

# 	pass

if __name__ == "__main__":
    # Set some parameters
    image_size = vgg.vgg_16.default_image_size
    batch_size = 1

    image_file = "./data/imagenet/catdog/catdog.jpg"
    image = plt.imread(image_file)
    # plt.imshow(image)
    # plt.annotate('Something', xy = (0.05, 0.95), xycoords = 'axes fraction')
    # plt.show()

    labels = imagenet.create_readable_names_for_imagenet_labels()

    # Define graph
    with tf.Graph().as_default():
        x = tf.placeholder(dtype=tf.float32, shape=(image_size, image_size, 3))
        normalized_image = vgg_preprocessing.preprocess_image(
            x, image_size, image_size, is_training=False)
        normalized_images = tf.expand_dims(normalized_image, 0)
        with slim.arg_scope(vgg.vgg_arg_scope()):
            output, _ = vgg.vgg_16(normalized_images,
                                   num_classes=1000,
Beispiel #51
0
    xp, yp = (x - x1) / (x2 - x1), (y - y1) / (y2 - y1)
    SCALE = 1.5
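    # zoom in or out by SCALE, keeping the data point under the cursor fixed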
    if event.button == "up":
        width = (x2 - x1) / SCALE
        height = (y2 - y1) / SCALE
    elif event.button == "down":
        width = (x2 - x1) * SCALE
        height = (y2 - y1) * SCALE
    ax.set_xlim(x - width * xp, x + width * (1 - xp))
    ax.set_ylim(y - height * yp, y + height * (1 - yp))
    pylab.draw()


fig.canvas.mpl_connect('scroll_event', on_scroll)

bg = pylab.imread(
    os.path.join(os.path.join(path, 'images'), args.locale + '.jpg'))
pylab.subplots_adjust(left=0.05, right=0.95, top=0.95, bottom=0.05)
ax = pylab.subplot(111)

bg_width = 27.7
bg_height = 16.66
corner_offset_x = -5.5
corner_offset_y = -bg_height + 3

extent = (corner_offset_x, bg_width + corner_offset_x, corner_offset_y,
          bg_height + corner_offset_y)
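# place the background image so that its corners match the extent defined above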
pylab.imshow(bg, extent=extent, origin='lower')

xs = [m['position'][1] for m in marks]
ys = [m['position'][0] for m in marks]
reverse = {
    def get_classifier_params(self):
        return self.linear.parameters()

    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = F.relu(x)
        x = F.max_pool2d(x, 3, 2, 1)
        x = self.group0(x)
        x = self.group1(x)
        x = self.group2(x)
        x = self.group3(x)
        return F.log_softmax(self.linear(x.mean(3).mean(2)), dim=1)


if __name__ == '__main__':
    net = WideResNet().eval()
    import pylab
    import cv2
    import numpy as np
    cat = pylab.imread('demo/goldfish.jpeg') / 255.
    cat = cv2.resize(cat, (224, 224))
    cat = cat.transpose(2, 0, 1)
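    # normalise with the standard ImageNet per-channel mean and standard deviation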
    cat -= np.array([0.485, 0.456, 0.406]).reshape((3, 1, 1))
    cat /= np.array([0.229, 0.224, 0.225]).reshape((3, 1, 1))
    cat = torch.Tensor(cat[None, ...])
    out = net(Variable(cat))
    max, ind = torch.max(net.linear(out), 1)
    # print(max)
    print(ind)
Beispiel #53
0
from scipy.misc import lena
from pylab import imread
from scipy.ndimage import gaussian_filter
from stl_tools import numpy2stl, text2png
"""
Some quick examples
"""

A = lena()  # load the Lena test image
A = gaussian_filter(A, 1)  # smoothing
numpy2stl(A, "examples/Lena.stl", scale=0.1, solid=False)

A = 256 * imread("examples/example_data/NASA.png")
A = A[:, :, 2] + 1.0 * A[:, :, 0]  # Compose RGBA channels to give depth
A = gaussian_filter(A, 1)  # smoothing
numpy2stl(A, "examples/NASA.stl", scale=0.05, mask_val=5., solid=True)

A = 256 * imread("examples/example_data/openmdao.png")
A = A[:, :,
      0] + 1. * A[:, :, 3]  # Compose some elements from RGBA to give depth
A = gaussian_filter(A, 2)  # smoothing
numpy2stl(A,
          "examples/OpenMDAO-logo.stl",
          scale=0.05,
          mask_val=1.,
          min_thickness_percent=0.005,
          solid=True)

text = ("$\oint_{\Gamma} (A\, dx + B\, dy) = \iint_{U} \left(\\frac{\partial "
        "B}{\partial x} - \\frac{\partial A}{\partial y}\\right)\ dxdy$ \n\n "
        "$\\frac{\partial \\rho}{\partial t} + \\frac{\partial}{\partial x_j}"
Beispiel #54
0
def render(input_text, fnum=1, dpath=None, verbose=True):
    """
    fixme or remove
    """
    import pylab as plt
    import matplotlib as mpl
    #verbose = True
    text = make_full_document(input_text)
    cwd = os.getcwd()
    if dpath is None:
        text_dir = join(cwd, 'tmptex')
    else:
        text_dir = dpath
    util_path.ensuredir(text_dir, verbose=verbose)
    text_fname = 'latex_formatter_temp.tex'
    text_fpath = join(text_dir, text_fname)
    pdf_fpath = splitext(text_fpath)[0] + '.pdf'
    jpg_fpath = splitext(text_fpath)[0] + '.jpg'
    try:
        os.chdir(text_dir)
        util_io.write_to(text_fpath, text)
        pdflatex_args = ('pdflatex', '-shell-escape', '--synctex=-1',
                         '-src-specials', '-interaction=nonstopmode')
        args = pdflatex_args + (text_fpath, )
        util_cplat.cmd(*args, verbose=verbose)
        assert util_path.checkpath(pdf_fpath, verbose=verbose), 'latex failed'
        # convert latex pdf to jpeg
        util_cplat.cmd('convert',
                       '-density',
                       '300',
                       pdf_fpath,
                       '-quality',
                       '90',
                       jpg_fpath,
                       verbose=verbose)
        assert util_path.checkpath(jpg_fpath,
                                   verbose=verbose), 'imgmagick failed'
        tex_img = plt.imread(jpg_fpath)
        # Crop to the bounding box of all non-white pixels
        # (np.nonzero handles both grayscale and RGB renders)
        nonwhite = np.nonzero(tex_img != 255)
        nonwhite_rows, nonwhite_cols = nonwhite[0], nonwhite[1]
        x1 = nonwhite_cols.min()
        y1 = nonwhite_rows.min()
        x2 = nonwhite_cols.max()
        y2 = nonwhite_rows.max()
        #util.embed()
        cropped = tex_img[y1:y2, x1:x2]
        fig = plt.figure(fnum)
        fig.clf()
        ax = fig.add_subplot(1, 1, 1)
        ax.imshow(cropped, cmap=mpl.cm.gray)
        #mpl.rc('text', usetex=True)
        #mpl.rc('font', family='serif')
        #plt.figure()
        #plt.text(9, 3.4, text, size=12)
    except Exception as ex:
        print('LATEX ERROR')
        print(text)
        print(ex)
        print('LATEX ERROR')
        pass
    finally:
        os.chdir(cwd)
    def testSubpixelShift(self):
        W, H = 100, 100
        cx, cy = 34.2, 56.7
        X, Y = np.meshgrid(np.arange(W), np.arange(H))
        S = 2.
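        # unit-integral 2-D Gaussian of width S centred at (cx, cy)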
        G = 1. / (2. * pi * S**2) * np.exp(-((X - cx)**2 + (Y - cy)**2) /
                                           (2. * S**2))
        #G /= G.max()
        print G.sum()
        print np.sum(G * X), np.sum(G * Y)
        imfn = 'test-plotstuff-2.fits'
        pyfits.writeto(imfn, G, clobber=True)

        wcs = anwcs_create_box(33.5, 10.4, 1., W, H)
        anwcs_rotate_wcs(wcs, 25.)
        wcsfn = 'test-plotstuff-2.wcs'
        anwcs_write(wcs, wcsfn)

        plot = Plotstuff()
        plot.outformat = PLOTSTUFF_FORMAT_PNG
        plot.size = (W, H)
        plot.wcs_file = wcsfn
        plot.color = 'black'
        plot.plot('fill')

        im = plot.image
        im.image_low = 0
        im.image_high = G.max()
        plot_image_set_wcs(im, wcsfn, 0)
        plot_image_set_filename(im, imfn)
        plot.plot('image')

        plotfn = 'test-plotstuff-2.png'
        plot.write(plotfn)

        I = plt.imread(plotfn)
        I = I[:, :, 0]
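        # intensity-weighted centroid of the rendered source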
        sx, sy = (I * X).sum() / I.sum(), (I * Y).sum() / I.sum()
        print sx, sy
        ex, ey = cx, cy
        self.assertTrue(abs(sx - ex) < 0.1)
        self.assertTrue(abs(sy - ey) < 0.1)

        # Shift the plot's WCS CRPIX.
        dx = 0.25
        dy = 0.3
        sip = anwcs_get_sip(wcs)
        tan = sip.wcstan
        plotstuff_set_wcs(plot.pargs, wcs)

        xy = plot.xy
        plot_xy_set_wcs_filename(xy, wcsfn)
        plot.marker = 'crosshair'

        # Move the plot WCS origin
        for step in range(16):
            if step < 8:
                tan.set_crpix(tan.crpix[0] + dx, tan.crpix[1])
                ex += dx
            else:
                tan.set_crpix(tan.crpix[0], tan.crpix[1] + dy)
                ey += dy
            #anwcs_print_stdout(wcs)
            plot.color = 'black'
            plot.plot('fill')
            plot.plot('image')
            plotfn = 'test-plotstuff-2b%i.png' % step
            plot.write(plotfn)
            #anwcs_write(plot.pargs.wcs, 'test-plotstuff-2b.wcs')
            I = plt.imread(plotfn)
            I = I[:, :, 0]
            sx, sy = (I * X).sum() / I.sum(), (I * Y).sum() / I.sum()
            print sx, sy
            print ex, ey
            self.assertTrue(abs(sx - ex) < 0.1)
            self.assertTrue(abs(sy - ey) < 0.1)

            plot.color = 'red'
            plot_xy_clear_list(xy)
            # don't plot at sx,sy / ex,ey -- original image coords
            # are unchanged.
            plot_xy_vals(xy, cx + 1, cy + 1)
            plot.plot('xy')
            plotfn = 'test-plotstuff-2c%i.png' % step
            plot.write(plotfn)
            # visual check that plot xy symbols match source centroid -- yes

        # Scan image WCS in RA,Dec; check that recovered source position
        # through plot WCS matches.
        # reset...
        plot.wcs_file = wcsfn
        plotwcs = anwcs_open(wcsfn, 0)

        wcs = anwcs_open(wcsfn, 0)
        im.wcs = wcs
        sip = anwcs_get_sip(wcs)
        tan = sip.wcstan
        ddec = 1.2 * 1. / W
        ok, era, edec = anwcs_pixelxy2radec(wcs, cx, cy)
        print era, edec
        for step in range(16):
            tan.set_crval(tan.crval[0], tan.crval[1] + ddec)
            edec += ddec

            plot.color = 'black'
            plot.plot('fill')
            plot.plot('image')
            plotfn = 'test-plotstuff-2d%i.png' % step
            plot.write(plotfn)

            I = plt.imread(plotfn)
            I = I[:, :, 0]
            sx, sy = (I * X).sum() / I.sum(), (I * Y).sum() / I.sum()
            #print sx,sy
            ok, ra, dec = anwcs_pixelxy2radec(plotwcs, sx, sy)

            #print era,edec
            #print ra,dec
            print 'dRA,dDec', ra - era, dec - edec
            self.assertTrue(abs(ra - era) < 1e-4)
            self.assertTrue(abs(dec - edec) < 1e-4)
Beispiel #56
0
import scipy
import numpy
import pylab
from focal import *

# load image
filename  = "./t10k-images-idx3-ubyte__idx_000__lbl_7_.png"
img = pylab.imread(filename) #grayscale image
pylab.figure()
pylab.imshow(img, cmap="Greys_r")

fcl = Focal()
num_kernels = len(fcl.kernels.full_kernels) # four simulated layers

# spikes contains a rank-ordered list of triples with the following
# information:
# [ pixel/neuron index (int), pixel value (float), layer id (int) ]
spikes = fcl.apply(img)

# we convert the spike list to 4 images, spike_imgs is a dictionary
# containing an image per simulated layer
spike_imgs = spike_trains_to_images_g(spikes, img, num_kernels)

pylab.figure()
i = 1
for k in spike_imgs.keys():
  pylab.subplot(2, 2, i)
  pylab.imshow(spike_imgs[k], cmap="Greys_r")
  i += 1

#convert to spike source array
Beispiel #57
0
# Regularization parameters for kernel estimation
f_alpha = 0.                 # promotes smoothness
f_beta  = 0.1                  # Thikhonov regularization
optiter = 500                 # number of iterations for minimization
tol     = 1e-10               # tolerance for when to stop minimization
# ============================================================================

# Hopefully no need to edit anything below

# ----------------------------------------------------------------------------
# Some more code for backuping the results
# ----------------------------------------------------------------------------
if backup:
    # Create helper functions for file handling
    yload = lambda i: 1. * pl.imread(FILENAME(i)).astype(np.float32)

    # For backup purposes
    EXPPATH = '%s/%s_sf%dx%d_csf%dx%d_maxiter%d_alpha%.2f_beta%.2f' % \
              (RESPATH,ID,sf[0],sf[1],csf[0],csf[1],optiter,f_alpha,f_beta)

    xname = lambda i: '%s/x_%04d.png' % (EXPPATH,i)
    yname = lambda i: '%s/y_%04d.png' % (EXPPATH,i)
    fname = lambda i: '%s/f_%04d.png' % (EXPPATH,i)

    # Create results path if not existing
    try:
        os.makedirs(EXPPATH)
    except:
        pass
Beispiel #58
0
def Load(filename):
    '''
       load an image stored in filename are return as a numpy object
     '''
    return (pyl.imread(filename))
Beispiel #59
0
	p = np.size(coeff,axis=1)
	idx = np.argsort(latent) # indices that sort the eigenvalues
	idx = idx[::-1]       # in descending order
	# sorting eigenvectors according to the sorted eigenvalues
	coeff = coeff[:,idx]
	latent = latent[idx] # sorting eigenvalues
	if numpc < p and numpc >= 0:
		coeff = coeff[:,range(numpc)] # cutting some PCs if needed
		score = np.dot(coeff.T,M) # projection of the data in the new space
	return coeff, score, latent




# imports assumed by this fragment (the original header is truncated above)
import numpy as np
from numpy import linalg
import pylab as pl
A = pl.imread('dog.jpg') # load an image
A = np.mean(A,2) # to get a 2-D array
full_pc = np.size(A,axis=1) # numbers of all the principal components
i = 1
dist = []
for numpc in range(0,full_pc+10,10): 
	coeff, score, latent = princomp(A,numpc)
	Ar = np.dot(coeff,score).T + np.mean(A,axis=0) # image reconstruction
	# difference in Frobenius norm
	dist.append(linalg.norm(A-Ar,'fro'))
	# showing the pics reconstructed with less than 50 PCs
	if numpc <= 25:
		ax = pl.subplot(2,3,i,frame_on=False)
		ax.xaxis.set_major_locator(pl.NullLocator()) # remove ticks
		ax.yaxis.set_major_locator(pl.NullLocator())
		i += 1 
Beispiel #60
0
        # ml - ap - dv
        ml, ap, dv = location
        sliceIndex = 0
        for (k, v) in sliceBounds.iteritems():
            if ap > v:
                sliceIndex = k
                break

        if sliceIndex == 0:
            physio.utils.error("Could not find slice index for: %f" % \
                    ap, ValueError)

        atlasDir = config.get('filesystem', 'atlas')
        sliceFile = '%s/%03i.png' % (atlasDir, sliceIndex)

        im = pl.imread(sliceFile)

        pl.imshow(im)

        # convert ml, dv to pixel coordinates
        x, y = skull_to_pixel(ml, -dv, sliceIndex, im.shape)
        logging.debug("Channel: %i at %.3f %.3f %.3f" % (ch, ml, ap, dv))
        pl.scatter(x, y, color='r')
        pl.axhline(y, linestyle='-.', color='b')
        pl.axvline(x, linestyle='-.', color='b')
        pl.text(x - 10, y + 10, "%.3f, %.3f, %.3f" % (ml, ap, dv), \
                ha='right', va='top')

        pl.title("Channel: %i" % ch)
        pl.xlabel("ML (mm)")
        pl.ylabel("DV (mm)")