import numpy
import matplotlib.pyplot as plt
from matplotlib import pylab


def generate_matrix_visualization(known_words, bookmark_words):
    m = []
    for i in range(0,100):
        m.append([])
        for j in range (0, 100):
            if (i*100+j) in bookmark_words:
                m[i].append(0.65)
            elif (i*100+j) in known_words:
                m[i].append(0.4)
            else:
                m[i].append(0.2)
    m.reverse()
	
    # we need this next line to scale the color scheme
    m[0][0]=0

    matrix = numpy.matrix(m)


    fig = plt.figure()
    ax = fig.add_subplot(1,1,1)
    ax.set_aspect('equal')
    plt.imshow(matrix, interpolation='none', extent=[0,10000,0,10000])
    frame = pylab.gca()

    frame.axes.get_xaxis().set_ticks([])
	#plt.axis([0,10000,0,10000])
    plt.ylabel('Rank')
    plt.title('Encountered Ranked Words by User')
    plt.colorbar()
    plt.show()
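
# A minimal usage sketch for generate_matrix_visualization above, assuming
# numpy, matplotlib.pyplot (plt) and pylab are imported as above. The word
# ranks are plain integers in [0, 10000) and the two sets are invented for
# illustration.
import random

demo_known = set(random.sample(range(10000), 2000))
demo_bookmarks = set(random.sample(range(10000), 200))
generate_matrix_visualization(demo_known, demo_bookmarks)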
Example #2
 def plot_bernoulli_matrix(self, show_npfs=False):
   """
   Plot the heatmap of the Bernoulli matrix 
   @self
   @show_npfs - Highlight NPFS detections [Boolean] 
   """
   matrix = self.Bernoulli_matrix
   if show_npfs == False:
     plot = plt.imshow(matrix)
     plot.set_cmap('hot')
     plt.colorbar()
     plt.xlabel("Bootstraps")
     plt.ylabel("Feature")
     plt.show()
   else:
     for i in self.selected_features:
       for k in range(len(matrix[i])):
         matrix[i,k] = .5
     plot = plt.imshow(matrix)
     plot.set_cmap('hot')
     plt.xlabel("Bootstraps")
     plt.ylabel("Feature")
     plt.colorbar()
     plt.show()
   return None
Example #3
def inter_show(start, lc, eta, vol_ins, props, lbl_outs, grdts, pars):
    '''
    Plots a display of training information to the screen
    '''
    import matplotlib.pylab as plt
    name_in, vol  = vol_ins.popitem()
    name_p,  prop = props.popitem()
    name_l,  lbl  = lbl_outs.popitem()
    name_g,  grdt = grdts.popitem()

    m_input = volume_util.crop(vol[0,:,:,:], prop.shape[-3:]) #good enough for now

    # real time visualization
    plt.subplot(251),   plt.imshow(vol[0,0,:,:],    interpolation='nearest', cmap='gray')
    plt.xlabel('input')
    plt.subplot(252),   plt.imshow(m_input[0,:,:],    interpolation='nearest', cmap='gray')
    plt.xlabel('matched input')
    plt.subplot(253),   plt.imshow(prop[0,0,:,:],   interpolation='nearest', cmap='gray')
    plt.xlabel('output')
    plt.subplot(254),   plt.imshow(lbl[0,0,:,:],    interpolation='nearest', cmap='gray')
    plt.xlabel('label')
    plt.subplot(255),   plt.imshow(grdt[0,0,:,:],   interpolation='nearest', cmap='gray')
    plt.xlabel('gradient')

    plt.subplot(256)
    plt.plot(lc.tn_it, lc.tn_err, 'b', label='train')
    plt.plot(lc.tt_it, lc.tt_err, 'r', label='test')
    plt.xlabel('iteration'), plt.ylabel('cost energy')
    plt.subplot(257)
    plt.plot( lc.tn_it, lc.tn_cls, 'b', lc.tt_it, lc.tt_cls, 'r')
    plt.xlabel('iteration'), plt.ylabel( 'classification error' )
    return
Example #4
def zsview(im, cmap=pl.cm.gray, figsize=(8,5), contours=False, ccolor='r'):
    z1, z2 = zscale(im)
    pl.figure(figsize=figsize)
    pl.imshow(im, vmin=z1, vmax=z2, origin='lower', cmap=cmap, interpolation='none')
    if contours:
        pl.contour(im, levels=[z2], origin='lower', colors=ccolor)
    pl.tight_layout()
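
# Hedged usage sketch for zsview above. The original relies on a module-level
# zscale(im) helper (an IRAF-style stretch) and on `pl` being pylab; both are
# stand-ins here, and the percentile-based zscale below is only a crude
# approximation for illustration.
import numpy as np
from matplotlib import pylab as pl

def zscale(im):
    # stand-in: clip the display range to the 5th-95th percentiles
    return np.percentile(im, 5), np.percentile(im, 95)

zsview(np.random.rand(128, 128), contours=True)
pl.show()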
Example #5
def plot_confusion_matrix(cm, title='Confusion Matrix', cmap=plt.cm.Blues):
    # display driving, idling and walking accuracy plus the overall accuracy
    print(type(cm))
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = [0, 1, 2]
    target_names = ["driving", "idling", "walking"]
    plt.xticks(tick_marks, target_names, rotation=45)
    plt.yticks(tick_marks, target_names, rotation=45)
    print(len(cm[0]))

    # annotate every cell; imshow draws cm[i, j] at x=j, y=i
    for i in range(3):
        for j in range(3):
            plt.text(j, i, str(cm[i, j]))
    plt.tight_layout()
    plt.ylabel("Actual Value")
    plt.xlabel("Predicted Outcome")
Example #6
def plot_grid_experiment_results(grid_results, params, metrics):
    global plt
    params = sorted(params)
    grid_params = grid_results.grid_params
    plt.figure(figsize=(8, 6))
    for metric in metrics:
        grid_params_shape = [len(grid_params[k]) for k in sorted(grid_params.keys())]
        params_max_out = [(1 if k in params else 0) for k in sorted(grid_params.keys())]
        results = np.array([e.results.get(metric, 0) for e in grid_results.experiments])
        results = results.reshape(*grid_params_shape)
        for axis, included_in_params in enumerate(params_max_out):
            if not included_in_params:
                results = np.apply_along_axis(np.max, axis, results)

        print(results)
        params_shape = [len(grid_params[k]) for k in sorted(params)]
        results = results.reshape(*params_shape)

        if len(results.shape) == 1:
            results = results.reshape(-1,1)
        import matplotlib.pylab as plt

        #f.subplots_adjust(left=.2, right=0.95, bottom=0.15, top=0.95)
        plt.imshow(results, interpolation='nearest', cmap=plt.cm.hot)
        plt.title(str(grid_results.name) + " " + metric)

        if len(params) == 2:
            plt.xticks(np.arange(len(grid_params[params[1]])), grid_params[params[1]], rotation=45)
        plt.yticks(np.arange(len(grid_params[params[0]])), grid_params[params[0]])
        plt.colorbar()
        plt.show()
def viz_docwordfreq_sidebyside(P1, P2, title1='', title2='', 
                                vmax=None, aspect=None, block=False):
  from matplotlib import pylab
  pylab.figure()

  if vmax is None:
    vmax = 1.0
    P1limit = np.percentile(P1.flatten(), 97)
    if P2 is not None:
      P2limit = np.percentile(P2.flatten(), 97)
    else:
      P2limit = P1limit
    while vmax > P1limit and vmax > P2limit:
      vmax = 0.8 * vmax

  if aspect is None:
    aspect = float(P1.shape[1])/P1.shape[0]
  pylab.subplot(1, 2, 1)
  pylab.imshow(P1, aspect=aspect, interpolation='nearest', vmin=0, vmax=vmax)
  if len(title1) > 0:
    pylab.title(title1)
  if P2 is not None:
    pylab.subplot(1, 2, 2)
    pylab.imshow(P2, aspect=aspect, interpolation='nearest', vmin=0, vmax=vmax)
    if len(title2) > 0:
      pylab.title(title2)
  pylab.show(block=block)
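
# Hedged usage sketch for viz_docwordfreq_sidebyside above: two random
# document-by-word frequency matrices compared side by side. Assumes numpy is
# imported as np, which the function uses for np.percentile.
import numpy as np

P1_demo = 0.05 * np.random.rand(20, 50)
P2_demo = 0.05 * np.random.rand(20, 50)
viz_docwordfreq_sidebyside(P1_demo, P2_demo, title1='corpus A', title2='corpus B', block=True)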
Example #8
def show_binary_images(samples, nsamples, d1, d2, nrows, ncols):
    """
    Plots samples in a NumPy 2D array ``samples`` as ``d1`` by ``d2`` images.
    (one sample per row of ``samples``).

    The samples are assumed to be images with binary pixels. The
    images are layed out in a ``nrows`` by ``ncols`` grid.
    """
    perm = range(nsamples)
    #random.shuffle(perm)
    if samples.shape[0] < nrows*ncols:
        samples_padded = numpy.zeros((nrows*ncols,samples.shape[1]))
        samples_padded[:samples.shape[0],:] = samples
        samples = samples_padded

    image = 0.5*numpy.ones((nrows*(d1+1)-1,ncols*(d2+1)-1),dtype=float)
    for i in range(nrows):
        for j in range(ncols):
            image[(i*d1+i):((i+1)*d1+i),(j*d2+j):((j+1)*d2+j)] = (1-samples[perm[i*ncols + j]].reshape(d1,d2))

    bordered_image = 0.5 * numpy.ones((nrows*(d1+1)+1,ncols*(d2+1)+1),dtype=float)

    bordered_image[1:nrows*(d1+1),1:ncols*(d2+1)] = image

    imshow(bordered_image,cmap = cm.Greys,interpolation='nearest')
    xticks([])
    yticks([])
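
# Hedged usage sketch for show_binary_images above: twelve random 8x8 binary
# "images" laid out on a 3x4 grid. The function expects numpy plus pylab-style
# imshow/xticks/yticks and cm in the module namespace, so they are imported
# explicitly here.
import numpy
from matplotlib import cm
from matplotlib.pyplot import imshow, xticks, yticks, show

demo_samples = (numpy.random.rand(12, 64) > 0.5).astype(float)
show_binary_images(demo_samples, nsamples=12, d1=8, d2=8, nrows=3, ncols=4)
show()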
Example #9
def show_filters(weights,nweights,d1, d2, nrows, ncols, scale):
    """
    Plots the rows of NumPy 2D array ``weights`` as ``d1`` by ``d2`` images.

    The images are laid out in a ``nrows`` by ``ncols`` grid.

    Option ``scale`` sets the maximum absolute value of elements in ``weights``
    that will be plotted (larger values will be clamped to ``scale``, with the
    right sign).
    """
    perm = range(nweights)
    #random.shuffle(perm)
    image = -scale*numpy.ones((nrows*(d1+1)-1,ncols*(d2+1)-1),dtype=float)
    for i in range(nrows):
        for j in range(ncols):
            image[(i*d1+i):((i+1)*d1+i),(j*d2+j):((j+1)*d2+j)] = -1*weights[perm[i*ncols + j]].reshape(d1,d2)

    for i in range(nrows*(d1+1)-1):
        for j in range(ncols*(d2+1)-1):
            a = image[i,j]
            if a > scale:
                image[i,j] = scale
            if a < -scale:
                image[i,j] = -scale

    bordered_image = scale * numpy.ones((nrows*(d1+1)+1,ncols*(d2+1)+1),dtype=float)

    bordered_image[1:nrows*(d1+1),1:ncols*(d2+1)] = image

    imshow(bordered_image,cmap = cm.Greys,interpolation='nearest')
    xticks([])
    yticks([])
Example #10
def EnhanceContrast(g, r=3, op_kernel=15, silence=True):
    
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(op_kernel,op_kernel))
    opening = cv2.morphologyEx(g, cv2.MORPH_OPEN, kernel)
    
    g_copy = np.asarray(np.copy(g), dtype=float)

    m_f = np.mean(opening)
        
    u_max = 245; u_min = 10; t_min = np.min(g); t_max = np.max(g)

    idx_gt_mf = np.where(g_copy > m_f)
    idx_lt_mf = np.where(g_copy <= m_f)

    g_copy[idx_gt_mf] = -0.5 * ((u_max-u_min) / (m_f-t_max)**r) * (g_copy[idx_gt_mf]-t_max)**r + u_max
    g_copy[idx_lt_mf] = 0.5 * ((u_max-u_min) / (m_f-t_min)**r) * (g_copy[idx_lt_mf]-t_min)**r + u_min 

    if silence == False:
        plt.subplot(1,2,1)
        plt.imshow(g, cmap='gray')
        plt.title('Original image')
        plt.subplot(1,2,2)
        plt.imshow(g_copy, cmap='gray')
        plt.title('Enhanced image')
        plt.show()
        
    return g_copy
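
# Hedged usage sketch for EnhanceContrast above: a synthetic low-contrast
# grayscale frame. Assumes cv2, numpy as np and matplotlib.pyplot as plt are
# imported, as the function requires.
import cv2
import numpy as np
import matplotlib.pyplot as plt

g_demo = (90 + 60 * np.random.rand(128, 128)).astype(np.uint8)  # values ~90-150
enhanced = EnhanceContrast(g_demo, r=3, silence=False)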
def test_likelihood_evaluator3():
    
    tr = template.TemplateRenderCircleBorder()
    tr.set_params(14, 6, 4)

    t1 = tr.render(0, np.pi/2)
    img = np.zeros((240, 320), dtype=np.uint8)

    env = util.Environmentz((1.5, 2.0), (240, 320))
    
    le2 = likelihood.LikelihoodEvaluator3(env, tr)

    img[(120 - t1.shape[0]//2):(120 + t1.shape[0]//2),
        (160 - t1.shape[1]//2):(160 + t1.shape[1]//2)] += t1 * 255
    pylab.subplot(1, 2, 1)
    pylab.imshow(img, interpolation='nearest', cmap=pylab.cm.gray)

    state = np.zeros(1, dtype=util.DTYPE_STATE)

    xvals = np.linspace(0, 2.,  100)
    yvals = np.linspace(0, 1.5, 100)
    res = np.zeros((len(yvals), len(xvals)), dtype=np.float32)
    for yi, y in enumerate(yvals):
        for xi, x in enumerate(xvals):
            state[0]['x'] = x
            state[0]['y'] = y
            state[0]['theta'] = np.pi / 2. 
            res[yi, xi] =     le2.score_state(state, img)
    pylab.subplot(1, 2, 2)
    pylab.imshow(res)
    pylab.colorbar()
    pylab.show()
 def apply_butterworth_filter(self):
     """
     Apply Butterworth low pass filter.
     The code is derived from Paragraph 4.8.2, 
     Butterworth Lowpass Filters, 
     "Digital image Processing (3rd edition)" by R.C. Gonzalez.
     """
     max_ksize = max(self.img.shape[0], self.img.shape[1])
     self.dft4img = self.get_dft(self.img, showdft=True)
     cv2.imshow(self.test_winname, self.img)
     cv2.imshow(self.ctrl_panel_winname, np.zeros((100, 600), np.uint8))
     cv2.createTrackbar(
         "stopband**2", self.ctrl_panel_winname, 3, (max_ksize - 1) / 2, self.update_butterworth_win)
     self.update_butterworth_win()
     print "Reducing high frequency noise, Press a to accept"
     while True:
         ch = cv2.waitKey()
         if ch == 27:
             break
         if ch == 97:
             self.img = self.tmp
             plt.imshow(self.img)
             plt.show()
             break
     cv2.destroyAllWindows()
    def static_view(self, m=0, n=1, NS=100):
        """=============================================================
	   Grafica Estatica (m,n) Modo normal:
	    
	    Realiza un grafico de densidad del modo de oscilación (m,n)
	    de la membrana circular en el tiempo t=0

	    ARGUMENTOS:
	      *Numero cuantico angular				m
	      *Numero cuantico radial				n
	      *Resolucion del grid (100 por defecto)		NS
	============================================================="""
        # Grid
        XM = np.linspace(-1 * self.R, 1 * self.R, NS)
        YM = np.linspace(1 * self.R, -1 * self.R, NS)
        # ---------------------------------------------------------------
        Z = np.zeros((NS, NS))
        for i in range(0, NS):
            for j in range(0, NS):
                xd = XM[i]
                yd = YM[j]
                rd = (xd ** 2 + yd ** 2) ** 0.5
                thd = np.arctan(yd / xd)
                if xd < 0:
                    thd = np.pi + thd
                if rd < self.R:
                    Z[j, i] = self.f(rd, thd, 0, m, n)
                # ---------------------------------------------------------------
        Z[0, 0] = -1
        Z[1, 0] = 1
        plt.xlabel("X (-R,R)")
        plt.ylabel("Y (-R,R)")
        plt.title("Circular Membrane: (%d,%d) mode" % (m, n))
        plt.imshow(Z)
        plt.show()
Example #14
def plotMatrix(data):
    fig = plt.figure()
    ax = fig.add_subplot(1,1,1)
    ax.set_aspect('equal')
    plt.imshow(data, interpolation='nearest', cmap=plt.cm.ocean)
    plt.colorbar()
    plt.show()
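
# Hedged usage sketch for plotMatrix above, assuming matplotlib.pyplot is
# imported as plt at module level, as the function expects.
import numpy as np
import matplotlib.pyplot as plt

plotMatrix(np.random.rand(16, 16))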
Example #15
def showimg(self, img):
    from math import sqrt
    from matplotlib import pylab as pl
    pixels = img.shape[0] / 3
    size = int(sqrt(pixels))
    img = img.reshape((3, size, size)).swapaxes(0, 2).swapaxes(0, 1)
    pl.imshow(img, interpolation='nearest')
    pl.show()
def scipy_stuff():
  from scipy.interpolate import griddata
  from matplotlib import pylab
  import pickle
  print("loading points")
  points, x_diff, y_diff = pickle.load(open("temp_data.pickle", "rb"))

  y_pts, x_pts = zip(*points)

  print "Creating grid points"
  grid_points = []
  for j in range(2500):
    for i in range(2500):
      grid_points.append((j, i))

  print "Gridding data"
  x_grid = griddata(points, x_diff, grid_points)
  y_grid = griddata(points, y_diff, grid_points)
  x_grid.shape = (2500, 2500)
  y_grid.shape = (2500, 2500)

  print "Plotting"
  pylab.subplot(3, 1, 1)
  pylab.imshow(x_grid)
  pylab.subplot(3, 1, 2)
  pylab.imshow(y_grid)
  pylab.subplot(3, 1, 3)
  pylab.scatter(x_pts, y_pts)
  pylab.show()
Example #17
def prob_grid():
    im_store=grocery_store_im()
    im=im_store.copy()
    shape=im.shape
    plt.imshow(im)
    print(shape)
    nx = shape[0]
    ny = shape[1]
    print(nx * ny)
    x = np.array([np.arange(nx) for i in range(ny)]).flatten()
    y = np.array([np.repeat(i, nx) for i in range(ny)]).flatten()

    imflat = []
    for plane in range(3):
        f = (im[0:, 0:, plane]).flatten()
        print(f.shape)
        imflat.append(f)

    xc=nx/2.0
    yc=ny/2.0
    rad=100.0
    mask=np.sqrt((x-xc)**2 + (y-yc)**2 ) < rad
    #return mask, x,y,imflat

    imflat_g=imflat[0]
    imflat_g[mask]=0.5
    imflat_g.shape=(nx,ny)

    im[0:,0:,0]=imflat_g
    plt.imshow(im)
def setThreshold(fname, aviProps, bg, ths, morphDiameter):    
    cap = cv2.VideoCapture(fname)
    sPlot = [221,222,223,224]
    nFrames = aviProps[6]
    frames = np.random.randint(int(nFrames/2), nFrames + 1, 4)
    fig = plt.figure()
    cmap = colormap.gray
    cmap.set_over('r')
    for f in np.arange(0,4):
        cap.set(1,frames[f])
        ret, frame = cap.read()
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)      
        bgSubGray = subtractBg(gray, bg)        
        thsGray = applyThreshold(bgSubGray, ths)
        thsGray = erode(thsGray, morphDiameter)
        thsGray = dilate(thsGray, morphDiameter)
        gray[gray==255] = 254
        gray[thsGray==255] = 255
        cOm = ndimage.measurements.center_of_mass(thsGray)
        #if tFrame.sum()/255 < nestThreshold: cOm = nestPosition
        fig.add_subplot(sPlot[f])
        plt.imshow(gray, vmax=254)
        plt.set_cmap(cmap)
        plt.plot(cOm[1],cOm[0], 'o')
    cap.release()
    plt.show()
    return ths, morphDiameter
def show_sample_images():
    for i, imglist in enumerate(imgs[:5]):
        for j, ifile in enumerate(imglist):
            print('{},{}'.format(i, j))
            im = loadim(ifile)
            plt.imshow(im)
            plt.show()
Example #20
    def plotscalefactors(self):
        '''
        Plots the distribution of scalefactors between consecutive images.
        This is to check manually that the hdr image assembly is done correctly.
        '''
        import matplotlib.pylab as plt

        print('Scalefactors for HDR-assembling are', self.scalefactors)
        print('Standard deviations for Scalefactors are', self.scalefactorsstd)

        # Plots the scalefactordistribution and scalefactors for each pixelvalue
        self.scaleforpix = range(len(self.raw))
        for n in range(len(self.raw) - 1):
            A = self.getimgquotient(n)
            B = self._getrealimg(n + 1)
            fig = plt.figure()
            plt.xlabel('x [pixel]')
            plt.ylabel('y [pixel]')
            fig.set_size_inches(10, 10)
            plt.imshow(A)
            plt.clim([0.95 * A[np.isfinite(A)].min(), 1.05 * A[np.isfinite(A)].max()])
            plt.colorbar()
            fig = plt.figure()
            linplotdata = np.array([B[np.isfinite(B)].flatten(), A[np.isfinite(B)].flatten()])
            plt.plot(linplotdata[0, :], linplotdata[1, :], 'ro')
Example #21
def plot_imgs(imgs, ratios=[1, 1]):
    plt.gray()
    gs = gridspec.GridSpec(1, len(imgs), width_ratios=ratios)
    for i in range(len(imgs)):
        plt.subplot(gs[i])
        plt.imshow(imgs[i])
    return gs
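
# Hedged usage sketch for plot_imgs above: two random grayscale images with a
# 2:1 width ratio. Assumes matplotlib.pyplot as plt and matplotlib gridspec
# are imported, as the function requires.
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import gridspec

plot_imgs([np.random.rand(32, 64), np.random.rand(32, 32)], ratios=[2, 1])
plt.show()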
def part2(w):
    f = plt.figure()
    for i in range(16):
        f.add_subplot(4,4,i+1)
        plt.axis('off')
        plt.imshow(np.reshape(normalize(w[1:,i]), (20,20)), cmap = matplotlib.cm.Greys_r)
    plt.savefig('3.png')
Example #23
def plot_valid(b, s):
  from dials.array_family import flex

  b = [0.1, 0.2, 0.3, 0.4, 0.5]
  s = [0.1, 0.3, 0.5, 0.3, 0.1]


  v1 = flex.bool(flex.grid(100, 100))
  v2 = flex.bool(flex.grid(100, 100))
  v3 = flex.bool(flex.grid(100, 100))
  r = [float(ss) / float(bb) for ss, bb in zip(s, b)]

  for BB in range(0, 100):
    for SS in range(0, 100):
      B = -5.0 + BB / 10.0
      S = -5.0 + SS / 10.0
      V1 = True
      V2 = True
      V3 = True
      for i in range(len(b)):
        if B*b[i]+S*s[i] <= 0:
          V1 = False
          break
      for i in range(len(b)):
        if B*b[i] <= -S*s[i]:
          V2 = False
          break

      v1[BB,SS] = V1
      v2[BB,SS] = V2

  from matplotlib import pylab
  pylab.imshow(v1.as_numpy_array())
  pylab.show()
  exit(0)
def display_data(x, **kwargs):
  plt.set_cmap('gray')
  nrows, ncols = x.shape
  example_width = int(kwargs.get('example_width', round(math.sqrt(ncols))))
  example_height = int(ncols / example_width)
  display_rows = int(math.floor(math.sqrt(nrows)))
  display_cols = int(math.ceil(nrows/display_rows))
  pad = 1
  display_array = -np.ones( (pad + display_rows *(example_height + pad),
                            pad + display_cols * (example_width + pad)) );
  curr_ex = 0;
  for j in range(0, display_rows):
    for i in range(0, display_cols):
      if (curr_ex >= nrows):
        break;
      max_val = np.max(np.abs(x[curr_ex, :]))
      x_splice_start = pad + j*(example_height + pad)
      y_splice_start = pad + i*(example_width + pad)
      display_array[x_splice_start:(x_splice_start+example_height),
                    y_splice_start:(y_splice_start+example_width)] = \
                    np.reshape(x[curr_ex,:], (example_height, example_width)) / max_val
      curr_ex += 1
    if (curr_ex >= nrows):
      break
  plt.imshow(display_array)
  plt.show()
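
# Hedged usage sketch for display_data above: 25 random 20x20 images flattened
# into rows and tiled on a 5x5 grid. Assumes math, numpy as np and
# matplotlib.pyplot as plt are imported, as the function requires.
import math
import numpy as np
import matplotlib.pyplot as plt

x_demo = np.random.rand(25, 400)   # 25 examples of 20*20 pixels each
display_data(x_demo, example_width=20)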
Example #25
def plot_prob_for_zero(c, b, s):
  from math import log, exp, factorial
  from dials.array_family import flex
  L = flex.double(flex.grid(100, 100))
  MASK = flex.bool(flex.grid(100, 100))
  c = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0]
  b = [bb / sum(b) for bb in b]
  s = [ss / sum(s) for ss in s]
  for BB in range(0, 100):
    for SS in range(0, 100):
      B = 0 + BB / 10000.0
      S = 0 + SS / 40.0
      LL = 0
      for i in range(len(b)):
        if B*b[i] + S*s[i] <= 0:
          MASK[BB, SS] = True
          LL = -999999
          break
        else:
          LL += c[i]*log(B*b[i]+S*s[i]) - log(factorial(c[i])) - B*b[i] - S*s[i]

      L[BB, SS] = LL
  index = flex.max_index(L)
  i = index % 100
  j = index // 100
  B = 0 + j / 10000.0
  S = 0 + i / 40.0
  print(flex.max(L), B, S)
  from matplotlib import pylab
  import numpy
  im = numpy.ma.masked_array(flex.exp(L).as_numpy_array(), mask=MASK.as_numpy_array())
  pylab.imshow(im)
  pylab.show()
  exit(0)
def PlotMtxError(Corr_w):
    max_val = 1
    min_val = -0.1

    AvCorr = np.sum(Corr_w, axis=0)
    dCorr = Corr_w - AvCorr
    errCorr = np.log10(np.sqrt(np.einsum("i...,i...", dCorr, dCorr)) / np.absolute(AvCorr) / np.sqrt(Corr_w.shape[0]))
    # print errCorr.shape
    # print errCorr

    plt.rcParams.update({"font.size": 6, "font.weight": "bold"})
    for i in range(errCorr.shape[0]):
        plt.subplot(2, 7, i + 1)
        plt.title("SITE " + str(i + 1) + ":: \nHistogram of errors in corr. mtx.")
        plt.hist(errCorr[i, :, :].flatten(), 256, range=(min_val, max_val))
        plt.xlabel("log_10(sigma)")
        plt.ylabel("Count")

        plt.subplot(2, 7, i + 7 + 1)
        plt.imshow(errCorr[i, :, :], vmin=min_val, vmax=max_val)
        cbar = plt.colorbar(shrink=0.25, aspect=40)
        cbar.set_label("log_10(sigma)")
        plt.set_cmap("gist_yarg")
        plt.title("SITE " + str(i + 1) + ":: \nError in corr. matx. values")
        plt.xlabel("Site i")
        plt.ylabel("Site j")
    plt.show()
Example #27
def reconstructContactMap(map, datavec):
    """ Plots a given vector as a contact map

    Parameters
    ----------
    map : np.ndarray 2D
        The map from a MetricData object
    datavec : np.ndarray
        The data we want to plot in a 2D map
    """
    map = np.array(map, dtype=int)
    atomidx = np.unique(map.flatten()).astype(int)
    mask = np.zeros(max(atomidx)+1, dtype=int)
    mask[atomidx] = range(len(atomidx))

    # Create a new map which maps from vector indexes to matrix indexes
    newmap = np.zeros(np.shape(map), dtype=int)
    newmap[:, 0] = mask[map[:, 0]]
    newmap[:, 1] = mask[map[:, 1]]

    contactmap = np.zeros((len(atomidx), len(atomidx)))
    for i in range(len(datavec)):
        contactmap[newmap[i, 0], newmap[i, 1]] = datavec[i]
        contactmap[newmap[i, 1], newmap[i, 0]] = datavec[i]

    from matplotlib import pylab as plt
    plt.imshow(contactmap, interpolation='nearest', aspect='equal')
    plt.colorbar()
    #plt.axis('off')
    #plt.tick_params(axis='x', which='both', bottom='off', top='off', labelbottom='off')
    #plt.tick_params(axis='y', which='both', left='off', right='off', labelleft='off')
    plt.show()
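
# Hedged usage sketch for reconstructContactMap above: a tiny invented contact
# map over three atoms with one value per atom pair. Assumes numpy is imported
# as np, as the function requires.
import numpy as np

demo_map = np.array([[0, 1], [0, 2], [1, 2]])   # atom-index pairs
demo_vec = np.array([0.8, 0.1, 0.5])            # one contact value per pair
reconstructContactMap(demo_map, demo_vec)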
def locate(data, plot=False, rectangle=False, total_pooling=16):
    #    data = cv2.cvtColor(cv2.imread("test1.jpg"), cv2.COLOR_BGR2RGB)

    heatmap = 1 - \
        heatmodel.predict(data.reshape(
            1, data.shape[0], data.shape[1], data.shape[2]))

    if plot:
        plt.imshow(heatmap[0, :, :, 0])
        plt.title("Heatmap")
        plt.show()
        plt.imshow(heatmap[0, :, :, 0] > 0.99, cmap="gray")
        plt.title("Car Area")
        plt.show()

    if rectangle:
        xx, yy = np.meshgrid(
            np.arange(heatmap.shape[2]), np.arange(heatmap.shape[1]))
        x = (xx[heatmap[0, :, :, 0] > 0.99])
        y = (yy[heatmap[0, :, :, 0] > 0.99])

        for i, j in zip(x, y):
            cv2.rectangle(data, (i * total_pooling, j * total_pooling),
                          (i * total_pooling + 48, j * total_pooling + 48), 1)

    return heatmap, data
Example #29
 def func(im):
     plt.figure()
     plt.title(ti)
     if type(im) == list:
         im = np.zeros(max_shape)
     plt.imshow(im,cmap=cmap,vmin=a_min,vmax=a_max)
     plt.axis('off')
Example #30
def search():
	#extract the query from the searchbox
	search_term = searchbox.get()
	print "searched for: "+search_term
	
	search_term = search_term.replace(' ','%20')
	url = ('https://ajax.googleapis.com/ajax/services/search/images?' + 'v=1.0&q='+search_term+'&userip=MyIP')

	request = urllib2.Request(url, None, {'Referer': search_term})
	response = urllib2.urlopen(request)
	
	# Process the JSON string.
	results = simplejson.load(response)
	data = results['responseData']
	img_url=data['results'][0]['unescapedUrl']
	
	#update the url in the url text box
	url_disp.set(img_url)
	
	#check and print the filename of the image
	file_name= img_url.split("/")[-1]
	print "the file name of the image is: " +file_name

	img_data = urllib2.urlopen(img_url).read()
	output = open('dl_img.jpg','wb')
	output.write(img_data)
	output.close()

	#update the displayed image to the image download
	root.image = mpimg.imread('dl_img.jpg')
	plt.imshow(root.image)
	canvas.show()
	root.update()
Example #31
l_ch = 25  #length of portlandite
lx = (l_ch + ll) * 1.0e-6
ly = 2.0e-6
dx = 1.0e-6

domain = yantra.Domain2D(corner=(0, 0),
                         lengths=(lx, ly),
                         dx=dx,
                         grid_type='nodal')
domain.nodetype[:, ll + 1:ll + l_ch] = ct.Type.MULTILEVEL
domain.nodetype[0, :] = ct.Type.SOLID
domain.nodetype[-1, :] = ct.Type.SOLID
domain.nodetype[:, -1] = ct.Type.SOLID

plt.figure(figsize=(5, 5))
plt.imshow(domain.nodetype)
plt.show()

#%%  VALUES
nn = os.path.basename(__file__)[:-3]
fn.make_output_dir(root_dir + '\\results\\output\\04_internal_energy\\')
path = root_dir + '\\results\\output\\04_internal_energy\\' + nn + '\\'
fn.make_output_dir(path)

phrqc_input = {
    'c_bc': {
        'type': 'pco2',
        'value': 3.4
    },  #3.05E-02, 3.74E-02, 4.30E-02
    'c_mlvl': {
        'type': 'conc',
Example #32
    12.4.2018.1: base
    1.5.2019.1: set negative power spectrum values to zero
    
"""
import matplotlib.pylab as plt
import numpy as np
import scipy.fftpack as scfft
import skimage.io as skio

testImage = skio.imread('dCosine1.png')
fft1 = scfft.fft2(testImage)
fft2 = scfft.fftshift( fft1 )
ps = np.abs(fft2)
ps = 20*np.log10(ps+0.1)
ps = ps.astype(int)
h,w = ps.shape
for r in range(h):
    for c in range(w):
        if ps[r,c]<0:
            ps[r,c]=0
plt.figure(1)
plt.clf()
plt.imshow(testImage)
plt.figure(2)
plt.clf()
plt.imshow(ps)
plt.savefig('ps_dCosine1.png')
plt.show()
#skio.imshow(testImage)
#skio.imsave('powerSpectrum.png',np.log10(ps))
    dirichlet_space, points_interior, k)
slp_pot_ext = bempp.api.operators.potential.helmholtz.single_layer(
    dirichlet_space, points_exterior, k0)
dlp_pot_int = bempp.api.operators.potential.helmholtz.double_layer(
    dirichlet_space, points_interior, k)
dlp_pot_ext = bempp.api.operators.potential.helmholtz.double_layer(
    dirichlet_space, points_exterior, k0)

total_field_int = (slp_pot_int * interior_field_neumann -
                   dlp_pot_int * interior_field_dirichlet).ravel()
total_field_ext = (dlp_pot_ext * exterior_field_dirichlet -
                   slp_pot_ext * exterior_field_neumann
                   ).ravel() + Amp * np.exp(1j * k0 * points_exterior[0])

total_field = np.zeros(points.shape[1], dtype='complex128')
total_field[idx_ext] = total_field_ext
total_field[idx_int] = total_field_int
total_field = total_field.reshape([Nx, Ny])

##############################################################
import matplotlib
matplotlib.use('Agg')
from matplotlib import pylab as plt

fig = plt.figure(figsize=(10, 8))
plt.imshow(np.real(total_field.T), extent=[-1000, 1000, -1000, 1000])
plt.xlabel('x')
plt.ylabel('y')
plt.colorbar()
plt.savefig('45m_perm1')
Example #34
            min_idx = temp_l.index(min(temp_l))
            selected_glyph = torch.unsqueeze(data[min_idx, :, :, :], 0)

    return selected_glyph  # 1*3*64*(64*26)


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--epoch',
                        help='number of epochs for training',
                        type=int,
                        default=10)
    parser.add_argument('--batch_size',
                        help='number of batches',
                        type=int,
                        default=2)
    parser.add_argument('--color_path',
                        help='path for style data sources',
                        type=str,
                        default='mini_datasets/Capitals_colorGrad64/')
    parser.add_argument('--noncolor_path',
                        help='path for glyph data sources',
                        type=str,
                        default='mini_datasets/Capitals64/')
    args = parser.parse_args()

    selected_glyph = select(torch.rand((1, 3, 64, 64 * 5)))

    plt.imshow(torch.squeeze(selected_glyph).permute(1, 2, 0))
    plt.show()
Example #35
# concatenate the outputs of the four classifiers
output = keras.layers.concatenate(x)
# build the model
model = keras.Model(inputs=input_tensor, outputs=output)
model.summary()
# generate a diagram of the network structure for inspection
keras.utils.plot_model(model, show_shapes=True, to_file='model.png')

# compile the model
model.compile(loss='categorical_crossentropy',
              optimizer='adadelta',
              metrics=['accuracy'])
# train the model
checkpointer = keras.callbacks.ModelCheckpoint(
    filepath="output/weights.{epoch:02d}--{val_loss:.2f}-{val_acc:.4f}.hdf5",
    verbose=2,
    save_weights_only=True)
history = model.fit_generator(gen(),
                              steps_per_epoch=10000,
                              epochs=10,
                              callbacks=[checkpointer],
                              validation_data=gen(),
                              validation_steps=1000)
model.save('captch_model-1.h5')

# validation
X, y = next(gen(1))
y_pred = model.predict(X)
plt.title('real: %s\npred:%s' % (decode(y), decode(y_pred)))
plt.imshow(X[0], cmap='gray')
Example #36
                else:
                    for i_diff in diffs:
                        i2 = i1 + i_diff
                        for j_diff in diffs:
                            j2 = j1 + j_diff
                            r2 = np.array([i2, j2])

                            val = R - dist(r1, r2)

                            if val > 0:
                                circ[i2, j2] = val
        return circ

    f = plt.figure(figsize=(5, 5))    
    circ = circle(Nx, Ny, R=10)
    im = plt.imshow(circ, cmap='inferno')
    plt.colorbar()
    plt.title(r'filter response ($R = 10$ pixels)')
    plt.xlabel('pixels')
    plt.ylabel('pixels')
    # plt.savefig('data/figs/img/response.pdf', dpi=300)
    plt.show()

    def colorbar(mappable):
        ax = mappable.axes
        fig = ax.figure
        divider = make_axes_locatable(ax)
        cax = divider.append_axes("right", size="5%", pad=0.05)
        return fig.colorbar(mappable, cax=cax)

    f = plt.figure(figsize=(9, 7), constrained_layout=True)
    image_test = x.resize((32, 32))
    image_test = np.array(image_test)
    pic = image_test.reshape((1, 1024))
    sum1 = np.zeros((32, 32))
    signal = []
    for j in range(1024):
        RD_pattern = RD_p[j].reshape((1024, 1))
        anti_pattern = RD_anti[j].reshape((1024, 1))
        pattern = RD_p[j].reshape((1024, 1))
        temp = np.dot(pic, RD_pattern) - np.dot(pic, anti_pattern)
        signal.append(temp)
        sum1 = sum1 + temp * pattern.reshape((32, 32))
    if i % 2000 == 0:
        print("Have finished %d signal" % i)
    signal = np.squeeze(signal)
    signals.append(signal)

plt.figure()
for i in range(200):
    sum3 = np.zeros((32, 32))
    signal = signals[i]
    y = 0
    for k in signal[:1024]:
        y = y + 1
        pattern = RD_p[y - 1].reshape((1024, 1))
        sum3 = sum3 + k * pattern.reshape((32, 32))
    sum3[0, 0] = np.mean(sum3)
    plt.imshow(sum3, cmap=plt.cm.gray)
    plt.axis('off')
    plt.pause(2)
#  plt.show()
    #     train_batches,
    #     epochs=EPOCHS,
    #     validation_data=validation_batches
    # )
    #
    # plot_history(history)
    # model.save('saved_models\\transfer_learning_mobilenet_cat_dog')
    model = tf.keras.models.load_model(
        'saved_models/transfer_learning_mobilenet_cat_dog')

    image_batch, label_batch = next(iter(train_batches.take(1)))
    image_batch = image_batch.numpy()
    label_batch = label_batch.numpy()

    class_names = np.array(info.features['label'].names)
    print(class_names)

    predicted_batch = model.predict(image_batch)
    predicted_batch = tf.squeeze(predicted_batch).numpy()
    predicted_ids = np.argmax(predicted_batch, axis=-1)
    predicted_class_names = class_names[predicted_ids]

    plt.figure(figsize=(10, 9))
    for n in range(30):
        plt.subplot(6, 5, n + 1)
        plt.imshow(image_batch[n])
        color = 'blue' if predicted_ids[n] == label_batch[n] else "red"
        plt.title(predicted_class_names[n].title(), color=color)
        plt.axis('off')
    plt.suptitle("ImageNet prediction")
    plt.show()
Example #39
def fluxExtract(science,bias,dark,flats,hdr,plotap=False,rad=18,plotname=None):
    ' Returns final flux of source '
    # Calibrate Image. Flatten? add bias and flat to input
    flagg = 0
    
    # Default shape of sbig 2x2 binning
    xsize,ysize = 1266,1676
    if xsize != np.shape(bias)[0]:
        print('WARNING: check that size of science file matches assumptions')

    if flats == 0:
        flat = np.ones((xsize,ysize))
    else:
        flat = flats[hdr['FILTER']]
    
    if dark == 0:
        dark = np.ones((xsize,ysize))
    else:
        dark = dark[str(hdr['EXPTIME'])]

    if np.mean(bias) == 0:
        bias = np.zeros((xsize,ysize))
    else:
        bias = bias

    # Size of science to trim calib frames (which are always full)
    # Science frames either full or sub: 400x400 around center ()
    centx, centy = int(1663/2), int(1252/2) # use config file in future, x and y are switched from thesky vs pyfits???
    subframe_size = np.shape(science)
    dx, dy = subframe_size[0] // 2, subframe_size[0] // 2  # integer halves so the slices below stay ints
    
    l,b,r,t = centx - dx, centy+dy, centx+dx, centy-dy # top and bottom switched

    data = (science - bias[t:b,l:r] - dark[t:b,l:r])/flat[t:b,l:r]
    
    if np.mean(data) < 0:
        flagg += 1
        
    # Get source x,y position
    x,y = sourceFinder(science)
    positions = (x,y)
    
    if x==0:
        flagg += 1
    
    if y==0:
        flagg += 1
        
    # Define Apertures
    apertures = CircularAperture(positions, r=rad)
    annulus_apertures = CircularAnnulus(positions, r_in=rad+5, r_out=rad+20)

    # Get fluxes
    rawflux_table = aperture_photometry(data, apertures)
    bkgflux_table = aperture_photometry(data, annulus_apertures)
    bkg_mean = bkgflux_table['aperture_sum'] / annulus_apertures.area()
    bkg_sum = bkg_mean * apertures.area()
    final_sum = rawflux_table['aperture_sum'] - bkg_sum

    # Plot
    if plotap == True:
        plt.ioff()
        plt.figure(-999)
        plt.clf()
        plt.imshow(np.log(data), origin='lower')
        apertures.plot(color='red', lw=1.5, alpha=0.5)
        annulus_apertures.plot(color='orange', lw=1.5, alpha=0.5)
        plt.savefig(plotname)

    return final_sum[0],flagg,x,y,bkg_mean[0]
Example #40
def show_data(data_sample):
    plt.imshow(data_sample[0].numpy().reshape(IMAGE_SIZE, IMAGE_SIZE), cmap='gray')
    plt.title('y = '+ str(data_sample[1].item()))
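
# Hedged usage sketch for show_data above: a fake (image, label) pair built
# with torch. IMAGE_SIZE and matplotlib.pyplot as plt are module-level names
# the function expects, so they are set here for illustration.
import torch
import matplotlib.pyplot as plt

IMAGE_SIZE = 28
demo_sample = (torch.rand(1, IMAGE_SIZE, IMAGE_SIZE), torch.tensor(3))
show_data(demo_sample)
plt.show()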
Example #41
import cv2
import numpy as np
import matplotlib.pylab as plt

o = cv2.imread(r"..\lena.jpg")
orgb = cv2.cvtColor(o, cv2.COLOR_BGR2RGB)
bgd = np.zeros((1, 65), np.float64)
fgd = np.zeros((1, 65), np.float64)
rect = (50, 50, 550, 700)
mask2 = np.zeros(o.shape[:2], np.uint8)
mask2[30:512, 50:400] = 3
mask2[50:700, 150:700] = 1
cv2.grabCut(o, mask2, None, bgd, fgd, 5, cv2.GC_INIT_WITH_MASK)
mask2 = np.where((mask2 == 2) | (mask2 == 0), 0, 1).astype("uint8")
ogc = o * mask2[:, :, np.newaxis]
ogc = cv2.cvtColor(ogc, cv2.COLOR_BGR2RGB)

plt.subplot(121)
plt.imshow(orgb)
plt.axis("off")
plt.subplot(122)
plt.imshow(ogc)
plt.axis("off")
plt.show()
            plt.figure()
            plt.plot(all_dat_z, '.')
            plt.title('{} {}'.format(ss, strain_name))

            #cc = np.nanpercentile(all_dat_c, [99], axis=1)[0]
            #all_outliers_metrics.append((exp_row_id,cc))

            #for ii, dd in enumerate(cc):
            #if dd > 2:
            #    mm = all_JSD[ii][0]
            for ii, dat in enumerate(all_JSD):
                feat, mm, _ = dat[0]
                plt.figure()
                plt.subplot(2, 1, 1)
                plt.imshow(mm[-2])
                plt.subplot(2, 1, 2)
                plt.imshow(mm[-1])

                #plt.subplot(3,1,3)
                #plt.plot(all_dat_c[ii])

                rr = exp_row_id[ii]
                tt = os.path.basename(exp_df.loc[rr, 'mask_file'])
                plt.suptitle('{} {}'.format(rr, tt))

            cc = np.nanmedian(all_dat_z, axis=0)
            all_outliers_metrics.append((exp_row_id, cc))
    #%%
    exp_id, out_metric = map(np.concatenate, list(zip(*all_outliers_metrics)))
Example #43
        density_map,attention_map= sess.run(net, feed_dict={x: image})
        print(time.time()-time1)
        density_map = np.asarray(density_map)
        attention_map= np.asarray(attention_map)

        saver.save(sess, "model/pruneSFANet")

        print(density_map.shape)
        print(attention_map.shape)

        # image_ndarray = density_map.eval(session=sess)
        test=np.squeeze(density_map,0)
        test=np.squeeze(test,-1)
        print(type(test))
        print(test.shape)
        plt.imshow(test)
        plt.show()











# # Testing the data flow of the network with some random inputs.
# if __name__ == "__main__":
#     x = tf.placeholder(tf.float32, [1, 200, 300, 1])
train = pd.read_csv(os.path.join(root_dir, 'train.csv'))

test = pd.read_csv(os.path.join(root_dir, 'test.csv'))

print(train.sample(10))

print(test.sample(10))

img_name = rng.choice(train.ID)

file_path = os.path.join(root_dir, 'Train', img_name)

img = imread(file_path, flatten=True)

pylab.imshow(img)

pylab.axis('off')

pylab.show()

temp = []

for img_name in train.ID:
    img_path = os.path.join(root_dir, 'Train', img_name)
    img = imread(img_path)
    img = imresize(img, (32, 32))
    img = img.astype('float32')
    temp.append(img)
    print(len(temp))
        x1, y1 = map(int, [c, -(r[2] + r[0] * c) / r[1]])
        img1 = cv.line(img1, (x0, y0), (x1, y1), color, 1)
        img1 = cv.circle(img1, tuple(pt1), 5, color, -1)
        img2 = cv.circle(img2, tuple(pt2), 5, color, -1)
    return img1, img2


# Find epilines corresponding to points in right image (second image) and
# drawing its lines on left image
lines1 = cv.computeCorrespondEpilines(pts2.reshape(-1, 1, 2), 2, F)
lines1 = lines1.reshape(-1, 3)
img5, img6 = drawlines(img1, img2, lines1, pts1, pts2)

# Find epilines corresponding to points in left image (first image) and
# drawing its lines on right image
lines2 = cv.computeCorrespondEpilines(pts1.reshape(-1, 1, 2), 1, F)
lines2 = lines2.reshape(-1, 3)
img3, img4 = drawlines(img2, img1, lines2, pts2, pts1)
display(img3, 0, 2)
display(img4, 0, 2)
display(img5, 0, 2)
display(img6, 0, 2)

plt.subplot(121), plt.imshow(img5)
plt.subplot(122), plt.imshow(img3)
#plt.show()

E = cv.transpose(mtx) * F * mtx

s, u, vt = cv.SVDecomp(E)
Example #46
    subdir = './live_demo_temp/'
elif 'att2in' in opt.model:
    subdir = '/datadrive/att_results/demo_att2in/'
else:
    assert False
img_name = opt.demo_image.split('/')[-1][:-4]
output_dir = subdir + img_name + '/'
if not os.path.exists(output_dir):
    os.mkdir(output_dir)

# save predicted sentence
fd = open(output_dir + '/' + img_name + '.caption.txt', 'w')
fd.write(sents[0])
fd.close()

for i in range(1, len(alphas) - 1):

    a = alphas[i].cpu().data.numpy().squeeze()
    np.unravel_index(a.argmax(), a.shape)

    plt.figure()
    plt.imshow(vis_image)

    a -= np.min(a) * 0.5
    a /= np.max(a)
    attention_map = transform.resize(a, vis_image.shape[:2], order=1)
    plt.imshow(attention_map, alpha=0.3)

    plt.savefig(os.path.join(output_dir, 'demo_image_' + str(i) + '.jpg'))
    plt.close()
Example #47
print(train_loss_list)
# net.params can be saved so that W and b can be loaded directly next time
print(net.params)

x = np.arange(1,10001,1)

import matplotlib.pylab as plt


plt.xlabel('iteration')
plt.ylabel('loss')
plt.figure()
plt.subplot(2,2,1)
plt.plot(x,train_loss_list)
plt.subplot(2,2,2)
plt.imshow(x_test[3:4].reshape(28,28))
plt.subplot(2,2,3)
plt.imshow(x_test[1000:1001].reshape(28,28))

plt.subplot(2,2,4)
plt.imshow(x_test[5000:5001].reshape(28,28))

plt.show()

# after 1,000 iterations the 9 is misclassified as 7; after 10,000 iterations it is classified correctly
print(np.argmax(net.predict(x_test[1000:1001])))
print(np.argmax(t_test[1000:1001]))



import cv2
import matplotlib.pylab as plt

# build the argument parser and its arguments

ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", help="path to the image")
ap.add_argument("-c",
                "--coords",
                help="comma-separated list of points")
args = vars(ap.parse_args())
'''
Load the image and grab the coordinates (a list of (x, y) points).
This should be automated so the script can run on its own.
'''
image = cv2.imread(args["image"])
pts = np.array(eval(args["coords"]), dtype="float32")
print(pts)
# apply the four-point transform to obtain the image with the desired perspective
warped = four_point_transform(image, pts)
# show the original image and the warped one
plt.imshow(image, cmap='gray', interpolation='bicubic')
plt.xticks([]), plt.yticks([])  # to hide tick values on X and Y axis
plt.show()
plt.imshow(warped, cmap='gray', interpolation='bicubic')
plt.xticks([]), plt.yticks([])  # to hide tick values on X and Y axis
plt.show()
#cv2.imshow("Original",image)
#cv2.imshow("Warped",warped)
#cv2.waitkey(0)
Example #49
ax.set_zlabel('E')


#plotting the D vector
plt.figure(3)
ax = plt.axes(projection='3d')
ax.scatter3D(dx,dy,dz,c=dz, cmap='plasma',linewidth=0.1)
ax.set_xlabel('dx')
ax.set_ylabel('dy')
ax.set_zlabel('dz')
plt.title('Vector D')
plt.show()

#plotting the Berry phase
plt.figure(1)
plt.subplot(211)
plt.plot(count,Fl,'k-')
plt.plot(count,Fl,'k-')
plt.subplot(212)
plt.plot(pathx,pathy,'m-',linewidth=1.2)


plt.figure(2)
ax = plt.axes(projection='3d')
ax.scatter3D(kxlist,kylist,Flist,c=Flist, cmap='rainbow',linewidth=0.1)
plt.figure(3)
im = plt.imshow(np.array(np.transpose(temp)), cmap='rainbow', extent=(-3.14,3.14,3.14,-3.14),interpolation='bilinear')
plt.colorbar(im)
plt.show()

Example #50
T = int(sys.argv[2])  # 1 is a quarter
col.pop_capacity = N

cells = np.zeros([N, T]) - 1
food = np.zeros(T)

for t in range(0, T):
    col.update_population()
    col.remove_dead()
    col.consume_resource()
    col.remove_dead()
    col.ensure_queen()
    food[t] = col.resource_stockpile
    if len(col.population) > np.size(cells[:, 1]):
        # print('Too many mole rats!')
        new = len(col.population) - np.size(
            cells[:, 1])  # number of new rows needed
        cells = np.append(cells, np.zeros([new, T]) - 1, axis=0)
    for i in range(0, len(col.population)):
        cells[i, t] = mole_sim.custom_sort(col.population[i])

import matplotlib.pylab as plt

plt.imshow(cells, aspect='auto')
plt.show()

plt.plot(range(T), food)
plt.show()

np.savetxt('population_history.txt', cells, fmt='%i')
np.savetxt('resource_history.txt', food, fmt='%i')
Example #51
                    pass
                  else:
#                    F[Y,X]=math.log1p(P[i,2])
                    F[Y,X]=P[i,2]
                FGK = np.zeros(F.shape)
                FGK = convolve2d_1ch(F, GK, padding='edge')
                FGK /=np.linalg.norm(FGK)
                for i in range(nW):
                  for j in range(nW):
                    fpout.write('{:g} '.format(FGK[i,j]))
                fpout.write('{}\n'.format(s))
#                import pdb;pdb.set_trace(); #for debug 20191105
#                print P
#                plt.imshow(FGK);plt.title("Feature");plt.colorbar();plt.show();
#                plt.imshow(FGK);plt.title("Feature");plt.colorbar();plt.pause(0.05)
                plt.imshow(FGK);plt.title("Feature");plt.pause(0.05)
#
    fpout.close()
    print('#Saved {}.'.format(fnout))
#              import pdb;pdb.set_trace(); #for debug 20191105
#              cmd='cp tmp/*-poles0.dat {}/'.format(dp)
#              print(cmd);
#              myshell(cmd)
#              cmd='mkpoleptv2+ st:{} k:8 Np:24 dir:tmp dir2:{} dout:{} rsa:2:20 tt:0:400 nt:18 nr:2 r_max:2 DISP:{}'.format(sm,dm,dq,DISP)
#              print(cmd);
#              myshell(cmd)
#              #mkpoleptv2+.c 
#              #(1)reads <dir2>/<st>   #ex: <dm>/<fms-zero9-R1> ??
#              #(2)calls mkpoleptv1+, which 
#              #   reads <dir>/*-poles0.dat and 
#              #   creates q (*-p.dat)  in <dq>, 
Example #52
    batch_xs, batch_ys = mnist.train.next_batch(
        100)  #mnist.train.next_batch(100)#
    sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys, keep_prob: 0.5})
    if i % 100 == 0:
        train_accuracy = 0
        for j in range(100):
            batch_xs, batch_ys = mnist_test.train.next_batch(100)
            train_accuracy = train_accuracy + accuracy.eval(session=sess,
                                                            feed_dict={
                                                                x: batch_xs,
                                                                y_: batch_ys,
                                                                keep_prob: 1
                                                            })
        lr = sess.run(learning_rate)
        print("step %d, lr=%g ,training accuracy %g" %
              (i, lr, train_accuracy / 100))

#Import as grayscale images
name = 'input_number.jpg'
img = Image.open(name).convert('L')
plt.imshow(img)
#transform them into 28*28 size
img.thumbnail((28, 28))
#float
img = numpy.array(img, dtype=numpy.float32)
img = 1 - numpy.array(img / 255)
img = img.reshape(1, 784)
p = sess.run(y_conv, feed_dict={x: img, y_: [[0.0] * 10], keep_prob: 0.5})[0]
#prediction
print(name + "is recognized as:")
print(numpy.argmax(p))
            cv2.line(blank_image, (x1, y1), (x2, y2), (0, 255, 0), 2)

    img = cv2.addWeighted(img, 0.8, blank_image, 1, 0)
    return img


img = cv2.imread('Test.jpg')
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

print(img.shape)
height = img.shape[0]
width = img.shape[1]

roi_vertices = [(width / 2, height), (width / 2, height / 2),
                (width / 1.65, height / 2), (width, height / 1.25),
                (width, height)]

grayimg = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
canny = cv2.Canny(grayimg, 75, 150)
crop = roi(canny, numpy.array([roi_vertices], numpy.int32))
lines = cv2.HoughLinesP(crop,
                        rho=6,
                        theta=numpy.pi / 60,
                        threshold=160,
                        lines=numpy.array([]),
                        minLineLength=40,
                        maxLineGap=25)
line_image = draw_lines(img, lines)

pylab.imshow(line_image)
pylab.show()
Example #54
### 03 create parallel projection

print("Projecting...")
phantom = foam_ct_phantom.FoamPhantom(prefix + 'phantom' + suffix)

geom = foam_ct_phantom.ParallelGeometry(256, 256,
                                        np.linspace(0, np.pi, 128, False),
                                        3 / 256)

phantom.generate_projections(prefix + 'proj_par' + suffix, geom)

projs = foam_ct_phantom.load_projections(prefix + 'proj_par' + suffix)

print(len(projs), "projections, shape", projs.shape)
pl.imshow(projs[0])
pl.savefig("projected.png")
pl.imshow(projs[3])
pl.savefig("projected3.png")

### 08 add poisson noise
print("Adding Noise...")
fac = foam_ct_phantom.estimate_absorption_factor(prefix + 'proj_par' + suffix,
                                                 0.5)

foam_ct_phantom.apply_poisson_noise(input_file=prefix + 'proj_par' + suffix,
                                    output_file=prefix + 'proj_noisy' + suffix,
                                    seed=1234,
                                    flux=100,
                                    absorption_factor=fac)
Example #55
def show_image(image):
    plt.imshow(image, cmap='gray', vmin=0, vmax=255)
    plt.show()
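
# Hedged usage sketch for show_image above, assuming matplotlib.pyplot is
# imported as plt, as the function requires.
import numpy as np
import matplotlib.pyplot as plt

show_image(np.random.randint(0, 256, size=(64, 64)))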
Example #56
if 0:
    # rotate image 45 degree
    rows,cols = img1.shape
    M = cv2.getRotationMatrix2D((cols/2,rows/2),-1,1)
    img2 = cv2.warpAffine(img1,M,(cols,rows))
else:
    img2 = cv2.imread(filename_2)
    img2 = cv2.cvtColor(img2,cv2.COLOR_BGR2GRAY)


orb = cv2.ORB_create()

# find the keypoints and descriptors with ORB
kp1, des1 = orb.detectAndCompute(img1,None)
kp2, des2 = orb.detectAndCompute(img2,None)

# create BFMatcher object
bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)

# Match descriptors.
matches = bf.match(des1,des2)

# Sort them in the order of their distance.
matches = sorted(matches, key = lambda x:x.distance)

# Draw first 10 matches.
matchNumber = 10
img3 = cv2.drawMatches(img1, kp1, img2, kp2, matches[:matchNumber], None, flags=2)

plt.imshow(img3),plt.show()
import numpy as np
import scipy
import skimage as ski
import skimage.io
import matplotlib.pylab as plt

im = ski.io.imread('world-map.gif').astype('float64')  # read image
im = np.sum(im, axis=2)  # sum across the three colour channels
plt.imshow(im)  # display the image
plt.gray()
plt.show()

d, v = np.linalg.eig(np.cov(im.transpose()))

x = im
m = np.mean(x, axis=1)

for i in range(x.shape[0]):
    x[i, :] = x[i, :] - m[i]

p = 4

# reconstruct the data with top p eigenvectors
y = (x @ v[:, :p]) @ v[:, :p].transpose()

# plotting
fig, (ax1, ax2, ax3) = plt.subplots(3, 1)
ax1.imshow(im)
ax2.imshow(np.abs(y))

# add mean
Example #58
aim[20:25, 20:25] = 1j
aim[30:40, 30:40] = .1 + .1j
dbm = a.img.gaussian_beam(2, shape=aim.shape).astype(n.complex)
dbm[0, 0] = 2
dbm /= dbm.sum()

dim = n.fft.ifft2(n.fft.fft2(aim) * n.fft.fft2(dbm))

print('COMPLEX TEST:')
cim, info = a.deconv.clean(dim, dbm)
print(info)
print('-----------------------------------------------------------------')

p.subplot(221)
dat = n.log(n.abs(dim))
p.imshow(dat, vmin=dat.max() - 6, vmax=dat.max())
p.colorbar(shrink=.5)
p.subplot(222)
dat = n.log(n.abs(a.img.recenter(dbm, (SIZE / 2, SIZE / 2))))
p.imshow(dat, vmin=dat.max() - 6, vmax=dat.max())
p.colorbar(shrink=.5)
p.subplot(223)
dat = n.log(n.abs(cim))
p.imshow(dat, vmin=dat.max() - 6, vmax=dat.max())
p.colorbar(shrink=.5)
p.subplot(224)
dat = n.log(n.abs(aim - cim))
p.imshow(dat, vmin=dat.max() - 6, vmax=dat.max())
p.colorbar(shrink=.5)
p.show()
Example #59
    #%%
    batch_x, batch_y = next(img_generator)
    batch_y = transform2skelxy(batch_y[:, :48], batch_y[:, 48], batch_y[:, 49],
                               batch_y[:, 50:])

    n_rows, n_cols = 3, 3
    tot = n_rows * n_cols

    plt.figure()
    for mm in range(tot):
        img = batch_x[mm]
        yr = batch_y[mm]

        yr = yr * img.shape[1] / 2 + img.shape[1] / 2.
        plt.subplot(n_rows, n_cols, mm + 1)
        plt.imshow(np.squeeze(img), interpolation='none', cmap='gray')
        plt.plot(yr[:, 0], yr[:, 1], 'r')
        plt.plot(yr[0, 0], yr[0, 1], 'x')
#%%
#from skimage.filters import rank
#
#plt.figure()
#plt.subplot(1,2,1)
#plt.imshow(np.squeeze(img*255), interpolation='none', cmap='gray')
#plt.subplot(1,2,2)
#
#img_r = np.round((np.abs(img)**3)*255*2)
#img_r = -np.clip(img_r, 0, 255)/255
#img_r = gaussian_filter(img_r, sigma=1.5)
#plt.imshow(np.squeeze(img_r), interpolation='none', cmap='gray')
solver_params['collision_model']='srt'
solver_params['phrqc_flags']={}
solver_params['phrqc_flags']['only_interface']=True
#solver_params['phrqc_flags']['smart_run']=True
#solver_params['phrqc_smart_run_tol']=1e-8
rt= yantra.PhrqcReactiveTransport('AdvectionDiffusion', domain,
                                  domain_params,{},solver_params)
#%%run model
time=[]
AvgCa =[]
iters = 2
while rt.iters < iters: #rt.time<=1:#20
    rt.advance()
    AvgCa.append(np.sum(rt.fluid.Ca.c)/np.sum(rt.fluid.Ca.nodetype<=0)) 
    time.append(rt.time)
        
#%%plot results
plt.figure()
plt.plot(time,AvgCa)
plt.xlabel('Time [s]')
plt.ylabel('Avg. Ca conc in aqueous phase [mM]')
plt.show()

plt.figure()
plt.imshow(rt.fluid.Ca.c)
plt.colorbar()
plt.title('Ca concentration [mol/l]')
plt.show()

print(rt.fluid.Ca._ss[3:8,3:8] + rt.fluid.Ca._c[3:8,3:8])
print(rt.solid.portlandite.c[5,5])