Example #1
File: demo_icp.py Project: amiller/rtmodel
def animate_random(max_iters=1000, mod=100):
    global pnew, points_range
    # Apply a perturb to points_p
    obj.RT = np.eye(4, dtype='f')
    obj.RT[:3,3] = -obj.vertices[:,:3].mean(0)
    obj.RT[:3,3] += [0,0,-3.0]
    RT = obj.RT

    prev_rimg = obj.range_render(camera.kinect_camera())
    window.canvas.SetCurrent()
    pnew = prev_rimg.point_model(True)
    points_range = pnew

    if 0:  # disabled in the original source; M is defined in elided context
        obj.RT = np.dot(RT, M)
        rimg = obj.range_render(camera.kinect_camera())
        window.canvas.SetCurrent()
        pm = rimg.point_model(True)
        points_range = pm

        for iters in range(max_iters):
            pnew, err, npairs, uv = fasticp.fast_icp(rimg, pnew, 1000, dist=0.005)
            if iters % mod == 0:
                # print '%d iterations, [%d] RMS: %.3f' % (iters, npairs, np.sqrt(err))
                window.Refresh()
                pylab.waitforbuttonpress(0.02)

        pnew = pm

        window.Refresh()
        pylab.waitforbuttonpress(0.02)        
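All of the snippets on this page lean on the same idiom: pylab.waitforbuttonpress(timeout) both flushes the GUI event loop and paces the animation. It returns True for a key press, False for a mouse click, and None when the timeout elapses. A minimal self-contained sketch of that idiom (plain matplotlib, no project-specific modules):

import numpy as np
import pylab

# Redraw random noise each frame; a ~50 ms timeout doubles as the frame delay.
img = pylab.imshow(np.random.rand(64, 64))
for _ in range(100):
    img.set_data(np.random.rand(64, 64))
    pylab.draw()
    # True = key, False = mouse, None = timed out
    if pylab.waitforbuttonpress(0.05) is not None:
        break  # any key or click stops the animation
pylab.close()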
Example #2
File: demo_icp.py Project: amiller/rtmodel
def perturb(max_iters=100, mod=10):
    global pnew, uv, err, points_range, rimg, range_image

    # Apply a perturb to points_p
    obj.RT = np.eye(4, dtype='f')
    obj.RT[:3,3] = -obj.vertices[:,:3].mean(0)
    obj.RT[:3,3] += [0,0,-3.0]

    # Rotated object view
    RT = obj.RT
    rp = random_perturbation().astype('f')
    obj.RT = np.dot(rp, obj.RT)
    range_image = obj.range_render(camera.kinect_camera())
    obj.RT = RT

    points_range = range_image.point_model(True)

    # Original object view
    rimg = obj.range_render(camera.kinect_camera())
    pnew = rimg.point_model()

    # Estimate the transformation rp

    for iters in range(max_iters):
        npairs, pnew = model.align(range_image, pnew, rtmodel.RATE1, rtmodel.DIST1, 6)
        #pnew, err, npairs, uv = fasticp.fast_icp(range_image, pnew, 0.1, dist=0.05)
        if iters % mod == 0 or 1:
            #print '%d iterations, [%d] RMS: %.3f' % (iters, npairs, np.sqrt(err))
            window.Refresh()
            pylab.waitforbuttonpress(0.02)
            break

    window.Refresh()
Example #3
File: toolsPlot.py Project: htlemke/ixppy
def getCoordinate(direction='both',axh=None,fig=None):
  """Tool for selecting a coordinate, functionality similar to ginput for a single point. Finish with right mouse button."""
  if not axh:
    axh = pl.gca()
  if not fig: fig = pl.gcf()
  hor = False; ver = False
  if direction in ('horizontal', 'hor', 'both'):
    hor = True
  if direction in ('vertical', 'ver', 'both'):
    ver = True

  finished = [False]  # list so the callback can mutate the flag
  def button_press_callback(event):
    if event.inaxes and event.button == 3:
      finished[0] = True
  fig.canvas.mpl_connect('button_press_event', button_press_callback)
  print("Select a coordinate, finish with right click.")
  linh = []
  pos = None
  while not finished[0]:
    for tlinh in linh:
      tlinh.remove()
    linh = []
    pl.draw()
    pos = pl.ginput(1)[0]
    if hor:
      linh.append(pl.axvline(pos[0]))
    if ver:
      linh.append(pl.axhline(pos[1]))
    pl.draw()
    pl.waitforbuttonpress()

  
  fig.canvas.draw()
  return pos
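A minimal usage sketch, assuming pl is pylab and getCoordinate above is in scope: plot a curve, then pick a point on it.

import numpy as np
import pylab as pl

pl.plot(np.sin(np.linspace(0, 10, 200)))
x, y = getCoordinate(direction='both')  # left click to place, right click to accept
print("picked:", x, y)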
Example #4
def once():
    global depth, rgb
    if not FOR_REAL:
        dataset.advance()
        depth = dataset.depth
        rgb = dataset.rgb
    else:
        opennpy.sync_update()
        depth,_ = opennpy.sync_get_depth()
        rgb,_ = opennpy.sync_get_video()

    def from_rect(m,rect):
        (l,t),(r,b) = rect
        return m[t:b,l:r]

    global mask, rect, modelmat

    try:
        (mask,rect) = preprocess.threshold_and_mask(depth,config.bg)
    except IndexError:
        return

    cv.ShowImage('mask',mask.astype('u1')*255)

    global label_image
    label_image = classify.predict(depth)
    cv.ShowImage('label_image', ((label_image[0]+1)*100*mask).astype('u1'))
    pylab.waitforbuttonpress(0.03)
Example #5
def once():
    global depth, rgb
    preview.canvas.SetCurrent()

    opennpy.sync_update()
    depth,_ = opennpy.sync_get_depth()
    rgb,_ = opennpy.sync_get_video()

    main.update_frame(depth, rgb)

    blockdraw.clear()
    #blockdraw.show_grid('o1', main.occvac.occ, color=np.array([1,1,0,1]))
    if 'RGB' in stencil.__dict__:
        blockdraw.show_grid('occ', grid.occ, color=grid.color)
    else:
        blockdraw.show_grid('occ', grid.occ, color=np.array([1,0.6,0.6,1]))

    preview.clearcolor=[0,0,0,0]
    preview.flag_drawgrid = True

    if 'R_correct' in main.__dict__:
        preview.modelmat = main.R_display
    else:
        preview.modelmat = main.R_aligned

    preview.Refresh()
    window.Refresh()
    pylab.waitforbuttonpress(0.005)
Example #6
def once():
    global depth, rgb
    if not FOR_REAL:
        dataset.advance()
        depth = dataset.depth
        rgb = dataset.rgb
    else:
        opennpy.sync_update()
        depth,_ = opennpy.sync_get_depth()
        rgb,_ = opennpy.sync_get_video()

    main.update_frame(depth, rgb)

    blockdraw.clear()
    if 'RGB' in stencil.__dict__:
        blockdraw.show_grid('occ', grid.occ, color=grid.color)
    else:
        blockdraw.show_grid('occ', grid.occ, color=np.array([1,0.6,0.6,1]))

    window.clearcolor=[0,0,0,0]
    window.flag_drawgrid = True

    if 'R_correct' in main.__dict__:
        window.modelmat = main.R_display
    
    g = blockcraft.translated_rotated(main.R_correct, grid.occ)
    talk_to_minecraft(g)
    
    window.Refresh()
    pylab.waitforbuttonpress(0.005)
    import sys
    sys.stdout.flush()
Example #7
def error(x):
    theta, dist = x
    line = middle_offset(theta, dist, size)
    s = 1. / (d.score(line, True) + 1e-5)
    clf()
    imshow(d.debug * d.image)
    pylab.waitforbuttonpress(0.01)
    return s
Example #8
File: record.py Project: amiller/quartet
def show_depth(name, depth):
    #im = cv.CreateImage((depth.shape[1],depth.shape[0]), 8, 3)
    #cv.SetData(im, colormap.color_map(depth/2))
    #cv.ShowImage(name, im)
    #cv2.imshow(name, colormap.color_map(depth/2))
    cv2.imshow(name, 1024./depth)
    #pylab.imshow(colormap.color_map(depth))
    pylab.waitforbuttonpress(0.005)
Example #9
    def testing(self, testFace, visualiseInfo=None):
        # Returns the predictive mean, the predictive variance and the axis (pp) of the latent space backwards mapping.            
        ret = self.SAMObject.pattern_completion(testFace, visualiseInfo=visualiseInfo)
        mm = ret[0]
        vv = ret[1]
        post = ret[3]        
        # find nearest neighbour of mm and SAMObject.model.X
        dists = numpy.zeros((self.SAMObject.model.X.shape[0],1))

        facePredictionBottle = yarp.Bottle()
    
        for j in range(dists.shape[0]):
            dists[j,:] = distance.euclidean(self.SAMObject.model.X.mean[j,:], mm[0].values)
            print("Dist: " + str(testFace.shape))
        nn, min_value = min(enumerate(dists), key=operator.itemgetter(1))
        if self.SAMObject.type == 'mrd':
            ret_y = self.SAMObject.model.bgplvms[1]._raw_predict(post.X)
            vv_y = ret_y[1]
            print("With " + str(vv.mean()) + " (" + str(vv_y) + ") prob. error the new image is " + self.participant_index[int(self.SAMObject.model.bgplvms[1].Y[nn,:])])
            textStringOut = self.participant_index[int(self.SAMObject.model.bgplvms[1].Y[nn,:])]

        elif self.SAMObject.type == 'bgplvm':
            print("With " + str(vv.mean()) + " prob. error the new image is " + self.participant_index[int(self.L[nn,:])])
            textStringOut = self.participant_index[int(self.L[nn,:])]
        if (vv.mean()<0.00012):
            choice=numpy.random.randint(4)
            if (choice==0):
                 facePredictionBottle.addString("Hello " + textStringOut)
            elif(choice==1):
                 facePredictionBottle.addString("I am watching you " + textStringOut)
            elif(choice==2):
                 facePredictionBottle.addString(textStringOut + " could you move a little you are blocking my view of the outside")
            else:
                 facePredictionBottle.addString(textStringOut + " will you be my friend")                  
            # Otherwise ask for updated name... (TODO: add in updated name)
        else:
            facePredictionBottle.addString("I think you are " + textStringOut + " but I am not sure, please confirm?")        
     
        # Plot the training NN of the test image (the NN is found in the INTERNAl, compressed (latent) memory space!!!)
        if visualiseInfo is not None:
            fig_nn = visualiseInfo['fig_nn']
            fig_nn = pb.figure(11)
            pb.title('Training NN')
            fig_nn.clf()
            pl_nn = fig_nn.add_subplot(111)
            pl_nn.imshow(numpy.reshape(self.SAMObject.recall(nn),(self.imgHeightNew, self.imgWidthNew)), cmap=plt.cm.Greys_r)
            pb.title('Training NN')
            pb.show()
            pb.draw()
            pb.waitforbuttonpress(0.1)
            
        self.speakStatusPort.write(self.speakStatusOutBottle, self.speakStatusInBottle)

        if( self.speakStatusInBottle.get(0).asString() == "quiet"):
            self.outputFacePrection.write(facePredictionBottle)

        facePredictionBottle.clear()
Example #10
def once():
    dataset.advance()
    depthL, depthR = dataset.depthL, dataset.depthR
    maskL, rectL = preprocess.threshold_and_mask(depthL, config.bgL)
    maskR, rectR = preprocess.threshold_and_mask(depthR, config.bgR)
    show_mask("maskL", maskL.astype("f"), rectL)
    show_mask("maskR", maskR.astype("f"), rectR)

    pylab.waitforbuttonpress(0.01)
Example #11
def check_dataset(dataset, labels, label_map, index):
    data = np.uint8(dataset[index]).reshape((32, 32))
    i = np.argwhere(labels[index] == 1)[0][0]
    # Use matplotlib for display; cv2's imshow may be unavailable in some
    # opencv-python builds (e.g. headless or Tk-only Linux setups).
    import matplotlib.pyplot as plt
    import pylab
    plt.ion()
    plt.imshow(data)
    pylab.waitforbuttonpress(timeout=5)
    print("label:", label_map[i])
Example #12
def animate():
    while True:

        line = random_middle_line()
        d = DividingLine(synthetic_image(line=line))
        d.traverse(line, True)
        #d.traverse_np(line, True)
        pylab.clf()
        pylab.imshow(d.debug)
        pylab.waitforbuttonpress(0.01)
Example #13
def sample_rays(n_rays=10000, reset=False):
    global paths
    global total_rays
    global line_verts, line_colors

    if reset or 'line_verts' not in globals():
        paths = []
        total_rays = 0
        line_verts = np.empty((0,3),'f')
        line_colors = np.empty((0,3),'f')
    total_rays += n_rays
    line_verts_ = []
    line_colors_ = []

    ps = mycybvh.sample_rays(source, sink, sinkrad, n_rays, ROULETTE)
    keys = ['source','sink','diverge','scaflect']

    for path in ps:
        p_ = []
        x1 = path[0]['origin']
        x1 = x1['x'], x1['y'], x1['z']
        orgn = True
        for p in path[1:]:
            o = p['origin']
            d = p['direction']
            ntype = keys[p['ntype']]
            cumdist = p['cumdist']
            origin = o['x'],o['y'],o['z']
            direction = d['x'],d['y'],d['z']
            x2 = origin
            if ntype == 'sink':
                line_colors_ += 2*((1,.6,.6),)
            elif orgn:
                line_colors_ += 2*((.6,.6,1),)
                orgn=False
            else:
                line_colors_ += 2*((1,1,1),)
            x2 = origin
            line_verts_.append(x1)
            line_verts_.append(x2)
            x1 = x2
            p_.append((origin, direction, ntype, cumdist))
        paths.append(p_)
    if line_colors_:
        line_colors = np.vstack((line_colors, np.array(line_colors_,'f')))
        line_verts = np.vstack((line_verts, np.array(line_verts_,'f')))
    window.Refresh()

    pylab.clf()
    times, pressure = energy_contributions()
    pylab.hist(times,weights=pressure,bins=100, range=(0,0.2))
    pylab.waitforbuttonpress(0.03)
    update_filter()
Example #14
    def testing(self, testFace, choice, visualiseInfo=None):
        # Returns the predictive mean, the predictive variance and the axis (pp) of the latent space backwards mapping.            
        #mm,vv,pp=self.SAMObject.pattern_completion(testFace, visualiseInfo=visualiseInfo)

        ret=self.SAMObject.pattern_completion(testFace, visualiseInfo=visualiseInfo)
         
        mm = ret[0]
        vv = ret[1]
        post = ret[3]        

        # find nearest neighbour of mm and SAMObject.model.X
        dists = numpy.zeros((self.SAMObject.model.X.shape[0],1))

        facePredictionBottle = yarp.Bottle()
    
        for j in range(dists.shape[0]):
            dists[j,:] = distance.euclidean(self.SAMObject.model.X.mean[j,:], mm[0].values)
        nn, min_value = min(enumerate(dists), key=operator.itemgetter(1))
        if self.SAMObject.type == 'mrd':
            print("With " + str(vv.mean()) + " prob. error the new image is " + self.participant_index[int(self.SAMObject.model.bgplvms[1].Y[nn,:])])
            textStringOut = self.participant_index[int(self.SAMObject.model.bgplvms[1].Y[nn,:])]

        elif self.SAMObject.type == 'bgplvm':
            print("With " + str(vv.mean()) + " prob. error the new image is " + self.participant_index[int(self.L[nn,:])])
            textStringOut = self.participant_index[int(self.L[nn,:])]
        if(choice.get(0).asInt() == 16 and vv.mean()<0.00012):            
            facePredictionBottle.addString("You are " + textStringOut)
        elif(choice.get(0).asInt() == 16 and vv.mean()>0.00012):
            facePredictionBottle.addString("I think you are " + textStringOut + " but I am not sure, please confirm?")        
     
        # Plot the training NN of the test image (the NN is found in the INTERNAl, compressed (latent) memory space!!!)
        if visualiseInfo is not None:
            fig_nn = visualiseInfo['fig_nn']
            fig_nn = pb.figure(11)
            pb.title('Training NN')
            fig_nn.clf()
            pl_nn = fig_nn.add_subplot(111)
            pl_nn.imshow(numpy.reshape(self.SAMObject.recall(nn),(self.imgHeightNew, self.imgWidthNew)), cmap=plt.cm.Greys_r)
            pb.title('Training NN')
            pb.show()
            pb.draw()
            pb.waitforbuttonpress(0.1)
            
        self.speakStatusPort.write(self.speakStatusOutBottle, self.speakStatusInBottle)

        if( self.speakStatusInBottle.get(0).asString() == "quiet"):
            self.outputFacePrection.write(facePredictionBottle)

        facePredictionBottle.clear()
        #return pp

        return ret[2]
Example #15
def click_point(im):
    fig = pylab.figure(1)
    pylab.imshow(im)
    #pylab.xlim(xlim)
    #pylab.ylim(ylim)
    point = []
    def pick(event): 
        point.append((event.xdata, event.ydata))
    cid = fig.canvas.mpl_connect('button_press_event', pick)
    print("Click a point")
    while not point: pylab.waitforbuttonpress()
    print "Ok!", point
    return point[0]
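Usage sketch, assuming click_point above is defined; the image is a random placeholder:

import numpy as np
import pylab

im = np.random.rand(100, 100)
x, y = click_point(im)
print("clicked at", x, y)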
Example #16
File: toolsPlot.py Project: htlemke/ixppy
def getRectangleCoordinates(axh=None,fig=None,autozoom=True):
  """Tool for selecting a rectangle, functionality similar to ginput. Finish with right mouse button."""
  if not axh:
    axh = pl.gca()
  if not fig: fig = pl.gcf()
  class ROI:
    def __init__(self,fig,axh):
      self.fig = fig
      self.axh = axh
      self.lims = list(axh.axis())
      self.boxh = None
      self.finished = False

    def coo(self,eclick,erelease):

      self.lims = [min([eclick.xdata,erelease.xdata]),
       max([eclick.xdata,erelease.xdata]),
       min([eclick.ydata,erelease.ydata]),
       max([eclick.ydata,erelease.ydata])]

      if autozoom:
        ll = np.asarray(self.lims).reshape(2,2)
        ll[0] = np.mean(ll[0]) + np.array([-1,1])*np.diff(ll[0])*0.7
        ll[1] = np.mean(ll[1]) + np.array([-1,1])*np.diff(ll[1])*0.7
        self.axh.axis(ll.ravel())

      if self.boxh is not None: self.boxh.remove()
      ptch = pl.Rectangle([self.lims[0],self.lims[2]],self.lims[1]-self.lims[0],self.lims[3]-self.lims[2],facecolor='r',alpha=0.5,ec='k')
      self.boxh = self.axh.add_patch(ptch)
      fig.canvas.draw()

    def button_press_callback(self,event):
      if event.inaxes:
        if event.button == 3:
          self.finished = True
          if self.boxh is None:
            self.lims = list(axh.axis())

  roi = ROI(fig,axh)
  selector = pl.matplotlib.widgets.RectangleSelector(axh,roi.coo)
  fig.canvas.mpl_connect('button_press_event', roi.button_press_callback)
  print("Select rectangular region of interest, finish with right click.")
  while not roi.finished:
    pl.waitforbuttonpress()
  if roi.boxh is not None:
    roi.boxh.remove()
    axh.patches[-1].remove()
  del selector
  fig.canvas.draw()
  return roi.lims
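Usage sketch (assumes pl is pylab and getRectangleCoordinates above is in scope):

import numpy as np
import pylab as pl

pl.imshow(np.random.rand(50, 50))
xmin, xmax, ymin, ymax = getRectangleCoordinates()  # drag a box, right click to finish
print("ROI:", xmin, xmax, ymin, ymax)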
Example #17
def once():
    global depth, rgb
    if not FOR_REAL:
        dataset.advance()
        depth = dataset.depth
        rgb = dataset.rgb
    else:
        opennpy.sync_update()
        depth,_ = opennpy.sync_get_depth()
        rgb,_ = opennpy.sync_get_video()

    main.update_frame(depth, rgb)
    blockdraw.clear()
    try:
        c,_ = hashalign.find_best_alignment(grid.occ,0*grid.occ,
                                        target_model,~target_model)
    except ValueError:
        pass
    else:
        tm = hashalign.apply_correction(target_model, *c)
        tm = np.ascontiguousarray(tm)

        not_filled = tm & ~grid.occ
        correct = tm & grid.occ
        incorrect = ~tm & grid.occ

        try:
            next_layer = np.min(np.nonzero(not_filled)[1])
        except ValueError:
            blockdraw.show_grid('0', grid.occ, color=np.array([0.2,1,0.2,1]))
        else:
            blockdraw.show_grid('1', incorrect,
                                color=np.array([1,1,0.1,1]))
            nf = not_filled*0
            nf[:,next_layer,:] = 1
            nf = nf & not_filled
            blockdraw.show_grid('2', nf,
                                color=np.array([1,0.2,1.0,1]))
            blockdraw.show_grid('3', correct, color=np.array([0.1,0.3,0.1,1]))

    window.clearcolor=[0,0,0,0]
    window.flag_drawgrid = False

    if 'R_correct' in main.__dict__:
        window.modelmat = main.R_display

    #show_rgb(rgb)
    window.Refresh()
    pylab.waitforbuttonpress(0.005)
    sys.stdout.flush()
Example #18
File: main.py Project: mark-ross/two-opt
def main():

    f = "tsp-medium.tsp"
    j = File.read(f)  # to generate data, use the R_File.read access
    t = TwoOpt(j)  # returns an object ready to sort

    pylab.show()
    pylab.waitforbuttonpress()

    t.sort()  # one round of sorting

    # finally, once it's finished, just wait for button press
    pylab.waitforbuttonpress()
    pylab.close()
Example #19
def waitKey(fig):
	from pylab import waitforbuttonpress
	r = [None]
	fig.canvas.mpl_connect('key_press_event', lambda e: r.__setitem__(0, e.key))
	while not waitforbuttonpress():
		pass
	return r[0]
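Usage sketch: show a figure, then block until a key is pressed and report which one (assumes waitKey above):

import numpy as np
import pylab

fig = pylab.figure()
pylab.imshow(np.random.rand(10, 10))
pylab.show(block=False)
print("pressed:", waitKey(fig))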
Example #20
def get_terminus():
    import time
    import numpy as np
    import matplotlib.pyplot as plt
    from matplotlib.widgets import Cursor

    def tellme(s):
        print(s)
        plt.title(s, fontsize=16)
        plt.draw()

    plt.setp(plt.gca(),autoscale_on=False)

    cursor = Cursor(plt.axes(), useblit=True, color='white', linewidth=1 )

    happy = False
    while not happy:
        pts = []
        while len(pts) < 4:
            tellme('Select 4 corners of the terminus region')
            pts = np.asarray( plt.ginput(4, timeout=-1) )
            if len(pts) < 4:
                tellme('Too few points, starting over')
                time.sleep(1) # Wait a second

        ph = plt.fill(pts[:,0], pts[:,1], 'white', lw = 2, alpha=0.5)

        tellme('Done? Press any key if yes, mouse click to reset')

        happy = plt.waitforbuttonpress()

        # Get rid of fill
        if not happy:
            for p in ph:
                p.remove()

    return pts
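Usage sketch with a placeholder image (assumes get_terminus above):

import numpy as np
import matplotlib.pyplot as plt

plt.imshow(np.random.rand(80, 80), cmap='gray')
corners = get_terminus()  # click 4 corners, press a key to accept
print(corners)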
Example #21
def click_points(n, im):
    import sys  # needed by the close_event handler below
    fig = pylab.figure(1)
    pylab.imshow(im)

    def pick(event):
        points.append((event.xdata, event.ydata))
        print('Picked point %d of %d' % (len(points),n))

    fig.canvas.mpl_connect('close_event', lambda _: sys.exit(1))
    cid = fig.canvas.mpl_connect('button_press_event', pick)
    points = []
    print("Click %d points" % (n,))

    while len(points) < n:
        pylab.waitforbuttonpress()

    print "Ok!", points
Example #22
def once():
    global depth
    global rgb
    opennpy.sync_update()
    depth,_ = opennpy.sync_get_depth()
    rgb,_ = opennpy.sync_get_video()

    global x,y,z,xyz

    if PREPROCESS:
        global mask, rect
        mask, rect = preprocess.threshold_and_mask(depth, config.bg)

        (l,t),(r,b) = rect
        depth_ = np.ascontiguousarray(depth[t:b,l:r])
        v,u = np.mgrid[t:b,l:r].astype('f')
        x,y,z = calibkinect.convertOpenNI2Real(depth_,u,v)
        x=x[mask[t:b,l:r]]
        y=y[mask[t:b,l:r]]
        z=z[mask[t:b,l:r]]

        rgb_ = rgb[t:b,l:r,:][mask[t:b,l:r],:]
    else:
        x,y,z = calibkinect.convertOpenNI2Real(depth)
        #global X,Y,Z
        #X,Y,Z = calibkinect.convertReal2OpenNI(x,y,z)

        x,y,z = map(lambda _:_.flatten(), (x,y,z))
        rgb_ = rgb.reshape(-1,3)

    #x = x[depth>0]
    #y = y[depth>0]
    #z = z[depth>0]
    xyz_ = np.ascontiguousarray(np.dstack((x,y,z)).reshape(-1,3))
    #xyz_ = xyz - xyz.mean(0)
    #xyz_ /= np.std(xyz_)

    rgba = np.empty((xyz_.shape[0],4),'f')
    rgba[:,3] = 1
    rgba[:,:3] = rgb_.astype('f')/256.0

    window.lookat = np.array([0,0,0])
    window.update_points(xyz_,rgba)
    window.clearcolor = [0,0,0,0]
    window.Refresh()
    pylab.waitforbuttonpress(0.005)
Example #23
def run_calib():
    config.load('data/newest_calibration')
    opennpy.align_depth_to_rgb()

    samples = []

    for i in arange(0,2*np.pi,np.pi/8): 
        line = line_through_point(center, i)
        draw_line_XZ(line)
        pylab.waitforbuttonpress(0.1)
        for _ in range(60):
            opennpy.sync_update()
            rgb, _ = opennpy.sync_get_video()
            depth, _ = opennpy.sync_get_depth()
        samples.append((line, rgb, depth))

    return samples
Example #24
def once():
    if not FOR_REAL:
        dataset.advance()
        global depth
        depth = dataset.depth
    else:
        opennpy.sync_update()
        depth,_ = opennpy.sync_get_depth()

    def from_rect(m,rect):
        (l,t),(r,b) = rect
        return m[t:b,l:r]

    global mask, rect

    (mask,rect) = preprocess.threshold_and_mask(depth,config.bg)

    # Compute the surface normals
    normals.normals_opencl(depth, mask, rect)

    # Find the lattice orientation and then translation
    global R_oriented, R_aligned, R_correct
    R_oriented = lattice.orientation_opencl()
    R_aligned = lattice.translation_opencl(R_oriented)

    global modelmat
    if modelmat is None:
        modelmat = R_aligned.copy()
    else:
        modelmat,_ = grid.nearest(modelmat, R_aligned)

    global face, Xo, Yo, Zo
    _,_,_,face = np.rollaxis(opencl.get_modelxyz(),1)
    Xo,Yo,Zo,_ = np.rollaxis(opencl.get_xyz(),1)

    global cx,cy,cz
    cx,cy,cz,_ = np.rollaxis(np.frombuffer(np.array(face).data,
                                           dtype='i1').reshape(-1,4),1)
    R,G,B = [np.abs(_).astype('f') for _ in (cx, cy, cz)]

    window.update_xyz(Xo,Yo,Zo,COLOR=(R,G,B,R*0+1))

    window.clearcolor = [1,1,1,0]
    window.Refresh()
    pylab.waitforbuttonpress(0.005)
Example #25
File: toolsPlot.py Project: htlemke/ixppy
def getSpanCoordinates(direction='horizontal',axh=None,fig=None,data=None):
  """Tool for selecting a span, functionality similar to ginput. Finish with right mouse button."""
  if not axh:
    axh = pl.gca()
  if not fig: fig = pl.gcf()
  class ROI:
    def __init__(self,fig,axh,direction):
      self.fig = fig
      self.axh = axh
      self.lims = []
      self.boxh = []
      self.finished = False
      self.direction = direction

    def coo(self,tmin,tmax):
      self.lims = [tmin,tmax]
      if self.boxh: self.boxh.remove()
      if self.direction == 'horizontal':
        self.boxh = self.axh.axvspan(tmin,tmax,facecolor='r',alpha=0.5)
        delta = tmax-tmin
        axh.set_xlim([tmin-0.2*delta, tmax+0.2*delta])
        if data is not None:
          dat = data[1]

      if self.direction == 'vertical':
        self.boxh = self.axh.axhspan(tmin,tmax,facecolor='r',alpha=0.5)
        delta = tmax-tmin
        axh.set_ylim([tmin-0.2*delta, tmax+0.2*delta])
      fig.canvas.draw()
    
    def button_press_callback(self,event):
      if event.inaxes:
        if event.button == 3:
          self.finished = True
  roi = ROI(fig,axh,direction)
  selector = pl.matplotlib.widgets.SpanSelector(axh,roi.coo,direction)
  fig.canvas.mpl_connect('button_press_event', roi.button_press_callback)
  print("Select Span region of interest, finish with right click.")
  while not roi.finished: pl.waitforbuttonpress()
  print("Span %s selected."%(roi.lims))
  roi.boxh.remove()
  fig.canvas.draw()
  del selector
  return roi.lims
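Usage sketch (assumes pl is pylab and getSpanCoordinates above is in scope):

import numpy as np
import pylab as pl

pl.plot(np.random.rand(100))
tmin, tmax = getSpanCoordinates('horizontal')  # drag a span, right click to finish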
Example #26
File: demo_icp.py Project: amiller/rtmodel
def once():
    global range_image, points_range, RTold, RTguess
    ts, M = next(seqiter)

    # Take the image from an alternate camera location
    obj.RT = np.dot(RTold, M)
    range_image = obj.range_render(camera.kinect_camera())

    points_range = range_image.point_model()
    if RTguess is None:
        range_image.camera.RT = np.eye(4, dtype='f')
    else:
        range_image.camera.RT = RTguess

    p = model.add(range_image)
    if p:
        points_range = p
        RTguess = p.RT

    window.Refresh()
    pylab.waitforbuttonpress(0.02)    
Example #27
File: toolsPlot.py Project: htlemke/ixppy
def getBins(lims,direction='horizontal',axh=None,fig=None):
  # TODO
  """Tool for selecting a span, functionality similar to ginput. Finish with right mouse button."""
  if not axh:
    axh = pl.gca()
  if not fig: fig = pl.gcf()
  class BINNING:
    def __init__(self,fig,axh,direction):
      self.fig = fig
      self.axh = axh
      self.lims = []
      self.boxh = []
      self.finished = False
      self.direction = direction

    def coo(self,tmin,tmax):
      self.lims = [tmin,tmax]
      if self.boxh:
        self.boxh.remove()
      if self.direction == 'horizontal':
        self.boxh = pl.axvspan(tmin,tmax,facecolor='r',alpha=0.5)
      if self.direction == 'vertical':
        self.boxh = pl.axhspan(tmin,tmax,facecolor='r',alpha=0.5)
      fig.canvas.draw()
    
    def button_press_callback(self,event):
      if event.inaxes:
        if event.button == 3:
          self.finished = True
  roi = BINNING(fig,axh,direction)  # use the BINNING class defined above
  selector = pl.matplotlib.widgets.SpanSelector(axh,roi.coo,direction)
  fig.canvas.mpl_connect('button_press_event', roi.button_press_callback)
  print("Select Span region of interest, finish with right click.")
  while not roi.finished:
    pl.waitforbuttonpress()
  
  roi.boxh.remove()
  fig.canvas.draw()
  del selector
  return roi.lims
Example #28
File: sim.py Project: a-rahimi/sqp-control
def test_draw_car():
  P.ion()
  P.clf()
  P.axis('scaled')
  P.axis([-1,1,-1,1])

  X = [ (-.1,-.5, pi/3, 0, -pi/6),
        (-.1,-.5, pi/3, 0, 0.),
        (-.1,-.5, pi/3, 0, pi/6),
        (-.1,-.5, 0, 0, -pi/6),
        (-.1,-.5, 0, 0, 0),
        (-.1,-.5, 0, 0, pi/6),
        ]
  hs = []

  for x in X:
    for h in hs:
      h.remove()
    hs = draw_car(P.gca(), x, color='b')
    P.draw()
    #time.sleep(1.)
    P.waitforbuttonpress()
Example #29
def once():
    global depth, rgb
    if not FOR_REAL:
        dataset.advance()
        depth = dataset.depth
        rgb = dataset.rgb
    else:
        opennpy.sync_update()
        depth,_ = opennpy.sync_get_depth()
        rgb,_ = opennpy.sync_get_video()

    main.update_frame(depth, rgb)

    blockdraw.clear()
    blockdraw.show_grid('o1', main.occvac.occ, color=np.array([1,1,0,1]))
    if 'RGB' in stencil.__dict__:
        blockdraw.show_grid('occ', grid.occ, color=grid.color)
    else:
        blockdraw.show_grid('occ', grid.occ, color=np.array([1,0.6,0.6,1]))

    #blockdraw.show_grid('vac', grid.vac,
    #                    color=np.array([0.6,1,0.6,0]))
    if 0 and lattice.is_valid_estimate():
        window.clearcolor=[0.9,1,0.9,0]
    else:
        window.clearcolor=[0,0,0,0]
        #window.clearcolor=[1,1,1,0]
        window.flag_drawgrid = True

    if 1:
        update_display()

    if 'R_correct' in main.__dict__:
        window.modelmat = main.R_display

    show_rgb(rgb)
    window.Refresh()
    pylab.waitforbuttonpress(0.005)
    sys.stdout.flush()
Example #30
def once():
    global depth
    if not FOR_REAL:
        dataset.advance()
        depth = dataset.depth
    else:
        opennpy.sync_update()
        depth,_ = opennpy.sync_get_depth()

    def from_rect(m,rect):
        (l,t),(r,b) = rect
        return m[t:b,l:r]

    global mask, rect, modelmat
    (mask,rect) = preprocess.threshold_and_mask(depth,config.bg)

    global n,w
    if 0:
        n,w = normals.normals_numpy(depth)
        show_normals(n, w, 'normals_numpy')

    if 0:
        n,w = normals.normals_c(depth)
        show_normals(n, w, 'normals_c')

    if 1:
        normals.opencl.set_rect(rect)
        dt = timeit.timeit(lambda:
                           normals.normals_opencl(depth, mask, rect).wait(),
                           number=1)

        #print dt
        nw = normals.opencl.get_normals()
        n,w = nw[:,:,:3], nw[:,:,3]
        #show_normals(n, w, 'normals_opencl')
        show_normals_sphere(n, w)

    pylab.waitforbuttonpress(0.01)
Example #31
def close(self):
    pl.waitforbuttonpress()
Example #32
File: xpeek.py Project: omereis/ipeek_omer
def debug_eta(instrument):
    from numpy import log10
    import pylab

    class XPeekEta(XPeek):
        def enddata(self, lineid):
            #print ">>>> end",lineid, len(self.data[lineid].columns['TIMESTAMP'])
            #self.plot(lineid)
            pass

        def newdata(self, lineid):
            self.etas = []
            self.models = []
            #print ">>>> new",lineid, len(self.data[lineid].columns['TIMESTAMP'])
        def newpoint(self, lineid):
            line = self.data[lineid]
            eta = line.columns['TIMESTAMP'][-1] + line.time_remaining()
            self.etas.append(eta)
            self.models.append(line.time_per_point())
            #print ">>>>  add",lineid, len(line.columns['TIMESTAMP']),len(self.etas)
            #if len(self.etas)%k == k-1: self.plot(lineid)
        def plot(self, lineid):
            line = self.data[lineid]
            t = numpy.array(line.columns['TIMESTAMP'])
            eta = (numpy.array(self.etas) - t[0]) / 60.
            t = (t - t[0])
            dt = (t[1:] - t[:-1]) / 60.
            pt = numpy.arange(len(dt))
            all_pt = numpy.arange(line.points)

            pylab.clf()
            pylab.subplot(211)
            if eta[-1] > 60 * 24 * 5:
                eta, eta_units = eta / (60 * 24), "days"
            elif eta[-1] > 60 * 5:
                eta, eta_units = eta / 60, "hrs"
            else:
                eta_units = "min"
            pylab.plot(t[1:] / 3600, eta[1:], '-x')
            pylab.xlabel('measurement time (hours)')
            pylab.ylabel('estimated run time (%s)' % eta_units)
            pylab.grid(True)
            lo, hi = min(eta[1:]), max(eta[1:])
            pylab.ylim(lo - (hi - lo) * 0.5, hi + (hi - lo) * 0.5)
            pylab.subplot(212)
            #pylab.axis([pt[0],pt[-1],dt[0],dt[-1]])
            #pylab.plot(pt,log10(dt),'xg',pt[1:],log10(self.models[-1][1:len(pt)]),'-g')
            pylab.plot(pt[1:], dt[1:], 'xg', pt[1:],
                       self.models[-1][1:len(pt)] / 60., '-g')
            #for k in range(65,75):
            #    pylab.plot(pt[1:],self.models[k][1:len(pt)]/60.,'-b',alpha=0.7,hold=True)
            #pylab.xscale('symlog')
            #pylab.yscale('symlog')
            pylab.ylabel('dt (min)')
            pylab.xlabel('Point number')
            lo, hi = min(dt), max(dt)
            pylab.ylim(lo - (hi - lo) * 0.5, hi + (hi - lo) * 0.5)
            #pylab.axis('auto')
            #pylab.subplot(313)
            #pylab.pcolormesh(numpy.array(self.models))

    xpeek = XPeekEta(instrument)
    thread.start_new_thread(xpeek.process_stream, ())
    time.sleep(5)
    while True:
        lineids = list(xpeek.data.keys())  # list() so it can be indexed
        xpeek.lock.acquire()
        xpeek.plot(lineids[0])
        xpeek.lock.release()
        pylab.waitforbuttonpress()
Example #33
    visualiseInfo['fig_nn'] = fig_nn
else:
    visualiseInfo = None

# Read and test images from iCub eyes in real-time

#fig_input = pb.figure()
#subplt_input = fig_input.add_subplot(111)

while (True):
    try:
        testFace = mySAMpy.readImageFromCamera()
        print "Face data shape 0 " + str(
            testFace.shape[0]) + " Face data shape 1 " + str(testFace.shape[1])
        #subplt_input.imshow(testFace, cmap=plt.cm.Greys_r)
        mySAMpy.testing(testFace, visualiseInfo)
        #pp = mySAMpy.testing(testFace, visualiseInfo)
        #time.sleep(0.5)
        #l = pp.pop(0)
        #l.remove()
        #pb.draw()
        #del l
        pb.waitforbuttonpress(0.1)

    except KeyboardInterrupt:
        print('Interrupted')
        try:
            sys.exit(0)
        except SystemExit:
            os._exit(0)
Example #34
def crossCorrelation(SearchImg,
                     PatchImg,
                     xyLowerLeft,
                     illustrate=False,
                     subpixel=False):
    # perform template matching with normalized cross correlation (NCC)
    res = cv2.matchTemplate(SearchImg, PatchImg, cv2.TM_CCORR_NORMED)
    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)  # min_loc would be used with TM_SQDIFF
    match_position_x = max_loc[0] + PatchImg.shape[1] / 2
    match_position_y = max_loc[1] + PatchImg.shape[0] / 2
    del min_val, min_loc

    if subpixel:
        #         zoom_factor = 10.0
        #         SearchImg_new, xyLowerLeft_upscale = getTemplate(SearchImg, [match_position_x, match_position_y], PatchImg.shape[0]+2, PatchImg.shape[1]+2)
        #         SearchImg_upscale = ndimage.zoom(SearchImg_new, zoom_factor)
        #         PatchImg_upscale = ndimage.zoom(PatchImg, zoom_factor)
        #         res_upscale = cv2.matchTemplate(SearchImg_upscale, PatchImg_upscale, cv2.TM_CCORR_NORMED)
        #         min_val, max_val, min_loc, max_loc_upscale = cv2.minMaxLoc(res_upscale) #min_loc for TM_SQDIFF
        #         match_position_x_upscale = np.float((max_loc_upscale[0] + PatchImg_upscale.shape[1]/2)) / zoom_factor
        #         match_position_y_upscale = np.float((max_loc_upscale[1] + PatchImg_upscale.shape[0]/2)) / zoom_factor
        #
        #         match_position_x = match_position_x_upscale + xyLowerLeft_upscale[0]
        #         match_position_y = match_position_y_upscale + xyLowerLeft_upscale[1]
        #
        #         if illustrate:
        #             plt.subplot(131),plt.imshow(res_upscale,cmap = 'gray')
        #             plt.title('Matching Result'), plt.xticks([]), plt.yticks([])
        #             plt.plot(match_position_x_upscale*zoom_factor-PatchImg_upscale.shape[1]/2,
        #                      match_position_y_upscale*zoom_factor-PatchImg_upscale.shape[0]/2, "r.", markersize=10)
        #             plt.subplot(132),plt.imshow(SearchImg_upscale,cmap = 'gray')
        #             plt.title('Detected Point'), plt.xticks([]), plt.yticks([])
        #             plt.plot(match_position_x_upscale*zoom_factor-3, match_position_y_upscale*zoom_factor+3, "r.", markersize=10)
        #             plt.subplot(133),plt.imshow(PatchImg_upscale,cmap = 'gray')
        #             plt.title('Template'), plt.xticks([]), plt.yticks([])
        #             plt.show()
        #             plt.waitforbuttonpress()
        #             plt.cla()

        #perform subpixel matching with template and search area in frequency domain
        SearchImg_32, _ = getTemplate(SearchImg,
                                      [match_position_x, match_position_y],
                                      PatchImg.shape[0], PatchImg.shape[1])
        SearchImg_32 = np.float32(SearchImg_32)
        PatchImg_32 = np.float32(PatchImg)
        # subpixel refinement via Fourier-domain phase correlation
        shiftSubpixel, _ = cv2.phaseCorrelate(SearchImg_32, PatchImg_32)
        match_position_x = match_position_x - shiftSubpixel[0]
        match_position_y = match_position_y - shiftSubpixel[1]

    if illustrate:
        plt.subplot(131), plt.imshow(res, cmap='gray')
        plt.title('Matching Result'), plt.xticks([]), plt.yticks([])
        plt.plot(match_position_x - PatchImg.shape[1] / 2,
                 match_position_y - PatchImg.shape[0] / 2,
                 "r.",
                 markersize=10)
        plt.subplot(132), plt.imshow(SearchImg, cmap='gray')
        plt.title('Detected Point'), plt.xticks([]), plt.yticks([])
        plt.plot(match_position_x, match_position_y, "r.", markersize=10)
        plt.subplot(133), plt.imshow(PatchImg, cmap='gray')
        plt.title('Template'), plt.xticks([]), plt.yticks([])
        plt.show()
        plt.waitforbuttonpress()
        plt.cla()
        plt.close('all')
        print('correlation value: ' + str(max_val))

    del res

    if max_val > 0.9:  #998:
        #keep only NCC results with high correlation values
        xyMatched = np.asarray([match_position_x + xyLowerLeft[0],
                                match_position_y + xyLowerLeft[1]], dtype=np.float32)
        return xyMatched

    else:
        print('NCC matching not successful')
        return [-999, -999]
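With illustrate=False and subpixel=False the function needs only OpenCV and NumPy, so it can be smoke-tested on synthetic data (a hedged sketch; crossCorrelation as defined above is assumed in scope):

import numpy as np
import cv2

search = (np.random.rand(200, 200) * 255).astype(np.uint8)
patch = search[80:120, 60:100].copy()  # cut a known 40x40 patch
xy = crossCorrelation(search, patch, xyLowerLeft=np.array([0, 0]))
print(xy)  # approximately [80., 100.] -- the (x, y) centre of the patch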
Example #35
def go():
    opennpy.align_depth_to_rgb()
    while 1:
        preview()
        pylab.waitforbuttonpress(0.005)
Example #36
def view_patches(Yr, A, C, b, f, d1, d2, YrA=None, secs=1):
    """view spatial and temporal components (secs=0 interactive)

     Args:
         Yr:        np.ndarray
                movie in format pixels (d) x frames (T)

         A:     sparse matrix
                    matrix of spatial components (d x K)

         C:     np.ndarray
                    matrix of temporal components (K x T)

         b:     np.ndarray
                    spatial background (vector of length d)

         f:     np.ndarray
                    temporal background (vector of length T)

         d1,d2: np.ndarray
                    frame dimensions

         YrA:   np.ndarray
                     ROI filtered residual as it is given from update_temporal_components
                     If not given, then it is computed (K x T)

         secs:  float
                    number of seconds in between component scrolling. secs=0 means interactive (click to scroll)

         imgs:  np.ndarray
                    background image for contour plotting. Default is the image of all spatial components (d1 x d2)

    """

    pl.ion()
    nr, T = C.shape
    nb = f.shape[0]
    A2 = A.copy()
    A2.data **= 2
    nA2 = np.sqrt(np.array(A2.sum(axis=0))).squeeze()
    if YrA is None:
        Y_r = np.array(A.T * np.matrix(Yr) -
                       (A.T * np.matrix(b[:, np.newaxis])) *
                       np.matrix(f[np.newaxis]) - (A.T.dot(A)) * np.matrix(C) +
                       C)
    else:
        Y_r = YrA + C

    A = A.todense()
    bkgrnd = np.reshape(b, (d1, d2) + (nb, ), order='F')
    fig = pl.figure()
    thismanager = pl.get_current_fig_manager()
    thismanager.toolbar.pan()
    print('In order to scroll components you need to click on the plot')
    sys.stdout.flush()
    for i in range(nr + 1):
        if i < nr:
            ax1 = fig.add_subplot(2, 1, 1)
            pl.imshow(np.reshape(old_div(np.array(A[:, i]), nA2[i]), (d1, d2),
                                 order='F'),
                      interpolation='None')
            ax1.set_title('Spatial component ' + str(i + 1))
            ax2 = fig.add_subplot(2, 1, 2)
            pl.plot(np.arange(T),
                    np.squeeze(np.array(Y_r[i, :])),
                    'c',
                    linewidth=3)
            pl.plot(np.arange(T),
                    np.squeeze(np.array(C[i, :])),
                    'r',
                    linewidth=2)
            ax2.set_title('Temporal component ' + str(i + 1))
            ax2.legend(labels=['Filtered raw data', 'Inferred trace'])

            if secs > 0:
                pl.pause(secs)
            else:
                pl.waitforbuttonpress()

            fig.delaxes(ax2)
        else:
            ax1 = fig.add_subplot(2, 1, 1)
            pl.imshow(bkgrnd[:, :, i - nr], interpolation='None')
            ax1.set_title('Spatial background ' + str(i - nr + 1))
            ax2 = fig.add_subplot(2, 1, 2)
            pl.plot(np.arange(T), np.squeeze(np.array(f[i - nr, :])))
            ax2.set_title('Temporal background ' + str(i - nr + 1))
Example #37
def performFeatureTracking(template_size,
                           search_area,
                           initCooTemplate,
                           templateImage,
                           searchImage,
                           shiftSearchArea,
                           performLSM=True,
                           lsm_buffer=3,
                           thresh=0.001,
                           subpixel=False,
                           plot_result=False):
    #template_size: np.array([template_width, template_height])
    #search_area: np.array([search_area_x_CC, search_area_y_CC])
    #initCooTemplate: np.array([x,y])
    #shiftSearchArea: np.array([shiftFromCenter_x, shiftFromCenter_y])
    template_width = template_size[0]
    template_height = template_size[1]
    search_area_x = search_area[0]
    search_area_y = search_area[1]
    shiftSearchArea_x = shiftSearchArea[0]
    shiftSearchArea_y = shiftSearchArea[1]

    #check if template sizes even and correct correspondingly
    if int(template_width) % 2 == 0:
        template_width = template_width + 1
    if int(template_height) % 2 == 0:
        template_height = template_height + 1
    if int(search_area_x) % 2 == 0:
        search_area_x = search_area_x + 1
    if int(search_area_y) % 2 == 0:
        search_area_y = search_area_y + 1

    #get patch clip
    if plot_result:
        plt.imshow(templateImage)
        plt.plot(initCooTemplate[0], initCooTemplate[1], "r.", markersize=10)
        plt.waitforbuttonpress()
        plt.cla()
        plt.close('all')

    try:
        patch, _ = getTemplate(templateImage, initCooTemplate, template_width,
                               template_height, True)
    except Exception as e:
        #        _, _, exc_tb = sys.exc_info()
        #        print(e, 'line ' + str(exc_tb.tb_lineno))
        print('template patch reaches border')
        return 1 / 0  # deliberately raises ZeroDivisionError to abort the caller

    #shift search area to corresponding position considering movement direction
    templateCoo_init_shift = np.array([
        initCooTemplate[0] + shiftSearchArea_x,
        initCooTemplate[1] + shiftSearchArea_y
    ])

    #get lsm search clip
    try:
        search_area, lowerLeftCoo_lsm_search = getTemplate(
            searchImage, templateCoo_init_shift, search_area_x, search_area_y,
            True)

    except Exception as e:
        #        _, _, exc_tb = sys.exc_info()
        #        print(e, 'line ' + str(exc_tb.tb_lineno))
        print('search patch reaches border')
        return 1 / 0

    if plot_result:
        plt.ion()

    CC_xy = crossCorrelation(search_area, patch, lowerLeftCoo_lsm_search,
                             plot_result, subpixel)
    if CC_xy[0] == -999:
        return 1 / 0

    if plot_result:
        plt.close('all')
        print(CC_xy)

    TrackedFeature = CC_xy

    if performLSM:
        #perform least square matching (subpixel accuracy possible)
        try:
            lsm_search, lowerLeftCoo_lsm_search = getTemplate(
                searchImage, CC_xy, search_area_x, search_area_y, True)
        except Exception as e:
            #            _, _, exc_tb = sys.exc_info()
            #            print(e, 'line ' + str(exc_tb.tb_lineno))
            print('lsm patch reaches border')
            return 1 / 0

        if plot_result:
            plt.imshow(lsm_search)
            plt.waitforbuttonpress()
            plt.close('all')

        pointAdjusted_ = pointAdjusted()

        try:
            result_lsm = lsm_matching(patch, lsm_search, pointAdjusted_,
                                      lsm_buffer, thresh)
            print('sigma LSM tracking: ' + str(result_lsm.s0))

            if plot_result:
                plt.imshow(searchImage, cmap='gray')
                plt.plot(result_lsm.y + lowerLeftCoo_lsm_search[0],
                         result_lsm.x + lowerLeftCoo_lsm_search[1],
                         "b.",
                         markersize=10)
                plt.waitforbuttonpress()
                plt.close('all')

            TrackedFeature = np.asarray([result_lsm.x, result_lsm.y])

        except Exception as e:
            #            _, _, exc_tb = sys.exc_info()
            #            print(e, 'line ' + str(exc_tb.tb_lineno))
            print('lsm failed')

    return TrackedFeature
Example #38
    def play(self,
             gain=1,
             fr=None,
             magnification=1,
             offset=0,
             interpolation=cv2.INTER_LINEAR,
             backend='pylab',
             do_loop=False):
        """
         Play the movie using opencv

         Parameters
         ----------
         gain: adjust  movie brightness
         frate : playing speed if different from original (inter frame interval in seconds)
         backend: 'pylab' or 'opencv', the latter much faster
         """
        if backend == 'pylab':
            print('*** WARNING *** SPEED MIGHT BE LOW. USE opencv backend if available')

        gain *= 1.
        maxmov = np.max(self)

        if backend == 'pylab':
            plt.ion()
            fig = plt.figure(1)
            ax = fig.add_subplot(111)
            ax.set_title("Play Movie")
            im = ax.imshow((offset + self[0]) * gain / maxmov,
                           cmap=plt.cm.gray,
                           vmin=0,
                           vmax=1,
                           interpolation='none')  # Blank starting image
            fig.show()
            im.axes.figure.canvas.draw()
            plt.pause(1)

        if backend == 'notebook':
            # First set up the figure, the axis, and the plot element we want to animate
            fig = plt.figure()
            im = plt.imshow(self[0], interpolation='None', cmap=plt.cm.gray)
            plt.axis('off')

            def animate(i):
                im.set_data(self[i])
                return im,

            # call the animator.  blit=True means only re-draw the parts that have changed.
            anim = animation.FuncAnimation(fig,
                                           animate,
                                           frames=self.shape[0],
                                           interval=1,
                                           blit=True)

            # call our new function to display the animation
            return display_animation(anim, fps=fr)

        if fr is None:
            fr = self.fr

        looping = True

        terminated = False

        while looping:

            for iddxx, frame in enumerate(self):
                if backend == 'opencv':
                    if magnification != 1:
                        frame = cv2.resize(frame,
                                           None,
                                           fx=magnification,
                                           fy=magnification,
                                           interpolation=interpolation)

                    cv2.imshow('frame', (offset + frame) * gain / maxmov)
                    if cv2.waitKey(int(1. / fr * 1000)) & 0xFF == ord('q'):
                        cv2.destroyAllWindows()
                        looping = False
                        terminated = True
                        break

                elif backend == 'pylab':

                    im.set_data((offset + frame) * gain / maxmov)
                    ax.set_title(str(iddxx))
                    plt.axis('off')
                    fig.canvas.draw()
                    plt.pause(1. / fr * .5)
                    ev = plt.waitforbuttonpress(1. / fr * .5)
                    if ev is not None:
                        plt.close()
                        break

                elif backend == 'notebook':

                    print('Animated via MP4')
                    break

                else:

                    raise Exception('Unknown backend!')

            if terminated:
                break

            if not do_loop:
                looping = False  # stop after one pass unless looping was requested

        if backend == 'opencv':
            cv2.destroyAllWindows()
Example #39
def calc(self, data):
    self.times += 1
    if self.display:
        #print "len: " + str(len(data))
        plt.figure(self.fig_idx)
        if self.fft:
            if self.len_data != len(data):  #1024
                self.len_data = len(data)  #1024
                #print self.len_data
                x = np.arange(0, self.len_data, 1)
                freq = np.fft.rfftfreq(x.shape[-1])
                freq = freq * self.freq
                plt.clf()
                self.line, = plt.plot(freq, freq)
                #self.position_calc()
                plt.ylim(-140.0, 1.0)  # set the range for our plot
        else:
            L = 1024
            if self.len_data != L // 2:
                self.len_data = L // 2
                x = np.arange(0, self.len_data, 1)
                plt.clf()
                self.line, = plt.plot(x, x)
                #self.position_calc()
                #plt.ylim(-140, 1.0)
                plt.ylim(0, 1)
        if self.fft:
            sp0 = fft(data)
            #sp0 = fft(data[0:1024])
        else:
            sp0 = modulo(data[0:L])  # 20*np.log10(modulo(data[0:L]))
            for n in range(len(sp0)):
                if sp0[n] < -140:
                    sp0[n] = -150
            #print sp0
        print(self.fig_idx, " signal level: ", max(sp0[10:-10]), "dBm")
        if self.average:
            self.sp0_acc = self.sp0_acc + sp0
            self.line.set_ydata(self.sp0_acc / float(self.times))  ##average
        else:
            self.line.set_ydata(sp0)  ##real-time
        if self.average:
            title_str = "Input: " + str(self.fig_idx) + " - Acq: " + str(self.times) + " - Averaged"
        else:
            title_str = "Input: " + str(self.fig_idx) + " - Acq: " + str(self.times)
        plt.title(title_str)
        plt.draw()
        self.active = 1
    if self.save:
        #if self.display:
        #    fig_filename = self.output_folder + "fft_input_" + str(self.fig_idx) + ".png"
        #    plt.savefig(fig_filename)
        raw_filename = self.output_folder + "input_" + str(self.fig_idx) + "_" + str(self.times - 1) + ".bin"
        raw_file = open(raw_filename, "wb")
        raw_file.write(data)
        raw_file.close()
    xx = pylab.waitforbuttonpress(timeout=0.001)
Example #40
  y = x
  return x,y


class Plot1D:
  def __init__(self, x):
    self.x = x
    self.upd, = pl.plot(x, np.random.rand(x.shape[0]), 'c-+')

  def __call__(self, model):
    y = model.predict(self.x)
    self.upd.set_data(self.x, y)
    pl.gcf().canvas.draw()

if __name__ == '__main__':
  model = Net([Dense(1,20), Relu(),
              Dense(20,20), Sigmoid(),
              Dense(20,1)])
  model.build('regression')

  x, y = spline(10, 20)
  x.shape = (-1,1)
  y.shape = (-1,1)
  data_iter = DataIter(x, y)

  pl.ion()
  pl.scatter(x, y)
  model.fit(data_iter, 500, 1, every=1, lr=0.01, momentum=0, dstep=500000, shuffle=False,
      callbacks=[Plot1D(np.linspace(x.min(), x.max(), 50).reshape(-1,1))])
  pl.waitforbuttonpress()
Example #41
                print('Pair (', x, ', ', y, '): max Y_Error: ', maxe, ', avg Y_Error: ', avge)
                print('OUTPUT Deterministicness: ', projCorr[pi[0], pi[1]],
                      'Y_Error: ', avge, 'Correct: ', (xyuz in true_Ys) * 1)
                #                if (avge > 1.0) and (abs(projCorr[pi[0], pi[1]]) < 0.5):
                #                if (avge > 1.0):
                if 1:
                    PL.figure(2)
                    projBpi = permuteIndexes(projB, pi)
                    projSpi = permuteIndexes(projS, pi)
                    obspi = NP.array(obs[0, pi], ndmin=2)
                    visualizeADMG(projBpi, projSpi, obspi,
                                  abs(projBpi).max(),
                                  abs(projSpi).max(), 1)
                    PL.waitforbuttonpress()

    if 0:
        for x in range(Ngenes):
            for y in range(Ngenes):
                if xy_pos[x, y] > 30:
                    slope, intercept, r_value, p_value, std_err = SP.stats.linregress(
                        obs_data[:, x], obs_data[:, y])
                    if verbose:
                        print('Pair (', x, ', ', y, '): xy_pos=', xy_pos[x, y],
                              ', Error: ', abs(int_data[iperm[x], y] -
                                               (int_data[iperm[x], x] * slope + intercept))**2)
                    PL.figure()
                    PL.plot(obs_data[:, x], obs_data[:, y], 'b.',
                            int_data[iperm[x], x], int_data[iperm[x], y], 'rx',
                            NP.arange(2) * 4 - 2,
Example #42
def plot_tracking(frame, pos, target_sz, im, ground_truth):

    global \
        tracking_figure, tracking_figure_title, tracking_figure_axes, \
        tracking_rectangle, gt_point, \
        z_figure_axes, response_figure_axes

    timeout = 1e-6
    # timeout = 0.05  # uncomment to run slower
    if frame == 0:
        # pylab.ion()  # interactive mode on
        tracking_figure = pylab.figure()
        gs = pylab.GridSpec(1, 3, width_ratios=[3, 1, 1])

        tracking_figure_axes = tracking_figure.add_subplot(gs[0])
        tracking_figure_axes.set_title("Tracked object (and ground truth)")

        z_figure_axes = tracking_figure.add_subplot(gs[1])
        z_figure_axes.set_title("Template")

        response_figure_axes = tracking_figure.add_subplot(gs[2])
        response_figure_axes.set_title("Response")

        tracking_rectangle = pylab.Rectangle((0, 0), 0, 0)
        tracking_rectangle.set_color((1, 0, 0, 0.5))
        tracking_figure_axes.add_patch(tracking_rectangle)

        gt_point = pylab.Circle((0, 0), radius=5)
        gt_point.set_color((0, 0, 1, 0.5))
        tracking_figure_axes.add_patch(gt_point)

        tracking_figure_title = tracking_figure.suptitle("")

        pylab.show(block=False)

    elif tracking_figure is None:
        return  # we simply go faster by skipping the drawing
    elif not pylab.fignum_exists(tracking_figure.number):
        # print("Drawing window closed, end of game. "
        #      "Have a nice day !")
        # sys.exit()
        print("From now on drawing will be omitted, "
              "so that computation goes faster")
        tracking_figure = None
        return

    global z, response
    tracking_figure_axes.imshow(im, cmap=pylab.cm.gray)

    rect_y, rect_x = tuple(pos - target_sz / 2.0)
    rect_height, rect_width = target_sz
    tracking_rectangle.set_xy((rect_x, rect_y))
    tracking_rectangle.set_width(rect_width)
    tracking_rectangle.set_height(rect_height)

    if len(ground_truth) > 0:
        gt = ground_truth[frame]
        gt_y, gt_x = gt
        gt_point.center = (gt_x, gt_y)

    if z is not None:
        z_figure_axes.imshow(z, cmap=pylab.cm.hot)

    if response is not None:
        response_figure_axes.imshow(response, cmap=pylab.cm.hot)

    tracking_figure_title.set_text("Frame %i (out of %i)" %
                                   (frame + 1, len(ground_truth)))

    if debug and False and (frame % 1) == 0:
        print("Tracked pos ==", pos)

    # tracking_figure.canvas.draw()  # update
    pylab.draw()
    pylab.waitforbuttonpress(timeout=timeout)

    return
Example #43
def once():
    global depth
    if not FOR_REAL:
        dataset.advance()
        depth = dataset.depth
    else:
        opennpy.sync_update()
        depth, _ = opennpy.sync_get_depth()

    def from_rect(m, rect):
        (l, t), (r, b) = rect
        return m[t:b, l:r]

    global mask, rect
    try:
        (mask, rect) = preprocess.threshold_and_mask(depth, config.bg)
    except IndexError:
        return

    normals.opencl.set_rect(rect)
    normals.normals_opencl(depth, mask, rect).wait()

    # Find the lattice orientation and then translation
    global R_oriented, R_aligned, R_correct
    R_oriented = lattice.orientation_opencl()

    if 'R_correct' in globals():
        # Correct the lattice ambiguity for 90 degree rotations just by
        # using the previous estimate. This is good enough for illustrations
        # but global alignment is preferred (see hashalign)
        R_oriented, _ = grid.nearest(R_correct, R_oriented)

    R_aligned = lattice.translation_opencl(R_oriented)
    R_correct = R_aligned

    # Find the color based on the labeling from lattice
    global face, Xo, Yo, Zo
    _, _, _, face = np.rollaxis(opencl.get_modelxyz(), 1)

    global cx, cy, cz
    cx, cy, cz, _ = np.rollaxis(
        np.frombuffer(np.array(face).data, dtype='i1').reshape(-1, 4), 1) - 1
    global R, G, B
    R, G, B = [np.abs(_).astype('f') for _ in (cx, cy, cz)]
    if 0:
        G *= 0
        R *= 0
        B *= 0
    else:
        pass

    # Draw the points collected on a sphere
    nw = normals.opencl.get_normals()
    global n, w
    n, w = nw[:, :, :3], nw[:, :, 3]

    if 1:  # Point cloud position mode
        X, Y, Z, _ = np.rollaxis(opencl.get_xyz(), 1)
    else:
        X, Y, Z = n[:, :, 0], n[:, :, 1], n[:, :, 2]
        X, Y, Z = map(lambda _: _.copy(), (X, Y, Z))

    # Render the points in 'table space' but colored with the axes from flatrot
    window.update_xyz(X, Y, Z, COLOR=(R, G * 0, B, R * 0 + w.flatten()))
    window.clearcolor = [1, 1, 1, 0]
    window.Refresh()
    #pylab.imshow(1./depth)
    pylab.waitforbuttonpress(0.01)
Example #44
    pl.ylabel('A.U.')
    pl.subplot(1, 2, 2)
    pl.cla()
    on = A_on_thr[:, idx_tp_comp[poor]].copy()
    off = A_off_thr[:, idx_tp_gt[poor]].copy()
    on = on / np.max(on)
    off = off / np.max(off)
    pl.imshow(Cn, cmap='gray', vmax=np.percentile(Cn, 90))
    pl.imshow(on.reshape(dims_on, order='F'), cmap='Blues', alpha=.3, vmax=1)
    pl.imshow(off.reshape(dims_on, order='F'), cmap='hot', alpha=.3, vmax=3)
    print(idx_tp_gt[poor])
    pl.rcParams['pdf.fonttype'] = 42
    font = {'family': 'Myriad Pro', 'weight': 'regular', 'size': 20}
    pl.rc('font', **font)
    pl.pause(.1)
    pl.waitforbuttonpress(-1)
#%%
neur_to_print = [250, 310, 174]
poor_perf = np.where(np.logical_and(corrs > 0.75, corrs <= 1))[0]
for poor in poor_perf[:]:

    if idx_tp_gt[poor] not in neur_to_print:
        continue

    pl.figure()
    pl.subplot(2, 1, 1)
    on = A_on_thr[:, idx_tp_comp[poor]].copy()
    off = A_off_thr[:, idx_tp_gt[poor]].copy()
    pl.imshow(Cn, cmap='gray', vmax=np.percentile(Cn, 90))
    pl.imshow(on.reshape(dims_on, order='F'), cmap='Blues', alpha=.3, vmax=1)
    pl.imshow(off.reshape(dims_on, order='F'), cmap='hot', alpha=.3, vmax=3)
Example #45
    INPUT = sys.argv[1]

    env = lmdb.open(INPUT)
    txn = env.begin()
    cursor = txn.cursor()

    count = 0
    for i in cursor:
        count += 1

    print count, " images"

    cursor = txn.cursor()

    for i in cursor:
        datum = caffe_pb2.Datum()
        datum.ParseFromString(cursor.value())
        try:
            img = np.array(bytearray(datum.data)).reshape(
                datum.channels, datum.height, datum.width).transpose((1, 2, 0))
        except Exception:
            img = Image.open(StringIO.StringIO(datum.data))
        print(np.array(img).max())
        print(np.array(img).min())
        pylab.imshow(img)
        pylab.title(str(datum.label))
        pylab.draw()
        print(datum.label)
        pylab.waitforbuttonpress()
        pylab.cla()
    ## Starts creating the files and folders
    functionspop.createsave(file)
    with open("..\\unitswindow.txt", "r") as datafile:
        s = datafile.read().split()[0::3]
    for i in range(len(s)):
        os.mkdir("Unit_" + s[i])
        os.chdir("Unit_" + s[i])
        os.mkdir("Results")
        os.mkdir("Figures")
        os.chdir("..")

    #Gives you the option to plot the analogs/spiketrains
    if input("Want to run plotplots?").lower() == "y":
        smr = l + "\\" + file
        functionspop.plotplots(smr)
        py.waitforbuttonpress(60)
    else:
        pass

    infos = open("..\\info.txt", "r")
    infosr = infos.read().splitlines()
    songfile = infosr[0]
    raw = infosr[1]
    rawfiltered = infosr[2]
    basebeg = int(infosr[3])
    basend = int(infosr[4])
    motifile = "..\\" + "labels.txt"
    smr = "..\\" + file
    ### Get the tones/sybcuts for further analysis
    if not os.path.isfile("CheckSylsFreq.txt"):
        fich = open("CheckSylsFreq.txt", mode="w+")