Beispiel #1
0
class VideoModule:
    """Record a short clip from a camera while previewing it in a window.

    NOTE(review): relies on names imported elsewhere in the file (``time``,
    ``Process``, ``VideoStream``, ``Display``, ``Camera``) -- presumably
    SimpleCV plus ``multiprocessing.Process``; confirm against the file's
    import block.

    CAUTION: every attribute below is a class attribute, shared by all
    instances.  ``videoTitle`` in particular is evaluated once at import
    time, so all instances created in the same run share the same
    timestamp prefix.
    """

    # Timestamp prefix for the output title; __init__ appends topic + ".mp4".
    videoTitle =  time.strftime("%Y_%m_%d_%H_%M_%S")

    # Topic label supplied by the caller (set in __init__).
    topic = ''

    # Loop flag polled by recordVideo(); endCapture() clears it to stop early.
    continueRecord = True

    # Capture/preview dimensions in pixels.
    width = 300
    height = 300

    # Placeholder process handle; the async save that would use it is
    # commented out in recordVideo().
    makefilmProcess = Process()

    # Preview display; 0 until recordVideo() replaces it with a real Display.
    disp = 0

    def getVideoTitle(self):
        """Return the title string built for the saved video."""
        return self.videoTitle

    def getVideoDisplay(self):
        """Return the preview display (0 before recording has started)."""
        return self.disp

    def recordVideo(self, cb, topic, length=5):
        """Record roughly ``length`` seconds (30 frames/sec worth of
        iterations) to an .avi buffer file, then invoke callback ``cb``.

        The buffer file name is published through the module-level global
        BUFFER_NAME so getBufferName() can report it.
        """
        global BUFFER_NAME

        BUFFER_NAME = topic + '_' + time.strftime("%Y_%m_%d_%H_%M_%S") + '.avi'
        vs = VideoStream(fps=24, filename=BUFFER_NAME, framefill=True)
        self.disp = Display((self.width, self.height))
        # Camera index 1 (the second attached camera) -- TODO confirm this
        # is intentional rather than a leftover from a multi-camera setup.
        cam = Camera(1, prop_set={"width":self.width,"height":self.height})

        while self.continueRecord:
            # The generator stops yielding as soon as endCapture() flips
            # continueRecord to False, which breaks out mid-recording.
            gen = (i for i in range(0, 30 * length) if self.continueRecord)
            for i in gen:
                img = cam.getImage()
                vs.writeFrame(img)
                img.save(self.disp)
            self.continueRecord = False
        print "Broke capture loop"
        self.disp.quit()

        print "Saving video"

        # This is to run this process asynchronously - we will skip that
        # self.makefilmProcess = Process(target=saveFilmToDisk, args=(BUFFER_NAME, self.videoTitle))
        # self.makefilmProcess.start()

        # Callback function
        cb()

    def getBufferName(self):
        """Return the .avi buffer file name set by the last recordVideo()."""
        global BUFFER_NAME
        return BUFFER_NAME

    def endCapture(self):
        """Ask the capture loop to stop and tear down the preview window."""
        self.continueRecord = False
        self.disp.quit()
        print "Set variable to false"

    def __init__(self, appendTitle):
        """Store the topic and append it (plus ".mp4") to the title.

        CAUTION: ``+=`` on the class attribute rebinds videoTitle on the
        instance only, but the base value is the import-time timestamp.
        """
        self.topic = appendTitle
        self.videoTitle += appendTitle + ".mp4"
Beispiel #2
0
def show_img(img):
    """Show ``img`` in a SimpleCV Display and block until the user is done.

    The viewer exits when the window is closed, when the right mouse
    button is pressed, or when the user hits Ctrl-C.

    :param img: a SimpleCV Image to display.
    """
    import time  # local import keeps this helper self-contained

    display = Display()
    img.show()

    # Wait for the user to close the window or break out of it.
    # The KeyboardInterrupt handler now wraps the whole loop -- the
    # original guarded only a `pass` statement, so an interrupt arriving
    # anywhere else in the iteration escaped uncaught.  A short sleep
    # keeps this poll loop from spinning at 100% CPU.
    try:
        while display.isNotDone():
            if display.mouseRight:
                display.done = True
            time.sleep(0.05)
    except KeyboardInterrupt:
        display.done = True
    display.quit()
Beispiel #3
0
class VideoModule:
    """Camera recorder with a fixed 'buffer.avi' output and a live preview.

    NOTE(review): depends on names imported elsewhere in the file
    (``time``, ``Process``, ``VideoStream``, ``Display``, ``Camera``) --
    presumably SimpleCV and multiprocessing; verify against the import
    block.

    CAUTION: the attributes below are class attributes shared across
    instances; ``videoTitle`` is computed once at import time.
    """

    # Timestamp prefix; __init__ appends the caller's suffix + ".mp4".
    videoTitle =  time.strftime("%Y_%m_%d_%H_%M_%S_")

    # Polled by recordVideo(); endCapture() clears it to stop recording.
    continueRecord = True

    # Capture/preview dimensions in pixels.
    width = 300
    height = 300

    # Unused placeholder; the async save below is commented out.
    makefilmProcess = Process()

    # Preview display; 0 until recordVideo() creates the real Display.
    disp = 0

    def getVideoTitle(self):
        """Return the video title string."""
        return self.videoTitle

    def getVideoDisplay(self):
        """Return the preview display (0 before recording starts)."""
        return self.disp

    def recordVideo(self, length=5):
        """Record roughly ``length`` seconds of frames to 'buffer.avi'.

        Unlike the other VideoModule variant in this file, the buffer
        name here is a local constant, not a module-level global.
        """
        BUFFER_NAME = 'buffer.avi'
        vs = VideoStream(fps=24, filename=BUFFER_NAME, framefill=True)
        self.disp = Display((self.width, self.height))
        cam = Camera(prop_set={"width":self.width,"height":self.height})

        while self.continueRecord:
            # Generator stops yielding once endCapture() clears the flag,
            # allowing an early break out of the frame loop.
            gen = (i for i in range(0, 30 * length) if self.continueRecord)
            for i in gen:
                img = cam.getImage()
                vs.writeFrame(img)
                img.save(self.disp)
            self.continueRecord = False
        print "Broke capture loop"
        self.disp.quit()

        print "Saving video"

        # This is to run this process asynchronously - we will skip that
        # self.makefilmProcess = Process(target=saveFilmToDisk, args=(BUFFER_NAME, self.videoTitle))
        # self.makefilmProcess.start()

    def endCapture(self):
        """Request the capture loop to stop after the current frame."""
        self.continueRecord = False
        print "Set variable to false"

    def __init__(self, appendTitle):
        """Append the caller's suffix (plus ".mp4") to the title."""
        self.videoTitle += appendTitle + ".mp4"
Beispiel #4
0
def record(filename):
    """Capture webcam frames at ~15 fps into rawdata/<filename>/NNNNN.jpg
    until the user closes the window or hits Ctrl-C.

    NOTE(review): ``os`` and ``scaleDown`` are defined elsewhere in the
    file (not visible here) -- confirm they are in scope at the call site.
    """
    from SimpleCV import Camera, Display
    import time
    neg_dir = "rawdata/%s" % filename
    if not os.path.exists(neg_dir):
        os.makedirs(neg_dir)
    cam = Camera()
    dis = Display()
    # Give the camera a moment to warm up before sampling frames.
    time.sleep(2)
    targetFps = 15.0
    fps = targetFps
    sleepTime = 1/targetFps
    # NOTE(review): `start` is never used below -- dead variable.
    start = time.time()
    prevTime = None
    count = 0
    try:
        print "Recording... [keyboard interrupt to quit]"
        while dis.isNotDone():
            img = cam.getImage()
            img = scaleDown(img)
            # Simple feedback controller for the frame rate: sleep longer
            # when running faster than target, shorter (floored at 10 ms)
            # when running slower.
            if fps > targetFps + .5:
                sleepTime += 0.005
            elif fps < targetFps:
                sleepTime = max(sleepTime - 0.005, 0.01)
            if prevTime is not None:
                fps = 1.0 / (time.time() - prevTime)
            prevTime = time.time()
            # Frames are numbered starting at 1600 -- presumably to resume
            # after a previous run's files without overwriting; TODO confirm.
            img.save("%s/%05d.jpg" % (neg_dir, count + 1600))
            count += 1
            img.dl().ezViewText("{0:.3f} fps".format(fps), (0, 0))
            img.save(dis)
            if dis.mouseRight:
                dis.quit()
            time.sleep(sleepTime)

    except KeyboardInterrupt:
        print "Done recording"
Beispiel #5
0
        # NOTE(review): fragment -- the enclosing loop/function (defining
        # tmpImg, box_dim, display, counter, fcount, cap) starts before
        # this excerpt; code below is unchanged.
        # Only square crops exactly box_dim x box_dim reach the prompt.
        if ((tmpImg.width + tmpImg.height) == 2 * box_dim):
            #            cv2.imshow('classify',tmpImg.getNumpyCv2())
            tmpImg.save(display)
            # Ask the human labeller to classify the crop (yes/no/cancel).
            result = tkMessageBox.askquestion(
                "Wildebeest!",
                "Is this one? (no if you don't know)",
                icon='warning',
                type='yesnocancel')
            if result == 'yes':
                # Positive sample: save under yes/ and bump both counters.
                save_path = "yes/img-" + str(counter) + ".png"
                tmpImg.save(save_path)
                fcount += 1
                counter += 1
            if result == 'no':
                # Negative (or unknown) sample: save under no/.
                save_path = "no/img-" + str(counter) + ".png"
                tmpImg.save(save_path)
                counter += 1
            if result == 'cancel':
                # User aborted labelling: close the display and stop.
                display.done = True
                break

        # Display the resulting frame
#       cv2.imshow('frame',frame)
#       if cv2.waitKey(1) & 0xFF == ord('q'):
#           break

# When everything done, release the capture
cap.release()
cv2.destroyAllWindows()
display.quit()
Beispiel #6
0
    def new_dewarp(self):
        """Dewarp a 360-degree "donut" video into a flat panorama.

        Shows the first frames in a preview window, lets the user click
        (1) the donut center, (2) the inner edge and (3) the outer edge,
        then remaps every frame and writes the results to
        ../temp_data/frames/FY######.png.

        NOTE(review): ``np``, ``cv2``, ``Image``, ``Display``,
        ``VirtualCamera``, ``bcolors`` and ``sys`` come from the file's
        imports (not visible here).
        """
        vidpath = self.iVidPath  #get input video path

        # isInROI is deprecated and not used in this program
        def isInROI(x, y, R1, R2, Cx, Cy):
            # True iff (x, y) lies inside the annulus of radii R1..R2
            # centered at (Cx, Cy).
            isInOuter = False
            isInInner = False
            xv = x - Cx
            yv = y - Cy
            rt = (xv * xv) + (yv * yv)
            if (rt < R2 * R2):
                isInOuter = True
                if (rt < R1 * R1):
                    isInInner = True
            return isInOuter and not isInInner

        """ ws = width of input video
            hs = height of input video
            wd = width of destination/output video
            Hd = height of destinaton/output video
          
        """

        def buildMap(Ws, Hs, Wd, Hd, R1, R2, Cx, Cy):
            # Build cv2.remap lookup tables mapping each output pixel back
            # to polar coordinates in the source donut.
            # NOTE(review): Ws/Hs are accepted but never used here.
            #the function throws type error, if Wd and Hd are not converted to integers
            Hd = int(Hd)
            Wd = int(Wd)
            map_x = np.zeros((Hd, Wd), np.float32)
            map_y = np.zeros((Hd, Wd), np.float32)
            rMap = np.linspace(R1, R1 + (R2 - R1), Hd)
            # NOTE(review): thetaMap spans float(Wd) * 2*pi radians, i.e.
            # Wd full circles across the output width; a single 0..2*pi
            # sweep looks more plausible -- confirm against the intended
            # output before changing.
            thetaMap = np.linspace(0, 0 + float(Wd) * 2.0 * np.pi, Wd)
            sinMap = np.sin(thetaMap)
            cosMap = np.cos(thetaMap)

            for y in xrange(0, int(Hd - 1)):
                map_x[y] = Cx + rMap[y] * sinMap
                map_y[y] = Cy + rMap[y] * cosMap

            return map_x, map_y

        # do the unwarping
        def unwarp(img, xmap, ymap):
            # Remap the frame through the precomputed tables and wrap the
            # result back into a SimpleCV Image.
            output = cv2.remap(img.getNumpyCv2(), xmap, ymap, cv2.INTER_LINEAR)
            result = Image(output, cv2image=True)
            # return result
            return result

        disp = Display(
            (800, 600))  #initialise a 800x600 simplecv display to show preview
        #disp = Display((1296,972))
        vals = []
        last = (0, 0)
        # Load the video
        vc = VirtualCamera(vidpath, "video")
        # Sometimes there is crud at the begining, buffer it out
        for i in range(0, 10):
            img = vc.getImage()
            img.save(disp)
        # Show the user a frame let them left click the center
        #    of the "donut" and the right inner and outer edge
        # in that order. Press esc to exit the display
        # NOTE(review): if the user exits with fewer than three clicks,
        # the vals[0..2] indexing below raises IndexError.
        while not disp.isDone():
            test = disp.leftButtonDownPosition()
            if test != last and test is not None:
                last = test
                print "[360fy]------- center = {0}\n".format(last)

                vals.append(test)
        print "[360fy]------- Dewarping video and generating frames using center, offset1, offset2\n"

        Cx = vals[0][0]
        Cy = vals[0][1]
        #print str(Cx) + " " + str(Cy)
        # Inner donut radius
        R1x = vals[1][0]
        R1y = vals[1][1]
        R1 = R1x - Cx
        #print str(R1)
        # outer donut radius
        R2x = vals[2][0]
        R2y = vals[2][1]
        R2 = R2x - Cx
        #print str(R2)
        # our input and output image siZes
        Wd = round(float(max(R1, R2)) * 2.0 * np.pi)
        #Wd = 2.0*((R2+R1)/2)*np.pi
        #Hd = (2.0*((R2+R1)/2)*np.pi) * (90/360)
        Hd = (R2 - R1)
        Ws = img.width
        Hs = img.height
        # build the pixel map, this could be sped up
        print "BUILDING MAP"

        xmap, ymap = buildMap(Ws, Hs, Wd, Hd, R1, R2, Cx, Cy)
        print "MAP DONE"

        # Preview the dewarp of the last buffered frame.
        result = unwarp(img, xmap, ymap)

        result.save(disp)

        print "[360fy]------- Storing frames into ../temp_data/frames\n"
        i = 0
        # Process every remaining frame until the video source runs dry.
        while img is not None:
            print bcolors.OKBLUE + "\rFrame Number: {0}".format(
                i) + bcolors.ENDC,

            sys.stdout.flush(
            )  #flushes stdout so that frame numbers print continually without skipping
            #print " percent complete         \r",
            result = unwarp(img, xmap, ymap)
            result.save(disp)
            # Save to file
            fname = "../temp_data/frames/FY{num:06d}.png".format(num=i)
            result.save(fname)

            img = vc.getImage()
            i = i + 1
        print " \n"

        if img is None:
            self.statusText.setText(str("Status: Done"))
            disp.quit()
class FingerTrackerPeaks(FingerTracker):
    """Finger tracking using peak-findings
    """

    def __init__(self, camera=None):
        """Initialize the finger tracker

        :param TrackerIn ti: Tracker input
        :param camera: Camera index, filename, or None
        """
        FingerTracker.__init__(self, camera)
        self.display = None

    def crop_img(self, img):
        return img.crop(50, 150, img.width - 100, img.height - 150)
        return img.crop(490, 95, img.width - 1000, img.height - 290).rotate(90, fixed=False)

    def find_fingers3(self, img, prev_img):
        if img is None or prev_img is None:
            return []

        crop_spec = [0, 0, img.width, img.height]
        scale_factor = 2
        r1 = img.grayscale().crop(*crop_spec)
        r2 = prev_img.grayscale().crop(*crop_spec)

        # modified
        diff = (r2 - r1).binarize(40)
        edge_mask = diff.erode(5).dilate(5) - diff.erode(5)
        edge_mask = edge_mask.dilate(5)
        scaled = (diff.edges() & edge_mask).resize(r1.width / scale_factor)

        points = []
        for x in range(scaled.width):
            points.append(scaled.edgeIntersections((x, 0), (x, scaled.height))[0])
        points = [xy for xy in points if xy is not None]
        if not points:
            return []

        xs = range(scaled.width)
        ys = scipy.interp(range(scaled.width), [a[0] for a in points], [a[1] for a in points])
        peaks = scipy.signal.find_peaks_cwt(-ys, np.arange(7, 11))
        if len(peaks) == 0:
            return []

        positions = np.array(zip(peaks, np.array(ys)[peaks])) * scale_factor + np.array(crop_spec[:2])

        return positions

    def run_frame(self, ti, img):
        """Run the algorithm for one frame

        :param TrackerIn ti: TrackerIn object to send events to
        :return: True if I should be called with the next frame
        """
        img = self.crop_img(img)

        if self.display is None:
            # Consume one frame for the initialization
            self.display = Display(img.size())
            self.prev_img = img
            self.bg_img = None
            self.count = 20
            self.last_time = time.time()
            return True
        elif self.display.isDone():
            return False

        if self.bg_img is None and img:
            self.bg_img = img

        positions = self.find_fingers3(img, self.bg_img)
        if self.count > 0:
            self.bg_img = img
            self.count -= 1
            print "SETTING BG IMAGE"

        di = img  # (bg_img.grayscale() - img.grayscale()).binarize(40)
        for x, y in positions:
            di.dl().circle((int(x), int(y)), 15, color=Color.RED, width=3)
        self.add_positions(ti, positions)

        fps = 1.0 / (time.time() - self.last_time)
        di.dl().ezViewText("{0:.3f} fps".format(fps), (0, 0))
        di.save(self.display)

        self.last_time = time.time()
        self.last_img = True

        if self.display.mouseLeft or self.display.mouseRight:
            self.display.done = True
            return False
        else:
            return True

    def finish(self):
        if self.display is not None:
            self.display.done = True
            self.display.quit()
Beispiel #8
0
        # NOTE(review): truncated fragment -- the enclosing function/loop
        # (defining origImg, snapshot, display, imgStreamer, cam,
        # snapshotTaken, doImageProcessing) starts before this excerpt.
        else:
            origImg.sideBySide(snapshot).save(display)
            origImg.sideBySide(snapshot).save(imgStreamer)
    else:
        origImg.save(display)
        origImg.save(imgStreamer)

    # Take a snapshot and save it when you press Return (aka Enter)
    # Exit the display when you press Escape (for some reason, pressing the "X" to close the window doesn't work)
    # Run the Color Detection code when you press Space
    # Press Space again to end Color Detection code
    for event in pygame.event.get():
        if event.type == pygame.KEYDOWN:
            if event.key == pygame.K_RETURN:
                snapshot = cam.getImage()

                #Change this directory if needed
                # NOTE(review): backslashes in this Windows path are not
                # escaped ('F:\stuff' happens to survive because \s and \i
                # are not escape sequences) -- fragile; a raw string or
                # forward slashes would be safer.
                snapshot.save('F:\stuff\img.png')

                snapshotTaken = True
                print 'Took snapshot'
            if event.key == pygame.K_ESCAPE:
                display.quit()
            if event.key == pygame.K_SPACE and snapshotTaken:
                print 'Image Processing initiated'
                # Toggle the color-detection flag on each Space press.
                if not doImageProcessing:
                        doImageProcessing = True
                else:
                        doImageProcessing = False

                    # NOTE(review): the two lines below are an orphaned
                    # splice (trailing keyword args of some audio-stream
                    # constructor from an unrelated snippet); they do not
                    # belong to the code above.
                    rate=44100,
                    output=True)

    snapshotTime = time.time()
    # check if capture device is OK
    # Initialize the camera
    cam = Camera()
    print 'Camera : ', cam.getProperty("height"), cam.getProperty("width")
    print ' Startup time ', (time.time() - snapshotTime)*1000, ' ms'
    snapshotTime = time.time()

    try:
        img = cam.getImage()
        disp = Display(img.size(), title = "SimpleCellDemo, Draw the RF's bounding box with mouse")
        # Let the user draw the receptive-field bounding box, then reopen
        # the display for the main loop.
        bb = getBBFromUser(cam, disp)
        disp.quit()
        # NOTE(review): the first Display below is immediately discarded
        # and replaced by the second -- likely a leftover line.
        disp = Display(img.size())
        disp = Display(img.size(), title = "SimpleCellDemo, Press Esc to exit")
        img, RF = do_RF(bb)
        while disp.isNotDone():
            snapshotTime = time.time()
            img, im = do_RF(bb)
            corr, Vm = neuron(im, voltage, hist)
            print corr, Vm
            backshotTime = time.time()
            fps = 1. / (backshotTime - snapshotTime)
            img.drawText("FPS:" + str(fps), 10, 10, fontsize=30, color=Color.GREEN)
            img.show()
        disp.quit()

    # NOTE(review): the excerpt is cut off here -- the finally suite is
    # missing from this view.
    finally:
Beispiel #10
0
    # NOTE(review): fragment -- the enclosing function (defining txt and
    # the Display `d`) starts before this excerpt.
    print (txt , 'click to continue')
    # Left click continues; a right click near the top edge of the window
    # (y in 1..14) quits the whole program.
    while d.isNotDone():
        if d.mouseLeft:
            d.done = True
        if d.mouseRight:
            #Gprb.append(Gfilename)
            rb = (d.rightButtonDownPosition())
            print(rb)
            if rb[1] < 15 and rb[1] > 0:
                 d.done = True
                 d.quit()
                 sys.exit(1)
                 pass
        time.sleep(.2) 

# Regression-test driver: load <Rtype>Test.png, run hunt() on it and
# print the result.  NOTE(review): Rtype, Gd, hunt, iHunt and Image are
# module globals defined elsewhere in the file.
if  __name__ == '__main__':
    print ' rnum  module regression Test'
 #  Gd  = Display((1040,410))
    for tst in [Rtype]:      #'fat','wt', 'h2o', 
        img = Image(tst +'Test.png') 
        db = True #False  #True
        img.save(Gd)
        #cpause('test image',Gd)
        wt  = hunt(img,tst )     
        print  'result  is', wt 
        print 'iHunt',iHunt
##    print 'blob dictionary'
##    for k, v in d.iteritems():
##        print k, v
    Gd.quit()
Beispiel #11
0
    def _Convert(self):
        """Dewarp the video file selected in the tree view into a flat
        panorama and encode it to an .mp4 via avconv.

        Uses hard-coded donut-calibration constants for 480-line input
        (the interactive click-to-calibrate path and the 1200-line
        constants are commented out below).

        NOTE(review): ``buildMap``, ``unwarp``, ``np``, ``Display``,
        ``VirtualCamera`` and ``os`` are defined/imported elsewhere in
        the file.
        """
        # selectfile = self.listView.DirModel.fileName(self.listView.currentIndex())
        selectfile = self.tree.DirModel.filePath(self.tree.currentIndex())
        sfilename = self.tree.DirModel.fileName(self.tree.currentIndex())
        # First 13 characters of the file name become the output name.
        strfn = sfilename[0:13]
        print selectfile + " convert"
        #os.system('sh /home/pi/panoBox/dewarp.sh')
        disp = Display((800,480)) #
        vals = []
        last = (0,0)
        # Load the video from the rpi
        vc = VirtualCamera(str(selectfile),"video")
        #vc = picamera.PiCamera()
        # Sometimes there is crud at the begining, buffer it out
        for i in range(0,10):
            img = vc.getImage()
        #    img = vc.capture()
            img.save(disp)

        """
        cnt = 0
        while not disp.isDone():
            test = disp.leftButtonDownPosition()
            if( test != last and test is not None):
                last= test
                vals.append(test)
                cnt += 1
                if cnt == 3:
                    break
        """
        ###############################################
        #480
        # Fixed calibration for 480-line input: donut center (Cx, Cy),
        # inner-edge point (R1x, R1y), outer-edge point (R2x, R2y).
        Cx = 260
        Cy = 195
        R1x = 320
        R1y = 195
        R2x = 380
        R2y = 195
        """
        #1200
        Cx = 645
        Cy = 490
        R1x = 787
        R1y = 490
        R2x = 937
        R2y = 490
        """
        ##############################################
        """
        Cx = vals[0][0]
        Cy = vals[0][1]
        R1x = vals[1][0]
        R1y = vals[1][1]
        R2x = vals[2][0]
        R2y = vals[2][1]
        print Cx
        print Cy 
        print R1x 
        print R1y 
        print R2x 
        print R2y
        """
        ##############################################
        # Radii from the horizontal offsets; output width is the outer
        # circumference, output height the ring thickness.
        R1 = R1x-Cx
        R2 = R2x-Cx
        Wd = round(float(max(R1, R2)) * 2.0 * np.pi)
        Hd = (R2-R1)
        Ws = img.width
        Hs = img.height
        # build the pixel map, this could be sped up
        print "BUILDING MAP!"
        xmap,ymap = buildMap(Ws,Hs,Wd,Hd,R1,R2,Cx,Cy)
        print "MAP DONE!"
        # do an unwarping and show it to us
        result = unwarp(img,xmap,ymap)
        result.save(disp)
        # I used these params for converting the raw frames to video
        # avconv -f image2 -r 30 -v:b 1024K -i samples/lapinsnipermin/%03d.jpeg output.mpeg
        i = 0
        # Dewarp every remaining frame and dump it as a numbered PNG.
        while img is not None:
            print img.width, img.height
            result = unwarp(img,xmap,ymap)

            result.save(disp)
            # Save to file
            fname = "/home/pi/box1/panoImageFiles/FRAME{num:05d}.png".format(num=i)
            result.save(fname)
            #vs.writeFrame(derp)
            # get the next frame
            img = vc.getImage()
            i = i + 1
        disp.quit()
        # SECURITY NOTE(review): this builds a sudo shell command by
        # string-interpolating strfn (derived from a user-selectable file
        # name); a name containing shell metacharacters would be executed.
        # Prefer subprocess.run([...], shell=False) with an argument list.
        ff = "sudo avconv -r 12 -i /home/pi/box1/panoImageFiles/FRAME%05d.png -vf 'scale=trunc(iw/2)*2:trunc(ih/2)*2 , transpose=1, transpose=1' -c:v libx264 /home/pi/box1/storage/panoVideoFiles/"+str(strfn)+".mp4&&sudo rm /home/pi/box1/panoImageFiles/*.png"
        os.system(ff)