예제 #1
0
 def configure_gui(self):
     """Create the Tk control variables and open the camera.

     Sets up every Tk variable used by the widgets (exposure, gain, fit
     options, pixel bounds, file names, working directory) and opens the
     Camera identified by ``self.guidCam``.
     """
     self.erreur=StringVar();self.erreur.set("")
     # Camera parameters (exposure time and gain)
     self.tint=DoubleVar()
     #self.tint.trace("w",self.TintModifie) => def TintModifie(self,*args):
     self.gain=IntVar()
     self.Stop=False
     # ------ Camera initialisation
     #self.cam=Camera(guid=2892819673481529) 
     self.cam=Camera(guid=self.guidCam) 
     # keep the last 5 characters of the model name (e.g. "F201B")
     self.lemodel=self.cam.model.decode()[-5:]
     # acquisition-in-progress flag
     self.Acquisition=0
     # x axis: microns or pixels:
     self.EnMicron=BooleanVar();self.EnMicron.set(False)
     # Gaussian fit of the profile
     self.fitgauss=BooleanVar();self.fitgauss.set(False)
     self.w0=IntVar();self.w0.set(80)
     # restrict the displayed pixel range
     self.borner=BooleanVar();self.borner.set(False)
     self.Pix0=IntVar();self.Pix0.set(0)
     self.Pix1=IntVar();self.Pix1.set(1234)
     # reset / auto-scale the graph
     self.InitGraph=BooleanVar()
     self.InitGraph.set(True)
     # focal length, pixel/micron conversion
     self.focale=DoubleVar();self.focale.set(6.6)
     # base name of the save file
     self.Nom=StringVar()
     # working directory
     self.WorkDir=StringVar()
     self.WorkDir.set(os.getcwd())
     # reference file
     self.FichRef=StringVar();self.FichRef.set("!! Ldb non definie !!")
 def __init__(self):
     """Open the camera and the motorised optics.

     NOTE(review): relies on PR50CC, BS and self.LHcamera() defined
     elsewhere in this project — not visible in this chunk.
     """
     # GUID of the AVT Stingray F201B camera
     F201B = 2892819673575448
     # ------ Camera initialisation
     self.cam0 = Camera(guid=F201B)
     #lemodel=cam.model.decode()[-5:]
     # rotation stage and Babinet-Soleil compensator
     self.l4 = PR50CC.rotation()
     print("L4 ok")
     self.bs = BS.BabSoleil()
     print("BS ok")
     # presumably queries the image size; the print below reads
     # self.Larg / self.Haut set by this call — TODO confirm
     self.LHcamera()
     print("LxH={}x{}".format(self.Larg, self.Haut))
예제 #3
0
def main():
    """Command-line entry point: power up the camera, configure it and
    capture ten frames."""
    from optparse import OptionParser

    parser = OptionParser()
    # parser.add_option()
    parser.set_defaults(guid=None)
    options, _args = parser.parse_args()

    camera = Camera(guid=options.guid)
    camera.power(True)
    setup(camera, mode=2, gain=0, bright=0, shutter=0.01)
    capture(camera, 10)
    camera.power(False)
예제 #4
0
def main():
    """Entry point: print camera info, then run the EMVA1288 measurement."""
    from optparse import OptionParser

    parser = OptionParser()
    # parser.add_option()
    parser.set_defaults(uid=None)
    options, _args = parser.parse_args()

    camera = Camera(options.uid)
    camera.power(True)
    info(camera)
    print()
    # the rate argument `r` is defined by emva1288() — not visible here
    emva1288(camera, r=1000e3/20e-3)
    camera.power(False)
예제 #5
0
    def __init__(self, index, type, guid=0000000000000000, buffersize=10):
        """Open the capture source for this view and create its window.

        Parameters:
            index: integer id of this view; also staggers the window position.
            type: 'firewire' selects the dc1394 Camera path, anything else
                falls back to the first local webcam (cv2.VideoCapture(0)).
            guid: FireWire camera GUID (the default literal is just 0).
            buffersize: dc1394 capture ring-buffer size.
        """
        self._id = index
        self._type = type
        self._guid = guid
        self._isUpdating = True
        self._isFilterBypassed = True
        self._isObjectDetectionEnabled = False
        self._isSnapshotEnabled = False
        self._detectionAlgorithm = ''
        # data structure: {"filterName", "args"}, defined in the GUI
        # text editor
        self.filterRouting = []

        # instances of Agent class. You can define an array if you have
        # multiple agents.
        # Pass them to *processObjectDetection()*
        self.agent1 = Agent()
        self.agent2 = Agent()

        # drawings
        # data structure: {"drawingName", "args"}, defined in Subthread
        self.drawingRouting = []

        # video writing
        self._isVideoWritingEnabled = False
        self.videoWriter = None

        if self.isFireWire():
            self.cam = Camera(guid=self._guid)
            print("====================================================")
            print("CameraId:", self._id)
            print("Model:", self.cam.model)
            print("GUID:", self.cam.guid)
            print("Mode:", self.cam.mode)
            print("Framerate: ", self.cam.rate)
            print("====================================================")
            self.cam.start_capture(bufsize=buffersize)
            self.cam.start_video()
        else:
            # fall back to the first local webcam
            self.cap = cv2.VideoCapture(0)
            if not self.cap.isOpened():
                print('Camera is not detected. End program.')
                self.cap.release()
                sys.exit()

        # window positions staggered vertically by camera id
        cv2.namedWindow(self.windowName(), 16)  # cv2.GUI_NORMAL = 16
        cv2.moveWindow(self.windowName(), 600, -320 + 340 * self._id)
        cv2.setMouseCallback(self.windowName(), showClickedCoordinate)
예제 #6
0
def initCam(lib, mode=None, fps=30, shutter=6.1, gain=1, isospeed=800):
    """Open, configure and start the first camera found on *lib*.

    Parameters:
        lib: camera library handle exposing ``enumerate_cameras()``.
        mode: video mode forwarded to ``Camera``.  Bug fix: the original
            hard-coded ``mode=None`` in the call, silently ignoring this
            parameter.
        fps, shutter, gain: acquisition parameters forwarded to ``Camera``.
        isospeed: currently unused; kept for backward compatibility.
            TODO(review): forward to Camera if it accepts an iso-speed kwarg.

    Returns:
        The started, configured camera object.

    Exits the interpreter with status 1 when no camera is connected.
    """
    global useColor
    cams = lib.enumerate_cameras()
    if not cams:
        print("ERROR: no Camera found. Make sure it is plugged into the computer")
        sys.exit(1)
    cam0_handle = cams[0]
    print(cams)
    cam0 = Camera(lib, guid=cam0_handle['guid'], mode=mode,
                  framerate=fps, shutter=shutter, gain=gain)
    # choose colour vs monochrome video mode
    # (per the original comment, modes[3] is black and white)
    cam0.mode = cam0.modes[0] if useColor else cam0.modes[3]
    cam0.start(interactive=True)
    setDefaults(cam0)
    print("opening Camera")
    print(getInfo(cam0))
    return cam0
예제 #7
0
    def __init__(self, index, type, guid=0000000000000000, buffersize=10):
        """Open the capture source for this view and create its window.

        A 'firewire' *type* uses the dc1394 Camera identified by *guid*
        (default literal is just 0); any other type opens the first local
        webcam via OpenCV.  *buffersize* is the dc1394 ring-buffer size.
        """
        self._id = index
        self._type = type
        self._guid = guid
        self._isUpdating = True
        self._isFilterBypassed = True
        self._isObjectDetectionRunning = False
        self._detectionAlgorithm = ''
        self.filterRouting = [
        ]  # data structure: {"filterName", "args"}, defined in the editor
        # define the agents that you want to detect with objectDetection algorithm
        self.agent1 = Agent()
        self.agent2 = Agent()

        if self.isFireWire():
            self.cam = Camera(guid=self._guid)
            print("====================================================")
            print("CameraId:", self._id)
            print("Vendor:", self.cam.vendor)
            print("Model:", self.cam.model)
            print("GUID:", self.cam.guid)
            print("Mode:", self.cam.mode)
            print("Framerate: ", self.cam.rate)
            print("Available modes", [mode.name for mode in self.cam.modes])
            print("====================================================")
            self.cam.start_capture(bufsize=buffersize)
            self.cam.start_video()
        else:
            # fall back to the first local webcam
            self.cap = cv2.VideoCapture(0)
            if not self.cap.isOpened():
                print('Camera is not detected. End program.')
                self.cap.release()
                sys.exit()

        # window positions staggered vertically by camera id
        cv2.namedWindow(self.windowName(), 16)  # cv2.GUI_NORMAL = 16
        cv2.moveWindow(self.windowName(), 600, -320 + 340 * self._id)
        cv2.setMouseCallback(self.windowName(), showClickedCoordinate)
예제 #8
0
File: dc1394.py  Project: wateryi/pydc1394
def main():
    """Command-line entry point: configure the camera and grab ten frames."""
    from optparse import OptionParser

    opt_parser = OptionParser()
    # opt_parser.add_option()
    opt_parser.set_defaults(guid=None)
    options, _positional = opt_parser.parse_args()

    cam = Camera(guid=options.guid)
    cam.power(True)
    settings = dict(mode=2, gain=0, bright=0, shutter=0.01)
    setup(cam, **settings)
    capture(cam, 10)
    cam.power(False)
예제 #9
0
def main():
    """Entry point: print camera information and run EMVA1288."""
    from optparse import OptionParser

    opt_parser = OptionParser()
    # opt_parser.add_option()
    opt_parser.set_defaults(uid=None)
    options, _positional = opt_parser.parse_args()

    camera = Camera(options.uid)
    camera.power(True)
    info(camera)
    print()
    # the rate argument `r` is defined by emva1288() — not visible here
    emva1288(camera, r=1000e3 / 20e-3)
    camera.power(False)
예제 #10
0
def _main(guid, name):
    """Run the pyCAM Qt GUI against the camera identified by *guid*.

    Always tears the camera down in the ``finally`` block and exits the
    interpreter with the Qt event-loop status.

    Bug fix: ``status`` was only assigned by ``app.exec_()``; if any call
    before it raised, the ``finally`` block crashed with NameError at
    ``sys.exit(status)``, masking the original exception.  It now defaults
    to a non-zero exit code.
    """
    app = QtGui.QApplication(sys.argv)
    cam = Camera(guid=guid, iso_speed=800)
    main = Main(cam)
    main.setWindowTitle('pyCAM -- %s' % name)

    main.show()
    status = 1  # failure exit code unless the event loop runs to completion
    try:
        main.start_camera()
        time.sleep(.5)
        main.process_images()
        status = app.exec_()
    finally:
        main.stop_camera()
        main.deinit_camera()
        sys.exit(status)
예제 #11
0
class Vision(object):
    """One camera view with a filter/detection/drawing/recording pipeline.

    Wraps either a FireWire camera (dc1394 ``Camera``) or a local webcam
    (``cv2.VideoCapture``), selected by *type*.  Each :meth:`updateFrame`
    call grabs a frame, runs it through the configured filters, the
    optional object-detection algorithm and the drawing routines, then
    shows it in this view's OpenCV window and optionally records it.
    """

    def __init__(self, index, type, guid=0, buffersize=10):
        """Open the capture source and create the display window.

        Args:
            index: integer id of this view; staggers the window position.
            type: 'firewire' selects the dc1394 camera; anything else
                falls back to the first local webcam.
            guid: FireWire camera GUID (default 0; the original default
                literal ``0000000000000000`` is the same value).
            buffersize: dc1394 capture ring-buffer size.
        """
        self._id = index
        self._type = type
        self._guid = guid
        self._isUpdating = True
        self._isFilterBypassed = True
        self._isObjectDetectionEnabled = False
        self._isSnapshotEnabled = False
        self._detectionAlgorithm = ''
        # data structure: {"filterName", "args"}, defined in the GUI text editor
        self.filterRouting = []

        # instances of Agent class. You can define an array if you have
        # multiple agents.  Pass them to *processObjectDetection()*
        self.agent1 = Agent()
        self.agent2 = Agent()

        # drawings; data structure: {"drawingName", "args"}, defined in Subthread
        self.drawingRouting = []

        # video writing
        self._isVideoWritingEnabled = False
        self.videoWriter = None

        if self.isFireWire():
            self.cam = Camera(guid=self._guid)
            print("====================================================")
            print("CameraId:", self._id)
            print("Model:", self.cam.model)
            print("GUID:", self.cam.guid)
            print("Mode:", self.cam.mode)
            print("Framerate: ", self.cam.rate)
            print("====================================================")
            self.cam.start_capture(bufsize=buffersize)
            self.cam.start_video()
        else:
            self.cap = cv2.VideoCapture(0)
            if not self.cap.isOpened():
                print('Camera is not detected. End program.')
                self.cap.release()
                sys.exit()

        cv2.namedWindow(self.windowName(), 16)  # cv2.GUI_NORMAL = 16
        cv2.moveWindow(self.windowName(), 600, -320 + 340 * self._id)
        cv2.setMouseCallback(self.windowName(), showClickedCoordinate)

    def updateFrame(self):
        """Grab one frame from the active source and run the pipeline."""
        if not self.isUpdating():
            return
        if self.isFireWire():
            frameOriginal = self.cam.dequeue()
            self._processAndShow(frameOriginal)
            # hand the buffer back to the dc1394 driver
            frameOriginal.enqueue()
        else:
            _, frameOriginal = self.cap.read()
            self._processAndShow(frameOriginal)

    def _processAndShow(self, frameOriginal):
        """Filter/detect/draw/record one frame and display the result.

        Shared by both branches of updateFrame(); the original duplicated
        this whole pipeline for the FireWire and webcam paths.
        """
        if not self.isFilterBypassed() and not self.filterRouting == []:
            frameFiltered = self.processFilters(frameOriginal.copy())
        else:
            frameFiltered = frameOriginal
        if self.isObjectDetectionEnabled():
            frameProcessed = self.processObjectDetection(frameFiltered,
                                                         frameOriginal)
        else:
            frameProcessed = frameFiltered
        if self.isDrawingEnabled():
            frameProcessed = self.processDrawings(frameProcessed)
        if self.isSnapshotEnabled():
            cv2.imwrite('snapshot.png', filterlib.color(frameProcessed))
            self.setStateSnapshotEnabled(False)  # snapshot is one-shot
        if self.isVideoWritingEnabled():
            self.videoWriter.write(filterlib.color(frameProcessed))
        cv2.imshow(self.windowName(), frameProcessed)

    def closeCamera(self):
        """Release the writer and the capture source, destroy the window."""
        if self.videoWriter is not None:
            self.videoWriter.release()
            self.videoWriter = None
        if self.isFireWire():
            self.cam.stop_video()
        else:
            # Bug fix: the original referenced ``self.cap.release`` without
            # calling it, so the webcam was never actually released.
            self.cap.release()
        cv2.destroyWindow(self.windowName())

    #==========================================================================
    # obtain instance attributes
    #==========================================================================
    def windowName(self):
        """Title of this view's OpenCV window (unique per camera id)."""
        return 'CamID:{} (Click to print coordinate)'.format(self._id)

    def isFireWire(self):
        """True when this view reads from the dc1394 camera."""
        return self._type.lower() == 'firewire'

    def isUpdating(self):
        return self._isUpdating

    def isFilterBypassed(self):
        return self._isFilterBypassed

    def isObjectDetectionEnabled(self):
        return self._isObjectDetectionEnabled

    def isDrawingEnabled(self):
        # drawing is implicitly enabled whenever routings are registered
        return not self.drawingRouting == []

    def isSnapshotEnabled(self):
        return self._isSnapshotEnabled

    def isVideoWritingEnabled(self):
        return self._isVideoWritingEnabled

    #==========================================================================
    # set instance attributes
    #==========================================================================
    def setStateUpdate(self, state):
        self._isUpdating = state

    def setStateFiltersBypassed(self, state):
        self._isFilterBypassed = state

    def setStateObjectDetection(self, state, algorithm):
        self._isObjectDetectionEnabled = state
        self._detectionAlgorithm = algorithm

    def setVideoWritingEnabled(self, state):
        self._isVideoWritingEnabled = state

    def setStateSnapshotEnabled(self, state):
        self._isSnapshotEnabled = state

    #==========================================================================
    # Video recording
    #==========================================================================
    def createVideoWriter(self, fileName):
        """Create an XVID writer; assumes 640x480 @ 30 fps colour frames.
        TODO(review): confirm this matches the actual capture format."""
        self.videoWriter = cv2.VideoWriter(
            fileName,
            fourcc=cv2.VideoWriter_fourcc(*'XVID'),
            fps=30.0,
            frameSize=(640, 480),
            isColor=True)

    def startRecording(self, fileName):
        self.createVideoWriter(fileName)
        self.setVideoWritingEnabled(True)
        print('Start recording' + fileName)

    def stopRecording(self):
        # Bug fix: the original cleared the *snapshot* flag here, leaving
        # _isVideoWritingEnabled True so updateFrame() kept writing to a
        # released VideoWriter.
        self.setVideoWritingEnabled(False)
        self.videoWriter.release()
        print('Stop recording.')

    #==========================================================================
    # <Filters>
    # Define the filters in filterlib.py
    #==========================================================================
    def createFilterRouting(self, text):
        """Parse editor lines like ``name(args) // comment`` into
        self.filterRouting; lines that do not match are skipped."""
        self.filterRouting = []
        for line in text:
            line = line.split('//')[0]  # strip after //
            line = line.strip()  # strip spaces at both ends
            match = re.match(r"(?P<function>[a-z0-9_]+)\((?P<args>.*)\)", line)
            if match:
                name = match.group('function')
                args = match.group('args')
                args = re.sub(r'\s+', '', args)  # strip spaces in args
                self.filterRouting.append({'filterName': name, 'args': args})

    def processFilters(self, image):
        """Apply the routed filters in order; unknown filter names fall
        back to filterlib.filterNotDefined."""
        for item in self.filterRouting:
            image = getattr(filterlib, item['filterName'],
                            filterlib.filterNotDefined)(image, item['args'])
        # You can add custom filters here if you don't want to use the editor
        return image

    #==========================================================================
    # <object detection>
    # Executed after all the filters.  "imageFiltered" is used for detection
    # only; boundaries of detected objects are drawn on "imageOriginal".
    # Information about detected objects can be stored in an Agent instance.
    #==========================================================================
    def processObjectDetection(self, imageFiltered, imageOriginal):
        # convert to rgb so that coloured lines can be drawn on top
        imageOriginal = filterlib.color(imageOriginal)
        # unknown algorithm names fall back to algorithmNotDefined
        algorithm = getattr(objectDetection, self._detectionAlgorithm,
                            objectDetection.algorithmNotDefined)
        # pass instances of Agent class if you want to update its info
        imageProcessed = algorithm(imageFiltered, imageOriginal, self.agent1)
        return imageProcessed

    #==========================================================================
    # <subthread drawing>
    # Used to draw lines etc. on a plot, e.g. the path the robot should follow
    #==========================================================================
    def clearDrawingRouting(self):
        self.drawingRouting = []

    def addDrawing(self, name, args=None):
        self.drawingRouting.append({'drawingName': name, 'args': args})

    def processDrawings(self, image):
        # convert to rgb so that coloured lines can be drawn on top
        image = filterlib.color(image)
        for item in self.drawingRouting:
            image = getattr(drawing, item['drawingName'],
                            drawing.drawingNotDefined)(image, item['args'])
        return image
예제 #12
0
class LeGUI(Frame):
    """Tk GUI for an AVT Stingray camera: live intensity-profile display
    with optional Gaussian (waist) fit, pixel/micron axis and .sp export.

    The plot itself is updated externally via
    FuncAnimation(my_gui.Figspectre, my_gui.updatefig, ...).
    """

    def __init__(self, master):
        self.master = master
        # GUIDs of the two available Stingray cameras; the second
        # assignment wins, so the F201B is the one used.
        self.F146B=2892819673481529
        self.F201B=2892819673575448
        self.guidCam=self.F146B
        self.guidCam=self.F201B
        Frame.__init__(self, self.master)
        self.configure_gui()
        self.create_widgets()

    def configure_gui(self):
        """Create the Tk control variables and open the camera."""
        self.erreur=StringVar();self.erreur.set("")
        # Camera parameters (exposure time and gain)
        self.tint=DoubleVar()
        #self.tint.trace("w",self.TintModifie) => def TintModifie(self,*args):
        self.gain=IntVar()
        self.Stop=False
        # ------ Camera initialisation
        #self.cam=Camera(guid=2892819673481529) 
        self.cam=Camera(guid=self.guidCam) 
        # keep the last 5 characters of the model name (e.g. "F201B")
        self.lemodel=self.cam.model.decode()[-5:]
        # acquisition-in-progress flag
        self.Acquisition=0
        # x axis: microns or pixels:
        self.EnMicron=BooleanVar();self.EnMicron.set(False)
        # Gaussian fit
        self.fitgauss=BooleanVar();self.fitgauss.set(False)
        self.w0=IntVar();self.w0.set(80)
        # restrict the pixel range
        self.borner=BooleanVar();self.borner.set(False)
        self.Pix0=IntVar();self.Pix0.set(0)
        self.Pix1=IntVar();self.Pix1.set(1234)
        # reset / auto-scale the graph
        self.InitGraph=BooleanVar()
        self.InitGraph.set(True)
        # focal length, pixel/micron conversion
        self.focale=DoubleVar();self.focale.set(6.6)
        # base name of the save file
        self.Nom=StringVar()
        # working directory
        self.WorkDir=StringVar()
        self.WorkDir.set(os.getcwd())
        # reference file
        self.FichRef=StringVar();self.FichRef.set("!! Ldb non definie !!")

        
    def create_widgets(self):
        """Build the notebook (commands/files tabs) and the matplotlib
        figure embedded in its own Toplevel window."""
        self.master.title("CPL Stingray "+self.lemodel)
        topframe=Frame(self.master)
        topframe.pack()
        nbook=ttk.Notebook(topframe)
        f1=ttk.Frame(nbook)
        f2=ttk.Frame(nbook)
        nbook.add(f1,text="Commandes")
        nbook.add(f2,text="Fichiers")
        nbook.pack()

        self.FrCommandes(f1).pack()
        Fichiers.TkFich(f2,self.Nom,self.FichRef, self.WorkDir)
        


        
        # The profile plot lives in a separate Toplevel window: FrameTraceSp
        self.FrameTraceSp=Toplevel() # separate window for the graph
        self.FrameTraceSp.title("Image")
                   
        # from matplotlib.figure:
        self.Figspectre=Figure(figsize=(8,6),dpi=100) # figsize: H,V 
        self.plot1=self.Figspectre.add_subplot(111)
        # embed the matplotlib figure in a Tk canvas inside FrameTraceSp:
        self.canvas = FigureCanvasTkAgg(self.Figspectre,self.FrameTraceSp)
        self.canvas.draw()
        self.canvas.get_tk_widget().pack(side=BOTTOM, fill=BOTH, expand=True)
        toolbar = NavigationToolbar2Tk(self.canvas,self.FrameTraceSp)
        toolbar.update()
        self.canvas._tkcanvas.pack(side=TOP, fill=BOTH, expand=True)
        #self.canvas.draw
    
    #------------------------------------------------------------------
    # the graph is refreshed by
    # FuncAnimation(my_gui.Figspectre,my_gui.animate,interval=1000) 
    # at the end of the script
    #------------------------------------------------------------------
    
    # Gaussian beam model used by curve_fit:
    # maxi * exp(-2 (x-mean)^2 / std^2); std is the 1/e^2 half-width.
    def func(self,x,mean,std,maxi):
    	return maxi*np.exp(-2*(x-mean)**2 / std**2) 
    
    def updatefig(self,i):
        """FuncAnimation callback: grab one frame, build the 1-D profile,
        optionally fit a Gaussian, and refresh the plot."""
        if  self.Acquisition:
            # capture one frame and copy it into CamArray
            self.cam.start_one_shot()
            frame =self.cam.dequeue()
            self.CamArray = frame.copy()
            frame.enqueue()
            self.cam.stop_one_shot()
            # average the image columns -> 1-D intensity profile
            y1=self.CamArray.sum(axis=1)/self.CamArray.shape[1]
            # restrict to the requested bounds:
            if self.borner.get():
                y1=y1[self.Pix0.get():self.Pix1.get()]
            # 4.4 microns/pixel for the F201B
            # 4.65 for the F146B
            # calibrated with the Leitz target for the "1.3"
            if  self.EnMicron.get():
                G=200./self.focale.get()
               # print("focale:{}, G={}".format(self.focale.get()*1.,G))
                x=np.arange(y1.size)*4.4/G 
                larg =int(2* self.w0.get()/4.4*G)
               # print("largeur en pixel:{}".format(larg))
            else : 
             #   self.focale.get()
                x=np.arange(y1.size)
                larg =int( self.w0.get())
            self.data=np.column_stack((x,y1))
            maxi= np.max(np.array(y1))
            
            if self.fitgauss.get():
                # fit a window of +/- larg samples around the profile maximum
                x_data = x[np.argmax(y1)-larg: np.argmax(y1)+larg]
                y_data = y1[np.argmax(y1)-larg: np.argmax(y1)+larg]
                init= [x[np.argmax(y1)],larg,maxi]
                b1= [0.7*x[np.argmax(y1)],0.5*larg,0.7*maxi]
                b2= [1.3*x[np.argmax(y1)],2*larg,1.3*maxi]
                try :
                    popt, pcov = curve_fit(self.func, x_data, y_data,\
                                           p0 = init)
                                          #bounds=(b1,b2))
                except RuntimeError :
                    print("fit pas bon!!!")
                    self.fitgauss.set(False)
            #    print(popt)
         
            #--------------------- display
            if self.initaffiche or self.InitGraph.get():
                self.plot1.clear()
                #self.l1,=self.plot1.plot(x,y1,label="ecart type :{}".format(ecart_type))
                self.l1,=self.plot1.plot(x,y1,label="cam")
                if self.fitgauss.get():
                    self.plot1.plot( x_data,self.func(x_data, *popt),\
    label="mean : {:.3f}, 2w(1/e2) : {:.3f}".format(popt[0], 2*popt[1]))
                self.plot1.set_ylabel("Intensite")
                self.plot1.legend(loc="upper left")

                if self.EnMicron.get():
                    self.plot1.set_xlabel("micron")
                else :
                    self.plot1.set_xlabel("pixel")
                self.initaffiche=False
            else :
                # plot already initialised: just update the line data
                self.l1.set_data(x,y1)
            #self.canvas.draw()

            

#-------------------------------------------------
# Start the acquisition:
#    * disable the start button
# self.Acquisition holds the acquisition state
#-------------------------------------------------
    def goSp(self,spectre):
        """Start continuous acquisition (bound to the start button)."""
#        os.chdir(self.WorkDir.get())
#        Nom=self.Nom.get()+str(len(glob.glob(self.Nom.get()+"*")))
#        self.FichSauve=Nom
        self.erreur.set('')
        self.goSpectre.configure(state=DISABLED)
        # buffer-polling delay in ms:
        #self.delay=50 if self.cam.shutter.absolute<0.05 else  self.cam.shutter.absolute
        self.delay=50
        time.sleep(0.2)
        self.iter=0
        #print(self.delay)
        self.cam.start_capture()
        #self.cam.start_video()
        self.Acquisition=1
        self.initaffiche=True

    def StopImage(self):
        """Stop the acquisition and re-enable the start button."""
        if self.Acquisition:
            self.Acquisition=0
            time.sleep(0.1)
            self.cam.stop_capture()
            print("Stop")
            self.goSpectre.configure(state=NORMAL)
        
    def Quitte(self):
        """Stop acquisition if running, close the camera and the window."""
        if self.Acquisition:
            self.Acquisition=0
            time.sleep(self.delay*2e-3)
        self.cam.close()
        self.master.destroy()
        


    
#--------------------------------------------------------------
        # a frame holding the command widgets
#--------------------------------------------------------------
    def FrCommandes(self,mere):
        """Build and return the command frame (buttons and options)."""
        frame=Frame(mere, bd=3,pady=2)
       # frame.pack()
        col=0
        ligne=0
        # ---------------- 4 buttons
        self.goSpectre=Button(frame, text="Démarre",command=lambda : self.goSp(1))
        self.goSpectre.grid(row=ligne,column=col)
        ttk.Button(frame, text="Stop",command= lambda: self.StopImage())\
                .grid(row=ligne,column=col+1)
        ttk.Button(frame, text="Enr.",command= self.EnregistreSP)\
                .grid(row=ligne,column=col+2)
        Button(frame, text="Close", command= lambda: self.Quitte())\
                .grid(row=ligne,column=col+4)

        ligne+=1;col=0
        ttk.Separator(frame,orient='horizontal').grid(row=ligne,columnspan=5,sticky='EW')
        #------------- Gaussian-fit and autoscale options:
        ligne+=1;col=0
        Checkbutton(frame,variable=self.fitgauss,text="Fit Waist")\
                .grid(row=ligne,column=col)
        Entry(frame,textvariable=self.w0,width=4).grid(row=ligne,column=col+1)
        Checkbutton(frame,variable=self.InitGraph,text="AutoSc.")\
                .grid(row=ligne,column=col+2)
        # --- pixel/microns
        ligne+=1;col=0
        ttk.Separator(frame,orient='horizontal').grid(row=ligne,columnspan=5,sticky='EW')
        ligne+=1;col=0
        Label(frame,text="f(mm)=").grid(row=ligne,column=col,sticky='E')
        Entry(frame,textvariable=self.focale,width=4).grid(row=ligne,column=col+1,sticky='W')
        Checkbutton(frame,variable=self.EnMicron,text="Pix/mic.")\
                .grid(row=ligne,column=col+2)


        #-------------- the pixel bounds
        ligne+=1;col=0
        ttk.Separator(frame,orient='horizontal').grid(row=ligne,columnspan=5,sticky='EW')
        ligne+=1
        lesbornes=LabelFrame(frame,text="Bornes en Pixel",bd=3,pady=4)
        lesbornes.grid(row=ligne,column=col,columnspan=5,sticky='EW')
        Checkbutton(lesbornes,text="Borne",variable=self.borner).pack(side=LEFT) 
        Entry(lesbornes,textvariable=self.Pix0,width=4).pack(side=LEFT)
        Entry(lesbornes,textvariable=self.Pix1,width=4).pack(side=LEFT)

        #-------------------- errors/info
        ligne+=1;col=0
      #  Label(frame,textvariable=self.FichRef).grid(row=ligne,column=col,columnspan=5)
        Label(frame,textvariable=self.erreur).grid(row=ligne+1,column=col,columnspan=4)

        return frame
#--------------------------------------------------------------
    def EnregistreSP(self):
        """Save the current profile (self.data) to an auto-numbered .sp
        file in the working directory."""
        os.chdir(self.WorkDir.get())
        # auto-number: base name + count of existing files with that prefix
        Nom=self.Nom.get()+str(len(glob.glob(self.Nom.get()+"*")))
        Nom+=".sp"
        print("Fichier sauvé sur "+Nom+" dans "+self.WorkDir.get())
        if self.EnMicron.get():
            entete="Microns Intensite"
        else :
            entete="Pixel Intensite"
        np.savetxt(Nom,self.data,fmt="%g",header=entete)
        return 1
예제 #13
0
# 
# You should have received a copy of the GNU Lesser General Public
# License along with pydc1394.  If not, see
# <http://www.gnu.org/licenses/>.
#
# Copyright (C) 2009, 2010 by Holger Rapp <*****@*****.**>
# and the pydc1394 contributors (see README File)

from __future__ import (print_function, unicode_literals, division,
        absolute_import)

from pydc1394 import Camera


# Demo: open the first camera, print its properties, grab a single frame.
print("Opening camera!")
cam0 = Camera()

print("Vendor:", cam0.vendor)
print("Model:", cam0.model)
print("GUID:", cam0.guid)
print("Mode:", cam0.mode)
print("Framerate: ", cam0.framerate.value)

print("Acquiring one frame!")
# one-shot acquisition: start the capture queue, trigger a single frame
cam0.start_capture()
cam0.start_one_shot()
i = cam0.dequeue()
print("Shape:", i.shape)
print("Dtype:", i.dtype)
# hand the frame buffer back to the driver before stopping
i.enqueue()
cam0.stop_one_shot()
예제 #14
0
    def __init__(self,
                 field,
                 index,
                 type,
                 guid=0000000000000000,
                 buffersize=10):
        """Set up one camera view of *field* and open its display window.

        Parameters:
            field: stored as self._field; semantics not visible in this
                chunk — NOTE(review): confirm against the caller.
            index: integer id of this view; staggers the window position.
            type: 'firewire' selects the dc1394 camera; anything else
                replays the recorded video 'rotor_Mar6.mp4'.
            guid: FireWire camera GUID (the default literal is just 0).
            buffersize: dc1394 capture ring-buffer size.
        """
        self._field = field
        self._id = index
        self._type = type
        self._guid = guid
        self._isUpdating = True
        self._isFilterBypassed = True
        self._isObjectDetectionEnabled = False
        self._isSnapshotEnabled = False
        self._detectionAlgorithm = ''
        self.filterRouting = [
        ]  # data structure: {"filterName", "args"}, defined in the GUI text editor
        self.frameCounter = 0

        # instances of Agent class. You can define an array if you have multiple agents.
        # Pass them to *processObjectDetection()*
        self.agent = [Agent(), Agent(), Agent(), Agent()]

        # drawings
        self._isFieldDrawingEnabled = True  # draw field indicators
        self._isAgentDrawingEnabled = True  # draw separation vector and COM for agents
        self._isCOMDrawingEnabled = True
        self.drawingRouting = [
        ]  # data structure: {"drawingName", "args"}, defined in Subthread
        # video writing
        self._isVideoWritingEnabled = False
        self.videoWriter = None

        # desired separation and desired orientation
        self.separation = None
        self.orientation = None
        self.psi_local = None
        self.time = 0
        self.desiredX = None
        self.desiredY = None
        self.COMx = None
        self.COMy = None

        if self.isFireWire():
            self.cam = Camera(guid=self._guid)
            print("====================================================")
            print("CameraId:", self._id)
            print("Model:", self.cam.model)
            print("GUID:", self.cam.guid)
            print("Mode:", self.cam.mode)
            print("Framerate: ", self.cam.rate)
            print("====================================================")
            self.cam.start_capture(bufsize=buffersize)
            self.cam.start_video()
        else:
            # non-FireWire mode replays a recorded video instead of a live camera
            self.cap = cv2.VideoCapture('rotor_Mar6.mp4')
            if not self.cap.isOpened():
                print('Camera is not detected. End program.')
                self.cap.release()
                sys.exit()

        cv2.namedWindow(self.windowName(), 16)  # cv2.GUI_NORMAL = 16
        cv2.moveWindow(self.windowName(), 100, -320 + 340 * self._id)
        cv2.setMouseCallback(self.windowName(), showClickedCoordinate)
예제 #15
0
# You should have received a copy of the GNU Lesser General Public
# License along with pydc1394.  If not, see
# <http://www.gnu.org/licenses/>.
#
# Copyright (C) 2009, 2010 by Holger Rapp <*****@*****.**>
# and the pydc1394 contributors (see README File)



import sys
from time import sleep
from pydc1394 import Camera
import Image

print("Opening camera!")
cam0 = Camera()

print("Vendor:", cam0.vendor)
print("Model:", cam0.model)
print("GUID:", cam0.guid)
print("Mode:", cam0.mode)
print("Framerate: ", cam0.rate)
print("Available modes", cam0.modes)
print("Available features", cam0.features)

#those can be automated, the other are manual
try:
    cam0.brightness.mode = 'auto'
    cam0.exposure.mode = 'auto'
    cam0.white_balance.mode = 'auto'
except AttributeError: # thrown if the camera misses one of the features
#
# Copyright (C) 2009, 2010 by Holger Rapp <*****@*****.**>
# and the pydc1394 contributors (see README File)


import sys
from time import sleep
from pydc1394 import DC1394Library, Camera
import Image

l = DC1394Library()
cams = l.enumerate_cameras()
cam0_handle = cams[0]

print "Opening camera!"
cam0 = Camera(l, cam0_handle['guid'])

print "Vendor:", cam0.vendor
print "Model:", cam0.model
print "GUID:", cam0.guid
print "Mode:", cam0.mode
print "Framerate: ", cam0.fps
print "Available modes", cam0.modes
print "Available features", cam0.features

#those can be automated, the other are manual
try:
    cam0.brightness.mode = 'auto'
    cam0.exposure.mode = 'auto'
    cam0.white_balance.mode = 'auto'
except AttributeError: # thrown if the camera misses one of the features
예제 #17
0
class Vision(object):
    """One camera channel of the vision system.

    Acquires frames from either a FireWire (pydc1394) camera or an OpenCV
    ``VideoCapture`` fallback (a looping recorded clip), pushes each frame
    through a configurable filter / object-detection / overlay-drawing
    pipeline, and displays the result in a cv2 window.  Optionally writes
    the processed stream to a video file and saves one-shot snapshots.
    """

    def __init__(self,
                 field,
                 index,
                 type,
                 guid=0000000000000000,
                 buffersize=10):
        # field      -- object exposing x, y, freq, mag (magnetic field state)
        # index      -- camera id, used for window naming and placement
        # type       -- back-end selector; 'firewire' (any case) uses pydc1394
        # guid       -- FireWire camera GUID (ignored by the OpenCV back-end)
        # buffersize -- DMA ring-buffer size for the FireWire capture
        self._field = field
        self._id = index
        self._type = type
        self._guid = guid
        self._isUpdating = True
        self._isFilterBypassed = True
        self._isObjectDetectionEnabled = False
        self._isSnapshotEnabled = False
        self._detectionAlgorithm = ''
        self.filterRouting = [
        ]  # data structure: {"filterName", "args"}, defined in the GUI text editor
        self.frameCounter = 0

        # instances of Agent class. You can define an array if you have multiple agents.
        # Pass them to *processObjectDetection()*
        self.agent = [Agent(), Agent(), Agent(), Agent()]

        # drawings
        self._isFieldDrawingEnabled = True  # draw field indicators
        self._isAgentDrawingEnabled = True  # draw separation vector and COM for agents
        self._isCOMDrawingEnabled = True
        self.drawingRouting = [
        ]  # data structure: {"drawingName", "args"}, defined in Subthread

        # video writing
        self._isVideoWritingEnabled = False
        self.videoWriter = None

        # desired separation and desired orientation
        self.separation = None
        self.orientation = None
        self.psi_local = None
        self.time = 0
        self.desiredX = None
        self.desiredY = None
        self.COMx = None
        self.COMy = None

        if self.isFireWire():
            self.cam = Camera(guid=self._guid)
            print("====================================================")
            print("CameraId:", self._id)
            print("Model:", self.cam.model)
            print("GUID:", self.cam.guid)
            print("Mode:", self.cam.mode)
            print("Framerate: ", self.cam.rate)
            print("====================================================")
            self.cam.start_capture(bufsize=buffersize)
            self.cam.start_video()
        else:
            # fallback: replay a recorded clip in a loop (see updateFrame)
            self.cap = cv2.VideoCapture('rotor_Mar6.mp4')
            if not self.cap.isOpened():
                print('Camera is not detected. End program.')
                self.cap.release()
                sys.exit()

        cv2.namedWindow(self.windowName(), 16)  # cv2.GUI_NORMAL = 16
        cv2.moveWindow(self.windowName(), 100, -320 + 340 * self._id)
        cv2.setMouseCallback(self.windowName(), showClickedCoordinate)

    def _processFrame(self, frameOriginal):
        """Run one frame through the filter / detection / drawing /
        snapshot / recording pipeline and return the frame to display.
        Shared by both camera back-ends in updateFrame()."""
        if not self.isFilterBypassed() and not self.filterRouting == []:
            # filters must not mutate the original buffer -> work on a copy
            frameFiltered = self.processFilters(frameOriginal.copy())
        else:
            frameFiltered = frameOriginal
        if self.isObjectDetectionEnabled():
            frameProcessed = self.processObjectDetection(
                frameFiltered, frameOriginal)
        else:
            frameProcessed = frameFiltered
        if self.isFieldDrawingEnabled():
            self.fieldDrawing(self._field.x, self._field.y, self._field.freq,
                              self._field.mag, self.time, self.separation,
                              self.orientation)
        if self.isAgentDrawingEnabled():
            for i in range(0, 4):
                self.agentDrawing(self.agent[i].x, self.agent[i].y)
        if self.isCOMDrawingEnabled():
            self.COMDrawing(self.agent[0].x, self.agent[0].y, self.agent[1].x,
                            self.agent[1].y, self.agent[2].x, self.agent[2].y,
                            self.agent[3].x, self.agent[3].y)
        if self.isDrawingEnabled():
            frameProcessed = self.processDrawings(frameProcessed)
        if self.isSnapshotEnabled():
            cv2.imwrite('snapshot.png', filterlib.color(frameProcessed))
            self.setStateSnapshotEnabled(False)  # one snapshot per request
        if self.isVideoWritingEnabled():
            self.videoWriter.write(filterlib.color(frameProcessed))
        return frameProcessed

    def updateFrame(self):
        """Grab one frame from the active back-end, process and display it.

        No-op while updating is paused (see setStateUpdate)."""
        if not self.isUpdating():
            return
        if self.isFireWire():
            frameOriginal = self.cam.dequeue()
            frameProcessed = self._processFrame(frameOriginal)
            cv2.imshow(self.windowName(), frameProcessed)
            frameOriginal.enqueue()  # hand the DMA buffer back to the driver
            self.clearDrawingRouting()
        else:
            _, frameOriginal = self.cap.read()
            frameProcessed = self._processFrame(frameOriginal)
            self.frameCounter += 1
            # If the last frame is reached, rewind so the clip loops forever.
            if self.frameCounter == self.cap.get(cv2.CAP_PROP_FRAME_COUNT):
                self.frameCounter = 0
                self.cap.set(cv2.CAP_PROP_POS_FRAMES, 0)
            cv2.imshow(self.windowName(), frameProcessed)
            self.clearDrawingRouting()

    def closeCamera(self):
        """Release the video writer, stop the camera and close the window."""
        if self.videoWriter is not None:
            self.videoWriter.release()
            self.videoWriter = None
        if self.isFireWire():
            self.cam.stop_video()
        else:
            # BUGFIX: was `self.cap.release` (attribute access only — the
            # method was never actually called, leaking the capture).
            self.cap.release()
        cv2.destroyWindow(self.windowName())

    #==============================================================================================
    # obtain instance attributes
    #==============================================================================================
    def windowName(self):
        return 'CamID:{} (Click to print coordinate)'.format(self._id)

    def isFireWire(self):
        return self._type.lower() == 'firewire'

    def isUpdating(self):
        return self._isUpdating

    def isFilterBypassed(self):
        return self._isFilterBypassed

    def isObjectDetectionEnabled(self):
        return self._isObjectDetectionEnabled

    def isDrawingEnabled(self):
        # drawing is implicitly enabled whenever at least one drawing is queued
        return not self.drawingRouting == []

    def isSnapshotEnabled(self):
        return self._isSnapshotEnabled

    def isVideoWritingEnabled(self):
        return self._isVideoWritingEnabled

    def isFieldDrawingEnabled(self):
        return self._isFieldDrawingEnabled

    def isAgentDrawingEnabled(self):
        return self._isAgentDrawingEnabled

    def isCOMDrawingEnabled(self):
        return self._isCOMDrawingEnabled

    #==============================================================================================
    # set instance attributes
    #==============================================================================================
    def setStateUpdate(self, state):
        self._isUpdating = state

    def setStateFiltersBypassed(self, state):
        self._isFilterBypassed = state

    def setStateObjectDetection(self, state, algorithm):
        self._isObjectDetectionEnabled = state
        self._detectionAlgorithm = algorithm

    def setVideoWritingEnabled(self, state):
        self._isVideoWritingEnabled = state

    def setStateSnapshotEnabled(self, state):
        self._isSnapshotEnabled = state

    def setSeparation(self, val):
        self.separation = val

    def setOrientation(self, val):
        self.orientation = val

    def setLocalAngle(self, val):
        self.psi_local = val

    def setTime(self, val):
        self.time = val

    def setDesiredCOM(self, x, y):
        self.desiredX = x
        self.desiredY = y

    def clearMouse(self):
        # reset the module-level click coordinates set by showClickedCoordinate
        global mouseX
        global mouseY
        mouseX = mouseY = 0

    #==============================================================================================
    # Video recording
    #==============================================================================================
    def createVideoWriter(self, fileName):
        # NOTE(review): frameSize is hard-coded to 640x480 — frames of any
        # other size will be dropped silently by OpenCV; confirm camera mode.
        self.videoWriter = cv2.VideoWriter(
            fileName,
            fourcc=cv2.VideoWriter_fourcc(*'XVID'),
            fps=30.0,
            frameSize=(640, 480),
            isColor=True)

    def startRecording(self, fileName):
        self.createVideoWriter(fileName)
        self.setVideoWritingEnabled(True)
        print('Start recording ' + fileName)

    def stopRecording(self):
        # BUGFIX: used to call setStateSnapshotEnabled(False), which left
        # _isVideoWritingEnabled True so updateFrame() kept writing to a
        # released VideoWriter on the next frame.
        self.setVideoWritingEnabled(False)
        self.videoWriter.release()
        print('Stop recording.')

    #==============================================================================================
    # <Filters>
    # Define the filters in filterlib.py
    #==============================================================================================
    def createFilterRouting(self, text):
        """Parse editor lines of the form ``name(args) // comment`` into
        self.filterRouting entries {'filterName': ..., 'args': ...}."""
        self.filterRouting = []
        for line in text:
            line = line.split('//')[0]  # strip after //
            line = line.strip()  # strip spaces at both ends
            match = re.match(r"(?P<function>[a-z0-9_]+)\((?P<args>.*)\)", line)
            if match:
                name = match.group('function')
                args = match.group('args')
                args = re.sub(r'\s+', '', args)  # strip spaces in args
                self.filterRouting.append({'filterName': name, 'args': args})

    def processFilters(self, image):
        """Apply every configured filter in order and return the result."""
        # BUGFIX: previously only self.filterRouting[0:2] was applied (debug
        # leftover); apply the whole routing, consistent with the parallel
        # Vision implementation elsewhere in this file.
        for item in self.filterRouting:
            image = getattr(filterlib, item['filterName'],
                            filterlib.filterNotDefined)(image, item['args'])
        # You can add custom filters here if you don't want to use the editor
        return image

    #==============================================================================================
    # <object detection>
    # Object detection algorithm is executed after all the filters
    # It is assumed that "imageFiltered" is used for detection purpose only;
    # the boundary of the detected object will be drawn on "imageOriginal".
    # information of detected objects can be stored in an instance of "Agent" class.
    #==============================================================================================
    def processObjectDetection(self, imageFiltered, imageOriginal):
        # convert to rgb so that coloured lines can be drawn on top
        imageOriginal = filterlib.color(imageOriginal)

        # In this function, information about the agents is updated, and the
        # original image with the detected objects highlighted is returned.
        algorithm = getattr(objectDetection, self._detectionAlgorithm,
                            objectDetection.algorithmNotDefined)
        imageProcessed = algorithm(
            imageFiltered, imageOriginal, self.agent,
            1)  # pass instances of Agent class if you want to update its info
        return imageProcessed

    #==============================================================================================
    # <subthread drawing>
    # Used to draw lines etc. on a plot
    # For showing the path that the robot wants to follow
    #==============================================================================================
    def clearAgentDrawing(self, state):
        # enable/disable the agent and COM overlays together
        self._isAgentDrawingEnabled = bool(state)
        self._isCOMDrawingEnabled = bool(state)

    def clearDrawingRouting(self):
        self.drawingRouting = []

    def addDrawing(self, name, args=None):
        # queue a named overlay primitive (consumed once per frame)
        self.drawingRouting.append({'drawingName': name, 'args': args})

    def processDrawings(self, image):
        # convert to rgb so that coloured lines can be drawn on top
        image = filterlib.color(image)
        for item in self.drawingRouting:
            image = getattr(drawing, item['drawingName'],
                            drawing.drawingNotDefined)(image, item['args'])
        return image

    def fieldDrawing(self, x, y, freq, mag, time, separation, orientation):
        """Queue the field-indicator overlay: a dial at (80, 400) with an
        arrow showing the in-plane field direction, plus status text."""
        #field_mag = sqrt(pow(x,2)+pow(y,2)+pow(z,2))
        magXY = sqrt(pow(x, 2) + pow(y, 2))
        angleXY = atan2(y, x)
        self.addDrawing('circle', (80, 400, 40, 1))
        self.addDrawing('circle', (80, 400, 2, 2))
        # self.addDrawing('circle',(200,400,50,1))
        # self.addDrawing('circle',(200,400,2,2))
        # if self.psi_local != None:
        #     self.addDrawing('arrow',(200,400,int(200+50*cos(self.psi_local)),int(400-50*sin(self.psi_local))))
        if x == 0 and y == 0:
            # zero field: degenerate arrow (a dot at the dial centre)
            self.addDrawing('arrow', (80, 400, 80, 400, 1))
        else:
            self.addDrawing('arrow',
                            (80, 400, 80 + int(round(40 * cos(angleXY))),
                             400 - int(round(40 * sin(angleXY))), 1))

        # put text showing the field frequency and magnitude
        self.addDrawing('text', ('Frequency:{}Hz'.format(freq), 1))
        self.addDrawing('text', ('Magnitude:{}mT'.format(mag), 2))
        self.addDrawing('text', ('Time:{}s'.format(time), 3))

        # draw the direction of rotating field
        if sign(freq) > 0:
            self.addDrawing('arrow', (52, 372, 51, 373, 2))
        elif sign(freq) < 0:
            self.addDrawing('arrow', (108, 372, 109, 373, 2))

    def agentDrawing(self, x, y):
        # mark each located agent; (0, 0) means "not detected" — skip it
        if x != 0 and y != 0:
            self.addDrawing('circle', (x, y, 2, 3))

    def COMDrawing(self, x1, y1, x2, y2, x3, y3, x4, y4):
        """Queue the centre-of-mass marker for the four agents and, if a
        target exists (mouse click or setDesiredCOM), the pulling vector."""
        x = int(1 / 4 * (x1 + x2 + x3 + x4))
        y = int(1 / 4 * (y1 + y2 + y3 + y4))
        self.COMx = x
        self.COMy = y
        self.addDrawing('circle', (x, y, 2, 4))

        # draw the pulling vector; a mouse click takes priority
        if mouseX != 0 and mouseY != 0:
            self.addDrawing('circle', (mouseX, mouseY, 2, 6))
            self.addDrawing('arrow', (int(x), int(y), mouseX, mouseY, 1))
        elif self.desiredX is not None and self.desiredY is not None:
            self.addDrawing('circle', (self.desiredX, self.desiredY, 5, 7))
            self.addDrawing('arrow',
                            (int(x), int(y), self.desiredX, self.desiredY, 1))

    def getDesiredCOM(self):
        """Return the target COM: a user click if one exists, otherwise
        agent 0's current position."""
        if mouseX != 0 and mouseY != 0:
            desX = mouseX
            desY = mouseY
        else:
            desX = int(self.agent[0].x)
            desY = int(self.agent[0].y)
        return desX, desY
class CameraPolar:
    """ Polarisation mapping (Stokes parameters) with a FireWire camera,
    a rotating quarter-wave plate (L/4 on a PR50CC stage) and a
    Babinet-Soleil compensator (BS).

    Usage:
    import
    ClassAcquisitionStokesCamera;cp=ClassAcquisitionStokesCamera.CameraPolar()
        1- remember to set the zeros of the BS and the L/4 plate
        2- cp.bs.VaLbda(value)
        3- cp.Mesure(attente=1,fich="lesimages",Npas=37,pas=10)
         => records the Npas+1 images for L/4 angles
         at 0, pas, 2*pas, ... Npas*pas
    """
    def __init__(self):
        # GUID of the F201B FireWire camera
        F201B = 2892819673575448
        # ------ camera initialisation
        self.cam0 = Camera(guid=F201B)
        #lemodel=cam.model.decode()[-5:]
        self.l4 = PR50CC.rotation()  # quarter-wave plate rotation stage
        print("L4 ok")
        self.bs = BS.BabSoleil()  # Babinet-Soleil compensator
        print("BS ok")
        self.LHcamera()  # probe sensor size -> self.Larg, self.Haut
        print("LxH={}x{}".format(self.Larg, self.Haut))

    def __repr__(self):
        """ Pretty status / usage summary (text is user-facing French). """
        reponse = "-" * 20 + "\n"
        reponse += "*----- Lambda/4:\n"
        reponse += "cp.l4.VaPos(-64.15);cp.l4.Zero() \n"
        reponse += "*----  Babinet soleil\n"
        reponse += "Pos. en fraction de lbda: cp.bs.VaLbda(0.) \n"
        reponse += " Pour initialiser 0lbda et 1lbda du BS, voir BS.py \n"
        reponse += "cp.bs.Litpos()= {0} mm \n".format(self.bs.LitPos())
        reponse += "cp.bs.LitLbda()= {} Lbda \n".format(self.bs.LitLbda())
        reponse += "0lambda (cp.bs.ZeroLbda): {}\n".format(self.bs.ZeroLbda)
        reponse += "1lambda (cp.bs.UnLbda): {}\n".format(self.bs.UnLbda)
        reponse += "LxH= <--cp.LHcamera(): "
        reponse += "-" * 20 + "\n"
        reponse += 'Usage: \n cp.Mesure(attente=1,fich="lesimages",Npas=37,pas=10)'
        reponse += "Traitement des data via: ClassTraitStokesCamera"
        return reponse

    def LHcamera(self):
        """ Grab one frame to record the sensor size; returns (Larg, Haut).

        NOTE(review): frame.shape is unpacked as (Larg, Haut) i.e.
        (width, height), but numpy arrays are usually (rows, cols) —
        confirm the frame orientation against the camera driver.
        """
        self.cam0.start_capture()
        self.cam0.start_one_shot()
        frame = self.cam0.dequeue()
        (self.Larg, self.Haut) = frame.shape
        frame.enqueue()
        self.cam0.stop_one_shot()
        self.cam0.stop_capture()
        return (self.Larg, self.Haut)

    def Mesure(self, attente=1, fich="lesimages", Npas=37, pas=10):
        """Acquire Npas images while stepping the quarter-wave plate by
        `pas` degrees per step; saves the 3-D stack to `fich`.npy.

        attente -- settling time (s) after each stage move
        fich    -- output file base name for np.save
        Npas    -- number of steps; plane 0 of the stack holds metadata
        pas     -- angular step of the L/4 plate, in degrees
        """
        self.l4.VaPos(0)
        self.LHcamera()  # re-check width and height

        #-- data initialisation: images stacked in a 3-D array
        lesdata=np.zeros((Npas+1,self.Larg,self.Haut),\
                              dtype=np.uint16)
        Npixel = self.Larg * self.Haut
        # ------ metadata stored in the first plane:
        # Stokes parameters of the input beam
        phase = self.bs.LitLbda() * 2 * np.pi  # BS phase
        lesdata[0, 0, 0] = 65535  #S0
        lesdata[0, 1, 0] = int((1 + np.cos(phase)) * 32767)  #S1
        lesdata[0, 2, 0] = 0  #S2
        lesdata[0, 3, 0] = int((1 + np.sin(phase)) * 32767)  #S3
        # angular step, in "degrees x 10"
        lesdata[0, 0, 1] = int(pas * 10)
        # BS phase as a fraction of lambda
        lesdata[0, 0, 2] = int(self.bs.LitLbda() * 65535)
        # ----- start the acquisition:
        self.cam0.start_capture()
        for i in range(Npas):
            x = self.l4.VaPos(pas * i)
            sleep(attente)
            self.cam0.start_one_shot()
            frame = self.cam0.dequeue()
            lesdata[i + 1, :, :] = frame.copy()
            # count near-saturated pixels (16-bit values above 50000)
            Nsat = (lesdata[i + 1, :, :] > 50000).sum()
            print(
                "Frame number ({}) has {:.2f}% ({}pixels) au dessus de 50 milles "
                .format(i + 1, Nsat / Npixel * 100, Nsat))
            if Nsat > 0.05 * Npixel:
                # abort if more than 5% of the pixels are nearly saturated
                print("!!! Attention {} pixels au dessus de 50 milles ".format(
                    Nsat))
                self.cam0.stop_one_shot()
                self.cam0.stop_capture()
                exit(0)

            frame.enqueue()
            self.cam0.stop_one_shot()
        self.cam0.stop_capture()

        np.save(fich, lesdata)
        # a full turn is done, so the L/4 plate can be reset to zero:
        self.l4.VaPos(360)
        self.l4.Zero()
        return 1
# You should have received a copy of the GNU Lesser General Public
# License along with pydc1394.  If not, see
# <http://www.gnu.org/licenses/>.
#
# Copyright (C) 2009, 2010 by Holger Rapp <*****@*****.**>
# and the pydc1394 contributors (see README File)

# Example: open the first DC1394 camera found, print its properties and
# acquire a single frame.
# BUGFIX: ported from Python 2 `print` statements (a SyntaxError under
# Python 3, which the rest of this file targets) to the print() function;
# the printed output and all camera API calls are unchanged.

from pydc1394 import DC1394Library, Camera

l = DC1394Library()
cams = l.enumerate_cameras()
cam0_handle = cams[0]

print("Opening camera!")
cam0 = Camera(l, cam0_handle['guid'])

print("Vendor:", cam0.vendor)
print("Model:", cam0.model)
print("GUID:", cam0.guid)
print("Mode:", cam0.mode)
print("Framerate: ", cam0.framerate.val)

print("Acquiring one frame!")
cam0.start()
i = cam0.shot()
print("Shape:", i.shape)
print("Dtype:", i.dtype)
cam0.stop()

print("All done!")
예제 #20
0
#--------------------------------
# Les frames d'entrées
from pydc1394 import Camera
# la camera est appellée par son uid
#pour l'avoir:
#from pydc1394.camera2 import Context
#for c in Context().cameras:
#    print(c)
#(2892819673481529, 0) => F146B
#(2892819673575448, 0) => F201B

#from FakeCamera import Camera
F201B = 2892819673575448
guidCam = F201B
cam = Camera(guid=guidCam)
lemodel = cam.model.decode()[-5:]
w0 = 80

bs = BS.BabSoleil()

cam.start_capture()
Npts = 70
step = 0.001
AngleStart = 0.21
data = np.zeros((Npts, 2))
bs.VaLbda(AngleStart)
time.sleep(0.2)

for i in range(Npts):
    #capture une frame et la copy dans CamArray
예제 #21
0
class Vision(object):
    """One camera channel: grabs frames from a FireWire (pydc1394) camera
    or an OpenCV VideoCapture fallback, optionally runs a user-defined
    filter routing and an object-detection algorithm, and displays the
    result in a cv2 window.
    """
    def __init__(self, index, type, guid=0000000000000000, buffersize=10):
        # index      -- camera id, used for window naming and placement
        # type       -- back-end selector; 'firewire' (any case) uses pydc1394
        # guid       -- FireWire camera GUID (ignored by the OpenCV back-end)
        # buffersize -- DMA ring-buffer size for the FireWire capture
        self._id = index
        self._type = type
        self._guid = guid
        self._isUpdating = True
        self._isFilterBypassed = True
        self._isObjectDetectionRunning = False
        self._detectionAlgorithm = ''
        self.filterRouting = [
        ]  # data structure: {"filterName", "args"}, defined in the editor
        # define the agents that you want to detect with objectDetection algorithm
        self.agent1 = Agent()
        self.agent2 = Agent()

        if self.isFireWire():
            self.cam = Camera(guid=self._guid)
            print("====================================================")
            print("CameraId:", self._id)
            print("Vendor:", self.cam.vendor)
            print("Model:", self.cam.model)
            print("GUID:", self.cam.guid)
            print("Mode:", self.cam.mode)
            print("Framerate: ", self.cam.rate)
            print("Available modes", [mode.name for mode in self.cam.modes])
            print("====================================================")
            self.cam.start_capture(bufsize=buffersize)
            self.cam.start_video()
        else:
            # fall back to the default OpenCV capture device
            self.cap = cv2.VideoCapture(0)
            if not self.cap.isOpened():
                print('Camera is not detected. End program.')
                self.cap.release()
                sys.exit()

        cv2.namedWindow(self.windowName(), 16)  # cv2.GUI_NORMAL = 16
        cv2.moveWindow(self.windowName(), 600, -320 + 340 * self._id)
        cv2.setMouseCallback(self.windowName(), showClickedCoordinate)

    def updateFrame(self):
        """Grab one frame, run filters/detection, and display the result.
        No-op while updating is paused (see setStateUpdate)."""
        if self.isFireWire():
            if self.isUpdating():
                frameOriginal = self.cam.dequeue()
                # filters work on a copy so the original stays pristine
                if not self.isFilterBypassed(
                ) and not self.filterRouting == []:
                    frameFiltered = self.processFilters(frameOriginal.copy())
                else:
                    frameFiltered = frameOriginal
                if self.isObjectDetectionRunning():
                    frameProcessed = self.processObjectDetection(
                        frameFiltered, frameOriginal)
                else:
                    frameProcessed = frameFiltered
                cv2.imshow(self.windowName(), frameProcessed)
                frameOriginal.enqueue()  # hand the DMA buffer back to the driver
        else:
            if self.isUpdating():
                _, frameOriginal = self.cap.read()
                if not self.isFilterBypassed(
                ) and not self.filterRouting == []:
                    frameFiltered = self.processFilters(frameOriginal.copy())
                else:
                    frameFiltered = frameOriginal
                if self.isObjectDetectionRunning():
                    frameProcessed = self.processObjectDetection(
                        frameFiltered, frameOriginal)
                else:
                    frameProcessed = frameFiltered
                cv2.imshow(self.windowName(), frameProcessed)

    #==============================================================================================
    # obtain instance attributes
    #==============================================================================================
    def windowName(self):
        return 'CamID:{} (Click to print coordinate)'.format(self._id)

    def isFireWire(self):
        # back-end selector, case-insensitive
        return self._type.lower() == 'firewire'

    def isUpdating(self):
        return self._isUpdating

    def isFilterBypassed(self):
        return self._isFilterBypassed

    def isObjectDetectionRunning(self):
        return self._isObjectDetectionRunning

    #==============================================================================================
    # set instance attributes
    #==============================================================================================
    def setStateUpdate(self, state):
        self._isUpdating = state

    def setStateFiltersBypassed(self, state):
        self._isFilterBypassed = state

    def setStateObjectDetection(self, state, algorithm):
        self._isObjectDetectionRunning = state
        self._detectionAlgorithm = algorithm

    #==============================================================================================
    # <Filters>
    # Define the filters in filterlib.py
    #==============================================================================================
    def createFilterRouting(self, text):
        """Parse editor lines of the form ``name(args) // comment`` into
        self.filterRouting entries {'filterName': ..., 'args': ...}."""
        self.filterRouting = []
        for line in text:
            line = line.split('//')[0]  # strip after //
            line = line.strip()  # strip spaces at both ends
            match = re.match(r"(?P<function>[a-z0-9_]+)\((?P<args>.*)\)", line)
            if match:
                name = match.group('function')
                args = match.group('args')
                args = re.sub(r'\s+', '', args)  # strip spaces in args
                self.filterRouting.append({'filterName': name, 'args': args})

    def processFilters(self, image):
        """Apply every configured filter in order and return the result."""
        for item in self.filterRouting:
            image = getattr(filterlib, item['filterName'],
                            filterlib.filterNotDefined)(image, item['args'])
        # You can add custom filters here if you don't want to use the editor
        return image

    #==============================================================================================
    # <object detection>
    # Object detection algorithm is executed after all the filters
    # It is assumed that "imageFiltered" is used for detection purpose only;
    # the boundary of the detected object will be drawn on "imageOriginal".
    # information of detected objects can be stored in an instance of "Agent" class.
    #==============================================================================================
    def processObjectDetection(self, imageFiltered, imageOriginal):
        # convert to rgb so that coloured lines can be drawn on top
        imageOriginal = cv2.cvtColor(imageOriginal, cv2.COLOR_GRAY2RGB)

        # object detection algorithm starts here
        # In this function, information about the agent will be updated, and the original image with
        # the detected objects highlighted will be returned
        algorithm = getattr(objectDetection, self._detectionAlgorithm,
                            objectDetection.algorithmNotDefined)
        imageProcessed = algorithm(
            imageFiltered, imageOriginal, self.agent1
        )  # pass instances of Agent class if you want to update its info
        return imageProcessed
예제 #22
0
                break
        if frame is None:
            return
        im = frame.copy().T
        frame.enqueue()
        self.img.setImage(
            im, autoRange=False, autoLevels=False, autoHistogramRange=False
        )

    def stop_camera(self):
        """Stop the video stream first, then release the capture buffers."""
        self.camera.stop_video()
        self.camera.stop_capture()

    def deinit_camera(self):
        # Nothing to release beyond stop_camera(); kept as a hook so call
        # sites can pair init/deinit symmetrically.
        pass


if __name__ == "__main__":
    # Manual smoke test: open the default camera, display a first image,
    # then hand control to the Qt event loop.
    app = QtGui.QApplication([])
    cam = CameraPlot(Camera())
    try:
        cam.start_camera()
        time.sleep(0.5)  # give the camera time to deliver a first frame
        cam.process_images()
        cam.img.autoRange()
        cam.img.autoLevels()
        QtGui.QApplication.instance().exec_()
    finally:
        # always stop and release the camera, even if the GUI loop raises
        cam.stop_camera()
        cam.deinit_camera()