def __init__(self, cameraNo, exposure, gain, brightness):
    super().__init__()
    self.properties['subarray_vpos'] = 0
    self.properties['subarray_hpos'] = 0
    self.properties['exposure_time'] = 0.03
    self.properties['subarray_vsize'] = 1024
    self.properties['subarray_hsize'] = 1280

    from pyicic import IC_ImagingControl
    ic_ic = IC_ImagingControl.IC_ImagingControl()
    ic_ic.init_library()
    cam_names = ic_ic.get_unique_device_names()
    print(cam_names)
    self.cam = ic_ic.get_device(cam_names[cameraNo])
    # print(self.cam.list_property_names())
    self.cam.open()
    self.cam.colorenable = 0
    self.cam.gain.auto = False
    self.cam.exposure.auto = False
    if cameraNo == 1:
        self.cam.exposure = exposure      # exposure in ms
        self.cam.gain = gain              # gain in dB
        self.cam.brightness = brightness  # brightness in arbitrary units
        self.properties['subarray_vsize'] = 2048
        self.properties['subarray_hsize'] = 2448

    self.cam.enable_continuous_mode(True)    # image in continuous mode
    self.cam.start_live(show_display=False)  # start imaging
    # self.cam.enable_trigger(True)          # camera will wait for trigger
    # self.cam.send_trigger()
    if not self.cam.callback_registered:
        self.cam.register_frame_ready_callback()  # needed to wait for frame ready callback
def getFrame(self, t_exp=None, bitdepth=None):
    t_exp = t_exp or self.t_exp
    bitdepth = bitdepth or self.bitdepth

    # open lib
    ic_ic = IC_ImagingControl()
    ic_ic.init_library()
    cam_names = ic_ic.get_unique_device_names()
    cam = ic_ic.get_device(cam_names[0])
    cam.open()

    # change camera properties
    cam.gain.auto = False  # disable auto gain
    cam.gain.value = cam.gain.min
    t_exp_reg = int(np.round(np.log2(t_exp)))  # convert exposure time into register value (nearest power of 2)
    if t_exp_reg in range(cam.exposure.min, cam.exposure.max + 1):
        cam.exposure.value = t_exp_reg
    else:
        cam.exposure.value = int((cam.exposure.max + cam.exposure.min) / 2)
        print('Exposure out of range. Setting to half of exposure range')
    cam.formats = cam.list_video_formats()
    cam.sensor_height = 1080
    cam.sensor_width = 1920
    cam.set_video_format(b'Y800 (1920x1080)')  # use a fixed Y800 full-HD video format

    cam.enable_continuous_mode(True)    # image in continuous mode
    cam.start_live(show_display=False)  # start imaging
    cam.enable_trigger(True)            # camera will wait for trigger
    if not cam.callback_registered:
        cam.register_frame_ready_callback()  # needed to wait for frame ready callback

    cam.reset_frame_ready()  # reset frame ready flag
    # send hardware trigger OR call cam.send_trigger() here
    cam.send_trigger()

    # get image data...
    cam.wait_til_frame_ready(10)  # wait for frame ready due to trigger
    im = cam.get_image_data()
    img = np.ndarray(buffer=im[0],
                     dtype=np.uint8,
                     shape=(cam.sensor_height, cam.sensor_width, 3))

    cam.stop_live()
    cam.close()
    ic_ic.close_library()
    return img  # return the captured frame
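# The exposure handling in getFrame above encodes the requested exposure time as an
# exponent: the register holds round(log2(t_exp)), i.e. the nearest power of two.
# A minimal sketch of that mapping, with a hypothetical helper name, to make the
# conversion explicit:
import numpy as np

def exposure_to_register(t_exp):
    # mirror the mapping used in getFrame: exposure register = nearest power-of-two exponent
    return int(np.round(np.log2(t_exp)))

print(exposure_to_register(0.01))         # -> -7
print(2.0 ** exposure_to_register(0.01))  # exposure actually applied, ~0.0078 s (1/128 s)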
def __init__(self, cameraNo):
    super().__init__()
    ic_ic = IC_ImagingControl.IC_ImagingControl()
    ic_ic.init_library()
    cam_names = ic_ic.get_unique_device_names()
    self.model = cam_names[cameraNo]
    self.cam = ic_ic.get_device(cam_names[cameraNo])
    self.cam.open()
    self.shape = (0, 0)
    self.cam.colorenable = 0
    self.cam.enable_continuous_mode(True)  # image in continuous mode
    self.cam.enable_trigger(False)         # camera will not wait for a trigger
    self.roi_filter = self.cam.create_frame_filter('ROI'.encode('utf-8'))
    self.cam.add_frame_filter_to_device(self.roi_filter)
import time
import numpy as np
from pyicic.IC_ImagingControl import *
from pyicic.IC_Camera import C_FRAME_READY_CALLBACK

ic = IC_ImagingControl()
ic.init_library()
cam_names = ic.get_unique_device_names()
print(cam_names)
device_name = cam_names[0]
print(device_name)
cam = ic.get_device(device_name)
cam.open()
cam.reset_properties()

formats = cam.list_video_formats()
print(formats)
cam.set_video_format(formats[2])

print('get_available_frame_filter_count:', cam.get_available_frame_filter_count())
print('get_available_frame_filters:', cam.get_available_frame_filters(cam.get_available_frame_filter_count()))

h = cam.create_frame_filter('Rotate Flip')
h2 = cam.create_frame_filter('ROI')
cam.add_frame_filter_to_device(h)
cam.add_frame_filter_to_device(h2)
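# Continuation sketch: once the 'Rotate Flip' and 'ROI' filter handles are attached, their
# parameters can be set with frame_filter_set_parameter, as the ICCam.add_filters example
# further below does. The parameter names come from that example; the numeric values here
# are arbitrary placeholders, not values from the original script.
cam.frame_filter_set_parameter(h, 'Rotation Angle', 90)  # rotate the image by 90 degrees
cam.frame_filter_set_parameter(h2, 'Top', 0)             # crop region: top-left corner ...
cam.frame_filter_set_parameter(h2, 'Left', 0)
cam.frame_filter_set_parameter(h2, 'Height', 540)        # ... and size in pixels
cam.frame_filter_set_parameter(h2, 'Width', 720)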
class DAQ_2DViewer_TIS(DAQ_Viewer_base):
    """
    =============== ==================
    **Attributes**  **Type**
    *params*        dictionary list
    *x_axis*        1D numpy array
    *y_axis*        1D numpy array
    =============== ==================

    See Also
    --------
    utility_classes.DAQ_Viewer_base
    """
    from pyicic import IC_ImagingControl
    ic = IC_ImagingControl.IC_ImagingControl()
    ic.init_library()
    cameras = [cam.decode() for cam in ic.get_unique_device_names()]

    params = comon_parameters + \
        [{'title': 'Cam. names:', 'name': 'cam_name', 'type': 'list', 'limits': cameras},
         {'title': 'Video Formats:', 'name': 'video_formats', 'type': 'list'},
         {'title': 'Gray scale:', 'name': 'gray_scale', 'type': 'bool', 'value': False},
         {'title': 'Cam. Prop.:', 'name': 'cam_settings', 'type': 'group', 'children': [
             {'title': 'Brightness:', 'name': 'brightness', 'type': 'int'},
             {'title': 'Contrast:', 'name': 'contrast', 'type': 'int'},
             {'title': 'Hue:', 'name': 'hue', 'type': 'int'},
             {'title': 'Saturation:', 'name': 'saturation', 'type': 'int'},
             {'title': 'Sharpness:', 'name': 'sharpness', 'type': 'int'},
             {'title': 'Gamma:', 'name': 'gamma', 'type': 'int'},
             {'title': 'Color?:', 'name': 'colorenable', 'type': 'bool'},
             {'title': 'Whitebalance:', 'name': 'whitebalance', 'type': 'int'},
             {'title': 'Black light compensation:', 'name': 'blacklightcompensation', 'type': 'int'},
             {'title': 'Gain:', 'name': 'gain', 'type': 'int'},
             {'title': 'Pan:', 'name': 'pan', 'type': 'int'},
             {'title': 'Tilt:', 'name': 'tilt', 'type': 'int'},
             {'title': 'Roll:', 'name': 'roll', 'type': 'int'},
             {'title': 'Zoom:', 'name': 'zoom', 'type': 'int'},
             {'title': 'Exposure:', 'name': 'exposure', 'type': 'int'},
             {'title': 'Iris:', 'name': 'iris', 'type': 'int'},
             {'title': 'Focus:', 'name': 'focus', 'type': 'int'},
         ]},
         ]

    def __init__(self, parent=None, params_state=None):
        # init_params is a list of tuples where each tuple contains info on a 1D channel
        # (Npts, amplitude, width, position and noise)
        super(DAQ_2DViewer_TIS, self).__init__(parent, params_state)
        self.x_axis = None
        self.y_axis = None
        self.live = False

        from pyicic import IC_Structures
        GrabberHandlePtr = ctypes.POINTER(IC_Structures.GrabberHandle)
        # c function type for frame callback
        # outside of class so it can be called by unbound function
        callback = ctypes.WINFUNCTYPE(None, GrabberHandlePtr,
                                      ctypes.POINTER(ctypes.c_ubyte),
                                      ctypes.c_ulong, ctypes.c_void_p)
        self.__data_ready = callback(self._data_ready)

    def _data_ready(self, handle_ptr, p_data, frame_num, data):
        dat = self.controller.get_image_data()
        data = np.array(dat[0][:], dtype=np.uint8)
        data = data.reshape((dat[2], dat[1], 3))
        self.data_grabed_signal.emit([
            DataFromPlugins(name='TIS ',
                            data=[data[:, :, 0], data[:, :, 1], data[:, :, 2]],
                            dim='Data2D'),
        ])

    def commit_settings(self, param):
        """
        Activate parameter changes on the hardware.

        =============== ================================ ===========================
        **Parameters**  **Type**                         **Description**
        *param*         instance of pyqtgraph Parameter  the parameter to activate
        =============== ================================ ===========================

        See Also
        --------
        set_Mock_data
        """
        try:
            if param.parent().name() == 'cam_settings':
                getattr(self.controller, param.name()).value = param.value()
                param.setValue(getattr(self.controller, param.name()).value)
            elif param.name() == 'video_formats':
                self.controller.stop_live()
                self.controller.set_video_format(param.value().encode())
                # if 'Y' in param.value():
                #     self.controller.set_format(0)
                # else:
                #     self.controller.set_format(1)
                self.controller.start_live()
        except Exception as e:
            self.emit_status(ThreadCommand('Update_Status', [getLineInfo() + str(e), 'log']))

    def ini_detector(self, controller=None):
        """
        Initialisation procedure of the detector initializing the status dictionary.

        See Also
        --------
        daq_utils.ThreadCommand, get_xaxis, get_yaxis
        """
        self.status.update(edict(initialized=False, info="", x_axis=None, y_axis=None, controller=None))
        try:
            if self.settings.child(('controller_status')).value() == "Slave":
                if controller is None:
                    raise Exception('no controller has been defined externally while this detector is a slave one')
                else:
                    self.controller = controller
            else:
                self.controller = self.ic.get_device(self.settings.child(('cam_name')).value().encode())
            self.controller.open()

            properties = self.controller.list_property_names()
            for prop in properties:
                if prop in [child.name() for child in self.settings.child(('cam_settings')).children()]:
                    if getattr(self.controller, prop).available:
                        param = self.settings.child('cam_settings', prop)
                        if param.opts['type'] == 'int' or param.opts['type'] == 'float':
                            range = getattr(self.controller, prop).range
                            param.setOpts(limits=range)
                        try:
                            getattr(self.controller, prop).auto = False
                        except:
                            pass
                        value = getattr(self.controller, prop).value
                        param.setValue(value)
                    else:
                        self.settings.child('cam_settings', prop).hide()

            formats = [form.decode() for form in self.controller.list_video_formats()]  # if 'RGB'.encode() in form]
            self.settings.child(('video_formats')).setOpts(limits=formats)
            self.settings.child(('video_formats')).setValue(formats[8])

            self.controller.set_video_format(formats[8].encode())  # apply the selected video format
            self.controller.enable_continuous_mode(True)           # image in continuous mode
            self.controller.start_live(show_display=False)         # start imaging
            self.controller.enable_trigger(True)                   # camera will wait for trigger
            if not self.controller.callback_registered:
                self.controller.register_frame_ready_callback(self.__data_ready)  # needed to wait for frame ready callback
            self.controller.send_trigger()

            self.x_axis = self.get_xaxis()
            self.y_axis = self.get_yaxis()

            # initialize viewers with the future type of data
            self.data_grabed_signal_temp.emit([
                DataFromPlugins(name='TIS',
                                data=[np.zeros((len(self.y_axis), len(self.x_axis)))],
                                dim='Data2D'),
            ])

            self.status.x_axis = self.x_axis
            self.status.y_axis = self.y_axis
            self.status.initialized = True
            self.status.controller = self.controller
            return self.status

        except Exception as e:
            self.emit_status(ThreadCommand('Update_Status', [getLineInfo() + str(e), 'log']))
            self.status.info = getLineInfo() + str(e)
            self.status.initialized = False
            return self.status

    def close(self):
        """
        Stop the live acquisition, close the camera and release the IC library.
        """
        self.controller.stop_live()
        self.controller.close()
        self.ic.close_library()

    def get_xaxis(self):
        """
        Get the current x_axis from the video format width.

        Returns
        -------
        1D numpy array
            the current x_axis.

        See Also
        --------
        set_Mock_data
        """
        Nx = self.controller.get_video_format_width()
        self.x_axis = np.linspace(0, Nx - 1, Nx, dtype=np.int32)
        return self.x_axis

    def get_yaxis(self):
        """
        Get the current y_axis from the video format height.

        Returns
        -------
        1D numpy array
            the current y_axis.

        See Also
        --------
        set_Mock_data
        """
        Ny = self.controller.get_video_format_height()
        self.y_axis = np.linspace(0, Ny - 1, Ny, dtype=np.int32)
        return self.y_axis

    def grab_data(self, Naverage=1, **kwargs):
        """
        | Trigger the camera to grab a new frame.
        | The image matrix is emitted through data_grabed_signal by the frame-ready callback.

        =============== ======== ===============================================
        **Parameters**  **Type** **Description**
        *Naverage*      int      The number of images to average.
                                 specify the threshold of the mean calculation
        =============== ======== ===============================================

        See Also
        --------
        set_Mock_data
        """
        # self.controller.reset_frame_ready()
        if not self.controller.is_live():
            self.controller.enable_continuous_mode(True)    # image in continuous mode
            self.controller.start_live(show_display=False)  # start imaging
        self.controller.enable_trigger(True)
        self.controller.send_trigger()
        # self.controller.wait_til_frame_ready(1000)
        # self.controller.snap_image()

    def stop(self):
        """
        Stop the live acquisition.
        """
        self.controller.stop_live()
        return ""
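# The plugin above is callback-driven: _data_ready pushes each frame once the trigger fires.
# For a quick standalone check outside the plugin, the same camera object can also be polled
# synchronously, as several other snippets in this collection do. A minimal polling sketch,
# assuming `controller` is an opened pyicic camera already in live mode with triggering
# enabled (as set up in ini_detector above):
import numpy as np

controller.reset_frame_ready()
controller.send_trigger()
controller.wait_til_frame_ready(1000)  # block up to 1000 ms for the frame
data, width, height, depth = controller.get_image_data()
frame = np.ndarray(buffer=data, dtype=np.uint8, shape=(height, width, depth))
print(frame.shape)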
class IC(daq_device):
    """
    This class sets up data acquisition for (theoretically) all cameras from The Imaging Source.
    This sets all of the camera properties relevant to a given camera. It utilizes the py-ic-ic
    wrapper downloaded from GitHub for the IC C library given on The Imaging Source website.
    It seems to mostly work, but it takes a few tries to figure out how to use this program
    with each model of camera. Test out different parameters in the .ini file until you get a
    good image from the camera.

    Attributes
    ----------
    ic_ic : IC library grabber object
        IC library grabber.
    cam : camera object
        Camera being used.
    software_trigger : bool
        A boolean value which determines whether to use a software trigger or not.
    video_index : int
        An index which determines which video format the camera will be using.
    width : int
        The horizontal number of pixels used in the image being captured.
    height : int
        The vertical number of pixels used in the image being captured.
    depth : int
        The number of color channels used in the image being captured.

    Methods
    -------
    figure_of_merit
        Measure the figure of merit of the system.
    shut_down
        Shut down the Imaging Source camera.
    """

    def __init__(self, device_string, fom_num):
        """
        Initialize the Imaging Source camera.

        Set all of the parameters for the chosen Imaging Source camera so that it is ready to
        take an image.

        Parameters
        ----------
        device_string : str
            A string naming which daq device to use.
        fom_num : int
            An int denoting which figure of merit this daq device will be using.
        """
        super().__init__(device_string, fom_num)  # call the daq_device __init__ function

        self.ic_ic = IC_ImagingControl()  # initialize the imaging control grabber
        self.ic_ic.init_library()  # use the grabber to initialize the library of IC functions we can use

        # Determine the Imaging Source cameras connected to the computer
        cam_names = self.ic_ic.get_unique_device_names()
        if (len(cam_names) == 0):  # no IC camera is connected
            print("Error: No IC cameras connected to the computer.")
            exit()
        print("\nThese are the available cameras:")
        print(cam_names)
        print("Please select an IC camera to use by inputting the index of the camera.")
        print("The indices go from 0 to ", len(cam_names) - 1)

        # Iterate through an infinite loop until the user defines which camera they want to use
        while True:
            index = int(input())
            if ((index <= len(cam_names) - 1) and (index >= 0)):
                self.cam = self.ic_ic.get_device(cam_names[index])
                break
            else:
                print("You didn't enter a correct index.")
        self.cam.open()  # open the camera they chose

        # Go through an infinite loop with the user to decide whether to initialize with the
        # .ini file or by setting all of the values
        print("\nWould you like to set all of the camera initialization values yourself, "
              "or use the IC properties.ini file?")
        print('Enter either "set" for setting all of the values or "ini" for the .ini file')
        while True:
            init = input()
            if (init == "set"):
                set_all = True
                break
            elif (init == "ini"):
                set_all = False
                break
            else:
                print("You didn't enter 'set' or 'ini'. Try again.")

        # reset all properties before setting them
        self.cam.reset_properties()

        # Go through each property available and set its value
        cam_properties = self.cam.list_property_names()
        print("Note: this only goes through the camera properties available for this specific camera.")
        for attribute_index in range(len(cam_properties)):  # loop through each of the camera properties
            if (getattr(self.cam, cam_properties[attribute_index]).available == True):  # if the attribute can be set
                if (set_all == True):  # if the user wants to set everything
                    print("You are setting the", cam_properties[attribute_index])
                    print("Its current value is ",
                          getattr(self.cam, cam_properties[attribute_index]).value)
                    print("The range of values you can set this to is ",
                          getattr(self.cam, cam_properties[attribute_index]).range)
                    print("What would you like to set this property to?")
                    while True:
                        change_value = input()
                        print("You entered", change_value, "\nIs this okay? (enter 'y' or 'n')")
                        input_is_good = input()
                        if (input_is_good == 'y'):
                            break
                        elif (input_is_good == 'n'):
                            print("Type in what you'd like to change this property to instead")
                        else:
                            print("You didn't enter a y or an n. Enter what value you'd like to "
                                  "change the property to again.")
                else:  # if the user is using the .ini file
                    if (self.initialize_array[attribute_index] == "auto"):  # if the .ini file has "auto" for this property
                        if (getattr(self.cam, cam_properties[attribute_index]).auto_available == True):
                            getattr(self.cam, cam_properties[attribute_index]).auto = True
                            print("Set the camera", cam_properties[attribute_index], "to auto")
                        else:
                            print("Auto setting unavailable for", cam_properties[attribute_index])
                            print("Did not set", cam_properties[attribute_index])
                    elif (self.initialize_array[attribute_index] == "none"):  # if the .ini file has "none" for this property
                        print("Did not set", cam_properties[attribute_index])
                    else:  # if the .ini file has a value for its setting
                        if (type(getattr(self.cam, cam_properties[attribute_index]).value) == int):
                            getattr(self.cam, cam_properties[attribute_index]).value = int(
                                self.initialize_array[attribute_index])
                        if (type(getattr(self.cam, cam_properties[attribute_index]).value) == float):
                            getattr(self.cam, cam_properties[attribute_index]).value = float(
                                self.initialize_array[attribute_index])
                        print("Set the camera", cam_properties[attribute_index], "to",
                              getattr(self.cam, cam_properties[attribute_index]).value,
                              "within the range",
                              getattr(self.cam, cam_properties[attribute_index]).range)

        # the last property in the .ini file is whether the person wants the trigger
        self.software_trigger = (self.initialize_array[len(cam_properties)]) == "True"  # set the software trigger to True or False

        # Determine the video format the user would like to use
        formats = self.cam.list_video_formats()
        print("\nThese are the available video formats:")
        print(formats)
        print("Please select video format to use by inputting the index of the format.")
        print("The indices go from 0 to ", len(formats) - 1)
        # Iterate through an infinite loop until the user defines which video format they want to use
        while True:
            self.video_index = int(input())
            if ((self.video_index <= len(formats) - 1) and (self.video_index >= 0)):
                self.cam.set_video_format(formats[self.video_index])  # set the video format
                break
            else:
                print("You didn't enter a correct index.")
        current_video_format = self.cam.get_video_format(self.video_index)

        # if the video format stores the pixels left to right and top to bottom, set flip_image to True
        if any(string in str(current_video_format) for string in PIXEL_FORMAT_TOP_DOWN):
            self.flip_image = True
        else:
            self.flip_image = False

        self.cam.enable_continuous_mode(True)           # image in continuous mode
        self.cam.start_live(show_display=False)         # start imaging
        self.cam.enable_trigger(self.software_trigger)  # camera will wait for trigger
        if not self.cam.callback_registered:
            self.cam.register_frame_ready_callback()    # needed to wait for frame ready callback

        # determine the image dimensions
        self.width, self.height, depth, color_format = self.cam.get_image_description()
        self.depth = depth // 8  # depth is reported in bits per pixel; divide by 8 to get bytes (channels) per pixel

        # take an image because for some cameras, the first image doesn't work correctly and then the rest work
        self.__acquire()

    def figure_of_merit(self):
        """
        Determine the figure of merit using the Imaging Source camera.

        Capture an image using the Imaging Source camera and then calculate the figure of merit.
        """
        self.__acquire()  # acquire an image from the camera
        return figure_of_merit_f.ic_FOM(self.frameout, self.fom_num)

    def __acquire(self):
        """
        Capture an image.

        Capture an image using the given IC camera and then save the image as frameout.
        """
        self.cam.reset_frame_ready()  # reset the frame ready flag to False so that we can wait for the frame to be ready
        if (self.software_trigger):
            self.cam.send_trigger()  # send a software trigger if it was specified to do so
        self.cam.wait_til_frame_ready(1000)  # wait for frame ready due to trigger
        data, width, height, depth = self.cam.get_image_data()  # get the image from the camera
        frame = np.ndarray(buffer=data,
                           dtype=np.uint8,
                           shape=(self.height, self.width, self.depth))  # turn this buffer into a numpy array
        frameout = copy.deepcopy(frame)  # deep copy this data so that if anything overwrites the camera memory, we still have the image
        if self.flip_image:  # if the pixels are written top to bottom, flip the image upside down
            frameout = np.flipud(frameout)
        del frame  # take care of memory allocation
        self.frameout = frameout  # save the image

    def shut_down(self):
        """
        Shut down the camera and close the IC grabber.
        """
        self.cam.stop_live()        # stop capturing video from the camera
        self.cam.close()            # shut down the camera
        self.ic_ic.close_library()  # stop accessing the IC dll
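# Hypothetical usage sketch for the IC class above. The daq_device base class, the device
# string and the figure-of-merit number come from the surrounding project; "IC" and
# fom_num=4 are placeholder arguments, not values taken from the original code.
camera = IC("IC", 4)            # interactive setup: prompts for camera, properties, video format
fom = camera.figure_of_merit()  # grab a frame and compute the configured figure of merit
print("figure of merit:", fom)
camera.shut_down()              # stop live mode, close the camera, release the IC library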
class ICCam(object):

    def __init__(self, cam_num=0, rotate=None, crop=None, exposure=None, format='Y800 (720x540)'):
        '''
        Params
        ------
        cam_num = int; camera number (int)
            default = 0
        crop = dict; contains ints named top, left, height, width for cropping
            default = None, uses default parameters specific to camera
        '''
        self.ic_ic = IC_ImagingControl()
        self.ic_ic.init_library()

        self.cam_num = cam_num
        self.rotate = rotate if rotate is not None else cam_details[str(self.cam_num)]['rotate']
        self.crop = crop if crop is not None else cam_details[str(self.cam_num)]['crop']
        self.exposure = exposure if exposure is not None else cam_details[str(self.cam_num)]['exposure']

        self.cam = self.ic_ic.get_device(self.ic_ic.get_unique_device_names()[self.cam_num])
        self.cam.open()
        self.cam.set_video_format(format)
        self.add_filters()

    def add_filters(self):
        if self.rotate != 0:
            h_r = self.cam.create_frame_filter('Rotate Flip')
            self.cam.add_frame_filter_to_device(h_r)
            self.cam.frame_filter_set_parameter(h_r, 'Rotation Angle', self.rotate)

        h_c = self.cam.create_frame_filter('ROI')
        self.cam.add_frame_filter_to_device(h_c)
        self.cam.frame_filter_set_parameter(h_c, 'Top', self.crop['top'])
        self.cam.frame_filter_set_parameter(h_c, 'Left', self.crop['left'])
        self.cam.frame_filter_set_parameter(h_c, 'Height', self.crop['height'])
        self.cam.frame_filter_set_parameter(h_c, 'Width', self.crop['width'])
        self.size = (self.crop['width'], self.crop['height'])

        self.cam.gain.auto = False
        self.cam.exposure.auto = False
        self.cam.exposure.value = self.exposure

    def set_exposure(self, val):
        try:
            val = int(round(val))
            val = val if val < self.cam.exposure.max - 1 else self.cam.exposure.max - 1
            val = val if val > self.cam.exposure.min else self.cam.exposure.min
            self.cam.exposure.value = val
        except:
            pass

    def get_exposure(self):
        return self.cam.exposure.value

    def get_image(self):
        data, width, height, depth = self.cam.get_image_data()
        frame = np.ndarray(buffer=data,
                           dtype=np.uint8,
                           shape=(height, width, depth))
        return cv2.flip(frame, 0)

    def get_image_dimensions(self):
        _, width, height, _ = self.cam.get_image_data()
        return (width, height)

    def start(self, show_display=True):
        self.cam.enable_continuous_mode(True)
        self.cam.start_live(show_display=show_display)

    def close(self):
        self.cam.stop_live()
        self.cam.close()
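# Minimal usage sketch for the ICCam wrapper above, assuming the surrounding module provides
# the cam_details lookup plus the usual imports (numpy, cv2, pyicic). The exposure value -5
# is a placeholder.
cam = ICCam(cam_num=0, exposure=-5)
cam.start(show_display=False)  # enable continuous mode and begin live capture
frame = cam.get_image()        # cropped, vertically flipped frame as a numpy array
print(frame.shape, cam.get_exposure())
cam.close()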
def _previewCam(self, t_exp, bitdepth, displaythresh):
    t_exp = t_exp or self.t_exp
    bitdepth = bitdepth or self.bitdepth

    # open lib
    ic_ic = IC_ImagingControl()
    ic_ic.init_library()
    cam_names = ic_ic.get_unique_device_names()
    cam = ic_ic.get_device(cam_names[0])
    cam.open()

    # change camera properties
    cam.gain.auto = False  # disable auto gain
    cam.gain.value = cam.gain.min
    t_exp_reg = int(np.round(np.log2(t_exp)))  # convert exposure time into register value (nearest power of 2)
    if t_exp_reg in range(cam.exposure.min, cam.exposure.max + 1):
        cam.exposure.value = t_exp_reg
    else:
        cam.exposure.value = int((cam.exposure.max + cam.exposure.min) / 2)
        print('Exposure out of range. Setting to half of exposure range')
    cam.formats = cam.list_video_formats()
    cam.sensor_height = 1080
    cam.sensor_width = 1920
    cam.set_video_format(b'Y800 (1920x1080)')  # use a fixed Y800 full-HD video format

    cam.enable_continuous_mode(True)    # image in continuous mode
    cam.start_live(show_display=False)  # start imaging
    cam.enable_trigger(True)            # camera will wait for trigger
    if not cam.callback_registered:
        cam.register_frame_ready_callback()  # needed to wait for frame ready callback

    window_width = 800
    resize_scale = window_width / cam.sensor_width
    rescaled_size = (window_width, int(cam.sensor_height * resize_scale))
    cv2.namedWindow('Camera Preview', flags=cv2.WINDOW_KEEPRATIO | cv2.WINDOW_NORMAL)
    cv2.resizeWindow('Camera Preview', rescaled_size[0], rescaled_size[1])

    class MouseControl:

        def __init__(self):
            self.xy = [0, 0]
            self.im_val = 0
            self.stopflag = 0
            self.markers = 0

        def process_events(self, event, x, y, flags, param):
            if event == cv2.EVENT_LBUTTONDOWN:
                self.stopflag = not (self.stopflag)
            elif event == cv2.EVENT_MOUSEMOVE:
                x_val = int(x)
                y_val = int(y)
                self.xy = [x_val, y_val]
            elif event == cv2.EVENT_RBUTTONDOWN:
                self.markers = (self.markers + 1) % 3

    # instantiate class
    mouse = MouseControl()
    cv2.setMouseCallback('Camera Preview', mouse.process_events)

    while (cv2.getWindowProperty('Camera Preview', 0) >= 0):
        try:
            if not (mouse.stopflag):
                t = time.time()
                cam.reset_frame_ready()  # reset frame ready flag
                # send hardware trigger OR call cam.send_trigger() here
                cam.send_trigger()
                # get image data...
                cam.wait_til_frame_ready(10)  # wait for frame ready due to trigger
                im = cam.get_image_data()
                img = np.ndarray(buffer=im[0],
                                 dtype=np.uint8,
                                 shape=(cam.sensor_height, cam.sensor_width, 3))
                cv2.imshow('Camera Preview', img)
                k = cv2.waitKey(1)
                if k == 0x1b:
                    cv2.destroyAllWindows()
                    break
                fps = 1 / (time.time() - t)
            else:
                # paused: keep showing the last captured frame
                cv2.imshow('Camera Preview', img)
                k = cv2.waitKey(1)
                if k == 0x1b:
                    cv2.destroyAllWindows()
                    break
        except:
            print('There was an error')
            cam.stop_live()
            cam.close()
            ic_ic.close_library()
            raise

    cam.stop_live()
    cam.close()
    ic_ic.close_library()
def ic():
    ic_ic = IC_ImagingControl()
    ic_ic.init_library()

    # open first available camera device
    cam_names = ic_ic.get_unique_device_names()
    # print(cam_names)
    cam = ic_ic.get_device(cam_names[0])
    cam.open()
    cam.reset_properties()

    # change camera properties
    # print(cam.list_property_names())  # ['gain', 'exposure', 'hue', etc...]
    cam.gain.auto = False    # disable auto gain
    cam.exposure.value = -5  # change camera settings

    formats = cam.list_video_formats()
    # print(formats)
    cam.set_video_format(formats[0])  # use first available video format

    cam.enable_continuous_mode(True)    # image in continuous mode
    cam.start_live(show_display=False)  # start imaging
    cam.enable_trigger(True)            # camera will wait for trigger
    if not cam.callback_registered:
        cam.register_frame_ready_callback()  # needed to wait for frame ready callback

    cam.reset_frame_ready()
    cam.send_trigger()
    cam.wait_til_frame_ready(1000)  # wait for frame ready due to trigger

    data, width, height, depth = cam.get_image_data()
    frame = np.ndarray(buffer=data,
                       dtype=np.uint8,
                       shape=(height, width, depth))
    frameout = copy.deepcopy(frame).astype(float)
    del frame
    # print(frameout.max())

    cam.stop_live()
    cam.close()
    ic_ic.close_library()

    imgray = rgb2gray(frameout)  # convert rgb image into grayscale
    satu = imgray[imgray > 254].shape[0]
    if satu > 0:
        print('Image saturated with %d pixels' % satu)
        return 0
    else:
        # FOM1
        # I = abs(imgray)**2
        # x = np.arange(imgray.shape[1]).astype(float)
        # y = np.arange(imgray.shape[0]).astype(float)
        # mu0 = np.trapz(np.trapz(I, x), y)
        # mean_x = np.trapz(np.trapz(I * x, x), y) / mu0
        # mean_y = np.trapz(np.trapz(I, x) * y, y) / mu0
        # r0 = 50
        # X, Y = np.meshgrid(x, y)
        # r = (Y - mean_y)**2 + (X - mean_x)**2
        # fom = (1 - np.sum(imgray[r >= r0**2]) / np.sum(imgray)) * np.sum(imgray[r < r0**2])
        # y_peak, x_peak = np.unravel_index(imgray.argmax(), imgray.shape)  # find the target position for FOM calculation, here the maximum point is the target position

        # FOM2 (Image Moment)
        # x_peak = 520
        # y_peak = 554
        # xx = np.arange(imgray.shape[1]).astype(float)
        # yy = np.arange(imgray.shape[0]).astype(float)
        # X, Y = np.meshgrid(xx, yy)
        # d1 = (Y - y_peak)**2
        # d2 = (X - x_peak)**2
        # d = (d1 + d2)**4
        # d[y_peak, x_peak] = 1
        # fom = imgray / d
        # fom[y_peak, x_peak] = 0
        # fom = np.sum(fom)

        # FOM3
        # fom = np.sum(imgray**2)

        # FOM4
        fom = np.sum(imgray)
        print(frameout.max(), fom)
        return fom
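# The snippet above calls an rgb2gray helper that is not shown. A minimal sketch of such a
# helper, assuming an (H, W, 3) float array and standard ITU-R BT.601 luminance weights;
# the function name is taken from the call site, the implementation is an assumption.
import numpy as np

def rgb2gray(rgb):
    """Collapse an (H, W, 3) RGB array to an (H, W) grayscale array using BT.601 weights."""
    return np.dot(rgb[..., :3], [0.299, 0.587, 0.114])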
def ic():
    ic_ic = IC_ImagingControl()
    ic_ic.init_library()

    cam_names = ic_ic.get_unique_device_names()
    print("These are the available cameras:")
    print(cam_names)
    print("Please select an IC camera to use by inputting the index of the camera.")
    print("The indices go from 0 to ", len(cam_names) - 1)
    while True:
        index = int(input())
        if ((index <= len(cam_names) - 1) and (index >= 0)):
            cam = ic_ic.get_device(cam_names[index])
            break
        else:
            print("You didn't enter a correct index.")
    cam.open()
    cam.reset_properties()

    # change camera properties
    print(cam.list_property_names())  # ['gain', 'exposure', 'hue', etc...]
    cam.gain.auto = False    # disable auto gain
    cam.exposure.value = -5  # change camera settings

    formats = cam.list_video_formats()
    print(formats)
    cam.set_video_format(formats[0])  # use first available video format

    cam.enable_continuous_mode(True)    # image in continuous mode
    cam.start_live(show_display=False)  # start imaging
    cam.enable_trigger(True)            # camera will wait for trigger
    if not cam.callback_registered:
        cam.register_frame_ready_callback()  # needed to wait for frame ready callback

    cam.reset_frame_ready()
    cam.send_trigger()
    cam.wait_til_frame_ready(1000)  # wait for frame ready due to trigger

    data, width, height, depth = cam.get_image_data()
    frame = np.ndarray(buffer=data,
                       dtype=np.uint8,
                       shape=(height, width, depth))
    frameout = copy.deepcopy(frame).astype(float)
    plt.imsave('figure_of_merit.png', frameout, cmap=cm.gray)
    del frame
    # print(frameout.max())

    cam.stop_live()        # stop capturing video from the camera
    cam.close()            # shut down the camera
    ic_ic.close_library()  # stop accessing the IC dll
    return frameout
class data_acqusition(object):
    """This object is used to initialize, acquire data from, and shut down various hardware
    for data acquisition
    """

    def __init__(self, device, fom_num):
        self.device = device  # save which acquisition device is being used
        # read in the initialization information from the file at device/device properties.ini
        initialize_array = file_f.read_initialization_variables(
            "\\" + self.device + "\\" + self.device + " properties.ini")
        self.fom_num = fom_num

        # initialize the specific device being called
        if (device == DAQ_DEVICES[0]):    # if the device name is "Andor"
            self.__initialize_andor(initialize_array)
        elif (device == DAQ_DEVICES[1]):  # if the device name is "NI_DAQ"
            self.__initialize_NI_DAQ(initialize_array)
        elif (device == DAQ_DEVICES[2]):  # if the device name is "IC"
            self.__initialize_IC(initialize_array)
        else:
            print("Error: The device you entered into data acquisition wasn't valid")
            print("The possible devices are: ", DAQ_DEVICES)
            exit()

    def __check_success(self, error_value, function_name):
        """Check whether or not the program was able to perform the given function for the Andor camera

        Parameters
        ----------
        error_value : error value, int
            This is the error value returned from any Andor function
        function_name : function name, string
            This is a string which denotes which function returned this error value
        """
        if (error_value != DRV_SUCCESS):  # if the error value wasn't success
            print("Andor", function_name, "Error", error_value)
            exit()

    def __initialize_andor(self, initialize_array):
        """Initialize the Andor camera to be ready to capture images

        Parameters
        ----------
        initialize_array : initialization array, numpy array
            This contains information from the "Andor/Andor properties.ini" file about what
            Andor properties to use for image capture
        """
        # get the information from the .ini file
        read_mode_top = int(initialize_array[0])  # readout mode options: 0 Full Vertical binning; 1 Multi-Track; 2 Random-Track; 3 Single-Track; 4 Image;
        acquisition_mode_top = int(initialize_array[1])  # acquisition mode options: 1 Single scan; 2 Accumulate; 3 Kinetics; 4 Fast Kinetics; 5 Run till abort;
        exposure_time_top = float(initialize_array[2])  # time in seconds
        trigger_mode_top = int(initialize_array[3])  # trigger mode options: 0 internal; 1 external; 6 external start; 7 external exposure (bulb); 9 external FVB EM; 10 software trigger; 12 external charge shifting;
        horizontal_binning_top = int(initialize_array[4])  # set the horizontal binning
        vertical_binning_top = int(initialize_array[5])  # set the vertical binning
        horizontal_start_top = int(initialize_array[6])  # set the horizontal start pixel of the subregion of the camera which to take a picture from
        horizontal_end_top = int(initialize_array[7])  # set the horizontal end pixel
        vertical_start_top = int(initialize_array[8])  # set the vertical start pixel
        vertical_end_top = int(initialize_array[9])  # set the vertical end pixel

        # Load the Andor dll file
        directory_path = os.path.dirname(os.path.abspath(__file__))  # get the current directory's path
        self.andor_dll = ctypes.windll.LoadLibrary(directory_path + '\\Andor\\atmcd32d.dll')  # load the andor dll from the directory Andor/

        # Initialize camera
        aBuffer = ctypes.c_char_p()  # The buffer tells the initialize function where the driver files are.
                                     # Currently, they're in the same folder as this .py file
        error_value = self.andor_dll.Initialize(aBuffer)
        self.__check_success(error_value, "Initialize")

        # Determine size (in pixels) of the camera
        gblXPixels = ctypes.c_int()  # Total number of horizontal pixels
        gblYPixels = ctypes.c_int()  # Total number of vertical pixels
        error_value = self.andor_dll.GetDetector(ctypes.byref(gblXPixels), ctypes.byref(gblYPixels))
        self.__check_success(error_value, "GetDetector")

        # Set vertical shift speed to recommended value
        vertical_shift_index = ctypes.c_int()  # the index to access specific vertical shift speeds
        vertical_speed = ctypes.c_float()  # speed of the vertical speed shift in microseconds per pixel shift
        error_value = self.andor_dll.GetFastestRecommendedVSSpeed(
            ctypes.byref(vertical_shift_index), ctypes.byref(vertical_speed))
        self.__check_success(error_value, "Get Fastest Recommended Vertical Shift Speed")
        error_value = self.andor_dll.SetVSSpeed(vertical_shift_index)
        self.__check_success(error_value, "Set Vertical Shift Speed")

        # Set horizontal shift speed to the maximum
        horizontal_shift_index = ctypes.c_int(0)  # the index to access specific horizontal shift speeds
        AD_converter_index = ctypes.c_int()  # the specific index to access a given A-D converter
        number_AD = ctypes.c_int(0)  # the number of A-D converters in the camera
        number_speeds = ctypes.c_int()  # number of speeds available
        horizontal_speed = ctypes.c_float()  # horizontal shift speed
        max_horizontal_speed = ctypes.c_float(0)  # maximum horizontal speed
        error_value = self.andor_dll.GetNumberADChannels(ctypes.byref(number_AD))
        self.__check_success(error_value, "Get Number AD Channels")
        for each_AD in range(number_AD.value):
            error_value = self.andor_dll.GetNumberHSSpeeds(
                AD_converter_index, ctypes.c_int(0), ctypes.byref(number_speeds))
            self.__check_success(error_value, "Get Number Horizontal Shift Speeds")
            for each_speed_index in range(number_speeds.value):
                error_value = self.andor_dll.GetHSSpeed(
                    ctypes.c_int(each_AD), ctypes.c_int(0),
                    ctypes.c_int(each_speed_index), ctypes.byref(horizontal_speed))
                self.__check_success(error_value, "Get Horizontal Shift Speed")
                if (horizontal_speed.value > max_horizontal_speed.value):
                    max_horizontal_speed.value = horizontal_speed.value
                    horizontal_shift_index = ctypes.c_int(each_speed_index)
                    AD_converter_index = ctypes.c_int(each_AD)
        error_value = self.andor_dll.SetADChannel(AD_converter_index)
        self.__check_success(error_value, "Set AD Channel")
        error_value = self.andor_dll.SetHSSpeed(ctypes.c_int(0), horizontal_shift_index)
        self.__check_success(error_value, "Set Horizontal Speed Index")

        # Turn the camera cooler on
        error_value = self.andor_dll.CoolerON()
        self.__check_success(error_value, "Turn Cooler On")

        # Check to make sure cooler is on
        cooler_on = ctypes.c_int()
        error_value = self.andor_dll.IsCoolerOn(ctypes.byref(cooler_on))
        self.__check_success(error_value, "Check if cooler is on")
        if (cooler_on.value != 1):
            print("Error: Cooler not on", "Exiting...")
            exit()

        # Set the readout mode of the camera
        read_mode = ctypes.c_int(read_mode_top)
        error_value = self.andor_dll.SetReadMode(read_mode)
        self.__check_success(error_value, "Set Read Mode")

        # Set the acquisition mode
        acquisition_mode = ctypes.c_int(acquisition_mode_top)
        error_value = self.andor_dll.SetAcquisitionMode(acquisition_mode)
        self.__check_success(error_value, "Set Acquisition Mode")

        # Set exposure time
        exposure_time = ctypes.c_float(exposure_time_top)  # time in seconds
        error_value = self.andor_dll.SetExposureTime(exposure_time)
        self.__check_success(error_value, "Set Exposure Time")

        # Set trigger mode
        trigger_mode = ctypes.c_int(trigger_mode_top)
        error_value = self.andor_dll.SetTriggerMode(trigger_mode)
        self.__check_success(error_value, "Set Trigger Mode")

        # TODO Set up accumulation and kinetic capture & probs not video
        """
        // only needed for accumulation acquisition
        //float accumulation_cycle_time = .1; // seconds
        //errorValue = SetAccumulationCycleTime(accumulation_cycle_time);
        //if (errorValue != DRV_SUCCESS) {
        //    std::cout << "Set accumulation cycle time Error\n";
        //    std::cout << "Error: " << errorValue << "\n";
        //}

        // Only needed for kinetic capture
        //errorValue = SetBaselineClamp(1);
        //if (errorValue != DRV_SUCCESS) {
        //    std::cout << "Set Baseline Clamp Error\n";
        //    std::cout << "Error: " << errorValue << "\n";
        //}
        """

        # Determine the actual times the camera is using for acquisition
        actual_exposure_time = ctypes.c_float()
        actual_accumulate_time = ctypes.c_float()
        actual_kinetic_time = ctypes.c_float()
        error_value = self.andor_dll.GetAcquisitionTimings(
            ctypes.byref(actual_exposure_time),
            ctypes.byref(actual_accumulate_time),
            ctypes.byref(actual_kinetic_time))
        self.__check_success(error_value, "Get Acquisition Timings")
        print('Exposure time is ', actual_exposure_time.value)

        # Set the horizontal and vertical binning and the area of the image to be captured
        horizontal_binning = ctypes.c_int(horizontal_binning_top)  # Number of pixels to bin horizontally
        vertical_binning = ctypes.c_int(vertical_binning_top)      # Number of pixels to bin vertically
        horizontal_start = ctypes.c_int(horizontal_start_top)      # Start column of image to be taken (inclusive)
        horizontal_end = ctypes.c_int(horizontal_end_top)          # End column of image to be taken (inclusive)
        vertical_start = ctypes.c_int(vertical_start_top)          # Start row of image to be taken (inclusive)
        vertical_end = ctypes.c_int(vertical_end_top)              # End row of image to be taken (inclusive)

        # Determine number of horizontal and vertical pixels, and set the region and settings for image capture
        self.number_x_pixels = horizontal_end_top - horizontal_start_top + 1
        self.number_y_pixels = vertical_end_top - vertical_start_top + 1
        error_value = self.andor_dll.SetImage(horizontal_binning, vertical_binning,
                                              horizontal_start, horizontal_end,
                                              vertical_start, vertical_end)
        self.__check_success(error_value, "Set Image")

    def __initialize_NI_DAQ(self, initialize_array):
        """Initialize the NI daq to be ready to read in voltages

        Parameters
        ----------
        initialize_array : initialization array, numpy array
            This contains information from the "NI_DAQ/NI_DAQ properties.ini" file
        """
        self.number_of_reads = int(initialize_array[0])  # determine number of voltages to average over
        directory_path = os.path.dirname(os.path.abspath(__file__))  # get the current directory's path
        LabVIEW = win32com.client.Dispatch("Labview.Application")  # Start running LabVIEW
        self.pci0VI = LabVIEW.getvireference(
            directory_path + '\\NI_DAQ\\get_average_photodiode_voltage.vi')  # get the path to the LabVIEW VI

    def __initialize_IC(self, initialize_array):
        """Initialize the Imaging Source (IC) camera so that it is ready to take an image."""
        self.ic_ic = IC_ImagingControl()
        self.ic_ic.init_library()

        cam_names = self.ic_ic.get_unique_device_names()
        if (len(cam_names) == 0):
            print("Error: No IC cameras connected to the computer.")
            exit()
        print("\nThese are the available cameras:")
        print(cam_names)
        print("Please select an IC camera to use by inputting the index of the camera.")
        print("The indices go from 0 to ", len(cam_names) - 1)
        while True:
            index = int(input())
            if ((index <= len(cam_names) - 1) and (index >= 0)):
                self.cam = self.ic_ic.get_device(cam_names[index])
                break
            else:
                print("You didn't enter a correct index.")
        self.cam.open()

        print("\nWould you like to set all of the camera initialization values yourself, "
              "or use the IC properties.ini file?")
        print('Enter either "set" for setting all of the values or "ini" for the .ini file')
        while True:
            init = input()
            if (init == "set"):
                set_all = True
                break
            elif (init == "ini"):
                set_all = False
                break
            else:
                print("You didn't enter 'set' or 'ini'. Try again.")

        self.cam.reset_properties()
        cam_properties = self.cam.list_property_names()
        print("Note: this only goes through the camera properties available for this specific camera.")
        for attribute_index in range(len(cam_properties)):
            if (getattr(self.cam, cam_properties[attribute_index]).available == True):
                if (set_all == True):  # if they want to go through everything
                    print("You are setting the", cam_properties[attribute_index])
                    print("Its current value is ",
                          getattr(self.cam, cam_properties[attribute_index]).value)
                    print("The range of values you can set this to is ",
                          getattr(self.cam, cam_properties[attribute_index]).range)
                    print("What would you like to set this property to?")
                    while True:
                        change_value = input()
                        print("You entered", change_value, "\nIs this okay? (enter 'y' or 'n')")
                        input_is_good = input()
                        if (input_is_good == 'y'):
                            break
                        elif (input_is_good == 'n'):
                            print("Type in what you'd like to change this property to instead")
                        else:
                            print("You didn't enter a y or an n. Enter what value you'd like to "
                                  "change the property to again.")
                else:
                    if (initialize_array[attribute_index] == "auto"):
                        if (getattr(self.cam, cam_properties[attribute_index]).auto_available == True):
                            getattr(self.cam, cam_properties[attribute_index]).auto = True
                            print("Set the camera", cam_properties[attribute_index], "to auto")
                        else:
                            print("Auto setting unavailable for", cam_properties[attribute_index])
                            print("Did not set", cam_properties[attribute_index])
                    elif (initialize_array[attribute_index] == "none"):
                        print("Did not set", cam_properties[attribute_index])
                    else:
                        if (type(getattr(self.cam, cam_properties[attribute_index]).value) == int):
                            getattr(self.cam, cam_properties[attribute_index]).value = int(
                                initialize_array[attribute_index])
                        if (type(getattr(self.cam, cam_properties[attribute_index]).value) == float):
                            getattr(self.cam, cam_properties[attribute_index]).value = float(
                                initialize_array[attribute_index])
                        print("Set the camera", cam_properties[attribute_index], "to",
                              getattr(self.cam, cam_properties[attribute_index]).value,
                              "within the range",
                              getattr(self.cam, cam_properties[attribute_index]).range)

        formats = self.cam.list_video_formats()
        print("\nThese are the available video formats:")
        print(formats)
        print("Please select video format to use by inputting the index of the format.")
        print("The indices go from 0 to ", len(formats) - 1)
        while True:
            index = int(input())
            if ((index <= len(formats) - 1) and (index >= 0)):
                self.cam.set_video_format(formats[index])
                break
            else:
                print("You didn't enter a correct index.")

        self.cam.enable_continuous_mode(True)    # image in continuous mode
        self.cam.start_live(show_display=False)  # start imaging
        self.cam.enable_trigger(True)            # camera will wait for trigger
        if not self.cam.callback_registered:
            self.cam.register_frame_ready_callback()  # needed to wait for frame ready callback

        self.width, self.height, depth, color_format = self.cam.get_image_description()
        self.depth = depth // 8  # depth is reported in bits per pixel; divide by 8 to get bytes (channels) per pixel
        self.acquire()

    def figure_of_merit(self):
        """Determine the figure of merit using the selected device

        Parameters
        ----------
        fom_num : figure of merit number, int
            This determines which calculation to use when calculating the figure of merit
        """
        self.acquire()
        if (self.device == DAQ_DEVICES[0]):    # if the device name is "Andor"
            figure_of_merit_f.Andor_FOM(self.image, self.fom_num)
        elif (self.device == DAQ_DEVICES[1]):  # if the device name is "NI_DAQ"
            figure_of_merit_f.NI_DAQ_FOM(self.voltage, self.fom_num)
        elif (self.device == DAQ_DEVICES[2]):  # if the device name is "IC"
            figure_of_merit_f.ic_FOM(self.frameout, self.fom_num)

    def acquire(self):
        """Acquire data from the appropriate data acquisition hardware
        """
        if (self.device == DAQ_DEVICES[0]):    # if the device name is "Andor"
            self.__acquire_andor()
        elif (self.device == DAQ_DEVICES[1]):  # if the device name is "NI_DAQ"
            self.__acquire_NI_DAQ()
        elif (self.device == DAQ_DEVICES[2]):  # if the device name is "IC"
            self.__acquire_IC()

    def __acquire_andor(self):
        """This function acquires an image from the Andor camera and stores the image

        Returns
        -------
        image : image, numpy 2d array
            This is the image which the Andor camera captured
        """
        # Wait until the camera is in an idle state
        camera_status = ctypes.c_int()
        error_value = self.andor_dll.GetStatus(ctypes.byref(camera_status))
        self.__check_success(error_value, "Get Camera Status")
        while (camera_status.value != DRV_IDLE):
            error_value = self.andor_dll.GetStatus(ctypes.byref(camera_status))
            self.__check_success(error_value, "Get Camera Status")

        # Start the acquisition process
        error_value = self.andor_dll.StartAcquisition()
        acquiring = self.__check_success(error_value, "Start Acquisition")
        if (acquiring == False):
            self.andor_dll.AbortAcquisition()

        # Wait until the acquisition is complete
        error_value = self.andor_dll.GetStatus(ctypes.byref(camera_status))
        self.__check_success(error_value, "Get Camera Status")
        while (camera_status.value != DRV_IDLE):
            error_value = self.andor_dll.GetStatus(ctypes.byref(camera_status))
            self.__check_success(error_value, "Get Camera Status")

        # Get the image data from the camera
        size = ctypes.c_int(self.number_x_pixels * self.number_y_pixels)
        image_pointer = ctypes.cast(
            ctypes.create_string_buffer(size.value * ctypes.sizeof(ctypes.c_long())),
            ctypes.POINTER(ctypes.c_long))
        error_value = self.andor_dll.GetAcquiredData(image_pointer, size)
        self.__check_success(error_value, "Get Acquired Data")

        # Deep copy the image from dereferencing a pointer to a numpy array
        image = np.zeros((self.number_y_pixels, self.number_x_pixels))
        for x in range(self.number_x_pixels):
            for y in range(self.number_y_pixels):
                image[y, x] = image_pointer[x + y * self.number_x_pixels]
        self.image = image

    def __acquire_NI_DAQ(self):
        """Compute figure of merit that is the average voltage reading from the DAQ

        Returns
        -------
        voltage : voltage, variable type unknown -> maybe float
            the averaged voltage read by the NI DAQ hardware
        """
        self.pci0VI._FlagAsMethod("Call")  # Flag "Call" as the method to run the VI in this path
        self.pci0VI.setcontrolvalue('error in (no error)', 0)  # set error in
        self.pci0VI.setcontrolvalue('number of reads', self.number_of_reads)  # set addresses
        self.pci0VI.Call()  # Run the VI
        voltage = self.pci0VI.getcontrolvalue('voltage')  # retrieve the voltage
        error = self.pci0VI.getcontrolvalue('error out')  # retrieve error out
        if (error[1] != 0):  # check whether there was an error
            print('There was an error writing to board 0 at PXI4::5::INSTR')
            print('Error: ', error)
            print('Press anything and enter to exit...')
            input()
            exit()
        self.voltage = voltage

    def __acquire_IC(self):
        self.cam.reset_frame_ready()
        self.cam.send_trigger()
        self.cam.wait_til_frame_ready(1000)  # wait for frame ready due to trigger
        data, width, height, depth = self.cam.get_image_data()
        frame = np.ndarray(buffer=data,
                           dtype=np.uint8,
                           shape=(self.height, self.width, self.depth))
        frameout = copy.deepcopy(frame)
        plt.imshow(frameout)
        plt.colorbar()
        plt.show()
        # self.cam.save_image(b"image.jpg", 1)
        del frame
        self.frameout = frameout

    def shut_down(self):
        """Shut down the appropriate data acquisition hardware
        """
        if (self.device == DAQ_DEVICES[0]):    # if the device name is "Andor"
            self.__shut_down_andor()
        elif (self.device == DAQ_DEVICES[1]):  # if the device name is "NI_DAQ"
            self.__shut_down_NI_DAQ()
        elif (self.device == DAQ_DEVICES[2]):  # if the device name is "IC"
            self.__shut_down_IC()

    def __shut_down_andor(self):
        """Shut down the Andor camera
        """
        error_value = self.andor_dll.ShutDown()
        self.__check_success(error_value, "Shut down")

    def __shut_down_NI_DAQ(self):
        """Nothing needs to be done to shut down the NI DAQ device
        """
        return

    def __shut_down_IC(self):
        """Shut down the IC camera and close the IC grabber."""
        self.cam.stop_live()        # stop capturing video from the camera
        self.cam.close()            # shut down the camera
        self.ic_ic.close_library()  # stop accessing the IC dll
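# Hypothetical end-to-end usage sketch for the data_acqusition wrapper above. DAQ_DEVICES
# comes from the surrounding module; the device name "IC" and fom_num=4 are placeholders.
daq = data_acqusition("IC", fom_num=4)  # prompts interactively for camera, properties and video format
daq.acquire()                           # grab one frame (or voltage) from the selected hardware
daq.figure_of_merit()                   # acquire again and evaluate the configured figure of merit
daq.shut_down()                         # release the hardware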