Example #1
    def export_withCounter(self, frame: Frame):
        """
        Frame handler that exports frames to a given path with
        a counter for numbering frames.
        """
        # Once the frame limit is reached, signal the acquisition loop to stop.
        if self._frame_limit is not None and self._counter > self._frame_limit:
            self.shutdown_event.set()
            return

        if frame.data.receiveStatus == -1:
            self.incompleteFrameErrorMsg()
            return

        image = frame.buffer_data_numpy()

        # Convert colour space if a conversion is defined for this pixel format.
        try:
            image = cv2.cvtColor(
                image, self.pixelFormatConversions[frame.pixel_format])
        except KeyError:
            pass

        filename = str(self._counter) + ".jpg"
        cv2.imwrite(os.path.join(self.path, filename), image)
        self._counter += 1
Example #2
	def export(self, frame: Frame):
		"""
		Frame handler that exports frames to a given path.
		"""
		# Once the frame limit is reached, signal the acquisition loop to stop.
		if self._frame_limit is not None and self._counter > self._frame_limit:
			self.shutdown_event.set()
			return

		if frame.data.receiveStatus == -1:
			self.incompleteFrameErrorMsg()
			return

		image = frame.buffer_data_numpy()

		# Convert colour space if a conversion is defined for this pixel format.
		try:
			image = cv2.cvtColor(image, self.pixelFormatConversions[frame.pixel_format])
		except KeyError:
			pass

		timestamp = datetime.datetime.now().strftime('%Y-%m-%d_%Hh%Mm%Ss%fµs')
		filename = timestamp + ".jpg"

		cv2.imwrite(os.path.join(self.path, filename), image)
		self._counter += 1
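# The export handlers above are methods of a frame-handler object. Below is a
# minimal sketch of such an object; the attribute names mirror those referenced
# above (path, _counter, _frame_limit, shutdown_event, pixelFormatConversions,
# incompleteFrameErrorMsg), but the defaults and the 'BayerRG8' conversion
# entry are assumptions.
import os
import threading
import datetime

import cv2


class FrameExporter:
    def __init__(self, path, frame_limit=None):
        self.path = path                         # output directory for .jpg files
        self._counter = 0                        # frame counter / filename index
        self._frame_limit = frame_limit          # None means run until stopped
        self.shutdown_event = threading.Event()  # set once the limit is reached
        self.pixelFormatConversions = {'BayerRG8': cv2.COLOR_BAYER_RG2RGB}

    def incompleteFrameErrorMsg(self):
        print("Received an incomplete frame; skipping it.")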
Example #3
def save_frame(frame: Frame):
    # Callable for camera.arm(): saves the frame as a NumPy file and re-queues it for capture
    global total
    frame_num = 'frame_{}'.format(frame.data.frameID)
    print("\rProgress: {:2.1%}".format(frame.data.frameID/total), end='\r')

    image = frame.buffer_data_numpy()
    np.save(frame_num, image)
    frame.queue_for_capture(frame_callback=save_frame)
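# Usage sketch for save_frame above: it is passed to camera.arm() and called
# once per received frame. This assumes pymba's Vimba/Camera API and a camera
# reachable at index 0; the 5-second acquisition window and the value of
# 'total' (used by the progress print) are arbitrary assumptions.
from time import sleep

from pymba import Vimba

total = 100  # expected number of frames for the progress print

with Vimba() as vimba:
    camera = vimba.camera(0)
    camera.open()

    camera.arm('Continuous', save_frame)   # save_frame handles every frame
    camera.start_frame_acquisition()
    sleep(5)                               # acquire for ~5 seconds
    camera.stop_frame_acquisition()

    camera.disarm()
    camera.close()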
Example #4
    def display(self, frame: Frame):
        """
        Frame handler that displays frames.
        """
        if frame.data.receiveStatus == -1:
            self.incompleteFrameErrorMsg()
            return

        image = frame.buffer_data_numpy()

        # Convert colour space if a conversion is defined for this pixel format.
        try:
            image = cv2.cvtColor(
                image, self.pixelFormatConversions[frame.pixel_format])
        except KeyError:
            pass

        msg = "Capturing from '{}'."
        im_resize = cv2.resize(image,
                               (self.liveViewWidth, self.liveViewHeight))
        cv2.imshow(msg.format(self.cameraID), im_resize)

        key = cv2.waitKey(1)
Example #5
    def continous_cb(self, frame: Frame):
        """Callback for receiving frames when they're ready

        Args:
            frame: The frame object

        """

        self.img_IDs.append([frame.data.frameID, False])

        # If the frame is incomplete, discard it (VmbFrame_t.receiveStatus does not equal VmbFrameStatusComplete)
        if frame.data.receiveStatus == -1:
            print(f"Incomplete frame: ID{frame.data.frameID}")
            return

        # get a copy of the frame data
        try:
            image = frame.buffer_data_numpy()
        except NotImplementedError:
            print(f"Empty frame: ID{frame.data.frameID}")
            return

        # convert colour space if desired
        try:
            image = cv.cvtColor(
                image, self.PIXEL_FORMATS_CONVERSIONS[frame.pixel_format])
        except KeyError:
            pass

        self.img_IDs[-1][1] = True
        self.framerate_sum += self.camera.AcquisitionFrameRateAbs
        self.img_buffer.append(image)
    def frame_callback(self, frame: Frame, delay: Optional[int] = 1) -> None:
        """
        Converts the acquired frame and queues it for display.
        :param frame: The frame object to queue.
        :param delay: Display delay in milliseconds, use 0 for indefinite.
        """
        # get a copy of the frame data
        image = frame.buffer_data_numpy()

        # convert colour space, fix the frame, and queue it; frames whose pixel
        # format has no conversion entry are silently dropped
        try:
            image = cv2.cvtColor(image, PIXEL_FORMATS_CONVERSIONS[frame.pixel_format])
            image = self.frame_fixer.fix_frame(image)

            self.buffer.put(image)
            self.time_buffer.put(time.time())
        except KeyError:
            pass
Example #7
    def frame_callback(self, frame: Frame) -> None:
        # TODO: only 8-bit pixel formats are supported.
        # For details, see .\examples\camera\opencv_display_various_image.py
        img = frame.buffer_data_numpy()

        # convert colour space if the frame is Bayer-encoded
        if frame.pixel_format == 'BayerRG8':
            img = cv2.cvtColor(img,
                               PIXEL_FORMATS_CONVERSIONS[frame.pixel_format])
        #img = cv2.resize(img, (400, 300))
        cv2.imshow("cam-{}".format(self.idx), img)

        c = cv2.waitKey(5)
        if c == ord('q'):
            global flag
            flag = False
Example #8
    def acquire_frame(self, frame: Frame, delay: int = 1) -> None:
        """
        Routes the capture frame to two destinations:
        1: EyeLoop for online processing
        2: frame save for offline processing

        :param frame: The frame object to display.
        :param delay: Display delay in milliseconds, use 0 for indefinite.
        """

        image = frame.buffer_data_numpy()

        # image = cv2.cvtColor(image,cv2.COLOR_GRAY2RGB)

        image = self.resize(image)
        self.rotate_(image, config.engine.angle)
        config.engine.update_feed(image)
        self.save(image)

        self.frame += 1
Example #9
def display_frame(frame: Frame, delay: Optional[int] = 1) -> None:
    """
    Displays the acquired frame.
    :param frame: The frame object to display.
    :param delay: Display delay in milliseconds, use 0 for indefinite.
    """
    print('frame {}'.format(frame.data.frameID))

    # get a copy of the frame data
    image = frame.buffer_data_numpy()

    # convert colour space if desired
    try:
        image = cv2.cvtColor(image, PIXEL_FORMATS_CONVERSIONS[frame.pixel_format])
    except KeyError:
        pass

    # display image
    cv2.imshow('Image', image)
    cv2.waitKey(delay)
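# Usage sketch for display_frame above, following pymba's single-frame example
# pattern. The PIXEL_FORMATS_CONVERSIONS table it looks up is assumed to be a
# module-level dict mapping pymba pixel-format strings to cv2 conversion codes;
# only the 'BayerRG8' entry is taken from the examples in this file.
import cv2
from pymba import Vimba

PIXEL_FORMATS_CONVERSIONS = {
    'BayerRG8': cv2.COLOR_BAYER_RG2RGB,
}

with Vimba() as vimba:
    camera = vimba.camera(0)
    camera.open()

    camera.arm('SingleFrame')
    frame = camera.acquire_frame()
    display_frame(frame, 0)        # delay=0 blocks until a key is pressed

    camera.disarm()
    camera.close()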
Example #10
def display_frame(frame: Frame, delay: Optional[int] = 1) -> None:
    """
    Displays the acquired frame.
    :param frame: The frame object to display.
    :param delay: Display delay in milliseconds, use 0 for indefinite.
    """
    print(f'frame {frame.data.frameID}')

    # get a copy of the frame data
    image = frame.buffer_data_numpy()

    # convert colour space if desired
    try:
        image = cv2.cvtColor(image, PIXEL_FORMATS_CONVERSIONS[frame.pixel_format])
    except KeyError:
        pass

    # display image
    cv2.imshow('Image', image)
    cv2.waitKey(delay)
def image_processing_callback(frame: Frame):

    #set pos value (equal to last_pos to create circle in all images)
    global pos
    global last_pos
    pos = last_pos

    #get frame number
    global i
    i = int(frame.data.frameID)
    #print('\nFrame', i)

    #get a copy of the frame data
    global image
    image = frame.buffer_data_numpy()
    frame_time = time.time()

    #Search area
    global search_area
    y = search_area[0]
    y1 = search_area[1]
    x = search_area[2]
    x1 = search_area[3]
    global search_area_image
    search_area_image = image[y:y1, x:x1]
    
    #Give color
    color_image = cv2.cvtColor(image, cv2.COLOR_GRAY2BGR)

    if i%(16*user_time_interval) == 0 and (pos < 4000):
        
        ## PROCESSING PART

        #Time interval
        global last_time
        global time_interval
        time_interval = frame_time - last_time
        last_time = frame_time
        print('Time interval: ', time_interval)

        #Apply Threshold
        global threshold_value
        global threshold_image
        #blur = cv2.GaussianBlur(search_area_image,(5,5),0)
        ret,threshold_image = cv2.threshold(search_area_image,threshold_value,255,cv2.THRESH_BINARY)

        #Find Contours
        global color_image_contours
        global contours
        contours, hierarchy = cv2.findContours(threshold_image, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
        color = cv2.cvtColor(search_area_image, cv2.COLOR_GRAY2BGR)
        color_image_contours = cv2.drawContours(color, contours, -1, (0,255,0), 2)

        #Find meniscus base position
        global scale
        global Area
        global middle_y
        global x_coord
        global mean_flow
        global flow
        global vector_of_flow
        global vector_of_dist
        global text_flow

        middle = 100

        try:
            for a in range (len(contours)):         #discover points with y equal to middle_y
                for b in range (len(contours[a])):
                    for c in range (len(contours[a][b])):
                        if contours[a][b][c][1] == middle or contours[a][b][c][1] == (middle+1) or contours[a][b][c][1] == (middle-1):
                            x_coord.append(contours[a][b][c][0])
        except Warning:
            print ('No flow')

        try:
            x_coord = list(set(x_coord))
            x_coord.remove(max(x_coord))
            pos = max(x_coord)            
        except ValueError:
            pos = 0

        x_coord = []

        #Calculate distance
        dist = [(last_pos - pos),(time_interval)]
        last_pos = pos

        if pos != 0 and last_pos != 0:
            
            vector_of_dist.append(dist)
            
            #Calculate instant flow
            flow = ((dist[0]*scale*Area)/(time_interval)*3.6)
            #format(flow, '.6f')
            #print('\n', flow)
            #print( dist)
            print ('Pos: ', pos)

            
            vector_of_flow.append(flow)
            try:
                text_flow = np.mean(vector_of_flow)
            except Exception:
                text_flow = 0

    #Create point in the image
    cv2.circle(color_image,(pos,middle_y),20,(255,0,0),-1)
    
    #display image
    color_image_scaled = cv2.resize(color_image,(1280,720))
    #Show instant average flow
    cv2.putText(color_image_scaled, 'Average Flow: ' + str(text_flow) + ' mL/h          Instant flow: ' + str(flow) + ' mL/h', (50,50), cv2.FONT_HERSHEY_PLAIN, 1, (0,255,0), 1)
    cv2.imshow('Video Stream', color_image_scaled)
    cv2.waitKey(1)
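# Minimal sketch of the module-level state that image_processing_callback above
# expects. The names mirror the globals it references; every value here is an
# assumption and must be tuned to the actual setup (optics, tubing, camera).
import time

pos, last_pos = 0, 0                 # meniscus x-position (current / previous)
i = 0                                # current frame number
image = None                         # latest full frame
search_area = (0, 200, 0, 2000)      # (y, y1, x, x1) crop analysed each frame
search_area_image = None
threshold_value = 128                # binarisation threshold
middle_y = 100                       # image row at which the meniscus is tracked
scale = 1.0                          # length per pixel
Area = 1.0                           # tube cross-section area
user_time_interval = 1               # process every 16*user_time_interval frames
last_time = time.time()
time_interval = 0.0
flow, mean_flow, text_flow = 0, 0, 0
x_coord = []
vector_of_flow, vector_of_dist = [], []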
def save_frame(frame: Frame):
    # Callable for camera.arm(): saves the frame as a NumPy file
    frame_num = 'frame_{}'.format(frame.data.frameID)
    print(frame_num)
    image = frame.buffer_data_numpy()
    np.save(frame_num, image)
def get_frame_array(frame: Frame) -> np.ndarray:
    return np.ndarray(buffer=frame.buffer_data(),
                      dtype=np.uint16,
                      shape=(frame.data.height, frame.data.width))
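# Usage sketch for get_frame_array above. Its dtype/shape assume a 16-bit
# monochrome pixel format (e.g. Mono12/Mono16); for other formats,
# frame.buffer_data_numpy() already returns a correctly typed array. The pymba
# calls and the camera index are assumptions.
from pymba import Vimba

with Vimba() as vimba:
    camera = vimba.camera(0)
    camera.open()

    camera.arm('SingleFrame')
    frame = camera.acquire_frame()
    img = get_frame_array(frame)   # (height, width) uint16 array
    print(img.shape, img.dtype, img.min(), img.max())

    camera.disarm()
    camera.close()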