Example #1
def main(args):
    try:
        cvBridgeDemo()
        rospy.spin()
    except KeyboardInterrupt:
        rospy.loginfo('Shutting down nodes')
        cv2.destroyAllWindows()
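rospy normally converts Ctrl-C into a clean node shutdown rather than raising KeyboardInterrupt out of spin(), so the except branch above may never run. A sketch of the same cleanup via rospy.on_shutdown (assuming cvBridgeDemo registers the node, as in the example):

def main(args):
    rospy.on_shutdown(cv2.destroyAllWindows)  # runs on any shutdown path
    cvBridgeDemo()
    rospy.spin()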
Example #2
def main(args):
    try:
        cvBridgeDemo()
        rospy.spin()
    except KeyboardInterrupt:
        print "Shutting down vision node."
        cv2.DestroyAllWindows()
Example #3
def main(args):
    try:
        turtlebot_openCV()
        rospy.spin()
    except KeyboardInterrupt:
        print("Shutting down")
        cv2.destroyAllWindows()
Example #4
def show_inference(model, image_path):
    # the array based representation of the image will be used later in order to prepare the
    # result image with boxes and labels on it.
    #image_np = np.array(Image.open(image_path))

    # `cap` is assumed to be an already-opened cv2.VideoCapture (see below)
    while True:
        ret, image_np = cap.read()
        if not ret:
            break
        # Actual detection.
        output_dict = run_inference_for_single_image(model, image_np)
        # Visualization of the results of a detection.
        counting_mode = vis_util.visualize_boxes_and_labels_on_image_array(
            image_np,
            output_dict['detection_boxes'],
            output_dict['detection_classes'],
            output_dict['detection_scores'],
            category_index,
            instance_masks=output_dict.get('detection_masks_reframed', None),
            use_normalized_coordinates=True,
            line_thickness=4)

        print(counting_mode)

        cv2.imshow("detect", cv2.resize(image_np, (800, 600)))
        if cv2.waitKey(25) & 0xFF == ord('q'):
            cv2.destroyAllWindows()
            break
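show_inference reads from a module-level cap that the snippet never defines; presumably it is an already-opened capture, e.g. (a sketch):

import cv2
cap = cv2.VideoCapture(0)  # or a video file path, as the original project intended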
Example #5
def main():
    try:
        fp_matching_Demo()
        rospy.spin()
    except KeyboardInterrupt:
        print("Shutting down vision node.")
        cv2.destroyAllWindows()
Example #6
def main(args):
    try:
        BlobTracker()
        rospy.spin()
    except KeyboardInterrupt:
        print "Shutting down vision node."
        cv2.DestroyAllWindows()
Example #7
def main():
    windowName = 'video capture'
    cv2.namedWindow(windowName)
    cap = cv2.VideoCapture(0)
    if cap.isOpened():
        ret, img = cap.read()
    else:
        ret = False

    while ret:

        ret, img = cap.read()
        hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
        #blue color
        low = np.array([100, 50, 50])
        high = np.array([140, 255, 255])
        mask = cv2.inRange(hsv, low, high)
        res = cv2.bitwise_and(img, img, mask=mask)

        cv2.imshow(windowName, img)
        cv2.imshow('mask', mask)
        cv2.imshow('res', res)
        if cv2.waitKey(1) == 27:
            break

    cap.release()
    cv2.destroyAllWindows()
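The blue range above is hard-coded. To derive inRange() bounds for another color, a common trick (a sketch, not part of the example; beware hue wrap-around near red) is to convert one pixel of the target BGR color to HSV and widen the hue band:

import cv2
import numpy as np

target_bgr = np.uint8([[[255, 0, 0]]])                      # pure blue in BGR
hue = cv2.cvtColor(target_bgr, cv2.COLOR_BGR2HSV)[0][0][0]  # 120 for blue
low = np.array([hue - 10, 50, 50])
high = np.array([hue + 10, 255, 255])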
Example #8
def do_capture(args):
    print("Capturing", args.url)
    capture = cv2.VideoCapture(args.url)
    i = 0
    while True:
        if args.nframes is not None and i >= args.nframes:
            print("Done")
            break
        i += 1

        # read() returns a (ret, image) tuple; the original code compared the
        # tuple itself against None, which can never be true
        ret, img = capture.read()
        if not ret or img is None or img.size == 0:
            print('WARNING: empty frame - restarting video capture')
            capture = cv2.VideoCapture(args.url)
            if not capture.isOpened():
                print('Camera not found')
                break
            continue

        if args.display:
            cv2.imshow("Display", img)

        if args.skip is not None and i % (args.skip + 1) != 0:
            continue
        path = "%s_%s.%s" % (args.prefix, "{:%Y_%m_%d_%H_%M_%S_%f}".format(
            datetime.now()), args.fmt)
        print(path)
        cv2.imwrite(path, img)
        if cv2.waitKey(22) & 0xFF == ord('q'):
            break

    capture.release()
    cv2.destroyAllWindows()
    print("Exit")
Example #9
def main(args):
    try:
        node_name = "ros2opencv2"
        ROS2OpenCV2(node_name)
        rospy.spin()
    except KeyboardInterrupt:
        print "Shutting down ros2opencv node."
        cv2.DestroyAllWindows()
Example #10
def main(args):
    rospy.init_node('HOG', anonymous=True)
    HOG(args[1])
    try:
        rospy.spin()
    except KeyboardInterrupt:
        print "Shutting down"
    cv2.DestroyAllWindows()
Example #11
def main(args):
    rospy.init_node('camStream', anonymous = True)
    try:
        camStream().cameraStream()
        rospy.spin()
    except KeyboardInterrupt:
        print("Shutting down vision node.")
        cv2.destroyAllWindows()
Example #12
def CLOSE():
    if (KEY == 11):
        Arduino.write(EMSTOP)
        if (CAM == 1):
            CV.destroyAllWindows()
        MW.quit()
        pygame.quit()
        sys.exit()
Example #13
def main(args):
    ic = image_converter()
    rospy.init_node('image_converter', anonymous=True)
    try:
        rospy.spin()
    except KeyboardInterrupt:
        print "Shutting down"
    cv.DestroyAllWindows()
Example #14
def main(args):

    rospy.init_node('measure_fish', anonymous=True)
    ic = measure_fish()

    try:
        rospy.spin()
    except KeyboardInterrupt:
        print("Shutting down")
    cv2.destroyAllWindows()
Example #15
def main(args):

    rospy.init_node('fish_image', anonymous=True)
    ic = target_image_overlay()

    try:
        rospy.spin()
    except KeyboardInterrupt:
        print "Shutting down"
    cv2.DestroyAllWindows()
Example #16
def random_meta_check(dataset_dicts, dataset_metadata, name='Test'):
    for d in random.sample(dataset_dicts, 3):
        img = cv2.imread(d["file_name"])
        visualizer = Visualizer(img[:, :, ::-1],
                                metadata=dataset_metadata,
                                scale=0.5)
        vis = visualizer.draw_dataset_dict(d)
        cv2.imshow(name, vis.get_image()[:, :, ::-1])
        k = cv2.waitKey(0)
        if k == 27:
            cv2.destroyAllWindows()
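random_meta_check leans on detectron2 plus a dataset that is already registered; the imports and lookup it assumes look roughly like this (the dataset name is hypothetical):

import random
import cv2
from detectron2.utils.visualizer import Visualizer
from detectron2.data import DatasetCatalog, MetadataCatalog

dataset_dicts = DatasetCatalog.get("my_dataset")       # hypothetical name
dataset_metadata = MetadataCatalog.get("my_dataset")
random_meta_check(dataset_dicts, dataset_metadata)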
Example #17
def main(args):
    try:
        node_name = "ros2opencv2"
        ros2opencv = ROS2OpenCV2(node_name)
        while not rospy.is_shutdown():
            if ros2opencv.display_image is not None:
                ros2opencv.show_image(ros2opencv.cv_window_name,
                                      ros2opencv.display_image)

    except KeyboardInterrupt:
        print "Shutting down ros2opencv node."
        cv.DestroyAllWindows()
Example #18
def CLOSE():
    global KEY
    global Arduino
    global SIGNAL
    global CAM
    global EMSTOP
    if (KEY == 11):
        Arduino.write(EMSTOP)
        Arduino.close()
        SIGNAL = 'EMSTOP'
        if (CAM == 1):
            cv2.destroyAllWindows()
        pygame.quit()
        sys.exit()
Example #19
    def show_image(self, image=0):
        """ Plot an image

        :param image: This parameter is used to determine which image is going
                      to be displayed:

                        a. 0 --> Show the last image

                        b. 1 --> Show the first image

        :type image: Integer

        """
        if image == 0:
            cv2.imshow(self.cv_window_name, self.image_2)
        else:
            cv2.imshow(self.cv_window_name, self.image_1)
        self.keystroke = cv2.waitKey(0) & 0xFF
        if self.keystroke == 27:
            cv2.destroyAllWindows()
        elif self.keystroke == ord('s'):
            cv2.imwrite('Image.png', self.image_2)
            cv2.destroyAllWindows()
Example #20
def get_plate_num_images(files,
                         show=False,
                         validation=False,
                         path='input',
                         lite=False):
    """
	Finds all the potential car plates among given photos
	:files type: list
	:show type: bool
	:validation type: bool
	:rtype: True
	"""

    cascade = cv2.CascadeClassifier('licence_plate.xml')

    if not exists('result'):
        makedirs('result')

    result_file_name = 0
    for file in files:

        if file.endswith('.mp4') or file.endswith('.avi') or file.endswith(
                '.mpeg'):
            split_video_into_frames(file)
            continue

        img = cv2.imread(join('input', file))

        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        number_plate = cascade.detectMultiScale(gray, 1.3, 5)

        for (x, y, w, h) in number_plate:
            cropped_licence_plate = img[y:y + h, x:x + w]
            cv2.imwrite(join('result', '{}.png'.format(result_file_name)),
                        cropped_licence_plate)
            find_plate_number('{}.png'.format(result_file_name),
                              validation=validation)
            result_file_name += 1

            if show:
                cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)

        if show:
            cv2.imshow('img', img)
            k = cv2.waitKey(0)

    if show:
        cv2.destroyAllWindows()
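Note that cv2.CascadeClassifier does not raise when the XML file is missing; detectMultiScale simply finds nothing. A guarded load (a sketch, reusing the file name from the example):

cascade = cv2.CascadeClassifier('licence_plate.xml')
if cascade.empty():
    raise IOError('licence_plate.xml not found or failed to load')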
Example #21
def print_img(r):
    path_img = "C:/Anaconda codes/speaker reco/something new/for hack/images/"
    path_files = os.listdir(path_img)

    print(path_files)
    t = r
    img = cv2.imread(path_img + r)

    for i in range(0, len(path_files)):
        if t == path_files[i].lower():
            cv2.imshow("image", img)
            cv2.waitKey(0)
            cv2.destroyAllWindows()
Example #22
def publisher_test():
    capture = cv2.VideoCapture(0)
    capture.set(cv2.CAP_PROP_FRAME_WIDTH, 1920)
    capture.set(cv2.CAP_PROP_FRAME_HEIGHT, 1080)
    check, img = capture.read()

    rospy.init_node("camtest")
    pub = rospy.Publisher("pub_test", String, queue_size=1)

    while True:
        ret, frame = capture.read()
        cv2.imshow('pls wrk', frame)
        pub.publish("I'm working!")

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    capture.release()
    cv2.destroyAllWindows()
Example #23
def get_angle_of_bdisk(images_list):

    cap = cv2.VideoCapture(0)
    ret, frame = cap.read()

    if ret:

        frame = segment_blue_disk(frame)
        frame = cv2.resize(frame, (215, 215))
        frame = cv2.medianBlur(frame, 3)
        frame = cv2.fastNlMeansDenoisingColored(frame, None, 10, 10, 7, 21)
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        # NOTE: new_IM and im are not defined in this snippet; they
        # presumably hold the processed disk image(s) from earlier steps
        angle = get_degree_value(new_IM, im)
        get_output(angle)
        print(angle)
        cv2.imshow('frame', frame)
        cv2.waitKey(0)
        cap.release()
        cv2.destroyAllWindows()
Example #24
    def showImage(self):
        while True:
            try:
                self._imgData = self._videoProxy.getImageRemote(
                    self._imgClient)
                width = self._imgData[0]
                height = self._imgData[1]
                ima_data = self._imgData[6]
                Nao_ima = Image.frombytes('RGB', (width, height), ima_data)
                # `array` is numpy.array; `cv` aliases cv2 here
                Nao_ima = cv.cvtColor(array(Nao_ima), cv.COLOR_RGB2BGRA)
                cv.imshow("Camera_OpenCV2", Nao_ima)
            except KeyboardInterrupt:
                break
            except Exception:
                pass
            if cv.waitKey(20) == 27:
                break
        cv.destroyAllWindows()
        self._unregisterImageClient()
Example #25
def decodeTag():
    cap = cv2.VideoCapture('./data/Tag0.mp4')
    # Check if camera opened successfully
    if not cap.isOpened():
        print("Error opening video stream or file")

    # Read until video is completed
    while (cap.isOpened()):
        # Capture frame-by-frame
        ret, frame = cap.read()
        if ret:
            gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            gray = cv2.bilateralFilter(gray_frame, 15, 75, 75)
            ret, gray = cv2.threshold(gray, 200, 255, cv2.THRESH_BINARY)
            cnts, _ = cv2.findContours(gray.copy(), cv2.RETR_TREE,
                                       cv2.CHAIN_APPROX_SIMPLE)
            cnts = sorted(cnts, key=cv2.contourArea, reverse=True)
            squares = []
            for cnt in cnts:
                cnt_len = cv2.arcLength(cnt, True)
                cnt = cv2.approxPolyDP(cnt, 0.1 * cnt_len, True)
                if len(cnt) == 4:
                    if 2000 < cv2.contourArea(cnt) < 17500:
                        squares.append(cnt)

            cv2.drawContours(frame, squares, -1, (255, 128, 0), 3)
            # NOTE: find_homography() and `warped` come from elsewhere in the
            # original project; the homography maps each square to an upright
            # tag image that decode_tag() reads (see the sketch below)
            for cnt in squares:
                H = find_homography()

            tag_angle, tag_id = decode_tag(warped)
            final = image_overlay(frame, squares[0].reshape((4, 2)), tag_angle)
            cv2.imshow('final', final)
            # Press Q on keyboard to  exit
            if cv2.waitKey(25) & 0xFF == ord('q'):
                break  # Break the loop
        else:
            break
    cap.release()
    cv2.destroyAllWindows()
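The homography/warp step is elided in the snippet above. A minimal sketch of producing an upright tag image from a detected quad with plain OpenCV (corner ordering is assumed already consistent; a real pipeline must sort the corners first):

import cv2
import numpy as np

def warp_tag(frame, quad, size=64):
    # quad: 4x2 array of the tag's corner points in the source frame
    src = quad.astype(np.float32)
    dst = np.float32([[0, 0], [size - 1, 0],
                      [size - 1, size - 1], [0, size - 1]])
    H = cv2.getPerspectiveTransform(src, dst)
    return cv2.warpPerspective(frame, H, (size, size))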
Example #26
def depthbs():
    # read first frame for kinect sensor
    depframe = get_depth()
    vidframe = get_video()
    depthbg = depframe
    # depthbg = cv2.cvtColor(depthbg,cv2.COLOR_BGR2GRAY)
    depth0 = depthbg.copy()

    while True:
        # read from kinect sensor
        t1 = time.time()
        depframe = get_depth()
        vidframe = get_video()
        depth = depframe
        # depth = cv2.cvtColor(depth,cv2.COLOR_BGR2GRAY)
        # compare current frame to t-1 and bg
        depmask1 = cv2.absdiff(depth, depth0)
        depmask3 = cv2.absdiff(depth, depthbg)
        # threshold difference images
        ret, depmask1 = cv2.threshold(depmask1, 0, 255, cv2.THRESH_TRIANGLE)
        ret, depmask3 = cv2.threshold(depmask3, 0, 255, cv2.THRESH_TRIANGLE)
        # OR operation
        depmaskf3 = cv2.bitwise_or(depmask1, depmask3)
        # update previous frame
        depth0 = depth
        # show window
        cv2.imshow('foreground mask', depmaskf3)
        cv2.imshow('RGB', vidframe)
        if cv2.waitKey(1) == ord('q'):
            break
        t2 = time.time()
        tf = (t2 - t1)
        print(tf)
    # clean up video
    cv2.destroyAllWindows()
    cv2.waitKey(1)
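The hand-rolled absdiff/threshold/OR pipeline above can also be swapped for OpenCV's built-in subtractors; a sketch with MOG2 on an ordinary webcam (default parameters, not tuned for Kinect depth data):

import cv2

bg_sub = cv2.createBackgroundSubtractorMOG2(history=500, detectShadows=False)
cap = cv2.VideoCapture(0)
while True:
    ret, frame = cap.read()
    if not ret:
        break
    fg_mask = bg_sub.apply(frame)
    cv2.imshow('foreground mask', fg_mask)
    if cv2.waitKey(1) == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()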
Example #27
import cv2
import numpy as np

digits = cv2.imread("digits.png", cv2.IMREAD_GRAYSCALE)
rows = np.vsplit(digits, 50)
cv2.imshow("row 0", rows[0])
cv2.waitKey(0)
cv2.destroyAllWindows()
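The snippet shows only the first of the 50 rows. In OpenCV's standard digits.png the sheet is 50 rows by 100 columns of 20x20 glyphs, so each row splits further into training samples (a sketch continuing the snippet above):

cells = [np.hsplit(row, 100) for row in rows]          # 50 x 100 cells of 20x20
samples = np.array(cells).reshape(-1, 400).astype(np.float32)
print(samples.shape)                                    # (5000, 400)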
Example #28
import rospy
from sensor_msgs.msg import Image
import cv2 as cv
from cv_bridge import CvBridge, CvBridgeError

if __name__ == "__main__":

    rospy.init_node('VideoPublisher', anonymous=True)
    bridge = CvBridge()
    VideoRaw = rospy.Publisher('/camera/rgb/image_raw', Image, queue_size=2)
    rate = rospy.Rate(1)
    cam = cv.VideoCapture(
        '/home/ismayil/catkin_ws/src/ui_interpretation/Data/images/video.avi')
    if not cam.isOpened():
        print("Error opening video stream or file")
    while cam.isOpened():
        meta, frame = cam.read()
        if meta:

            try:
                # the encoding belongs to cv2_to_imgmsg, not publish()
                msg_frame = bridge.cv2_to_imgmsg(frame, "bgr8")
                VideoRaw.publish(msg_frame)
            except CvBridgeError as e:
                print(e)
            cv.imshow("goruntu", frame)
            cv.waitKey(3)
            #rate.sleep()
        else:
            break

    cam.release()
    cv.destroyAllWindows()
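The matching subscriber side would look roughly like this (a sketch; the topic and encoding mirror the publisher above):

import rospy
from sensor_msgs.msg import Image
import cv2 as cv
from cv_bridge import CvBridge

bridge = CvBridge()

def on_image(msg):
    frame = bridge.imgmsg_to_cv2(msg, "bgr8")
    cv.imshow("goruntu", frame)
    cv.waitKey(3)

rospy.init_node('VideoSubscriber', anonymous=True)
rospy.Subscriber('/camera/rgb/image_raw', Image, on_image)
rospy.spin()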
Example #29
    def run(self):
        #initiate font
        font = cv.InitFont(cv.CV_FONT_HERSHEY_SIMPLEX, 1, 1, 0, 3, 8)
        # instantiate images
        hsv_img = cv.CreateImage(cv.GetSize(cv.QueryFrame(self.capture)), 8, 3)
        threshold_img1 = cv.CreateImage(cv.GetSize(hsv_img), 8, 1)
        threshold_img1a = cv.CreateImage(cv.GetSize(hsv_img), 8, 1)
        threshold_img2 = cv.CreateImage(cv.GetSize(hsv_img), 8, 1)
        i = 0
        writer = cv.CreateVideoWriter('angle_tracking.avi', cv.CV_FOURCC('M', 'J', 'P', 'G'), 30, cv.GetSize(hsv_img), 1)

        while True:
            # capture the image from the cam
            img = cv.QueryFrame(self.capture)

            # convert the image to HSV
            cv.CvtColor(img, hsv_img, cv.CV_BGR2HSV)

            # threshold the image to isolate two colors
            cv.InRangeS(hsv_img, (165, 145, 100), (250, 210, 160), threshold_img1)  # red
            cv.InRangeS(hsv_img, (0, 145, 100), (10, 210, 160), threshold_img1a)  # red again
            cv.Add(threshold_img1, threshold_img1a, threshold_img1)  # this is combining the two limits for red
            cv.InRangeS(hsv_img, (105, 180, 40), (120, 260, 100), threshold_img2)  # blue

            # determine the moments of the two objects
            threshold_img1 = cv.GetMat(threshold_img1)
            threshold_img2 = cv.GetMat(threshold_img2)
            moments1 = cv.Moments(threshold_img1, 0)
            moments2 = cv.Moments(threshold_img2, 0)
            area1 = cv.GetCentralMoment(moments1, 0, 0)
            area2 = cv.GetCentralMoment(moments2, 0, 0)

            # initialize coordinates (overwritten below once objects are found);
            # the original for-loop only rebound a local name and changed nothing
            x1 = y1 = x2 = y2 = 0

            # there can be noise in the video so ignore objects with small areas
            if (area1 > 200000):
                # x and y coordinates of the center of the object is found by dividing the 1,0 and 0,1 moments by the area
                x1 = int(cv.GetSpatialMoment(moments1, 1, 0) / area1)
                y1 = int(cv.GetSpatialMoment(moments1, 0, 1) / area1)

            # draw circle
            cv.Circle(img, (x1, y1), 2, (0, 255, 0), 20)

            # write x and y position
            cv.PutText(img, str(x1) +', '+str(y1), (x1, y1 + 20), font, 255)  # Draw the text

            if (area2 > 100000):
                # x and y coordinates of the center of the object is found by dividing the 1,0 and 0,1 moments by the area
                x2 = int(cv.GetSpatialMoment(moments2, 1, 0) / area2)
                y2 = int(cv.GetSpatialMoment(moments2, 0, 1) / area2)

                # draw circle
                cv.Circle(img, (x2, y2), 2, (0, 255, 0), 20)

            cv.PutText(img, str(x2) +', '+str(y2), (x2, y2 + 20), font, 255)  # Draw the text
            cv.Line(img, (x1, y1), (x2, y2), (0, 255, 0), 4, cv.CV_AA)
            # draw line and angle
            cv.Line(img, (x1, y1), (cv.GetSize(img)[0], y1), (100, 100, 100, 100), 4, cv.CV_AA)
            x1 = float(x1)
            y1 = float(y1)
            x2 = float(x2)
            y2 = float(y2)
            # atan2 avoids a ZeroDivisionError when the objects align vertically
            angle = int(math.atan2(y1 - y2, x2 - x1) * 180 / math.pi)
            cv.PutText(img, str(angle), (int(x1) + 50, (int(y2) + int(y1)) / 2), font, 255)

            # cv.WriteFrame(writer,img)

            # display frames to users
            cv.ShowImage('Target', img)
            cv.ShowImage('Threshold1', threshold_img1)
            cv.ShowImage('Threshold2', threshold_img2)
            cv.ShowImage('hsv', hsv_img)
            # Listen for ESC or ENTER key
            c = cv.WaitKey(7) % 0x100
            if c == 27 or c == 10:
                break
        cv.DestroyAllWindows()
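The legacy cv.Moments / cv.GetSpatialMoment calls map directly onto the modern cv2 API; a sketch of the same centroid computation with cv2 (mask is any single-channel threshold image):

import cv2

def centroid(mask):
    M = cv2.moments(mask, binaryImage=True)
    if M['m00'] == 0:
        return None                      # no object detected
    return (int(M['m10'] / M['m00']), int(M['m01'] / M['m00']))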
Example #30
def infer_on_stream(args, client):
    """
    Initialize the inference network, stream video to network,
    and output stats and video.

    :param args: Command line arguments parsed by `build_argparser()`
    :param client: MQTT client
    :return: None
    """
    #Initial, global variables for counting
    current_request_id = 0
    start_time = 0
    last_count = 0
    total_count = 0
    
    
    # Initialise the class
    infer_network = Network()
    # Set Probability threshold for detections
    prob_threshold = args.prob_threshold

    ###  Load the model through `infer_network` ###
    infer_network.load_model(args.model, args.device, current_request_id, args.cpu_extension)
    model_input_shape = infer_network.get_input_shape()

    ### Handle the input stream ###
    single_image_mode = False
    
    # a `while` here would loop forever; these branches are mutually exclusive
    if args.input == 'CAM':
        input_stream = 0

    elif args.input.endswith('.jpg') or args.input.endswith('.png') or args.input.endswith('.bmp'):
        single_image_mode = True
        input_stream = args.input

    else:
        input_stream = args.input
        assert os.path.isfile(args.input), "The input file does not exist"
        
    cap = cv2.VideoCapture(input_stream)
    
    if input_stream:
        cap.open(input_stream)
        
    if not cap.isOpened():
        log.error('Error! The video file/source is not opening' )
    
    #inital width and height taken from the input
    initial_width = int(cap.get(3))
    initial_height = int(cap.get(4))
     ###  Loop until stream is over ###   
    while cap.isOpened():
         ###  Read from the video capture ###
        flag, frame = cap.read()
        
        if not flag:
            break
            
        pressed_key = cv2.waitKey(60)
        ### Pre-process the image as needed ###
        width = model_input_shape[3]
        height = model_input_shape[2]
        processed_input_image = cv2.resize(frame,(width, height))
        processed_input_image = processed_input_image.transpose((2, 0, 1))
        processed_input_image = processed_input_image.reshape(model_input_shape[0], model_input_shape[1], height, width)
        ###  Start asynchronous inference for specified request ###
        start_of_inference = time.time()
        infer_network.exec_net(current_request_id, processed_input_image)
        
        ###  Wait for the result ###
        if infer_network.wait(current_request_id) == 0:
            detection_time = (time.time() - start_of_inference) * 1000
            ###  Get the results of the inference request ###
            result = infer_network.get_output(current_request_id)
            ### Extract any desired stats from the results ###
            frame, present_count = draw_rectangular_box(frame, result, initial_width, initial_height, prob_threshold)
            ##Find out the inference time and write the result on the video as text.
            inf_time_msg = "Inference time: {:.5f}ms".format(detection_time)
            cv2.putText(frame, inf_time_msg, (20,10), cv2.FONT_HERSHEY_COMPLEX, 0.5, (200, 10, 10), 1)
            #Person's count is calculated here
            if present_count > last_count:
                start_time = time.time()
                total_count += present_count - last_count
                client.publish('person', json.dumps({"total": total_count}))
            #Duration is calculated here
            if present_count < last_count:
                person_duration = int(time.time() - start_time)
                # This is to prevent double counting. Higher value to ensure that the app does not get oversensitive#
                if person_duration > 5:
                    total_count -= 1
                client.publish('person/duration', json.dumps({"duration": person_duration}))
            
            client.publish('person', json.dumps({"count": present_count}))
            last_count = present_count
            # End if escape key is pressed
            if pressed_key == 27:
                break
        ###  Send the frame to the FFMPEG server ###
        sys.stdout.buffer.write(frame)
        sys.stdout.flush()
        ### Write an output image if `single_image_mode` ###
        if single_image_mode:
            cv2.imwrite('output_image.jpg', frame)

    # cleanup belongs after the loop, not inside it
    cap.release()
    cv2.destroyAllWindows()
    client.disconnect()
    infer_network.clean()