Code example #1
File: test——.py Project: bluebloodAxe/tello_pose
import sys
import time
import traceback

import av
import cv2
import numpy
import tellopy


def main():
    drone = tellopy.Tello()
    try:
        drone.connect()
        drone.wait_for_connection(60.0)

        container = av.open(drone.get_video_stream())
        # skip first 300 frames
        frame_skip = 300
        while True:
            for frame in container.decode(video=0):
                if frame_skip > 0:
                    frame_skip -= 1
                    continue
                start_time = time.time()
                image = cv2.cvtColor(numpy.array(frame.to_image()),
                                     cv2.COLOR_RGB2GRAY)
                cv2.imshow('Original', image)
                cv2.imshow('Canny', cv2.Canny(image, 100, 200))
                cv2.waitKey(1)
                # Skip the frames that arrived while this one was processed
                frame_skip = int((time.time() - start_time) / frame.time_base)

    except Exception as ex:
        exc_type, exc_value, exc_traceback = sys.exc_info()
        traceback.print_exception(exc_type, exc_value, exc_traceback)
        print(ex)
    finally:
        drone.quit()
        cv2.destroyAllWindows()
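
The excerpt omits the script entry point; the same source file (see code example #8) invokes main() with the standard guard:

if __name__ == '__main__':
    main()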
Code example #2
import numpy as np
import cv2 as cv


def show_lines(img, lista):

    new = np.copy(img)

    # Draw each (x1, y1, x2, y2) segment in red
    for linea in lista:
        cv.line(new, (linea[0], linea[1]), (linea[2], linea[3]), (0, 0, 255),
                3, cv.LINE_AA)

    cv.namedWindow("window", cv.WND_PROP_FULLSCREEN)
    cv.setWindowProperty("window", cv.WND_PROP_FULLSCREEN,
                         cv.WINDOW_FULLSCREEN)
    cv.imshow("window", new)
    cv.waitKey(0)
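
A possible way to drive show_lines, assumed here rather than taken from the project: the segments come from cv.HoughLinesP on a Canny edge map, and the input file name is hypothetical.

import numpy as np
import cv2 as cv

img = cv.imread("lines.jpg")  # hypothetical input file
edges = cv.Canny(cv.cvtColor(img, cv.COLOR_BGR2GRAY), 50, 150)
# HoughLinesP returns an (N, 1, 4) array; unwrap each row to (x1, y1, x2, y2)
segments = cv.HoughLinesP(edges, 1, np.pi / 180, threshold=80,
                          minLineLength=30, maxLineGap=10)
if segments is not None:
    show_lines(img, [s[0] for s in segments])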
Code example #3
File: main.py Project: ldbl1/BarCodeReader
import cv2
import numpy as np


def display(im, decodedObjects):

    # Loop over all decoded objects
    for decodedObject in decodedObjects:
        points = decodedObject.polygon

        # If the points do not form a quad, find convex hull
        if len(points) > 4:
            hull = cv2.convexHull(
                np.array([point for point in points], dtype=np.float32))
            # cv2.line below needs integer pixel coordinates
            hull = list(map(tuple, np.squeeze(hull).astype(int)))
        else:
            hull = points

        # Number of points in the convex hull
        n = len(hull)

        # Draw the convex hull
        for j in range(0, n):
            cv2.line(im, hull[j], hull[(j + 1) % n], (255, 0, 0), 3)

    # Display results
    cv2.imshow("Results", im)
    cv2.waitKey(0)
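
A hedged usage sketch: it assumes decodedObjects comes from pyzbar, whose decoded objects carry the .polygon attribute used above, and the image path is hypothetical.

import cv2
from pyzbar.pyzbar import decode

im = cv2.imread("barcode.png")  # hypothetical input file
display(im, decode(im))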
Code example #4
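The snippet begins mid-script; a plausible prelude, assumed here and mirroring the pyzbar setup in code example #3, would be:

import cv2
from pyzbar.pyzbar import decode

image = cv2.imread("barcode.png")  # hypothetical input path
barcodes = decode(image)
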
# Loop over the detected barcodes
for barcode in barcodes:
    # Extract the bounding box location of the barcode and draw the
    # bounding box surrounding the barcode on the image
    (x, y, w, h) = barcode.rect
    cv2.rectangle(image, (x, y), (x + w, y + h), (0, 0, 255), 2)

    # The barcode data is a bytes object, so to draw it on the output
    # image we first need to convert it to a string
    barcodeData = barcode.data.decode("utf-8")
    barcodeType = barcode.type

    # Draw the barcode data and barcode type on the image
    text = "{} ({})".format(barcodeData, barcodeType)
    cv2.putText(image, text, (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                (0, 0, 255), 2)

    # Print the barcode type and data to the terminal
    print("[INFO] Found {} barcode: {}".format(barcodeType, barcodeData))

# Show the output image
cv2.imshow("Image", image)
cv2.waitKey(0)
'''
camera = PiCamera()

camera.start_preview()
sleep(20)
camera.stop_preview()
'''
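
If the commented-out preview block is re-enabled, it additionally needs the standard picamera imports (assuming this runs on a Raspberry Pi):

from time import sleep
from picamera import PiCamera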
Code example #5
import sys

import cv2 as cv
img = cv.imread(sys.argv[1], cv.IMREAD_COLOR)
cv.imshow("original", img)
gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
gray = cv.GaussianBlur(gray, (7, 7), 1.5)
edges = cv.Canny(gray, 0, 50)
cv.imshow("edges", edges)
cv.waitKey()
Code example #6
# -*- coding: utf-8 -*-
"""
Created on Mon May 29 16:29:35 2017

@author: Gus
"""

import cv2

cap = cv2.VideoCapture(0)

while True:
    # Capture frame-by-frame
    ret, frame = cap.read()
    if not ret:
        break

    # Our operations on the frame come here
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    # Display the resulting frame
    cv2.imshow('frame', gray)
    # waitKey takes an integer count of milliseconds, not a float
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# Release the capture and close the window on exit
cap.release()
cv2.destroyAllWindows()
Code example #7
File: demo_cpm_hand.py Project: elincorona/project2
# Imports needed by this excerpt; cpm_utils, cpm_hand_slim, FLAGS,
# visualize_result, joint_color_code, limbs and PYTHON_VERSION are
# defined elsewhere in the project
import math
import time

import cv2
import numpy as np
import tensorflow as tf


def main(argv):
    tf_device = '/gpu:0'
    with tf.device(tf_device):
        """Build graph
        """
        if FLAGS.color_channel == 'RGB':
            input_data = tf.placeholder(
                dtype=tf.float32,
                shape=[None, FLAGS.input_size, FLAGS.input_size, 3],
                name='input_image')
        else:
            input_data = tf.placeholder(
                dtype=tf.float32,
                shape=[None, FLAGS.input_size, FLAGS.input_size, 1],
                name='input_image')

        center_map = tf.placeholder(
            dtype=tf.float32,
            shape=[None, FLAGS.input_size, FLAGS.input_size, 1],
            name='center_map')

        model = cpm_hand_slim.CPM_Model(FLAGS.stages, FLAGS.joints + 1)
        model.build_model(input_data, center_map, 1)

    saver = tf.train.Saver()
    """Create session and restore weights
    """
    sess = tf.Session()

    sess.run(tf.global_variables_initializer())
    if FLAGS.model_path.endswith('pkl'):
        model.load_weights_from_file(FLAGS.model_path, sess, False)
    else:
        saver.restore(sess, FLAGS.model_path)

    test_center_map = cpm_utils.gaussian_img(FLAGS.input_size,
                                             FLAGS.input_size,
                                             FLAGS.input_size / 2,
                                             FLAGS.input_size / 2,
                                             FLAGS.cmap_radius)
    test_center_map = np.reshape(test_center_map,
                                 [1, FLAGS.input_size, FLAGS.input_size, 1])

    # Check weights
    for variable in tf.trainable_variables():
        with tf.variable_scope('', reuse=True):
            var = tf.get_variable(variable.name.split(':0')[0])
            print(variable.name, np.mean(sess.run(var)))

    if not FLAGS.DEMO_TYPE.endswith(('png', 'jpg')):
        cam = cv2.VideoCapture(FLAGS.cam_num)

    # Create kalman filters
    if FLAGS.KALMAN_ON:
        kalman_filter_array = [
            cv2.KalmanFilter(4, 2) for _ in range(FLAGS.joints)
        ]
        for joint_kalman_filter in kalman_filter_array:
            joint_kalman_filter.transitionMatrix = np.array(
                [[1, 0, 1, 0], [0, 1, 0, 1], [0, 0, 1, 0], [0, 0, 0, 1]],
                np.float32)
            joint_kalman_filter.measurementMatrix = np.array(
                [[1, 0, 0, 0], [0, 1, 0, 0]], np.float32)
            joint_kalman_filter.processNoiseCov = np.array(
                [[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]],
                np.float32) * FLAGS.kalman_noise
    else:
        kalman_filter_array = None

    with tf.device(tf_device):

        while True:
            t1 = time.time()
            if FLAGS.DEMO_TYPE.endswith(('png', 'jpg')):
                test_img = cpm_utils.read_image(FLAGS.DEMO_TYPE, [],
                                                FLAGS.input_size, 'IMAGE')
            else:
                test_img = cpm_utils.read_image([], cam, FLAGS.input_size,
                                                'WEBCAM')

            test_img_resize = cv2.resize(test_img,
                                         (FLAGS.input_size, FLAGS.input_size))
            print('img read time %f' % (time.time() - t1))

            if FLAGS.color_channel == 'GRAY':
                test_img_resize = np.dot(test_img_resize[..., :3],
                                         [0.299, 0.587, 0.114]).reshape(
                                             (FLAGS.input_size,
                                              FLAGS.input_size, 1))
                cv2.imshow('color', test_img.astype(np.uint8))
                cv2.imshow('gray', test_img_resize.astype(np.uint8))
                cv2.waitKey(1)

            test_img_input = test_img_resize / 256.0 - 0.5
            test_img_input = np.expand_dims(test_img_input, axis=0)

            if FLAGS.DEMO_TYPE.endswith(('png', 'jpg')):
                # Inference
                t1 = time.time()
                predict_heatmap, stage_heatmap_np = sess.run(
                    [
                        model.current_heatmap,
                        model.stage_heatmap,
                    ],
                    feed_dict={
                        'input_image:0': test_img_input,
                        'center_map:0': test_center_map
                    })

                # Show visualized image
                demo_img = visualize_result(test_img, FLAGS, stage_heatmap_np,
                                            kalman_filter_array)
                cv2.imshow('demo_img', demo_img.astype(np.uint8))
                if cv2.waitKey(0) == ord('q'): break
                print('fps: %.2f' % (1 / (time.time() - t1)))
            elif FLAGS.DEMO_TYPE == 'MULTI':

                # Inference
                t1 = time.time()
                predict_heatmap, stage_heatmap_np = sess.run(
                    [
                        model.current_heatmap,
                        model.stage_heatmap,
                    ],
                    feed_dict={
                        'input_image:0': test_img_input,
                        'center_map:0': test_center_map
                    })

                # Show visualized image
                demo_img = visualize_result(test_img, FLAGS, stage_heatmap_np,
                                            kalman_filter_array)
                cv2.imshow('demo_img', demo_img.astype(np.uint8))
                if cv2.waitKey(1) == ord('q'): break
                print('fps: %.2f' % (1 / (time.time() - t1)))

            elif FLAGS.DEMO_TYPE == 'SINGLE':

                # Inference
                t1 = time.time()
                stage_heatmap_np = sess.run(
                    [model.stage_heatmap[5]],
                    feed_dict={
                        'input_image:0': test_img_input,
                        'center_map:0': test_center_map
                    })

                # Show visualized image
                demo_img = visualize_result(test_img, FLAGS, stage_heatmap_np,
                                            kalman_filter_array)
                cv2.imshow('current heatmap', (demo_img).astype(np.uint8))
                if cv2.waitKey(1) == ord('q'): break
                print('fps: %.2f' % (1 / (time.time() - t1)))

            elif FLAGS.DEMO_TYPE == 'HM':

                # Inference
                t1 = time.time()
                stage_heatmap_np = sess.run(
                    [model.stage_heatmap[FLAGS.stages - 1]],
                    feed_dict={
                        'input_image:0': test_img_input,
                        'center_map:0': test_center_map
                    })
                print('fps: %.2f' % (1 / (time.time() - t1)))

                demo_stage_heatmap = stage_heatmap_np[
                    len(stage_heatmap_np) - 1][0, :, :,
                                               0:FLAGS.joints].reshape(
                                                   (FLAGS.hmap_size,
                                                    FLAGS.hmap_size,
                                                    FLAGS.joints))
                demo_stage_heatmap = cv2.resize(
                    demo_stage_heatmap, (FLAGS.input_size, FLAGS.input_size))

                vertical_imgs = []
                tmp_img = None
                joint_coord_set = np.zeros((FLAGS.joints, 2))

                for joint_num in range(FLAGS.joints):
                    # Concat until 4 img
                    if (joint_num % 4) == 0 and joint_num != 0:
                        vertical_imgs.append(tmp_img)
                        tmp_img = None

                    demo_stage_heatmap[:, :, joint_num] *= (
                        255 / np.max(demo_stage_heatmap[:, :, joint_num]))

                    # Plot color joints
                    if np.min(demo_stage_heatmap[:, :, joint_num]) > -50:
                        joint_coord = np.unravel_index(
                            np.argmax(demo_stage_heatmap[:, :, joint_num]),
                            (FLAGS.input_size, FLAGS.input_size))
                        joint_coord_set[joint_num, :] = joint_coord
                        color_code_num = (joint_num // 4)

                        if joint_num in [0, 4, 8, 12, 16]:
                            if PYTHON_VERSION == 3:
                                joint_color = list(
                                    map(lambda x: x + 35 * (joint_num % 4),
                                        joint_color_code[color_code_num]))
                            else:
                                joint_color = map(
                                    lambda x: x + 35 * (joint_num % 4),
                                    joint_color_code[color_code_num])

                            cv2.circle(test_img,
                                       center=(joint_coord[1], joint_coord[0]),
                                       radius=3,
                                       color=joint_color,
                                       thickness=-1)
                        else:
                            if PYTHON_VERSION == 3:
                                joint_color = list(
                                    map(lambda x: x + 35 * (joint_num % 4),
                                        joint_color_code[color_code_num]))
                            else:
                                joint_color = map(
                                    lambda x: x + 35 * (joint_num % 4),
                                    joint_color_code[color_code_num])

                            cv2.circle(test_img,
                                       center=(joint_coord[1], joint_coord[0]),
                                       radius=3,
                                       color=joint_color,
                                       thickness=-1)

                    # Put text
                    tmp = demo_stage_heatmap[:, :, joint_num].astype(np.uint8)
                    tmp = cv2.putText(
                        tmp,
                        'Min:' +
                        str(np.min(demo_stage_heatmap[:, :, joint_num])),
                        org=(5, 20),
                        fontFace=cv2.FONT_HERSHEY_COMPLEX,
                        fontScale=0.3,
                        color=150)
                    tmp = cv2.putText(
                        tmp,
                        'Mean:' +
                        str(np.mean(demo_stage_heatmap[:, :, joint_num])),
                        org=(5, 30),
                        fontFace=cv2.FONT_HERSHEY_COMPLEX,
                        fontScale=0.3,
                        color=150)
                    tmp_img = np.concatenate((tmp_img, tmp), axis=0) \
                        if tmp_img is not None else tmp

                # Plot limbs
                for limb_num in range(len(limbs)):
                    if np.min(
                            demo_stage_heatmap[:, :, limbs[limb_num][0]]
                    ) > -2000 and np.min(
                            demo_stage_heatmap[:, :,
                                               limbs[limb_num][1]]) > -2000:
                        x1 = joint_coord_set[limbs[limb_num][0], 0]
                        y1 = joint_coord_set[limbs[limb_num][0], 1]
                        x2 = joint_coord_set[limbs[limb_num][1], 0]
                        y2 = joint_coord_set[limbs[limb_num][1], 1]
                        length = ((x1 - x2)**2 + (y1 - y2)**2)**0.5
                        if length < 10000 and length > 5:
                            deg = math.degrees(math.atan2(x1 - x2, y1 - y2))
                            polygon = cv2.ellipse2Poly((int(
                                (y1 + y2) / 2), int((x1 + x2) / 2)),
                                                       (int(length / 2), 3),
                                                       int(deg), 0, 360, 1)
                            color_code_num = limb_num // 4
                            if PYTHON_VERSION == 3:
                                limb_color = list(
                                    map(lambda x: x + 35 * (limb_num % 4),
                                        joint_color_code[color_code_num]))
                            else:
                                limb_color = map(
                                    lambda x: x + 35 * (limb_num % 4),
                                    joint_color_code[color_code_num])

                            cv2.fillConvexPoly(test_img,
                                               polygon,
                                               color=limb_color)

                if tmp_img is not None:
                    tmp_img = np.lib.pad(
                        tmp_img,
                        ((0, vertical_imgs[0].shape[0] - tmp_img.shape[0]),
                         (0, 0)),
                        'constant',
                        constant_values=(0, 0))
                    vertical_imgs.append(tmp_img)

                # Concat horizontally
                output_img = None
                for col in range(len(vertical_imgs)):
                    output_img = np.concatenate((output_img, vertical_imgs[col]), axis=1) if output_img is not None else \
                        vertical_imgs[col]

                output_img = output_img.astype(np.uint8)
                output_img = cv2.applyColorMap(output_img, cv2.COLORMAP_JET)
                # The interpolation flag must be passed by keyword; the third
                # positional argument of cv2.resize is dst, not interpolation
                test_img = cv2.resize(test_img, (300, 300),
                                      interpolation=cv2.INTER_LANCZOS4)
                cv2.imshow('hm', output_img)
                cv2.moveWindow('hm', 2000, 200)
                cv2.imshow('rgb', test_img)
                cv2.moveWindow('rgb', 2000, 750)
                if cv2.waitKey(1) == ord('q'): break
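
For reference, a minimal sketch of how one of the constant-velocity Kalman filters configured above can smooth a joint position; the measurement values are made up for illustration:

import numpy as np
import cv2

kf = cv2.KalmanFilter(4, 2)  # state (x, y, dx, dy), measurement (x, y)
kf.transitionMatrix = np.array([[1, 0, 1, 0], [0, 1, 0, 1],
                                [0, 0, 1, 0], [0, 0, 0, 1]], np.float32)
kf.measurementMatrix = np.array([[1, 0, 0, 0], [0, 1, 0, 0]], np.float32)
kf.processNoiseCov = np.eye(4, dtype=np.float32) * 0.03

for x, y in [(100, 100), (102, 101), (105, 103)]:  # made-up detections
    kf.predict()
    state = kf.correct(np.array([[x], [y]], np.float32))
    print(state[:2].ravel())  # filtered (x, y)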
Code example #8
File: test——.py Project: bluebloodAxe/tello_pose
import cv2

cap = cv2.VideoCapture(0)

# Entry point for the tello demo defined in code example #1 (same file)
if __name__ == '__main__':
    main()

# Define the codec and create VideoWriter object
fourcc = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter('output.avi', fourcc, 20.0, (640, 480))

while cap.isOpened():
    ret, frame = cap.read()
    if ret:
        frame = cv2.flip(frame, 0)

        # write the flipped frame
        out.write(frame)

        cv2.imshow('frame', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    else:
        break

# Release everything when the job is finished
cap.release()
out.release()
cv2.destroyAllWindows()
Code example #9
File: vision.py Project: nerdophile/Smart-Mirror
import cv2

if __name__ == "__main__":
    faceCascade = cv2.CascadeClassifier("models/facial_recognition_model.xml")

    video_capture = cv2.VideoCapture(0)

    while True:
        # Capture frame-by-frame
        ret, frame = video_capture.read()
        if not ret:
            break

        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        faces = faceCascade.detectMultiScale(gray,
                                             scaleFactor=1.1,
                                             minNeighbors=5,
                                             minSize=(30, 30),
                                             flags=cv2.CASCADE_SCALE_IMAGE)

        # Draw a rectangle around the faces
        for (x, y, w, h) in faces:
            cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)

        # Display the resulting frame
        cv2.imshow('Video', frame)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    # When everything is done, release the capture
    video_capture.release()
    cv2.destroyAllWindows()
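
If the project-specific cascade file is unavailable, a stock OpenCV Haar cascade works the same way; cv2.data.haarcascades ships with the opencv-python wheels (treat the substitution as an assumption about the project's model):

import cv2

faceCascade = cv2.CascadeClassifier(
    cv2.data.haarcascades + "haarcascade_frontalface_default.xml")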