Example #1
0
 def stopTracking(self):
     GlProp.frametimer.Stop()
     GlProp.processratetimer.Stop()
     GlProp.trackingON = False
     trckParameters = {"Contrast_Difference_Threshold":GlProp.trckThrsh,
                       "MinimumSize":GlProp.szThrsh,
                       "BackgroundUpdateAlpha":GlProp.alpha/1000.,
                       "NeighborDistance":GlProp.mDist,
                       "ImageProcessing":GlProp.TrckParameter}
     #writeMatlab(GlProp.trackData,trckParameters,self.dirname+'\\'+'mtD_'+os.path.splitext(GlProp.vidfile[GlProp.vidN])[0]+".mat")
     writeMatlab(GlProp.trackData,trckParameters,os.path.join(self.dirname,'mtD_',os.path.splitext(GlProp.vidfile[GlProp.vidN])[0],".mat"))
     print "done saving data for "+GlProp.vidfile[GlProp.vidN]
     #cPickle.dump(GlProp.trackData,GlProp.output,-1)
     #tObjList.clear()
     #GlProp.trackData.clear()
     #gc.collect()
     for button in self.playctrlbuttons:
         button.Enable()
     if GlProp.vidN+1 < len(GlProp.vidfile):
         GlProp.vidstream.release()
         GlProp.vidN += 1
         try:
             GlProp.vidWriter.release()
             GlProp.vidWriter.open(self.getTrckVid_Name(),cv.CV_FOURCC('X','V','I','D'),30.,(cFrame.shape[1],cFrame.shape[0]),True)
         except:
             pass
         self.onTrack(None)
     else:
         GlProp.vidN = 0
         GlProp.vidstream.release()
         try:
             GlProp.vidWriter.release()
         except:
             pass
Example #2
0
def InitializeVideoOutput(filename, frameInfo):
    """Initialize and return a cv2.VideoWriter for XVID/30fps color output.

    :param filename: path of the video file to create
    :param frameInfo: (width, height) frame size tuple
    :return: an opened cv2.VideoWriter object
    """
    # Prefer the OpenCV 3+ API; fall back to the legacy OpenCV 2.x helper
    # (cv.CV_FOURCC) so this works on either version, matching the
    # hasattr-based pattern used elsewhere in this file.
    if hasattr(cv2, 'VideoWriter_fourcc'):
        fourcc = cv2.VideoWriter_fourcc('X', 'V', 'I', 'D')
    else:
        fourcc = cv.CV_FOURCC('X', 'V', 'I', 'D')
    fps = 30.
    vObject = cv2.VideoWriter(filename, fourcc, fps, frameInfo, True)
    return vObject
Example #3
0
 def initRecorder(self):  #Create the recorder
     """Create the .wmv video writer and the font used for overlays."""
     # Motion-JPEG codec for the output file.
     codec = cv.CV_FOURCC('M', 'J', 'P', 'G')
     out_name = datetime.now().strftime("%b-%d_%H_%M_%S") + ".wmv"
     frame_size = cv.GetSize(self.frame)
     # 5 fps matched the development webcam; adjust to your camera.
     self.writer = cv.CreateVideoWriter(out_name, codec, 5, frame_size, 1)
     # Font for annotating frames later on.
     self.font = cv.InitFont(cv.CV_FONT_HERSHEY_SIMPLEX, 1, 1, 0, 2, 8)
Example #4
0
 def initRecorder(self):  #Create the recorder
     """Create the .avi video writer and the font used for overlays."""
     # DIVX (MPEG-4) codec; an alternative would be the raw DIB codec.
     codec = cv.CV_FOURCC('D', 'I', 'V', 'X')
     out_name = datetime.now().strftime("%b-%d_%H:%M:%S") + ".avi"
     frame_size = cv.GetSize(self.frame)
     # 15 fps matched the development webcam; adjust to your camera.
     self.writer = cv.CreateVideoWriter(out_name, codec, 15, frame_size, 1)
     # Font for annotating frames later on.
     self.font = cv.InitFont(cv.CV_FONT_HERSHEY_SIMPLEX, 1, 1, 0, 2, 8)
Example #5
0
def create_video_writer(video_outputfile, frame_rate, format='IYUV', video_dim=(640,480)):
    """Open and return a cv2.VideoWriter for *video_outputfile*.

    Works with both the OpenCV 3+ and the legacy 2.x fourcc helpers.
    Raises IOError if the writer cannot be opened.
    """
    make_fourcc = getattr(cv2, 'VideoWriter_fourcc', None)
    if make_fourcc is None:
        make_fourcc = cv2.CV_FOURCC  # legacy OpenCV 2.x fallback
    fourcc = make_fourcc(*format)

    writer = cv2.VideoWriter(video_outputfile, fourcc, frame_rate, video_dim)
    if writer.isOpened():
        return writer
    raise IOError('Failed to open video writer to: ' + video_outputfile)
def download_and_process_video(save_path, row):
    video_id = row['VideoID']
    video_path = row['video_path']
    full_path = os.path.join(save_path, video_path)
    if os.path.exists(full_path):
        return

    start = row['Start']
    end = row['End']

    print video_id

    if os.path.exists('tmp.mp4'):
        os.system('rm tmp.mp4')

    try:
        youtube = YouTube("https://www.youtube.com/watch?v=" + video_id)
    except:
        print "error"
        return

    youtube.set_filename('tmp')

    try:
        video = youtube.get('mp4', '360p')
    except:
        ipdb.set_trace()
    video.download('.')

    cap = cv2.VideoCapture('tmp.mp4')
    fps = cap.get(cv2.CV_CAP_PROP_FPS)
    fourcc = int(cap.get(cv2.CV_FOURCC(*'XVID')))
    w = int(cap.get(cv2.CV_CAP_PROP_FRAME_WIDTH))
    h = int(cap.get(cv2.CV_CAP_PROP_FRAME_HEIGHT))

    out = cv2.VideoWriter(full_path, fourcc, fps, (w, h))

    start_frame = int(fps * start)
    end_frame = int(fps * end)

    frame_count = 0
    while frame_count < end_frame:
        ret, frame = cap.read()
        frame_count += 1

        if frame_count >= start_frame:
            out.write(frame)

    cap.release()
    out.release()
def video_together(input=".",
                   fps=10,
                   outdir='video',
                   fmt='.avi',
                   outname='outvideo',
                   outshape=(640, 360)):
    """Stitch all images found in *input* into a single video file.

    :param input: directory containing the source images
    :param fps: frames per second of the output video
    :param outdir: directory the video is written into (created if missing)
    :param fmt: output container extension, e.g. '.avi'
    :param outname: output file name without extension
    :param outshape: (width, height) of the output video
    :return: None
    """
    if not os.path.exists(outdir):
        os.makedirs(outdir)
        print("系统自动创建输出文件夹%s" % outdir)
    print("拼接结果保存在%s中" % outdir)
    # MPEG-1 codec; other options include 'MJPG', 'MP42', 'DIV3', 'DIVX',
    # 'U263', 'I263', 'FLV1', or cv2.VideoWriter_fourcc(*'XVID') on OpenCV 3+.
    fourcc = cv2.CV_FOURCC('P', 'I', 'M', '1')  # MPEG-1 codec
    # Sort for a deterministic frame order (os.listdir order is arbitrary).
    files = sorted(os.listdir(input))
    videoWriter = cv2.VideoWriter(os.path.join(outdir, outname + fmt), fourcc,
                                  fps, outshape)
    for i, o in enumerate(files):
        # BUG FIX: os.listdir returns bare names; join with the input dir,
        # otherwise imread looked in the current working directory.
        img = cv2.imread(os.path.join(input, o))
        print(o)
        if img is None:
            continue  # skip unreadable/non-image files
        # BUG FIX: write() was outside the loop, so only the last image
        # ended up in the video. Also resize so every frame matches the
        # writer's size (mismatched frames are silently dropped).
        videoWriter.write(cv2.resize(img, outshape))
    videoWriter.release()
    print("拼接结果保存在%s中" % outdir)
    return
Example #8
0
    def record(self, path):
        """Start recording to *path*, or stop recording when path is None.

        :param path: output video file path, or None to stop and release
                     the current writer.
        """
        if path is None:
            if self.video_writer is not None:
                self.video_writer.release()
                self.video_writer = None
            return

        # BUG FIX: on OpenCV 3+ cv2.CV_FOURCC does not exist, so the old
        # callable(cv2.CV_FOURCC) check raised AttributeError before the
        # fallback branch could run. Probe with getattr instead.
        legacy_fourcc = getattr(cv2, 'CV_FOURCC', None)
        if callable(legacy_fourcc):
            fourcc = legacy_fourcc(*'MJPG')
        else:
            fourcc = cv2.VideoWriter_fourcc(*'MJPG')
        self.video_writer = cv2.VideoWriter(
            path,
            fourcc,
            self.framerate,  # fps
            (
                int(self.resolution[0]),  # width
                int(self.resolution[1]),  # height
            ))
# coding:utf-8
import cv2
import sys
# Open the default camera and request a 640x480 capture size.
cap = cv2.VideoCapture(0)
cap.set(3, 640)   # property 3 = frame width
cap.set(4, 480)   # property 4 = frame height
cap.set(1, 10.0)  # property 1 = frame position -- NOTE(review): looks like fps was intended; confirm
# This fourcc works on macOS; if the saved video comes out empty, try a
# different codec here, or -1 to let OpenCV choose.
fourcc = cv2.CV_FOURCC('m', 'p', '4', 'v')
# The third argument controls playback speed: 10 is normal; below 10 plays in slow motion.
out = cv2.VideoWriter('output2.avi', fourcc, 10, (640, 480))
# NOTE(review): no frames are ever written before releasing -- confirm intent.
cap.release()
out.release()
Example #10
0
        predictions = np.argmax(logits, axis=3)
        for i in range(batch_size):
            pred_img = predictions[i]
            pred_img_color = label_img_to_color(pred_img)

            img = batch_imgs[i] + train_mean_channels

            img_file_name = img_paths[i].split("/")[-1]
            img_name = img_file_name.split(".png")[0]
            pred_path = results_dir + img_name + "_pred.png"

            overlayed_img = 0.3 * img + 0.7 * pred_img_color

            cv2.imwrite(pred_path, overlayed_img)

# Stitch every overlayed prediction image into one motion-JPEG video.
fourcc = cv2.CV_FOURCC("M", "J", "P", "G")
video_path = results_dir + "cityscapes_stuttgart_02_pred.avi"
out = cv2.VideoWriter(video_path, fourcc, 20.0, (img_width, img_height))

for step, frame_name in enumerate(sorted(os.listdir(results_dir))):
    # Progress heartbeat every 100 files.
    if step % 100 == 0:
        print(step)

    if ".png" not in frame_name:
        continue  # only image files become video frames
    frame = cv2.imread(results_dir + frame_name, -1)  # -1: load unchanged
    out.write(frame)
Example #11
0
import numpy as np
import cv2

# Open the second attached camera (index 1).
cap = cv2.VideoCapture(1)

# Define the codec and create VideoWriter object
fourcc = cv2.CV_FOURCC('D', 'I', 'V', 'X')
out = cv2.VideoWriter('Videos/CupVid.avi', fourcc, 24.0, (640, 480), 1)
key = cv2.waitKey(1)
# Outer loop: preview frames until ESC (27) is pressed.
while (key != 27):
    ret, frame = cap.read()
    cv2.imshow('frame', frame)
    key = cv2.waitKey(1)
    # Inner loop: ENTER (13) starts recording; frames are written until
    # ENTER is pressed again (keyAux), which resets key and stops writing.
    while ((ret == True) and (key == 13)):
        ret, frame = cap.read()
        cv2.imshow('frame', frame)
        # write the frame (note: no flip is actually applied here,
        # despite the upstream sample's "flipped frame" wording)
        out.write(frame)
        keyAux = cv2.waitKey(1)
        if keyAux == 13:
            key = 0

# Release everything if job is finished
cap.release()
out.release()
cv2.destroyAllWindows()
Example #12
0
    def run(self):
        """Track one red and one blue object in the camera feed, draw the
        connecting line, and display the angle between them on screen.
        Exits on ESC or ENTER.
        """
        # Font for on-screen coordinate/angle labels.
        font = cv.InitFont(cv.CV_FONT_HERSHEY_SIMPLEX, 1, 1, 0, 3, 8)
        # Work images sized from one grabbed frame.
        hsv_img = cv.CreateImage(cv.GetSize(cv.QueryFrame(self.capture)), 8, 3)
        threshold_img1 = cv.CreateImage(cv.GetSize(hsv_img), 8, 1)
        threshold_img1a = cv.CreateImage(cv.GetSize(hsv_img), 8, 1)
        threshold_img2 = cv.CreateImage(cv.GetSize(hsv_img), 8, 1)
        # Writer kept for the (currently disabled) cv.WriteFrame call below.
        writer = cv.CreateVideoWriter('angle_tracking.avi', cv.CV_FOURCC('M', 'J', 'P', 'G'), 30, cv.GetSize(hsv_img), 1)

        while True:
            # capture the image from the cam
            img = cv.QueryFrame(self.capture)

            # convert the image to HSV
            cv.CvtColor(img, hsv_img, cv.CV_BGR2HSV)

            # threshold the image to isolate two colors; red wraps around
            # the hue axis, so two red ranges are combined.
            cv.InRangeS(hsv_img, (165, 145, 100), (250, 210, 160), threshold_img1)  # red
            cv.InRangeS(hsv_img, (0, 145, 100), (10, 210, 160), threshold_img1a)  # red again
            cv.Add(threshold_img1, threshold_img1a, threshold_img1)  # combine the two red limits
            cv.InRangeS(hsv_img, (105, 180, 40), (120, 260, 100), threshold_img2)  # blue

            # determine the moments of the two objects
            threshold_img1 = cv.GetMat(threshold_img1)
            threshold_img2 = cv.GetMat(threshold_img2)
            moments1 = cv.Moments(threshold_img1, 0)
            moments2 = cv.Moments(threshold_img2, 0)
            area1 = cv.GetCentralMoment(moments1, 0, 0)
            area2 = cv.GetCentralMoment(moments2, 0, 0)

            # BUG FIX: the original seeded (x1, y1, x2, y2) with (1, 2, 3, 4)
            # and then ran "for x in coord_list: x = 0", which only rebinds
            # the loop variable and zeroes nothing. Zero them directly.
            x1 = y1 = x2 = y2 = 0

            # there can be noise in the video so ignore objects with small areas
            if (area1 > 200000):
                # centre of mass = (m10/m00, m01/m00)
                x1 = int(cv.GetSpatialMoment(moments1, 1, 0) / area1)
                y1 = int(cv.GetSpatialMoment(moments1, 0, 1) / area1)

            # draw circle
            cv.Circle(img, (x1, y1), 2, (0, 255, 0), 20)

            # write x and y position
            cv.PutText(img, str(x1) +', '+str(y1), (x1, y1 + 20), font, 255)  # Draw the text

            if (area2 > 100000):
                # centre of mass = (m10/m00, m01/m00)
                x2 = int(cv.GetSpatialMoment(moments2, 1, 0) / area2)
                y2 = int(cv.GetSpatialMoment(moments2, 0, 1) / area2)

                # draw circle
                cv.Circle(img, (x2, y2), 2, (0, 255, 0), 20)

            cv.PutText(img, str(x2) +', '+str(y2), (x2, y2 + 20), font, 255)  # Draw the text
            cv.Line(img, (x1, y1), (x2, y2), (0, 255, 0), 4, cv.CV_AA)
            # draw line and angle
            cv.Line(img, (x1, y1), (cv.GetSize(img)[0], y1), (100, 100, 100, 100), 4, cv.CV_AA)
            x1 = float(x1)
            y1 = float(y1)
            x2 = float(x2)
            y2 = float(y2)
            # BUG FIX: guard the vertical case (x1 == x2), which previously
            # raised ZeroDivisionError.
            if x2 != x1:
                angle = int(math.atan((y1 - y2) / (x2 - x1)) * 180 / math.pi)
            else:
                angle = 90
            cv.PutText(img, str(angle), (int(x1) + 50, (int(y2) + int(y1)) / 2), font, 255)

            # cv.WriteFrame(writer,img)

            # display frames to users
            cv.ShowImage('Target', img)
            cv.ShowImage('Threshold1', threshold_img1)
            cv.ShowImage('Threshold2', threshold_img2)
            cv.ShowImage('hsv', hsv_img)
            # Listen for ESC or ENTER key
            c = cv.WaitKey(7) % 0x100
            if c == 27 or c == 10:
                break
        # BUG FIX: windows were destroyed inside the loop on every frame;
        # tear them down once, after the loop exits.
        cv.DestroyAllWindows()
Example #13
0
                type=int,
                default=20,
                help="FPS of output video")
ap.add_argument("-c",
                "--codec",
                type=str,
                default="MJPG",
                help="codec of output video")
args = vars(ap.parse_args())

# initialize the camera and grab a reference to the raw camera capture
camera = PiCamera()
rawCapture = PiRGBArray(camera)

fourcc = cv2.VideoWriter_fourcc(*args["codec"])

cam = cv2.VideoCapture(0)
# BUG FIX: cv2.CV_FOURCC is the removed OpenCV 2.x API; VideoWriter_fourcc
# (already used above) is the correct helper on the cv2 build this script
# targets -- the same module cannot provide both.
# NOTE(review): 'st' is defined elsewhere in the file -- presumably a
# timestamp string; confirm.
writer = cv2.VideoWriter("outpy" + st + ".avi", cv2.VideoWriter_fourcc(*'XVID'), 30,
                         (640, 480))

# allow the camera to warmup
time.sleep(0.1)

# grab an image from the camera
camera.capture(rawCapture, format="bgr")
image = rawCapture.array

# display the image on screen and wait for a keypress
cv2.imshow("Image", image)
cv2.waitKey(0)
Example #14
0
def h_264():
    """Return the H.264 fourcc code.

    Works on both OpenCV 3+ (VideoWriter_fourcc) and the legacy
    OpenCV 2.x API (CV_FOURCC), which no longer exists in modern builds.
    """
    if hasattr(cv2, 'VideoWriter_fourcc'):
        return cv2.VideoWriter_fourcc('H', '2', '6', '4')
    return cv2.CV_FOURCC('H', '2', '6', '4')
#functionality of this file is to crop the main content of the video and NOT trim the video
import numpy as np
import cv2

# Open the default camera for capture.
cap = cv2.VideoCapture(0)

# Motion-JPEG codec; create the writer for 640x480 @ 20 fps.
fourcc = cv2.CV_FOURCC(*'MJPG')
out = cv2.VideoWriter('output.avi', fourcc, 20.0, (640, 480))

# Record until the stream ends or the user presses 'q'.
while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        break
    out.write(frame)
    cv2.imshow('frame', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# Release everything once recording is finished.
cap.release()
out.release()
cv2.destroyAllWindows()
Example #16
0
def camera_record(record_type,
                  unique_id,
                  duration_sec=None,
                  tmp_filename=None):
    """
    Record a still image, timelapse frame, or video from a camera.

    :param record_type: one of 'photo', 'timelapse', or 'video'
    :param unique_id: unique ID of the camera settings row to use
    :param duration_sec: video duration in seconds (video only)
    :param tmp_filename: if set, overrides the generated file name
    :return: (save_path, filename) on success, otherwise None
    """
    daemon_control = None
    settings = db_retrieve_table_daemon(Camera, unique_id=unique_id)
    timestamp = datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
    assure_path_exists(PATH_CAMERAS)
    camera_path = assure_path_exists(
        os.path.join(PATH_CAMERAS, '{uid}'.format(uid=settings.unique_id)))

    # Determine the save directory and file name for this record type.
    if record_type == 'photo':
        if settings.path_still:
            save_path = settings.path_still
        else:
            save_path = assure_path_exists(os.path.join(camera_path, 'still'))
        filename = 'Still-{cam_id}-{cam}-{ts}.jpg'.format(
            cam_id=settings.id, cam=settings.name,
            ts=timestamp).replace(" ", "_")
    elif record_type == 'timelapse':
        if settings.path_timelapse:
            save_path = settings.path_timelapse
        else:
            save_path = assure_path_exists(
                os.path.join(camera_path, 'timelapse'))
        start = datetime.datetime.fromtimestamp(
            settings.timelapse_start_time).strftime("%Y-%m-%d_%H-%M-%S")
        filename = 'Timelapse-{cam_id}-{cam}-{st}-img-{cn:05d}.jpg'.format(
            cam_id=settings.id,
            cam=settings.name,
            st=start,
            cn=settings.timelapse_capture_number).replace(" ", "_")
    elif record_type == 'video':
        if settings.path_video:
            save_path = settings.path_video
        else:
            save_path = assure_path_exists(os.path.join(camera_path, 'video'))
        filename = 'Video-{cam}-{ts}.h264'.format(cam=settings.name,
                                                  ts=timestamp).replace(
                                                      " ", "_")
    else:
        return

    assure_path_exists(save_path)

    if tmp_filename:
        filename = tmp_filename

    path_file = os.path.join(save_path, filename)

    # Turn on output, if configured
    output_already_on = False
    output_id = None
    output_channel_id = None
    output_channel = None
    if settings.output_id and ',' in settings.output_id:
        output_id = settings.output_id.split(",")[0]
        output_channel_id = settings.output_id.split(",")[1]
        output_channel = db_retrieve_table_daemon(OutputChannel,
                                                  unique_id=output_channel_id)

    if output_id and output_channel:
        daemon_control = DaemonControl()
        if daemon_control.output_state(
                output_id, output_channel=output_channel.channel) == "on":
            output_already_on = True
        else:
            daemon_control.output_on(output_id,
                                     output_channel=output_channel.channel)

    # Pause while the output remains on for the specified duration.
    # Used for instance to allow fluorescent lights to fully turn on before
    # capturing an image.
    if settings.output_duration:
        time.sleep(settings.output_duration)

    if settings.library == 'picamera':
        import picamera

        # Try 5 times to access the pi camera (in case another process is accessing it)
        for _ in range(5):
            try:
                with picamera.PiCamera() as camera:
                    camera.resolution = (settings.width, settings.height)
                    camera.hflip = settings.hflip
                    camera.vflip = settings.vflip
                    camera.rotation = settings.rotation
                    camera.brightness = int(settings.brightness)
                    camera.contrast = int(settings.contrast)
                    camera.exposure_compensation = int(settings.exposure)
                    camera.saturation = int(settings.saturation)
                    camera.shutter_speed = settings.picamera_shutter_speed
                    camera.sharpness = settings.picamera_sharpness
                    camera.iso = settings.picamera_iso
                    camera.awb_mode = settings.picamera_awb
                    if settings.picamera_awb == 'off':
                        camera.awb_gains = (settings.picamera_awb_gain_red,
                                            settings.picamera_awb_gain_blue)
                    camera.exposure_mode = settings.picamera_exposure_mode
                    camera.meter_mode = settings.picamera_meter_mode
                    camera.image_effect = settings.picamera_image_effect

                    camera.start_preview()
                    time.sleep(2)  # Camera warm-up time

                    if record_type in ['photo', 'timelapse']:
                        camera.capture(path_file, use_video_port=False)
                    elif record_type == 'video':
                        camera.start_recording(path_file,
                                               format='h264',
                                               quality=20)
                        camera.wait_recording(duration_sec)
                        camera.stop_recording()
                    else:
                        return
                    break
            except picamera.exc.PiCameraMMALError:
                logger.error(
                    "The camera is already open by picamera. Retrying 4 times."
                )
            time.sleep(1)

    elif settings.library == 'fswebcam':
        cmd = "/usr/bin/fswebcam --device {dev} --resolution {w}x{h} --set brightness={bt}% " \
              "--no-banner --save {file}".format(dev=settings.device,
                                                 w=settings.width,
                                                 h=settings.height,
                                                 bt=settings.brightness,
                                                 file=path_file)
        if settings.hflip:
            cmd += " --flip h"
        if settings.vflip:
            # BUG FIX: vflip previously appended "--flip h" (horizontal)
            # a second time; 'v' is the vertical flip option.
            cmd += " --flip v"
        if settings.rotation:
            cmd += " --rotate {angle}".format(angle=settings.rotation)
        if settings.custom_options:
            cmd += " {}".format(settings.custom_options)

        out, err, status = cmd_output(cmd, stdout_pipe=False, user='******')
        logger.debug("Camera debug message: "
                     "cmd: {}; out: {}; error: {}; status: {}".format(
                         cmd, out, err, status))

    elif settings.library == 'opencv':
        import cv2
        import imutils

        cap = cv2.VideoCapture(settings.opencv_device)
        cap.set(cv2.CAP_PROP_FRAME_WIDTH, settings.width)
        cap.set(cv2.CAP_PROP_FRAME_HEIGHT, settings.height)
        cap.set(cv2.CAP_PROP_EXPOSURE, settings.exposure)
        cap.set(cv2.CAP_PROP_GAIN, settings.gain)
        cap.set(cv2.CAP_PROP_BRIGHTNESS, settings.brightness)
        cap.set(cv2.CAP_PROP_CONTRAST, settings.contrast)
        cap.set(cv2.CAP_PROP_HUE, settings.hue)
        cap.set(cv2.CAP_PROP_SATURATION, settings.saturation)

        # Check if image can be read
        status, _ = cap.read()
        if not status:
            logger.error("Cannot detect USB camera with device '{dev}'".format(
                dev=settings.opencv_device))
            return

        # Discard a few frames to allow camera to adjust to settings
        for _ in range(2):
            cap.read()

        if record_type in ['photo', 'timelapse']:
            edited = False
            status, img_orig = cap.read()
            cap.release()

            if not status:
                logger.error("Could not acquire image")
                return

            img_edited = img_orig.copy()

            if any((settings.hflip, settings.vflip, settings.rotation)):
                edited = True

            if settings.hflip and settings.vflip:
                img_edited = cv2.flip(img_orig, -1)
            elif settings.hflip:
                img_edited = cv2.flip(img_orig, 1)
            elif settings.vflip:
                img_edited = cv2.flip(img_orig, 0)

            if settings.rotation:
                img_edited = imutils.rotate_bound(img_orig, settings.rotation)

            if edited:
                cv2.imwrite(path_file, img_edited)
            else:
                cv2.imwrite(path_file, img_orig)

        elif record_type == 'video':
            try:
                cap = cv2.VideoCapture(settings.opencv_device)
                # BUG FIX: cv2.CV_FOURCC is the removed OpenCV 2.x API; the
                # cv2.CAP_PROP_* constants used above imply OpenCV 3+, where
                # VideoWriter_fourcc must be used (the old call raised
                # AttributeError and was swallowed by the except below).
                fourcc = cv2.VideoWriter_fourcc(*'XVID')
                resolution = (settings.width, settings.height)
                out = cv2.VideoWriter(path_file, fourcc, 20.0, resolution)

                time_end = time.time() + duration_sec
                while cap.isOpened() and time.time() < time_end:
                    ret, frame = cap.read()
                    if ret:
                        # write the frame
                        out.write(frame)
                        if cv2.waitKey(1) & 0xFF == ord('q'):
                            break
                    else:
                        break
                cap.release()
                out.release()
                cv2.destroyAllWindows()
            except Exception as e:
                logger.exception("Exception raised while recording video: "
                                 "{err}".format(err=e))
        else:
            return

    elif settings.library == 'http_address':
        import cv2
        import imutils
        from urllib.error import HTTPError
        from urllib.parse import urlparse
        from urllib.request import urlretrieve

        if record_type in ['photo', 'timelapse']:
            path_tmp = "/tmp/tmpimg.jpg"

            # Get filename and extension, if available
            a = urlparse(settings.url_still)
            filename = os.path.basename(a.path)
            if filename:
                path_tmp = "/tmp/{}".format(filename)

            try:
                os.remove(path_tmp)
            except FileNotFoundError:
                pass

            try:
                urlretrieve(settings.url_still, path_tmp)
            except HTTPError as err:
                logger.error(err)
            except Exception as err:
                logger.exception(err)

            try:
                img_orig = cv2.imread(path_tmp)

                if img_orig is not None and img_orig.shape is not None:
                    if any(
                        (settings.hflip, settings.vflip, settings.rotation)):
                        img_edited = None
                        if settings.hflip and settings.vflip:
                            img_edited = cv2.flip(img_orig, -1)
                        elif settings.hflip:
                            img_edited = cv2.flip(img_orig, 1)
                        elif settings.vflip:
                            img_edited = cv2.flip(img_orig, 0)

                        if settings.rotation:
                            img_edited = imutils.rotate_bound(
                                img_orig, settings.rotation)

                        # BUG FIX: "if img_edited:" truth-tests a numpy
                        # array, which raises ValueError and caused the
                        # un-edited temp file to be renamed instead.
                        if img_edited is not None:
                            cv2.imwrite(path_file, img_edited)
                    else:
                        cv2.imwrite(path_file, img_orig)
                else:
                    os.rename(path_tmp, path_file)
            except Exception as err:
                logger.error(
                    "Could not convert, rotate, or invert image: {}".format(
                        err))
                try:
                    os.rename(path_tmp, path_file)
                except FileNotFoundError:
                    logger.error("Camera image not found")

        elif record_type == 'video':
            pass  # No video (yet)

    elif settings.library == 'http_address_requests':
        import cv2
        import imutils
        import requests

        if record_type in ['photo', 'timelapse']:
            path_tmp = "/tmp/tmpimg.jpg"
            try:
                os.remove(path_tmp)
            except FileNotFoundError:
                pass

            try:
                r = requests.get(settings.url_still)
                if r.status_code == 200:
                    open(path_tmp, 'wb').write(r.content)
                else:
                    logger.error(
                        "Could not download image. Status code: {}".format(
                            r.status_code))
            except requests.HTTPError as err:
                logger.error("HTTPError: {}".format(err))
            except Exception as err:
                logger.exception(err)

            try:
                img_orig = cv2.imread(path_tmp)

                if img_orig is not None and img_orig.shape is not None:
                    if any(
                        (settings.hflip, settings.vflip, settings.rotation)):
                        if settings.hflip and settings.vflip:
                            img_edited = cv2.flip(img_orig, -1)
                        elif settings.hflip:
                            img_edited = cv2.flip(img_orig, 1)
                        elif settings.vflip:
                            img_edited = cv2.flip(img_orig, 0)

                        if settings.rotation:
                            img_edited = imutils.rotate_bound(
                                img_orig, settings.rotation)

                        cv2.imwrite(path_file, img_edited)
                    else:
                        cv2.imwrite(path_file, img_orig)
                else:
                    os.rename(path_tmp, path_file)
            except Exception as err:
                logger.error(
                    "Could not convert, rotate, or invert image: {}".format(
                        err))
                try:
                    os.rename(path_tmp, path_file)
                except FileNotFoundError:
                    logger.error("Camera image not found")

        elif record_type == 'video':
            pass  # No video (yet)

    # (A duplicated set_user_grp try-block that ran here was removed; the
    # ownership change is applied exactly once below.)

    # Turn off output, if configured
    if output_id and output_channel and daemon_control and not output_already_on:
        daemon_control.output_off(output_id,
                                  output_channel=output_channel.channel)

    try:
        set_user_grp(path_file, 'mycodo', 'mycodo')
        return save_path, filename
    except Exception as e:
        logger.exception(
            "Exception raised in 'camera_record' when setting user grp: "
            "{err}".format(err=e))
from goprocam import constants
import cv2
import numpy as np
gpCam = GoProCamera.GoPro()
# NOTE(review): only `constants` is imported above -- confirm GoProCamera
# is imported elsewhere in the file.
print(gpCam.getMedia())
gpCam.stream("udp://127.0.0.1:10000")

cascPath = "haarcascades_AlexeyAB\haarcascades_GPU\haarcascade_frontalface_default.xml"
faceCascade = cv2.CascadeClassifier(cascPath)
cap = cv2.VideoCapture("udp://127.0.0.1:10000")
font = cv2.FONT_HERSHEY_PLAIN
# BUG FIX: w and h were used below before ever being defined (they were
# only bound as face-rectangle loop variables). Read the stream size from
# the capture instead.
w = int(cap.get(cv2.CV_CAP_PROP_FRAME_WIDTH))
h = int(cap.get(cv2.CV_CAP_PROP_FRAME_HEIGHT))
video = cv2.VideoWriter("tester.avi", cv2.CV_FOURCC('M', 'J', 'P', 'G'), 10.0,
                        (w, h), True)

while True:
    ret, frame = cap.read()
    if not ret:
        break  # stream ended or dropped
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = faceCascade.detectMultiScale(gray,
                                         scaleFactor=1.1,
                                         minNeighbors=5,
                                         minSize=(30, 30),
                                         flags=cv2.CASCADE_SCALE_IMAGE)
    # fw/fh are the face box size (renamed so they no longer clobber the
    # frame dimensions w and h used by the writer above).
    for (x, y, fw, fh) in faces:
        cv2.rectangle(frame, (x, y), (x + fw, y + fh), (0, 255, 0), 2)
        cv2.putText(frame, "This one!", (x + fw, y + fh), font, 0.8, (0, 255, 0),
                    1, cv2.LINE_AA)
        video.write(frame)
Example #18
0
from tkinter import *
from tkinter import filedialog

from PIL import Image, ImageTk
import matplotlib.pyplot as plt
import matplotlib.image as img

# we will not use a built-in dictionary, but we could
# aruco_dict = aruco.Dictionary_get(aruco.DICT_4X4_50)

# Build an empty custom dictionary: 44 markers of 5x5 bits each.
aruco_dict = aruco.custom_dictionary(44, 5, 1)
# Pre-allocate the bytesList storage; markers are filled in below.
aruco_dict.bytesList = np.empty(shape=(44, 4, 4), dtype=np.uint8)
print(cv2.CV_FOURCC('m', 'p', '4', 'v'))
# Bit patterns for the first three custom markers.
marker_patterns = [
    [[0, 1, 1, 0, 0], [1, 0, 1, 0, 0], [0, 0, 1, 0, 0],
     [0, 0, 1, 0, 0], [1, 1, 1, 1, 1]],
    [[0, 1, 1, 1, 0], [1, 0, 0, 0, 1], [0, 0, 0, 1, 0],
     [0, 0, 1, 0, 0], [1, 1, 1, 1, 1]],
    [[0, 1, 1, 1, 0], [1, 0, 0, 0, 1], [0, 0, 1, 1, 0],
     [1, 0, 0, 0, 1], [0, 1, 1, 1, 0]],
]
for marker_id, pattern in enumerate(marker_patterns):
    bits = np.array(pattern, dtype=np.uint8)
    aruco_dict.bytesList[marker_id] = aruco.Dictionary_getByteListFromBits(bits)

mybits = np.array([[0, 0, 0, 1, 0], [0, 0, 1, 1, 0], [0, 1, 0, 1, 0],
Example #19
0
import cv2 as cv
import numpy as np

# Capture frames live from the default camera.
cap = cv.VideoCapture(0)
fourcc = cv.CV_FOURCC(*'XVID')
# On OpenCV 3 use: fourcc = cv2.VideoWriter_fourcc(*'XVID')
out = cv.VideoWriter('output.avi', fourcc, 20.0, (640, 480))  # save the video
while True:
    ret, frame = cap.read()
    gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
    out.write(frame)  # write the frame to the video
    cv.imshow('frame', frame)  # one window shows the original video
    cv.imshow('gray', gray)  # a second window shows the processed video
    # quit on 'q' (27 is the ESC ASCII code, per the original note)
    if cv.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
out.release()
cv.destroyAllWindows()
except:  # PyQt5
    from PyQt5.QtWidgets import (QWidget, QToolTip, QDesktopWidget,
                                 QSizePolicy, QProgressBar, QInputDialog,
                                 QLineEdit, QCheckBox, QPushButton,
                                 QApplication, QMainWindow, QAction, QTextEdit,
                                 QFileDialog, QComboBox, QLabel, QHBoxLayout,
                                 QVBoxLayout, QGridLayout)
    from PyQt5.QtCore import QCoreApplication, QThread, pyqtSignal, QObject
    from PyQt5.QtGui import QIcon, QFont, QPixmap, QImage
    QString = str
from utils import ensure_dir

# from robotiq_interface import Robotiq

try:  # OpenCV 2.x exposes CV_FOURCC
    fourcc = cv2.CV_FOURCC('M', 'J', 'P', 'G')
except AttributeError:  # OpenCV 3+ renamed it to VideoWriter_fourcc
    # BUG FIX: a bare except also hid unrelated errors; only the missing
    # attribute indicates the newer API.
    fourcc = cv2.VideoWriter_fourcc(*'MJPG')

# robotiq = Robotiq()


class robotiqGUI(QWidget):
    """Qt widget for the Robotiq control GUI."""

    # Signal used to start/stop video recording from other components.
    video_controller = pyqtSignal(str)

    def __init__(self):
        super().__init__()

        # rospy.init_node('GUI', anonymous=True)
        # Recording is off until toggled through the UI.
        self.record_video = False
        self.init_ui()
Example #21
0
def main():
    """Read MVI_0069.MOV, highlight red-ish blobs ('excess red') with a
    cross-hair, and write the annotated frames to output.mp4.
    Contains exercise placeholders marked <alter these lines>.
    """
    cap = cv2.VideoCapture('MVI_0069.MOV')

    # Pick the fourcc helper matching the installed OpenCV major version.
    if int(cv2.__version__[0]) >= 3:  #if opencv 3.0
        fourcc = cv2.VideoWriter_fourcc(*'H264')
    else:
        fourcc = cv2.CV_FOURCC(*'H264')

    # NOTE(review): writer size is (480, 640) -- looks transposed relative
    # to a typical (width, height); confirm against the source video.
    VidOut = cv2.VideoWriter('output.mp4', fourcc, 10, (480, 640))

    while (cap.isOpened()):
        # Read next frame
        ret, frame = cap.read()

        if not ret:
            break

        # Show current frame
        cv2.imshow('frame', frame)

        print(frame.shape)
        # split the color channels (OpenCV stores frames as BGR)
        red = frame[:, :, 2]
        green = frame[:, :, 1]
        blue = frame[:, :, 0]

        # Calculate excess red and threshold
        temp = cv2.addWeighted(red, 0.4, green, -0.2, 50)
        exr = cv2.addWeighted(temp, 1, blue, -0.2, 0)

        # Find places with an exr value over 80 (it suits for this video, but probably not yours)
        retval, thrsholdeded = cv2.threshold(exr, 80, 255, cv2.THRESH_BINARY)

        # show the thresholded image
        cv2.imshow('exr', thrsholdeded)

        # Find connected components
        contours, hierarchy = cv2.findContours(thrsholdeded, cv2.RETR_EXTERNAL,
                                               cv2.CHAIN_APPROX_SIMPLE)

        # loop through connected components
        for cnt in contours:
            # Calculate area and circumference
            area = cv2.contourArea(cnt)
            circumference = cv2.arcLength(cnt, True)

            # Do something if area is greated than 1.
            if (area > 1):
                # Determine if the contour looks like a circle
                #
                # <alter these lines>
                # Change the "if-statement", so that it is only true for circles
                if (1 < 2):
                    # </alter these lines>
                    # Calculate the centre of mass (m10/m00, m01/m00)
                    M = cv2.moments(cnt)
                    cx = int(M['m10'] / M['m00'])
                    cy = int(M['m01'] / M['m00'])
                    #print(cx, cy)

                    # Draw a cross-hair on the circle
                    # <alter these lines>
                    point1 = (0, 0)
                    point2 = (cx, cy)
                    cv2.line(frame, point1, point2, (255, 0, 0), 4)
                    cv2.circle(frame, (cx, cy), 20, (255, 0, 255), -1)
                    # </alter these lines>

        cv2.imshow('frame annotated', frame)
        VidOut.write(frame)

        if cv2.waitKey(10) & 0xFF == ord('q'):
            break
    VidOut.release()
    return