Example #1
import video.avconv as avconv


def getLinuxCap():
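    # Grab the full X11 display :0.0 at 1920x1080 (cursor included) plus the
    # default PulseAudio source, encoding to H.264/AAC in an MP4 file.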
    cap = avconv.AVConverter([
        "-y", "-rtbufsize", "1500M", "-f", "x11grab", "-s", "1920x1080",
        "-draw_mouse", "1", "-i", ":0.0", "-f", "pulse", "-i", "default",
        "-ac", "2", "-c:a", "libvo_aacenc", "-c:v", "libx264", "-qscale",
        "0.1", "/home/caioviel/Desktop/capturedfile.mp4"
    ], True)
    return cap
Example #2
import time

import video.avconv as avconv


def main():
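    # Record the V4L2 webcam /dev/video0 at hd480 with ALSA audio (hw:1,0)
    # for 15 seconds, encoding to H.264/AAC.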
    cap = avconv.AVConverter([
        "-y", "-s", "hd480", "-f", "video4linux2", "-i", "/dev/video0", "-f",
        "alsa", "-i", "hw:1,0", "-ar", "48000", "-ac", "2", "-c:a",
        "libvo_aacenc", "-c:v", "libx264", "-qscale", "1",
        "/home/caioviel/Desktop/capturedfile.mp4"
    ], True)

    cap.start()
    time.sleep(15)
    cap.stop()
Example #3
    def start_recording_screen(self):
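        """Start screen capture with avconv: x11grab on Linux, gdigrab on
        Windows, encoded to H.264/AAC in screen.mp4."""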
        import video.avconv as avconv

        arg = ["-y"]

        if IS_LINUX:
            #Video
            arg.append('-f')
            arg.append('x11grab')
            arg.append('-s')
            arg.append(
                str(self.ui.txt_width.text()) + "x" +
                str(self.ui.txt_height.text()))
            arg.append("-draw_mouse")
            arg.append("1")
            if self.ui.rdb_partial.isChecked():
                arg.append("-show_region")
                arg.append("1")
            arg.append('-i')
            arg.append(":0.0+" + str(self.ui.txt_x.text()) + "," +
                       str(self.ui.txt_y.text()))

            #Audio
            audio = self.ui.cmb_screen_audio.currentText()
            if audio != "Nenhum":
                if audio == "Pulse":
                    arg.append("-f")
                    arg.append("pulse")
                    arg.append("-i")
                    arg.append("default")
                else:
                    # ALSA device selected in the screen-audio combobox.
                    arg += ["-f", "alsa", "-i", self.audio_devices[audio]]
        else:
            # Video: grab the Windows desktop with gdigrab.
            arg += ["-f", "gdigrab",
                    "-framerate", "25",
                    "-video_size", str(self.ui.txt_width.text()) + "x" +
                    str(self.ui.txt_height.text()),
                    "-draw_mouse", "1"]
            if self.ui.rdb_partial.isChecked():
                arg += ["-show_region", "1",
                        "-offset_x", str(self.ui.txt_x.text()),
                        "-offset_y", str(self.ui.txt_y.text())]
            arg += ["-i", "desktop"]

        # Codec: AAC audio, H.264 video, MP4 container.
        arg += ["-c:a", "libvo_aacenc",
                "-c:v", "libx264",
                "-qscale", "0.1",
                "-f", "mp4"]

        self.screen_file = os.path.join(self.directory, "videos", "screen.mp4")
        arg.append(self.screen_file)

        self.screenrec = avconv.AVConverter(arg, True)
        self.screenrec.start()
Example #4
    def start_recording_webcam(self):
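        """Start webcam capture with avconv: Video4Linux2 + ALSA on Linux,
        a single DirectShow input on Windows, encoded to H.264/AAC."""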
        import video.avconv as avconv
        import cv2

        device_path = self.video_devices[
            self.ui.cmb_webcam_video.currentText()]

        if IS_LINUX:
            # "/dev/videoN" -> device number N
            device_number = int(device_path[10:])
        else:
            device_number = self.ui.cmb_webcam_video.currentIndex()
        print(device_number)

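        # Probe the webcam's current capture resolution with OpenCV.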
        cap = cv2.VideoCapture(device_number)
        width = int(cap.get(cv2.cv.CV_CAP_PROP_FRAME_WIDTH))
        height = int(cap.get(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT))
        cap.release()

        arg = ["-y"]
        if IS_LINUX:
            #Video
            arg.append('-s')
            arg.append(str(width) + "x" + str(height))
            arg.append('-f')
            arg.append('video4linux2')
            arg.append('-i')
            arg.append(device_path)

            #Audio
            arg.append('-f')
            arg.append('alsa')
            arg.append('-i')
            arg.append(
                self.audio_devices[self.ui.cmb_webcam_audio.currentText()])

            #Codec
            arg.append('-c:a')
            arg.append('libvo_aacenc')
            arg.append('-c:v')
            arg.append('libx264')
            arg.append('-qscale')
            arg.append('0.1')
        else:
            #Video
            #arg.append('-s')
            #arg.append(str(width) + "x" + str(height))
            arg.append('-f')
            arg.append('dshow')
            arg.append('-i')
            #arg.append("video=\"" + device_path + "\":audio=\"" + self.audio_devices[self.ui.cmb_webcam_audio.currentText()] + "\"")
            arg.append(
                "video=" + device_path + ":audio=" +
                self.audio_devices[self.ui.cmb_webcam_audio.currentText()])

            #Codec
            arg.append('-c:a')
            arg.append('libvo_aacenc')
            arg.append('-c:v')
            arg.append('libx264')
            arg.append('-qscale')
            arg.append('0.1')

        self.cam_file = os.path.join(self.directory, "videos", "webcam.mp4")
        arg.append(self.cam_file)

        self.camrec = avconv.AVConverter(arg, True)
        self.camrec.start()
Example #5
import transitiondetection
import uuid
import datetime
import json
import os
import video.avconv as avconv

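# Re-encode a recorded screen capture, run transition detection on it and
# start building the project's JSON metadata.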
basepath = "/home/caioviel/Desktop/"
project = "MariaAdelina"
videofile = os.path.join(basepath, project, "videos/screen.mp4")
tempvideofile = os.path.join(basepath, project, "videos/temp.mp4")
imagespath = os.path.join(basepath, project, "images")
videoParams = ["-i", videofile, "-qscale", "0", "-y", tempvideofile]
jsonfile = os.path.join(basepath, project, "presente_json.js")

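# Re-encode the captured video and wait for avconv to finish.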
conv = avconv.AVConverter(videoParams, True)
conv.start()
conv.wait_finish()

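# Detect transition points in the re-encoded video.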
detector = transitiondetection.TransitionDetector()
duration, pois = detector.detect_transitions(tempvideofile, imagespath)
json_object = {
    "id": str(uuid.uuid4()),
    "autorName": "Maria Adelina Silva Brito",
    "autorEmail": "*****@*****.**",
    "date": datetime.datetime.now().isoformat(),
    "duration": duration,
    "pointsOfInterest": []
}
Example #6
import video.avconv as avconv


def getWindowsCap():
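    # Grab the full Windows desktop at 25 fps with gdigrab and write it to an
    # MP4 file (no audio input).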
    cap = avconv.AVConverter([
        "-y", "-f", "gdigrab", "-framerate", "25", "-i", "desktop",
        "C://Users//Caio/Desktop//teste.mp4"
    ], True)
    return cap