Example #1
def get_checkerboarding_percents(capture):
    # load the cached analysis results if present; otherwise start fresh
    try:
        cache = pickle.load(open(capture.cache_filename, 'rb'))
    except (IOError, EOFError, pickle.PickleError):
        cache = {}

    try:
        percents = cache['checkerboard_percents']
    except KeyError:
        percents = []
        for i in range(1, capture.num_frames + 1):
            frame = capture.get_frame(i, type=numpy.int16)
            percent = 0.0
            checkerboard_box = square.get_biggest_square((255, 0, 255), frame)
            if checkerboard_box:
                checkerboard_size = (checkerboard_box[2] - checkerboard_box[0]
                                     ) * (checkerboard_box[3] -
                                          checkerboard_box[1])
                percent = float(checkerboard_size) / (capture.dimensions[0] *
                                                      capture.dimensions[1])
            percents.append(percent)
        cache['checkerboard_percents'] = percents
        pickle.dump(cache, open(capture.cache_filename, 'wb'))

    return percents
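
The helper above only needs a handful of attributes on its capture argument. A minimal driver sketch follows, using a hypothetical stand-in object; the class name, cache path, and frame contents are illustrative and not part of the original code.

# Illustrative stand-in for the capture object used above (not part of the
# original code): it only provides the attributes the helper actually reads.
class FakeCapture(object):
    cache_filename = '/tmp/capture-cache.pickle'  # hypothetical cache path
    num_frames = 2
    dimensions = (64, 48)

    def get_frame(self, framenum, type=numpy.int16):
        # a solid magenta frame -- the colour the helper scans for
        return numpy.tile(numpy.array([255, 0, 255], dtype=type), (48, 64, 1))

percents = get_checkerboarding_percents(FakeCapture())
# -> one value per frame, each between 0.0 and 1.0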
Example #2
def get_checkerboard_image(capture, framenum):

    frame = capture.get_frame(framenum, type=numpy.int16)
    checkerboard_box = square.get_biggest_square((255, 0, 255), frame)

    dimensions = capture.dimensions
    # start from an opaque black canvas, then reshape to (height, width)
    imgarray = 0xFF000000 * numpy.ones((dimensions[0], dimensions[1]),
                                       dtype=numpy.uint32)
    imgarray.shape = dimensions[1], dimensions[0]

    if checkerboard_box:
        # paint the detected checkerboard rectangle with a solid highlight value
        imgarray[checkerboard_box[1]:checkerboard_box[3],
                 checkerboard_box[0]:checkerboard_box[2]] = 0xFF0000FF

    return Image.frombuffer('RGBA', (dimensions[0], dimensions[1]), imgarray,
                            'raw', 'RGBA', 0, 1)
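
A short usage sketch for the function above; the frame number and output path are placeholders, and capture is assumed to be the same kind of capture object as in the other examples.

# illustrative only: render the checkerboard overlay for one frame and save it
overlay = get_checkerboard_image(capture, 5)   # frame number chosen arbitrarily
overlay.save('/tmp/checkerboard-frame-5.png')  # hypothetical output path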
Example #3
def get_checkerboard_image(capture, framenum):

    frame = capture.get_frame(framenum, type=numpy.int16)
    checkerboard_box = square.get_biggest_square((255, 0, 255), frame)

    dimensions = capture.dimensions
    imgarray = 0xFF000000 * numpy.ones((dimensions[0], dimensions[1]),
                                       dtype=numpy.uint32)
    imgarray.shape = dimensions[1], dimensions[0]

    if checkerboard_box:
        imgarray[checkerboard_box[1]:checkerboard_box[3],
                 checkerboard_box[0]:checkerboard_box[2]] = 0xFF0000FF

    return Image.frombuffer('RGBA', (dimensions[0], dimensions[1]), imgarray,
                            'raw', 'RGBA', 0, 1)
Example #4
def get_checkerboarding_percents(capture):
    # load the cached analysis results if present; otherwise start fresh
    try:
        cache = pickle.load(open(capture.cache_filename, 'rb'))
    except (IOError, EOFError, pickle.PickleError):
        cache = {}

    try:
        percents = cache['checkerboard_percents']
    except KeyError:
        percents = []
        for i in range(1, capture.num_frames + 1):
            frame = capture.get_frame(i, type=numpy.int16)
            percent = 0.0
            checkerboard_box = square.get_biggest_square((255, 0, 255), frame)
            if checkerboard_box:
                checkerboard_size = ((checkerboard_box[2] - checkerboard_box[0]) *
                                     (checkerboard_box[3] - checkerboard_box[1]))
                percent = float(checkerboard_size) / (capture.dimensions[0] *
                                                      capture.dimensions[1])
            percents.append(percent)
        cache['checkerboard_percents'] = percents
        pickle.dump(cache, open(capture.cache_filename, 'wb'))

    return percents
Example #5
    def convert_capture(self, start_frame, end_frame, create_webm=True):
        self.log("Converting capture...")
        # wait for capture to finish if it has not already
        if self.capturing:
            self.log("Capture not finished... waiting")
            while self.capturing:
                time.sleep(0.5)

        if self.capture_device == "decklink":
            subprocess.Popen((os.path.join(DECKLINK_DIR, 'decklink-convert.sh'),
                              self.output_raw_file.name, self.outputdir, self.mode),
                             close_fds=True).wait()

        self.log("Gathering capture dimensions and cropping to start/end of capture...")
        imagefiles = [os.path.join(self.outputdir, path) for path in
                      sorted(os.listdir(self.outputdir), key=_natural_key)]
        num_frames = len(imagefiles)

        # full image dimensions
        frame_dimensions = (0,0)
        if num_frames > 0:
            im = Image.open(imagefiles[0])
            frame_dimensions = im.size

        # searching for start/end frames and capture dimensions only really
        # makes sense on the decklink cards, which have a clean HDMI signal.
        # input from things like the pointgrey cameras is too noisy...
        if self.capture_device == "decklink":
            # start frame
            if self.find_start_signal:
                self.log("Searching for start of capture signal ...")
                squares = []
                for (i, imagefile) in enumerate(imagefiles):
                    imgarray = numpy.array(Image.open(imagefile), dtype=numpy.int16)
                    squares.append(get_biggest_square((0,255,0), imgarray))
                    if i > 1 and not squares[-1] and squares[-2]:
                        if not start_frame:
                            start_frame = i
                        self.capture_area = squares[-2]
                        self.log("Found start capture signal at frame %s. Area: %s" %
                                 (i, self.capture_area))
                        break

            # end frame
            if self.find_end_signal:
                self.log("Searching for end of capture signal ...")
                squares = []
                for i in range(num_frames-1, 0, -1):
                    imgarray = numpy.array(Image.open(imagefiles[i]), dtype=numpy.int16)
                    squares.append(get_biggest_square((255,0,0), imgarray))

                    if len(squares) > 1 and not squares[-1] and squares[-2]:
                        if not end_frame:
                            end_frame = (i-1)
                        if not self.capture_area:
                            self.capture_area = squares[-2]
                        self.log("Found end capture signal at frame %s. Area: %s" %
                                 (i-1, self.capture_area))
                        break

        # If we don't have a start frame, set it to 1
        if not start_frame:
            start_frame = 1
        # Don't have an end frame? make it the entire length of the
        # capture
        if not end_frame:
            end_frame = num_frames

        self.log("Rewriting images in %s..." % self.outputdir)
        rewritten_imagedir = tempfile.mkdtemp(dir=self.custom_tempdir)

        def _rewrite_frame(framenum, dirname, imagefilename):
            im = Image.open(imagefilename)
            if self.capture_area:
                im = im.crop(self.capture_area)
            # pointgrey needs a median filter because it's so noisy
            if self.capture_device == "pointgrey":
                im = im.filter(ImageFilter.MedianFilter())
            im = im.convert("RGB")
            im.save(os.path.join(dirname, '%s.png' % framenum))

        # map the frame before the start frame to the zeroth frame (if possible)
        if start_frame > 1:
            _rewrite_frame(0, rewritten_imagedir, imagefiles[start_frame-1])
        else:
            # HACK: otherwise, create a copy of the start frame
            # (this duplicates a frame)
            _rewrite_frame(0, rewritten_imagedir, imagefiles[0])
        # last frame is the specified end frame or the first red frame if
        # no last frame specified, or the very last frame in the
        # sequence if there is no red frame and no specified last frame
        last_frame = min(num_frames-1, end_frame+2)

        # copy the remaining frames into numeric order starting from 1
        # (use multiprocessing to speed this up: there's probably a more
        # elegant way of doing this, but I'm not sure what it is)
        multiprocesses = []
        for (i, j) in enumerate(range(start_frame, last_frame)):
            p = multiprocessing.Process(target=_rewrite_frame,
                                        args=((i + 1), rewritten_imagedir,
                                              imagefiles[j]))
            p.start()
            multiprocesses.append(p)
           # _rewrite_frame((i+1), rewritten_imagedir, imagefiles[j])
        for p in multiprocesses:
            p.join()


        capturefps = self.fps
        if not capturefps:
            capturefps = 60

        if create_webm:
            self.log("Creating movie ...")

            moviefile = tempfile.NamedTemporaryFile(dir=self.custom_tempdir,
                                                    suffix=".webm")
            subprocess.Popen(('ffmpeg', '-y', '-r', str(capturefps), '-i',
                              os.path.join(rewritten_imagedir, '%d.png'),
                              moviefile.name), close_fds=True).wait()

        self.log("Writing final capture '%s'..." % self.output_filename)
        zipfile = ZipFile(self.output_filename, 'a')

        zipfile.writestr('metadata.json',
                         json.dumps(dict({ 'captureDevice': self.capture_device,
                                           'date': self.capture_time.isoformat(),
                                           'frameDimensions': frame_dimensions,
                                           'fps': capturefps,
                                           'version': 1 },
                                         **self.capture_metadata)))

        if create_webm:
            zipfile.writestr('movie.webm', moviefile.read())

        for imagefilename in os.listdir(rewritten_imagedir):
            zipfile.writestr("images/%s" % imagefilename,
                             open(os.path.join(rewritten_imagedir,
                                               imagefilename)).read())

        zipfile.close()

        shutil.rmtree(self.outputdir)
        shutil.rmtree(rewritten_imagedir)

        self.output_filename = None
        self.outputdir = None
        self.output_raw_file = None
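
Since convert_capture appends everything to a zip archive, here is a hedged sketch of reading the result back afterwards; the archive path is a placeholder, and only entries the code above actually writes (metadata.json, movie.webm, images/...) are assumed.

# illustrative only: inspect a capture produced by convert_capture
import json
from zipfile import ZipFile

with ZipFile('/tmp/capture.zip') as capture_zip:  # hypothetical output path
    metadata = json.loads(capture_zip.read('metadata.json'))
    print(metadata['fps'])  # 60 unless an explicit fps was configured
    frames = [name for name in capture_zip.namelist()
              if name.startswith('images/')]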
Example #6
    def convert_capture(self, start_frame, end_frame):
        imagedir = tempfile.mkdtemp(dir=self.custom_tempdir)

        subprocess.Popen((os.path.join(DECKLINK_DIR, 'decklink-convert.sh'),
                          self.output_raw_file.name, imagedir, self.mode),
                         close_fds=True).wait()

        print "Gathering capture dimensions and cropping to start/end of capture..."
        imagefiles = [os.path.join(imagedir, path) for path in
                      sorted(os.listdir(imagedir), key=_natural_key)]
        num_frames = len(imagefiles)

        # full image dimensions
        frame_dimensions = (0,0)
        if num_frames > 0:
            im = Image.open(imagefiles[0])
            frame_dimensions = im.size

        # start frame
        print "Searching for start of capture signal ..."
        squares = []
        capture_area = None
        for (i, imagefile) in enumerate(imagefiles):
            imgarray = numpy.array(Image.open(imagefile), dtype=numpy.int16)
            squares.append(get_biggest_square((0,255,0), imgarray))

            if i > 1 and not squares[-1] and squares[-2]:
                if not start_frame:
                    start_frame = i
                capture_area = squares[-2]
                break
        # If we still don't have a start frame, set it to 1
        if not start_frame:
            start_frame = 1

        # end frame
        print "Searching for end of capture signal ..."
        squares = []
        for i in range(num_frames-1, 0, -1):
            imgarray = numpy.array(Image.open(imagefiles[i]), dtype=numpy.int16)
            squares.append(get_biggest_square((255,0,0), imgarray))

            if len(squares) > 1 and not squares[-1] and squares[-2]:
                if not end_frame:
                    end_frame = (i-1)
                if not capture_area:
                    capture_area = squares[-2]
                break

        # still don't have an end frame? make it the entire length of the
        # capture
        if not end_frame:
            end_frame = num_frames


        print "Rewriting images ..."
        rewritten_imagedir = tempfile.mkdtemp(dir=self.custom_tempdir)

        def _rewrite_frame(framenum, dirname, imagefilename):
            im = Image.open(imagefilename)
            if capture_area:
                im = im.crop(capture_area)
            im.save(os.path.join(dirname, '%s.png' % framenum))

        # map the frame before the start frame to the zeroth frame (if possible)
        if start_frame > 1:
            _rewrite_frame(0, rewritten_imagedir, imagefiles[start_frame-1])
        else:
            # HACK: otherwise, create a copy of the start frame
            # (this duplicates a frame)
            _rewrite_frame(0, rewritten_imagedir, imagefiles[0])
        # last frame is the specified end frame or the first red frame if
        # no last frame specified, or the very last frame in the
        # sequence if there is no red frame and no specified last frame
        last_frame = min(num_frames-1, end_frame+2)

        # copy the remaining frames into numeric order starting from 1
        # (use multiprocessing to speed this up: there's probably a more
        # elegant way of doing this, but I'm not sure what it is)
        multiprocesses = []
        for (i, j) in enumerate(range(start_frame, last_frame)):
            p = multiprocessing.Process(target=_rewrite_frame,
                                        args=((i + 1), rewritten_imagedir,
                                              imagefiles[j]))
            p.start()
            multiprocesses.append(p)
           # _rewrite_frame((i+1), rewritten_imagedir, imagefiles[j])
        for p in multiprocesses:
            p.join()

        print "Creating movie ..."
        moviefile = tempfile.NamedTemporaryFile(dir=self.custom_tempdir,
                                                suffix=".webm")
        subprocess.Popen(('ffmpeg', '-y', '-r', '60', '-i',
                          os.path.join(rewritten_imagedir, '%d.png'),
                          moviefile.name), close_fds=True).wait()

        print "Writing final capture '%s'..." % self.output_filename
        zipfile = ZipFile(self.output_filename, 'a')

        zipfile.writestr('metadata.json',
                         json.dumps(dict({ 'date': self.capture_time.isoformat(),
                                           'frameDimensions': frame_dimensions,
                                           'version': 1 },
                                         **self.capture_metadata)))

        zipfile.writestr('movie.webm', moviefile.read())

        for imagefilename in os.listdir(rewritten_imagedir):
            zipfile.writestr("images/%s" % imagefilename,
                             open(os.path.join(rewritten_imagedir,
                                               imagefilename)).read())

        zipfile.close()

        shutil.rmtree(imagedir)
        shutil.rmtree(rewritten_imagedir)

        self.output_filename = None
        self.output_raw_file = None
Example #7
    def convert_capture(self, start_frame, end_frame, create_webm=True):
        self.logger.info("Converting capture...")
        # wait for capture to finish if it has not already
        if self.capturing:
            self.logger.info("Capture not finished... waiting")
            while self.capturing:
                time.sleep(0.5)

        if self.capture_device == "decklink":
            subprocess.Popen(
                (os.path.join(DECKLINK_DIR, 'decklink-convert.sh'),
                 self.output_raw_file.name, self.outputdir, self.mode),
                close_fds=True).wait()

        self.logger.info("Gathering capture dimensions and cropping to "
                         "start/end of capture...")
        imagefiles = [
            os.path.join(self.outputdir, path)
            for path in sorted(os.listdir(self.outputdir), key=_natural_key)
        ]
        num_frames = len(imagefiles)

        # full image dimensions
        frame_dimensions = (0, 0)
        if num_frames > 0:
            im = Image.open(imagefiles[0])
            frame_dimensions = im.size

        # searching for start/end frames and capture dimensions only really
        # makes sense on the decklink cards, which have a clean HDMI signal.
        # input from things like the pointgrey cameras is too noisy...
        if self.capture_device == "decklink":
            # start frame
            if self.find_start_signal:
                self.logger.info("Searching for start of capture signal ...")
                squares = []
                for (i, imagefile) in enumerate(imagefiles):
                    imgarray = numpy.array(Image.open(imagefile),
                                           dtype=numpy.int16)
                    squares.append(get_biggest_square((0, 255, 0), imgarray))
                    if i > 1 and not squares[-1] and squares[-2]:
                        if not start_frame:
                            start_frame = i
                        self.capture_area = squares[-2]
                        self.logger.info("Found start capture signal at frame "
                                         "%s. Area: %s" %
                                         (i, self.capture_area))
                        break

            # end frame
            if self.find_end_signal:
                self.logger.info("Searching for end of capture signal ...")
                squares = []
                for i in range(num_frames - 1, 0, -1):
                    imgarray = numpy.array(Image.open(imagefiles[i]),
                                           dtype=numpy.int16)
                    squares.append(get_biggest_square((255, 0, 0), imgarray))

                    if len(squares) > 1 and not squares[-1] and squares[-2]:
                        if not end_frame:
                            end_frame = (i - 1)
                        if not self.capture_area:
                            self.capture_area = squares[-2]
                        self.logger.info("Found end capture signal at frame "
                                         "%s. Area: %s" %
                                         (i - 1, self.capture_area))
                        break

        # If we don't have a start frame, set it to 1
        if not start_frame:
            start_frame = 1
        # Don't have an end frame? make it the entire length of the
        # capture
        if not end_frame:
            end_frame = num_frames

        self.logger.info("Rewriting images in %s..." % self.outputdir)
        rewritten_imagedir = tempfile.mkdtemp(dir=self.custom_tempdir)

        with concurrent.futures.ProcessPoolExecutor() as executor:
            # map the frame before the start frame to the zeroth frame (if
            # possible). HACK: otherwise, create a copy of the start
            # frame (this duplicates a frame).
            remapped_frame = 0
            if start_frame > 1:
                remapped_frame = start_frame - 1
            executor.submit(_rewrite_frame, 0, rewritten_imagedir,
                            imagefiles[remapped_frame], self.capture_area,
                            self.capture_device)

            # last frame is the specified end frame or the first red frame if
            # no last frame specified, or the very last frame in the
            # sequence if there is no red frame and no specified last frame
            last_frame = min(num_frames - 1, end_frame + 2)

            # copy the remaining frames into numeric order starting from 1
            for (i, j) in enumerate(range(start_frame, last_frame)):
                executor.submit(_rewrite_frame, (i + 1), rewritten_imagedir,
                                imagefiles[j], self.capture_area,
                                self.capture_device)

        capturefps = self.fps
        if not capturefps:
            capturefps = 60
        generated_video_fps = capturefps
        if generated_video_fps > MAX_VIDEO_FPS:
            generated_video_fps = MAX_VIDEO_FPS

        if create_webm:
            self.logger.info("Creating movie ...")

            moviefile = tempfile.NamedTemporaryFile(dir=self.custom_tempdir,
                                                    suffix=".webm")
            # png2yuv is broken on Ubuntu 12.04 and earlier, so we can't use
            # vpxenc there by default
            if self.use_vpxenc:
                with tempfile.NamedTemporaryFile(
                        dir=self.custom_tempdir) as yuvfile:
                    yuvconv = subprocess.Popen(
                        ('png2yuv', '-I', 'p', '-f', str(capturefps), '-n',
                         str(last_frame - start_frame), '-j',
                         '%s/%%d.png' % rewritten_imagedir),
                        stdout=subprocess.PIPE)
                    while yuvconv.poll() is None:
                        yuvfile.write(yuvconv.stdout.read())
                    yuvfile.write(yuvconv.stdout.read())
                    yuvfile.flush()

                    subprocess.Popen(
                        ('vpxenc', '--good', '--cpu-used=0', '--end-usage=vbr',
                         '--passes=2',
                         '--threads=%s' % (multiprocessing.cpu_count() - 1),
                         '--target-bitrate=%s' % DEFAULT_WEBM_BIT_RATE, '-o',
                         moviefile.name, yuvfile.name)).wait()
            else:
                subprocess.Popen(
                    ('avconv', '-y', '-r', str(generated_video_fps), '-i',
                     os.path.join(rewritten_imagedir,
                                  '%d.png'), moviefile.name),
                    close_fds=True).wait()

        self.logger.info("Writing final capture '%s'..." %
                         self.output_filename)
        zipfile = ZipFile(self.output_filename, 'a')

        zipfile.writestr(
            'metadata.json',
            json.dumps(
                dict(
                    {
                        'captureDevice': self.capture_device,
                        'date': datetime.datetime.now().isoformat(),
                        'frameDimensions': frame_dimensions,
                        'fps': capturefps,
                        'generatedVideoFPS': generated_video_fps,
                        'version': 1
                    }, **self.capture_metadata)))
        if create_webm:
            zipfile.writestr('movie.webm', moviefile.read())

        for imagefilename in os.listdir(rewritten_imagedir):
            zipfile.writestr(
                "images/%s" % imagefilename,
                open(os.path.join(rewritten_imagedir, imagefilename)).read())

        zipfile.close()
        self.logger.info("Wrote out final capture.")

        shutil.rmtree(self.outputdir)
        shutil.rmtree(rewritten_imagedir)
        if self.output_raw_file:
            # closing the file should delete it
            self.output_raw_file.close()
            self.output_raw_file = None
Example #8
    def convert_capture(self, start_frame, end_frame, create_webm=True):
        self.log("Converting capture...")
        # wait for capture to finish if it has not already
        if self.capturing:
            self.log("Capture not finished... waiting")
            while self.capturing:
                time.sleep(0.5)

        if self.capture_device == "decklink":
            subprocess.Popen((
                os.path.join(DECKLINK_DIR, 'decklink-convert.sh'),
                self.output_raw_file.name, self.outputdir, self.mode),
                close_fds=True).wait()

        self.log("Gathering capture dimensions and cropping to start/end of "
                 "capture...")
        imagefiles = [os.path.join(self.outputdir, path) for path in
                      sorted(os.listdir(self.outputdir), key=_natural_key)]
        num_frames = len(imagefiles)

        # full image dimensions
        frame_dimensions = (0, 0)
        if num_frames > 0:
            im = Image.open(imagefiles[0])
            frame_dimensions = im.size

        # searching for start/end frames and capture dimensions only really
        # makes sense on the decklink cards, which have a clean HDMI signal.
        # input from things like the pointgrey cameras is too noisy...
        if self.capture_device == "decklink":
            # start frame
            if self.find_start_signal:
                self.log("Searching for start of capture signal ...")
                squares = []
                for (i, imagefile) in enumerate(imagefiles):
                    imgarray = numpy.array(Image.open(imagefile),
                                           dtype=numpy.int16)
                    squares.append(get_biggest_square((0, 255, 0), imgarray))
                    if i > 1 and not squares[-1] and squares[-2]:
                        if not start_frame:
                            start_frame = i
                        self.capture_area = squares[-2]
                        self.log("Found start capture signal at frame %s. "
                                 "Area: %s" % (i, self.capture_area))
                        break

            # end frame
            if self.find_end_signal:
                self.log("Searching for end of capture signal ...")
                squares = []
                for i in range(num_frames - 1, 0, -1):
                    imgarray = numpy.array(Image.open(imagefiles[i]),
                                           dtype=numpy.int16)
                    squares.append(get_biggest_square((255, 0, 0), imgarray))

                    if len(squares) > 1 and not squares[-1] and squares[-2]:
                        if not end_frame:
                            end_frame = (i - 1)
                        if not self.capture_area:
                            self.capture_area = squares[-2]
                        self.log("Found end capture signal at frame %s. Area: "
                                 "%s" % (i - 1, self.capture_area))
                        break

        # If we don't have a start frame, set it to 1
        if not start_frame:
            start_frame = 1
        # Don't have an end frame? make it the entire length of the
        # capture
        if not end_frame:
            end_frame = num_frames

        self.log("Rewriting images in %s..." % self.outputdir)
        rewritten_imagedir = tempfile.mkdtemp(dir=self.custom_tempdir)

        pool = multiprocessing.Pool()

        # map the frame before the start frame to the zeroth frame (if
        # possible). HACK: otherwise, create a copy of the start
        # frame (this duplicates a frame).
        remapped_frame = 0
        if start_frame > 1:
            remapped_frame = start_frame - 1
        pool.apply_async(_rewrite_frame,
                         [0, rewritten_imagedir, imagefiles[remapped_frame],
                          self.capture_area, self.capture_device])

        # last frame is the specified end frame or the first red frame if
        # no last frame specified, or the very last frame in the
        # sequence if there is no red frame and no specified last frame
        last_frame = min(num_frames - 1, end_frame + 2)

        # copy the remaining frames into numeric order starting from 1
        for (i, j) in enumerate(range(start_frame, last_frame)):
            pool.apply_async(_rewrite_frame, [(i + 1),
                             rewritten_imagedir, imagefiles[j],
                             self.capture_area, self.capture_device])

        # wait for the rewriting of the images to complete
        pool.close()
        pool.join()

        capturefps = self.fps
        if not capturefps:
            capturefps = 60
        generated_video_fps = capturefps
        if generated_video_fps > MAX_VIDEO_FPS:
            generated_video_fps = MAX_VIDEO_FPS

        if create_webm:
            self.log("Creating movie ...")

            moviefile = tempfile.NamedTemporaryFile(dir=self.custom_tempdir,
                                                    suffix=".webm")
            # png2yuv is broken on Ubuntu 12.04 and earlier, so we can't use
            # vpxenc there by default
            if self.use_vpxenc:
                with tempfile.NamedTemporaryFile(dir=self.custom_tempdir) as yuvfile:
                    yuvconv = subprocess.Popen(('png2yuv', '-I',  'p', '-f',
                                                str(capturefps), '-n',
                                                str(last_frame-start_frame), '-j',
                                                '%s/%%d.png' % rewritten_imagedir),
                                               stdout=subprocess.PIPE)
                    while yuvconv.poll() is None:
                        yuvfile.write(yuvconv.stdout.read())
                    yuvfile.write(yuvconv.stdout.read())
                    yuvfile.flush()

                    subprocess.Popen(('vpxenc', '--good', '--cpu-used=0',
                                      '--end-usage=vbr', '--passes=2',
                                      '--threads=%s' % (multiprocessing.cpu_count() - 1),
                                      '--target-bitrate=%s' % DEFAULT_WEBM_BIT_RATE,
                                      '-o', moviefile.name, yuvfile.name)).wait()
            else:
                subprocess.Popen(('avconv', '-y', '-r', str(generated_video_fps), '-i',
                                  os.path.join(rewritten_imagedir, '%d.png'),
                                  moviefile.name), close_fds=True).wait()


        self.log("Writing final capture '%s'..." % self.output_filename)
        zipfile = ZipFile(self.output_filename, 'a')

        zipfile.writestr('metadata.json',
                         json.dumps(dict({ 'captureDevice': self.capture_device,
                                           'date': self.capture_time.isoformat(),
                                           'frameDimensions': frame_dimensions,
                                           'fps': capturefps,
                                           'generatedVideoFPS': generated_video_fps,
                                           'version': 1 },
                                         **self.capture_metadata)))
        if create_webm:
            zipfile.writestr('movie.webm', moviefile.read())

        for imagefilename in os.listdir(rewritten_imagedir):
            zipfile.writestr("images/%s" % imagefilename,
                             open(os.path.join(rewritten_imagedir,
                                               imagefilename)).read())

        zipfile.close()

        shutil.rmtree(self.outputdir)
        shutil.rmtree(rewritten_imagedir)

        self.output_filename = None
        self.outputdir = None
        self.output_raw_file = None
Example #9
    def convert_capture(self, start_frame, end_frame, create_webm=True):
        self.log("Converting capture...")
        # wait for capture to finish if it has not already
        if self.capturing:
            self.log("Capture not finished... waiting")
            while self.capturing:
                time.sleep(0.5)

        if self.capture_device == "decklink":
            subprocess.Popen(
                (os.path.join(DECKLINK_DIR, 'decklink-convert.sh'),
                 self.output_raw_file.name, self.outputdir, self.mode),
                close_fds=True).wait()

        self.log("Gathering capture dimensions and cropping to start/end of "
                 "capture...")
        imagefiles = [
            os.path.join(self.outputdir, path)
            for path in sorted(os.listdir(self.outputdir), key=_natural_key)
        ]
        num_frames = len(imagefiles)

        # full image dimensions
        frame_dimensions = (0, 0)
        if num_frames > 0:
            im = Image.open(imagefiles[0])
            frame_dimensions = im.size

        # searching for start/end frames and capture dimensions only really
        # makes sense on the decklink cards, which have a clean HDMI signal.
        # input from things like the pointgrey cameras is too noisy...
        if self.capture_device == "decklink":
            # start frame
            if self.find_start_signal:
                self.log("Searching for start of capture signal ...")
                squares = []
                for (i, imagefile) in enumerate(imagefiles):
                    imgarray = numpy.array(Image.open(imagefile),
                                           dtype=numpy.int16)
                    squares.append(get_biggest_square((0, 255, 0), imgarray))
                    if i > 1 and not squares[-1] and squares[-2]:
                        if not start_frame:
                            start_frame = i
                        self.capture_area = squares[-2]
                        self.log("Found start capture signal at frame %s. "
                                 "Area: %s" % (i, self.capture_area))
                        break

            # end frame
            if self.find_end_signal:
                self.log("Searching for end of capture signal ...")
                squares = []
                for i in range(num_frames - 1, 0, -1):
                    imgarray = numpy.array(Image.open(imagefiles[i]),
                                           dtype=numpy.int16)
                    squares.append(get_biggest_square((255, 0, 0), imgarray))

                    if len(squares) > 1 and not squares[-1] and squares[-2]:
                        if not end_frame:
                            end_frame = (i - 1)
                        if not self.capture_area:
                            self.capture_area = squares[-2]
                        self.log("Found end capture signal at frame %s. Area: "
                                 "%s" % (i - 1, self.capture_area))
                        break

        # If we don't have a start frame, set it to 1
        if not start_frame:
            start_frame = 1
        # Don't have an end frame? make it the entire length of the
        # capture
        if not end_frame:
            end_frame = num_frames

        self.log("Rewriting images in %s..." % self.outputdir)
        rewritten_imagedir = tempfile.mkdtemp(dir=self.custom_tempdir)

        def _rewrite_frame(framenum, dirname, imagefilename):
            im = Image.open(imagefilename)
            if self.capture_area:
                im = im.crop(self.capture_area)
            # pointgrey needs a median filter because it's so noisy
            if self.capture_device == "pointgrey":
                im = im.filter(ImageFilter.MedianFilter())
            im = im.convert("RGB")
            im.save(os.path.join(dirname, '%s.png' % framenum))

        # map the frame before the start frame to the zeroth
        # frame (if possible)
        if start_frame > 1:
            _rewrite_frame(0, rewritten_imagedir, imagefiles[start_frame - 1])
        else:
            # HACK: otherwise, create a copy of the start frame
            # (this duplicates a frame)
            _rewrite_frame(0, rewritten_imagedir, imagefiles[0])
        # last frame is the specified end frame or the first red frame if
        # no last frame specified, or the very last frame in the
        # sequence if there is no red frame and no specified last frame
        last_frame = min(num_frames - 1, end_frame + 2)

        # copy the remaining frames into numeric order starting from 1
        # (use multiprocessing to speed this up: there's probably a more
        # elegant way of doing this, but I'm not sure what it is)
        multiprocesses = []
        for (i, j) in enumerate(range(start_frame, last_frame)):
            p = multiprocessing.Process(target=_rewrite_frame,
                                        args=((i + 1), rewritten_imagedir,
                                              imagefiles[j]))
            p.start()
            multiprocesses.append(p)
        # _rewrite_frame((i+1), rewritten_imagedir, imagefiles[j])
        for p in multiprocesses:
            p.join()

        capturefps = self.fps
        if not capturefps:
            capturefps = 60
        generated_video_fps = capturefps
        if generated_video_fps > MAX_VIDEO_FPS:
            generated_video_fps = MAX_VIDEO_FPS

        if create_webm:
            self.log("Creating movie ...")

            moviefile = tempfile.NamedTemporaryFile(dir=self.custom_tempdir,
                                                    suffix=".webm")
            subprocess.Popen(
                ('ffmpeg', '-y', '-r', str(generated_video_fps), '-i',
                 os.path.join(rewritten_imagedir, '%d.png'), moviefile.name),
                close_fds=True).wait()

        self.log("Writing final capture '%s'..." % self.output_filename)
        zipfile = ZipFile(self.output_filename, 'a')

        zipfile.writestr(
            'metadata.json',
            json.dumps(
                dict(
                    {
                        'captureDevice': self.capture_device,
                        'date': self.capture_time.isoformat(),
                        'frameDimensions': frame_dimensions,
                        'fps': capturefps,
                        'generatedVideoFPS': generated_video_fps,
                        'version': 1
                    }, **self.capture_metadata)))
        if create_webm:
            zipfile.writestr('movie.webm', moviefile.read())

        for imagefilename in os.listdir(rewritten_imagedir):
            zipfile.writestr(
                "images/%s" % imagefilename,
                open(os.path.join(rewritten_imagedir, imagefilename)).read())

        zipfile.close()

        shutil.rmtree(self.outputdir)
        shutil.rmtree(rewritten_imagedir)

        self.output_filename = None
        self.outputdir = None
        self.output_raw_file = None
Example #10
    def convert_capture(self, start_frame, end_frame):
        imagedir = tempfile.mkdtemp(dir=self.custom_tempdir)

        subprocess.Popen((os.path.join(DECKLINK_DIR, 'decklink-convert.sh'),
                          self.output_raw_file.name, imagedir, self.mode),
                         close_fds=True).wait()

        print "Gathering capture dimensions and cropping to start/end of capture..."
        imagefiles = [
            os.path.join(imagedir, path)
            for path in sorted(os.listdir(imagedir), key=_natural_key)
        ]
        num_frames = len(imagefiles)

        # full image dimensions
        frame_dimensions = (0, 0)
        if num_frames > 0:
            im = Image.open(imagefiles[0])
            frame_dimensions = im.size

        # start frame
        print "Searching for start of capture signal ..."
        squares = []
        capture_area = None
        for (i, imagefile) in enumerate(imagefiles):
            imgarray = numpy.array(Image.open(imagefile), dtype=numpy.int16)
            squares.append(get_biggest_square((0, 255, 0), imgarray))

            if i > 1 and not squares[-1] and squares[-2]:
                if not start_frame:
                    start_frame = i
                capture_area = squares[-2]
                break
        # If we still don't have a start frame, set it to 1
        if not start_frame:
            start_frame = 1

        # end frame
        print "Searching for end of capture signal ..."
        squares = []
        for i in range(num_frames - 1, 0, -1):
            imgarray = numpy.array(Image.open(imagefiles[i]),
                                   dtype=numpy.int16)
            squares.append(get_biggest_square((255, 0, 0), imgarray))

            if len(squares) > 1 and not squares[-1] and squares[-2]:
                if not end_frame:
                    end_frame = (i - 1)
                if not capture_area:
                    capture_area = squares[-2]
                break

        # still don't have an end frame? make it the entire length of the
        # capture
        if not end_frame:
            end_frame = num_frames

        print "Rewriting images ..."
        rewritten_imagedir = tempfile.mkdtemp(dir=self.custom_tempdir)

        def _rewrite_frame(framenum, dirname, imagefilename):
            im = Image.open(imagefilename)
            if capture_area:
                im = im.crop(capture_area)
            im.save(os.path.join(dirname, '%s.png' % framenum))

        # map the frame before the start frame to the zeroth frame (if possible)
        if start_frame > 1:
            _rewrite_frame(0, rewritten_imagedir, imagefiles[start_frame - 1])
        else:
            # HACK: otherwise, create a copy of the start frame
            # (this duplicates a frame)
            _rewrite_frame(0, rewritten_imagedir, imagefiles[0])
        # last frame is the specified end frame or the first red frame if
        # no last frame specified, or the very last frame in the
        # sequence if there is no red frame and no specified last frame
        last_frame = min(num_frames - 1, end_frame + 2)

        # copy the remaining frames into numeric order starting from 1
        # (use multiprocessing to speed this up: there's probably a more
        # elegant way of doing this, but I'm not sure what it is)
        multiprocesses = []
        for (i, j) in enumerate(range(start_frame, last_frame)):
            p = multiprocessing.Process(target=_rewrite_frame,
                                        args=((i + 1), rewritten_imagedir,
                                              imagefiles[j]))
            p.start()
            multiprocesses.append(p)
        # _rewrite_frame((i+1), rewritten_imagedir, imagefiles[j])
        for p in multiprocesses:
            p.join()

        print "Creating movie ..."
        moviefile = tempfile.NamedTemporaryFile(dir=self.custom_tempdir,
                                                suffix=".webm")
        subprocess.Popen(
            ('ffmpeg', '-y', '-r', '60', '-i',
             os.path.join(rewritten_imagedir, '%d.png'), moviefile.name),
            close_fds=True).wait()

        print "Writing final capture '%s'..." % self.output_filename
        zipfile = ZipFile(self.output_filename, 'a')

        zipfile.writestr(
            'metadata.json',
            json.dumps(
                dict(
                    {
                        'date': self.capture_time.isoformat(),
                        'frameDimensions': frame_dimensions,
                        'version': 1
                    }, **self.capture_metadata)))

        zipfile.writestr('movie.webm', moviefile.read())

        for imagefilename in os.listdir(rewritten_imagedir):
            zipfile.writestr(
                "images/%s" % imagefilename,
                open(os.path.join(rewritten_imagedir, imagefilename)).read())

        zipfile.close()

        shutil.rmtree(imagedir)
        shutil.rmtree(rewritten_imagedir)

        self.output_filename = None
        self.output_raw_file = None