Example 1
def posterize(files, posterfile=None, background_color="black", margin=5):
    min_h = max_height(files)
    min_w = max_width(files)
    util.logger.debug("Max W x H = %d x %d", min_w, min_h)
    gap = (min_w * margin) // 100

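    # Lay the images out on a roughly square grid:
    # rows is the ceiling of sqrt(n), cols the ceiling of n / rows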
    nb_files = len(files)
    root = math.sqrt(nb_files)
    rows = int(round(root))
    if rows < root:
        rows += 1
    cols = (nb_files + rows-1) // rows

    full_w = (cols*min_w) + (cols+1)*gap
    full_h = (rows*min_h) + (rows+1)*gap

    util.logger.debug("W x H = %d x %d / Gap = %d / c,r = %d, %d => Full W x H = %d x %d",
                      min_w, min_h, gap, cols, rows, full_w, full_h)
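    # Build a solid background canvas of the full poster size from a plain color tile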
    bgfile = "white-square.jpg" if background_color == "white" else "black-square.jpg"
    tmpbg = "bg.tmp.jpg"
    rescale(bgfile, full_w, full_h, tmpbg)

    file_list = util.build_ffmpeg_file_list(files)

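    # Filter graph: scale and label each input image ([pipN]), then chain overlays onto the background grid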
    cmplx = util.build_ffmpeg_complex_prep(files)
    cmplx = cmplx + __build_poster_fcomplex(rows, cols, gap, min_w, min_h, len(files))

    posterfile = util.automatic_output_file_name(posterfile, files[0], "poster")
    util.run_ffmpeg('-i "%s" %s -filter_complex "%s" "%s"' % (tmpbg, file_list, cmplx, posterfile))
    util.logger.info("Generated %s", posterfile)
    util.delete_files(tmpbg)
    return posterfile
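
A minimal usage sketch for the function above (hypothetical file names; assumes posterize is imported from this module):

poster = posterize(["a.jpg", "b.jpg", "c.jpg", "d.jpg"], background_color="white", margin=5)
# With 4 images this yields a 2 x 2 grid on a white background and returns the poster file name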
Example 2
    def crop(self, width, height, top, left, out_file, **kwargs):
        ''' Applies crop video filter for width x height pixels '''

        if isinstance(width, str):
            width = int(width)
        if isinstance(height, str):
            height = int(height)
        i_bitrate = self.get_video_bitrate()
        i_w, i_h = self.get_dimensions()
        media_opts = self.get_properties()
        media_opts[opt.media.ACODEC] = 'copy'
        # Target bitrate proportional to crop level (+ 20%)
        media_opts[opt.media.VBITRATE] = int(
            int(i_bitrate) * (width * height) / (int(i_w) * int(i_h)) * 1.2)

        media_opts.update(util.cleanup_options(kwargs))
        util.logger.info("Cmd line settings = %s", str(media_opts))
        out_file = util.automatic_output_file_name(out_file, self.filename, \
            "crop_{0}x{1}-{2}x{3}".format(width, height, top, left))
        aspect = __get_aspect_ratio__(width, height, **kwargs)

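        # Assemble the ffmpeg command: input file, encoding options, crop filter and output display aspect ratio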
        cmd = '-i "%s" %s %s -aspect %s "%s"' % (self.filename, \
            media.build_ffmpeg_options(media_opts), media.get_crop_filter_options(width, height, top, left), \
            aspect, out_file)
        util.run_ffmpeg(cmd)
        return out_file
Example 3
    def shake_horizontal(self, nbr_slices = 10, shake_pct = 3, background_color = "black", out_file = None):
        w, h = self.get_dimensions()
        w_jitter = w * shake_pct // 100
        slice_height = max(h // nbr_slices, 16)
        slices = self.slice_horizontal(nbr_slices)
        tmpbg = get_rectangle(background_color, w + w_jitter, slice_height * len(slices))
        filelist = util.build_ffmpeg_file_list(slices) + ' -i "%s"' % tmpbg
        cmplx = util.build_ffmpeg_complex_prep(slices)

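        # Overlay the first slice at the top-left of the background, then each following slice
        # one row below the previous one, shifted by a random horizontal offset (the "shake")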
        step = 0
        n_slices = len(slices)
        cmplx = cmplx + "[%d][pip0]overlay=0:0[step%d]; " % (n_slices, step)
        first_slice = slices.pop(0)

        for j in range(n_slices - 1):
            x = random.randint(1, w_jitter)
            y = (j+1) * slice_height
            cmplx = cmplx + "[step%d][pip%d]overlay=%d:%d" % (j, j+1, x, y)
            if j < n_slices - 2:
                cmplx = cmplx + '[step%d]; ' % (j+1)

        out_file = util.automatic_output_file_name(out_file, self.filename, "shake")
        util.run_ffmpeg('%s -filter_complex "%s" "%s"' % (filelist, cmplx, out_file))
        util.delete_files(*slices, first_slice, tmpbg)
        return out_file
Example 4
    def blindify(self, out_file = None, **kwargs):
        nbr_slices = int(kwargs.pop('blinds', 10))
        blinds_size_pct = int(kwargs.pop('blinds_ratio', 3))
        background_color = kwargs.pop('background_color', 'black')
        direction = kwargs.pop('direction', 'vertical')
        w, h = self.get_dimensions()

        w_gap = w * blinds_size_pct // 100
        h_gap = h * blinds_size_pct // 100

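        # Background canvas sized to hold every slice plus a colored gap between consecutive slices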
        if direction == 'horizontal':
            tmpbg = get_rectangle(background_color, w, (h//nbr_slices*nbr_slices) + h_gap*(nbr_slices-1))
        else:
            tmpbg = get_rectangle(background_color, (w//nbr_slices*nbr_slices) + w_gap*(nbr_slices-1), h)

        # ffmpeg -i file1.jpg -i file2.jpg -i bg.tmp.jpg \
        # -filter_complex "[0]scale=iw:-1:flags=lanczos[pip0]; \
        # [1]scale=iw:-1:flags=lanczos[pip1]; \
        # [8]scale=iw:-1:flags=lanczos[pip8]; \
        # [9][pip0]overlay=204:204[step0] ; \
        # [step0][pip1]overlay=2456:204[step1]; \
        # [step7][pip8]overlay=4708:3374" outfile.jpg

        slices = self.slice(nbr_slices, direction)
        filelist = util.build_ffmpeg_file_list(slices)
        filelist = filelist + ' -i "%s"' % tmpbg

        cmplx = ''
        i = 0
        for _ in slices:
            cmplx = cmplx + "[%d]scale=iw:-1:flags=lanczos[pip%d]; " % (i, i)
            i = i + 1

        step = 0
        cmplx = cmplx + "[%d][pip0]overlay=0:0[step%d]; " % (i, step)
        first_slice = slices.pop(0)
        x = 0
        y = 0
        for j in range(len(slices)):
            if direction == 'horizontal':
                y = (j+1) * (h // nbr_slices + h_gap)
            else:
                x = (j+1) * (w // nbr_slices + w_gap)
            cmplx = cmplx + "[step%d][pip%d]overlay=%d:%d" % (j, j+1, x, y)
            if j < len(slices) - 1:
                cmplx = cmplx + '[step%d]; ' % (j+1)

        out_file = util.automatic_output_file_name(out_file, self.filename, "blind")
        util.run_ffmpeg('%s -filter_complex "%s" "%s"' % (filelist, cmplx, out_file))
        util.delete_files(*slices, first_slice, tmpbg)
        return out_file
Example 5
def encode_file(args, options):
    '''Encodes a single file'''
    if util.is_audio_file(args.inputfile):
        file_object = audio.AudioFile(args.inputfile)
    elif util.is_image_file(args.inputfile):
        file_object = img.ImageFile(args.inputfile)
    else:
        file_object = video.VideoFile(args.inputfile)
    if args.width is not None:
        specs = file_object.get_properties()
        w = int(specs[opt.media.WIDTH])
        h = int(specs[opt.media.HEIGHT])
        new_w = int(args.width)
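        # Keep the aspect ratio, rounding the height down to a multiple of 8 as most encoders expect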
        new_h = (int(h * new_w / w) // 8) * 8
        options[opt.media.SIZE] = "%dx%d" % (new_w, new_h)
    if args.timeranges is None:
        file_object.encode(args.outputfile, args.profile, **options)
        return

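    # One or more time ranges requested: encode each range to its own file, then concatenate them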
    ext = None
    if args.outputfile is None:
        ext = util.get_profile_extension(args.profile)
    count = 0
    filelist = []
    timeranges = re.split(',', args.timeranges)
    for video_range in timeranges:
        options[opt.media.START], options[opt.media.STOP] = re.split(
            '-', video_range)
        count += 1
        target_file = util.automatic_output_file_name(args.outputfile,
                                                      args.inputfile,
                                                      str(count), ext)
        filelist.append(target_file)
        outputfile = file_object.encode(target_file, args.profile, **options)
        util.logger.info("File %s generated", outputfile)
    if len(timeranges) > 1:
        # If more than 1 file generated, concatenate all generated files
        target_file = util.automatic_output_file_name(args.outputfile,
                                                      args.inputfile,
                                                      "combined", ext)
        video.concat(target_file, filelist)
Example 6
    def resize(self, width = None, height = None, out_file = None):
        '''Resizes an image file
        If one of width or height is None, then it is calculated to
        preserve the image aspect ratio'''
        if width is None and height is None:
            util.logger.error("Resize requested with neither width nor height")
            return None
        if isinstance(width, str):
            width = int(width)
        if isinstance(height, str):
            height = int(height)
        if width is None:
            w, h = self.get_dimensions()
            width = w * height // h
        elif height is None:
            w, h = self.get_dimensions()
            height = h * width // w
        out_file = util.automatic_output_file_name(out_file, self.filename, "resized-%dx%d" % (width, height))
        util.logger.debug("Resizing %s to %d x %d into %s", self.filename, width, height, out_file)
        util.run_ffmpeg('-i "%s" -vf scale=%d:%d "%s"' % (self.filename, width, height, out_file))
        return out_file
Example 7
def stack(file1, file2, direction, out_file = None):
    util.logger.debug("stack(%s, %s, %s, _)", file1, file2, direction)
    if not util.is_image_file(file1):
        raise media.FileTypeError('File %s is not an image file' % file1)
    if not util.is_image_file(file2):
        raise media.FileTypeError('File %s is not an image file' % file2)
    out_file = util.automatic_output_file_name(out_file, file1, "stacked")
    w1, h1 = ImageFile(file1).get_dimensions()
    w2, h2 = ImageFile(file2).get_dimensions()
    tmpfile1 = file1
    tmpfile2 = file2
    util.logger.debug("Images dimensions: %d x %d and %d x %d", w1, h1, w2, h2)
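    # Rescale one of the two images so that both share the same height (hstack) or width (vstack)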
    if direction == 'horizontal':
        filter_name = 'hstack'
        if h1 > h2:
            new_w2 = w2 * h1 // h2
            tmpfile2 = rescale(file2, new_w2, h1)
        elif h2 > h1:
            new_w1 = w1 * h2 // h1
            tmpfile1 = rescale(file1, new_w1, h2)
    else:
        filter_name = 'vstack'
        if w1 > w2:
            new_h2 = h2 * w1 // w2
            tmpfile2 = rescale(file2, w1, new_h2)
        elif w2 > w1:
            new_h1 = h1 * w2 // w1
            tmpfile1 = rescale(file1, w2, new_h1)

    # ffmpeg -i a.jpg -i b.jpg -filter_complex hstack output

    util.run_ffmpeg('-i "%s" -i "%s" -filter_complex %s "%s"' % (tmpfile1, tmpfile2, filter_name, out_file))
    if tmpfile1 is not file1:
        util.delete_files(tmpfile1)
    if tmpfile2 is not file2:
        util.delete_files(tmpfile2)
    return out_file
Example 8
    def cut(self, start, stop, out_file=None, **kwargs):
        if out_file is None:
            out_file = util.automatic_output_file_name(
                out_file, self.filename, "cut_%s-to-%s" % (start, stop))
        util.logger.debug("Cutting %s from %s to %s into %s", self.filename,
                          start, stop, out_file)
        media_opts = self.get_properties()
        kwargs['start'] = start
        kwargs['stop'] = stop

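        # Optional fade: fade in at the start of the cut and fade out just before its end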
        video_filters = []
        if 'fade' in kwargs and kwargs['fade'] is not None:
            fade_d = int(kwargs['fade'])
            fmt = "fade=type={0}:duration={1}:start_time={2}"
            fader = fmt.format('in', fade_d, start) + "," + fmt.format(
                'out', fade_d, stop - fade_d)
            video_filters.append(fader)

        util.run_ffmpeg(
            '-i "%s" %s %s "%s"' %
            (self.filename, opt.media2ffmpeg(media_opts),
             media.build_video_filters_options(video_filters), out_file))

        return out_file
Example 9
def rescale(image_file, width, height, out_file = None):
    out_file = util.automatic_output_file_name(out_file, image_file, "scale-%dx%d" % (width, height))
    util.logger.debug("Rescaling %s to %d x %d into %s", image_file, width, height, out_file)
    # ffmpeg -i input.jpg -vf scale=320:240 output_320x240.png
    util.run_ffmpeg('-i "%s" -vf scale=%d:%d "%s"' % (image_file, width, height, out_file))
    return out_file
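
A minimal usage sketch for rescale (hypothetical file name; when out_file is omitted the name is derived by util.automatic_output_file_name):

scaled = rescale("photo.jpg", 320, 240)
# Returns the path of a 320x240 copy of photo.jpg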
Example 10
    def crop(self, w, h, x, y, out_file = None):
        util.logger.debug("%s(->%s, %d, %d, %d, %d)", 'crop', self.filename, w, h, x, y)
        out_file = util.automatic_output_file_name(out_file, self.filename, "crop.%dx%d" % (w, h))
        # ffmpeg -i input.png -vf "crop=w:h:x:y" input_crop.png
        util.run_ffmpeg('-y -i "%s" -vf crop=%d:%d:%d:%d "%s"' % (self.filename, w, h, x, y, out_file))
        return out_file