Code example #1
0
File: processors.py  Project: auvipy/flowgram.com
def process_filter(video_renderer, tag, context):
    """Process a <filter> tag.

    Renders the tag's children onto a fresh transparent layer, applies the
    optional 'alpha' (fade) and 'blur' effects to that layer, then draws the
    result back onto the parent layer with an identity transformation.

    Returns the (possibly replaced) context object.
    """
    (tag, attribs) = check_tag(context,
                               tag,
                               'filter',
                               None,
                               {'alpha': 'float=1.0',
                                'blur': 'int=0'})
    cpush(context)

    # Children draw onto a brand-new, fully transparent RGBA layer sized to
    # the current layer dimensions.
    filter_image = Image.new('RGBA', (cget(context, 'layer_width'), cget(context, 'layer_height')))
    cset(context, 'layer_image', filter_image)

    context = process_child_tags(video_renderer, tag, context)

    if attribs['alpha'] < 1.0:
        # Fade the rendered layer by blending it toward an empty
        # (transparent) image; alpha=1.0 keeps it fully opaque.
        blank_image = Image.new('RGBA', (cget(context, 'layer_width'), cget(context, 'layer_height')))
        filter_image = Image.blend(blank_image, filter_image, attribs['alpha'])

    if attribs['blur'] > 0:
        # PIL's BLUR filter has a fixed radius, so 'blur' is treated as a
        # pass count: each iteration softens the image a little more.
        for _ in range(attribs['blur']):
            filter_image = filter_image.filter(ImageFilter.BLUR)

    cpop(context)

    cpush(context)

    # Reset the transformation so the filtered layer is drawn exactly over
    # the parent layer, then composite it.
    cset(context, 'transformation', [])
    draw_image(context, filter_image)
    cpop(context)

    return context
Code example #2
0
File: processors.py  Project: auvipy/flowgram.com
def process_load_image(video_renderer, tag, context):
    """Process a <load-image> tag.

    Opens the image file named by the required 'src' attribute and stores
    the resulting PIL image in the context under the required 'id'
    attribute, so later tags can reference it by that id.
    """
    tag, attribs = check_tag(context,
                             tag,
                             'load-image',
                             {'id': 'string',
                              'src': 'string'})

    loaded_image = Image.open(attribs['src'])
    cset(context, attribs['id'], loaded_image)
    return context
Code example #3
0
File: processors.py  Project: auvipy/flowgram.com
def process_build_layer(video_renderer, tag, context):
    """Process a <build-layer> tag.

    Renders the tag's children onto a new transparent RGBA layer and stores
    the finished image in the context under the required 'id' attribute.
    Optional 'width'/'height' attributes override the current layer's
    dimensions.
    """
    tag, attribs = check_tag(context,
                             tag,
                             'build-layer',
                             {'id': 'string'},
                             {'width': 'int',
                              'height': 'int'})

    layer_w = attribs.get('width', cget(context, 'layer_width'))
    layer_h = attribs.get('height', cget(context, 'layer_height'))

    cpush(context)

    new_layer = Image.new('RGBA', (layer_w, layer_h), (255, 255, 255, 0))
    cset(context, 'layer_image', new_layer)
    cset(context, 'layer_width', layer_w)
    cset(context, 'layer_height', layer_h)

    context = process_child_tags(video_renderer, tag, context)

    cpop(context)

    # Publish the rendered layer after popping, so the id survives in the
    # enclosing context scope for later tags to use.
    cset(context, attribs['id'], new_layer)

    return context
Code example #4
0
File: processors.py  Project: auvipy/flowgram.com
def process_frame(video_renderer, tag, context):
    """Process a <frame> tag, emitting 'duration' frames of video.

    With an 'animate' attribute, every frame is rendered individually so
    animated values can change per frame. Without it, the frame is rendered
    once and the PNG is duplicated on disk for the remaining duration —
    a large speedup for static frames. When audio-only="true", the
    non-animated path writes no image files and does not advance the frame
    counter.
    """
    (tag, attribs) = check_tag(context,
                               tag,
                               'frame',
                               {'duration': 'duration'},
                               {'animate': 'string',
                                'audio-only': 'boolean=false'})
    # dict.has_key() is Python-2-only; the 'in' operator is equivalent and
    # works on both Python 2 and 3.
    if 'animate' in attribs:
        complete_frame_image = None

        for frame_index in range(attribs['duration']):
            # This frame is declared outside the cpush/cpop block because
            # the next frame may wish to access this data.
            frame_image = Image.new('RGBA',
                                    (cget(context, 'layer_width'), cget(context, 'layer_height')))
            complete_frame_image = frame_image
            cset(context, 'layer_image', frame_image)

            cpush(context)

            # Expose the current frame index so animated values can be
            # interpolated for this specific frame.
            cset(context, 'animated_frame_index', frame_index)
            setup_animation_values(context, attribs['animate'])

            # frame_number is a single-element list used as a mutable
            # counter shared across frames.
            frame_number = cget(context, 'frame_number')

            context = process_child_tags(video_renderer, tag, context)

            # NOTE(review): unlike the non-animated branch below, this path
            # saves the frame and advances the counter even when
            # audio-only="true" — confirm whether that is intended.
            frame_image.save('%sframe_%05d.png' % (video_renderer.storage_dir, frame_number[0]))

            frame_number[0] += 1

            cpop(context)

        if complete_frame_image and not attribs['audio-only']:
            cset(context, 'last_frame_image', complete_frame_image)
    else:
        # Optimized rendering for when animations are not set.

        # This frame is declared outside the cpush/cpop block because the
        # next frame may wish to access this data.
        frame_image = Image.new('RGBA',
                                (cget(context, 'layer_width'), cget(context, 'layer_height')))
        cset(context, 'layer_image', frame_image)

        cpush(context)

        frame_number = cget(context, 'frame_number')

        context = process_child_tags(video_renderer, tag, context)

        if not attribs['audio-only']:
            first_frame_filename = '%sframe_%05d.png' % (video_renderer.storage_dir, frame_number[0])

            frame_image.save(first_frame_filename)

            frame_number[0] += 1
            # Duplicate the rendered PNG for the rest of the duration
            # instead of re-rendering identical frames.
            for _ in range(1, attribs['duration']):
                duplicate_frame_filename = '%sframe_%05d.png' % (video_renderer.storage_dir,
                                                                 frame_number[0])
                shutil.copyfile(first_frame_filename, duplicate_frame_filename)
                frame_number[0] += 1

        cpop(context)

        if not attribs['audio-only']:
            cset(context, 'last_frame_image', frame_image)

    return context