コード例 #1
0
    def common_setup(self):
        """Pull down the file to be processed and set up model state."""
        # Localize the queued file and derive a filename template from it.
        self.process_filename = get_process_filename(self.entry,
                                                     self.workbench,
                                                     self.acceptable_files)
        self.name_builder = FilenameBuilder(self.process_filename)

        # Derive the extension, load the model, and record its largest
        # dimension (order matters: each step feeds the next).
        self._set_ext()
        self._set_model()
        self._set_greatest()
コード例 #2
0
    def common_setup(self):
        """Prepare the shared state needed for pdf processing."""
        # Localize the queued file and derive a filename template from it.
        self.process_filename = get_process_filename(self.entry,
                                                     self.workbench,
                                                     self.acceptable_files)
        self.name_builder = FilenameBuilder(self.process_filename)

        self._set_pdf_filename()
コード例 #3
0
File: processing.py  Project: sherlockliu/mediagoblin
    def common_setup(self):
        """Load the video plugin config and fetch the file to transcode."""
        # Plugin configuration section for this media type.
        self.video_config = mgg.global_config['plugins'][MEDIA_TYPE]

        # Localize the queued file and derive a filename template from it.
        self.process_filename = get_process_filename(self.entry,
                                                     self.workbench,
                                                     self.acceptable_files)
        self.name_builder = FilenameBuilder(self.process_filename)

        # Transcoder instance plus a flag recording whether we used it.
        self.transcoder = transcoders.VideoTranscoder()
        self.did_transcode = False
コード例 #4
0
    def common_setup(self):
        """Load the video media-type config and fetch the processing file."""
        # Media-type configuration section.
        self.video_config = (
            mgg.global_config['media_type:mediagoblin.media_types.video'])

        # Localize the queued file and derive a filename template from it.
        self.process_filename = get_process_filename(
            self.entry, self.workbench, self.acceptable_files)
        self.name_builder = FilenameBuilder(self.process_filename)

        # Transcoder instance plus a flag recording whether we used it.
        self.transcoder = transcoders.VideoTranscoder()
        self.did_transcode = False
コード例 #5
0
File: processing.py  Project: gitGNU/gnu_mediagoblin
    def common_setup(self):
        """
        Prepare shared audio-processing state: plugin config, the
        localized source file, a filename template, and the
        transcoder/thumbnailer helpers.
        """
        # Audio plugin configuration.
        self.audio_config = (
            mgg.global_config['plugins']['mediagoblin.media_types.audio'])

        # Localize the queued file and derive a filename template from it.
        self.process_filename = get_process_filename(self.entry,
                                                     self.workbench,
                                                     self.acceptable_files)
        self.name_builder = FilenameBuilder(self.process_filename)

        self.transcoder = AudioTranscoder()
        self.thumbnailer = AudioThumbnailer()
コード例 #6
0
    def common_setup(self):
        """Prepare ascii-art processing: config, scratch dir, source file."""
        # Ascii media-type plugin configuration.
        self.ascii_config = (
            mgg.global_config['plugins']['mediagoblin.media_types.ascii'])

        # Scratch subdirectory so converted files never collide.
        self.conversions_subdir = os.path.join(self.workbench.dir,
                                               'conversions')
        os.mkdir(self.conversions_subdir)

        # Localize the queued file and derive a filename template from it.
        self.process_filename = get_process_filename(self.entry,
                                                     self.workbench,
                                                     self.acceptable_files)
        self.name_builder = FilenameBuilder(self.process_filename)

        # Character set is detected later during processing.
        self.charset = None
コード例 #7
0
def process_pdf(proc_state):
    """Process a pdf document entry. Will be run by celery.

    Publishes the original file, converts non-pdf documents to pdf with
    unoconv for the viewer, renders thumb/medium preview images, and
    stores the extracted pdf metadata on the entry.

    A Workbench() represents a local tempory dir. It is automatically
    cleaned up when this function exits.
    """
    entry = proc_state.entry
    workbench = proc_state.workbench

    queued_filename = proc_state.get_queued_filename()
    name_builder = FilenameBuilder(queued_filename)

    # Publish the original upload under its own name.
    proc_state.copy_original(name_builder.fill('{basename}{ext}'))

    # If the upload is not already a pdf, convert it with unoconv and
    # store the converted pdf for the viewer.
    ext = queued_filename.rsplit('.', 1)[-1].lower()
    if ext == 'pdf':
        pdf_filename = queued_filename
    else:
        pdf_filename = queued_filename.rsplit('.', 1)[0] + '.pdf'
        unoconv = where('unoconv')
        Popen(executable=unoconv,
              args=[unoconv, '-v', '-f', 'pdf', queued_filename]).wait()
        if not os.path.exists(pdf_filename):
            _log.debug('unoconv failed to convert file to pdf')
            raise BadMediaFail()
        proc_state.store_public(keyname=u'pdf', local_file=pdf_filename)

    pdf_info_dict = pdf_info(pdf_filename)

    # Render thumbnail and medium-sized preview images from the pdf.
    thumb_cfg = mgg.global_config['media:thumb']
    medium_cfg = mgg.global_config['media:medium']
    sizes = [
        (u'thumb', thumb_cfg['max_width'], thumb_cfg['max_height']),
        (u'medium', medium_cfg['max_width'], medium_cfg['max_height']),
    ]
    for name, width, height in sizes:
        filename = name_builder.fill('{basename}.' + name + '.png')
        path = workbench.joinpath(filename)
        create_pdf_thumb(pdf_filename, path, width, height)
        # The renderer gives no return code; the file must exist.
        assert os.path.exists(path)
        proc_state.store_public(keyname=name, local_file=path)

    proc_state.delete_queue_file()

    # Record the extracted pdf metadata on the entry.
    entry.media_data_init(**pdf_info_dict)
    entry.save()
コード例 #8
0
def process_image(proc_state):
    """Process an uploaded image. Will be run by celery.

    Creates thumb/medium renditions, publishes the original, and stores
    cleaned EXIF plus GPS data on the media entry.

    A Workbench() represents a local tempory dir. It is automatically
    cleaned up when this function exits.
    """
    entry = proc_state.entry
    workbench = proc_state.workbench

    # Scratch subdirectory so resized files never collide.
    conversions_subdir = os.path.join(workbench.dir, 'conversions')
    os.mkdir(conversions_subdir)

    queued_filename = proc_state.get_queued_filename()
    name_builder = FilenameBuilder(queued_filename)

    # Pull EXIF metadata (and any embedded GPS info) from the original.
    exif_tags = extract_exif(queued_filename)
    gps_data = get_gps_data(exif_tags)

    # The thumbnail is mandatory; the medium rendition is optional.
    resize_tool(proc_state, True, 'thumb',
                name_builder.fill('{basename}.thumbnail{ext}'),
                conversions_subdir, exif_tags)
    resize_tool(proc_state, False, 'medium',
                name_builder.fill('{basename}.medium{ext}'),
                conversions_subdir, exif_tags)

    # Publish the original upload, then drop the queued copy from
    # storage and the database.
    proc_state.copy_original(name_builder.fill('{basename}{ext}'))
    proc_state.delete_queue_file()

    # Persist cleaned EXIF data on the media entry, if any survived.
    exif_all = clean_exif(exif_tags)
    if len(exif_all):
        entry.media_data_init(exif_all=exif_all)

    # GPS keys are prefixed before being stored as media data fields.
    if len(gps_data):
        for key in list(gps_data.keys()):
            gps_data['gps_' + key] = gps_data.pop(key)
        entry.media_data_init(**gps_data)
コード例 #9
0
    def common_setup(self, resolution=None):
        """Prepare shared video-processing state.

        When a truthy resolution is given, the output key and filename
        encode that resolution; otherwise the generic medium webm name
        is used.
        """
        self.video_config = mgg.global_config['plugins'][MEDIA_TYPE]

        # Localize the queued file and derive a filename template from it.
        self.process_filename = get_process_filename(self.entry,
                                                     self.workbench,
                                                     self.acceptable_files)
        self.name_builder = FilenameBuilder(self.process_filename)

        # Transcoder instance plus a flag recording whether we used it.
        self.transcoder = transcoders.VideoTranscoder()
        self.did_transcode = False

        if not resolution:
            self.curr_file = 'webm_video'
            self.part_filename = self.name_builder.fill(
                '{basename}.medium.webm')
        else:
            self.curr_file = 'webm_' + str(resolution)
            self.part_filename = self.name_builder.fill(
                '{basename}.' + str(resolution) + '.webm')
コード例 #10
0
    def common_setup(self):
        """
        Set up the workbench directory and pull down the original file.

        Loads the image media-type config, creates a scratch directory
        for conversions, localizes the queued file, and extracts its
        EXIF tags.
        """
        self.image_config = mgg.global_config[
            'media_type:mediagoblin.media_types.image']

        ## @@: Should this be two functions?
        # Conversions subdirectory to avoid collisions.
        # (Fixed typo: this previously created a 'convirsions' directory,
        # inconsistent with the other media types; the path is only ever
        # used via self.conversions_subdir.)
        self.conversions_subdir = os.path.join(self.workbench.dir,
                                               'conversions')
        os.mkdir(self.conversions_subdir)

        # Pull down and set up the processing file
        self.process_filename = get_process_filename(self.entry,
                                                     self.workbench,
                                                     self.acceptable_files)
        self.name_builder = FilenameBuilder(self.process_filename)

        # Exif extraction
        self.exif_tags = extract_exif(self.process_filename)
コード例 #11
0
    def common_setup(self):
        """
        Extract the embedded full-size JPEG preview from a raw file and
        make it the working image.
        """
        super().common_setup()

        # Keep a handle on the raw original before we swap filenames.
        self._original_raw = self.process_filename

        # Load the raw file's metadata, which carries the previews.
        md = pyexiv2.ImageMetadata(self._original_raw)
        md.read()

        # The working file lives in the conversions dir, named after the
        # queued media file.
        self.process_filename = os.path.join(
            self.conversions_subdir, self.entry.queued_media_file[-1])

        # Write the largest embedded preview out as our working image.
        md.previews[-1].write_to_file(self.process_filename.encode('utf-8'))
        self.process_filename += '.jpg'
        _log.debug('Wrote new file from {} to preview (jpg) {}'.format(
            self._original_raw, self.process_filename))

        # Rebuild the name builder around the jpg we just produced.
        self.name_builder = FilenameBuilder(self.process_filename)
コード例 #12
0
def process_stl(proc_state):
    """Code to process an stl or obj model. Will be run by celery.

    Parses the model, renders thumb/perspective/top/front/side snapshots
    via blender, publishes them and the original model file, and records
    the model's dimensions in the media data.

    A Workbench() represents a local tempory dir. It is automatically
    cleaned up when this function exits.
    """
    entry = proc_state.entry
    workbench = proc_state.workbench

    # Localize the queued file into the workbench so we can read it.
    queued_filepath = entry.queued_media_file
    queued_filename = workbench.localized_file(
        mgg.queue_store, queued_filepath, 'source')
    name_builder = FilenameBuilder(queued_filename)

    # Derive a 3-char extension from the filename tail; None when the
    # tail has no dot, letting the loader auto-detect the format.
    ext = queued_filename.lower().strip()[-4:]
    if ext.startswith("."):
        ext = ext[1:]
    else:
        ext = None

    # Attempt to parse the model file and divine some useful
    # information about it.
    with open(queued_filename, 'rb') as model_file:
        model = model_loader.auto_detect(model_file, ext)

    # generate preview images

    # Largest of the three model dimensions; used below to scale the
    # camera distance and clipping plane.
    greatest = [model.width, model.height, model.depth]
    greatest.sort()
    greatest = greatest[-1]

    def snap(name, camera, width=640, height=640, project="ORTHO"):
        # Render one snapshot of the model with blender, verify it was
        # produced, push it to public storage, and return the public path.
        filename = name_builder.fill(name)
        workbench_path = workbench.joinpath(filename)
        shot = {
            "model_path": queued_filename,
            "model_ext": ext,
            "camera_coord": camera,
            "camera_focus": model.average,
            "camera_clip": greatest*10,
            "greatest": greatest,
            "projection": project,
            "width": width,
            "height": height,
            "out_file": workbench_path,
            }
        blender_render(shot)

        # make sure the image rendered to the workbench path
        assert os.path.exists(workbench_path)

        # copy it up!
        with open(workbench_path, 'rb') as rendered_file:
            public_path = create_pub_filepath(entry, filename)

            with mgg.public_store.get_file(public_path, "wb") as public_file:
                public_file.write(rendered_file.read())

        return public_path

    # Thumbnail uses the configured thumb dimensions and a perspective
    # projection; the remaining views use the 640x640 defaults.
    thumb_path = snap(
        "{basename}.thumb.jpg",
        [0, greatest*-1.5, greatest],
        mgg.global_config['media:thumb']['max_width'],
        mgg.global_config['media:thumb']['max_height'],
        project="PERSP")

    perspective_path = snap(
        "{basename}.perspective.jpg",
        [0, greatest*-1.5, greatest], project="PERSP")

    topview_path = snap(
        "{basename}.top.jpg",
        [model.average[0], model.average[1], greatest*2])

    frontview_path = snap(
        "{basename}.front.jpg",
        [model.average[0], greatest*-2, model.average[2]])

    sideview_path = snap(
        "{basename}.side.jpg",
        [greatest*-2, model.average[1], model.average[2]])

    ## Save the public file stuffs
    model_filepath = create_pub_filepath(
        entry, name_builder.fill('{basename}{ext}'))

    with mgg.public_store.get_file(model_filepath, 'wb') as model_file:
        with open(queued_filename, 'rb') as queued_file:
            model_file.write(queued_file.read())

    # Remove queued media file from storage and database.
    # queued_filepath is in the task_id directory which should
    # be removed too, but fail if the directory is not empty to be on
    # the super-safe side.
    mgg.queue_store.delete_file(queued_filepath)      # rm file
    mgg.queue_store.delete_dir(queued_filepath[:-1])  # rm dir
    entry.queued_media_file = []

    # Insert media file information into database
    media_files_dict = entry.setdefault('media_files', {})
    media_files_dict[u'original'] = model_filepath
    media_files_dict[u'thumb'] = thumb_path
    media_files_dict[u'perspective'] = perspective_path
    media_files_dict[u'top'] = topview_path
    media_files_dict[u'side'] = sideview_path
    media_files_dict[u'front'] = frontview_path

    # Put model dimensions into the database
    dimensions = {
        "center_x" : model.average[0],
        "center_y" : model.average[1],
        "center_z" : model.average[2],
        "width" : model.width,
        "height" : model.height,
        "depth" : model.depth,
        "file_type" : ext,
        }
    entry.media_data_init(**dimensions)
コード例 #13
0
def process_video(proc_state):
    """
    Process a video entry, transcode the queued media files (originals) and
    create a thumbnail for the entry.

    When transcoding can be skipped, the original is published directly
    and its dimensions are used as-is; otherwise a webm rendition is
    produced and published as 'webm_640'.

    A Workbench() represents a local tempory dir. It is automatically
    cleaned up when this function exits.
    """
    entry = proc_state.entry
    workbench = proc_state.workbench
    video_config = mgg.global_config['media_type:mediagoblin.media_types.video']

    queued_filepath = entry.queued_media_file
    queued_filename = proc_state.get_queued_filename()
    name_builder = FilenameBuilder(queued_filename)

    medium_basename = name_builder.fill('{basename}-640p.webm')
    medium_filepath = create_pub_filepath(entry, medium_basename)

    thumbnail_basename = name_builder.fill('{basename}.thumbnail.jpg')
    thumbnail_filepath = create_pub_filepath(entry, thumbnail_basename)

    # Create a temporary file for the video destination (cleaned up with workbench)
    tmp_dst = os.path.join(workbench.dir, medium_basename)
    # Transcode queued file to a VP8/vorbis file that fits in a 640x640 square
    progress_callback = ProgressCallback(entry)

    dimensions = (
        mgg.global_config['media:medium']['max_width'],
        mgg.global_config['media:medium']['max_height'])

    # Extract metadata and keep a record of it
    metadata = transcoders.VideoTranscoder().discover(queued_filename)
    store_metadata(entry, metadata)

    # Figure out whether or not we need to transcode this video or
    # if we can skip it
    if skip_transcode(metadata):
        _log.debug('Skipping transcoding')

        # Source dimensions double as the display dimensions, since the
        # original is served directly.
        dst_dimensions = metadata['videowidth'], metadata['videoheight']

        # Push original file to public storage
        _log.debug('Saving original...')
        proc_state.copy_original(queued_filepath[-1])

        did_transcode = False
    else:
        transcoder = transcoders.VideoTranscoder()

        transcoder.transcode(queued_filename, tmp_dst,
                vp8_quality=video_config['vp8_quality'],
                vp8_threads=video_config['vp8_threads'],
                vorbis_quality=video_config['vorbis_quality'],
                progress_callback=progress_callback,
                dimensions=dimensions)

        dst_dimensions = transcoder.dst_data.videowidth,\
            transcoder.dst_data.videoheight

        # Push transcoded video to public storage
        _log.debug('Saving medium...')
        mgg.public_store.copy_local_to_storage(tmp_dst, medium_filepath)
        _log.debug('Saved medium')

        entry.media_files['webm_640'] = medium_filepath

        did_transcode = True

    # Save the width and height of the transcoded video
    entry.media_data_init(
        width=dst_dimensions[0],
        height=dst_dimensions[1])

    # Temporary file for the video thumbnail (cleaned up with workbench)
    tmp_thumb = os.path.join(workbench.dir, thumbnail_basename)

    # Create a thumbnail.jpg that fits in a 180x180 square
    transcoders.VideoThumbnailerMarkII(
        queued_filename,
        tmp_thumb,
        180)

    # Push the thumbnail to public storage
    _log.debug('Saving thumbnail...')
    mgg.public_store.copy_local_to_storage(tmp_thumb, thumbnail_filepath)
    entry.media_files['thumb'] = thumbnail_filepath

    # save the original... but only if we did a transcoding
    # (if we skipped transcoding and just kept the original anyway as the main
    #  media, then why would we save the original twice?)
    if video_config['keep_original'] and did_transcode:
        # Push original file to public storage
        _log.debug('Saving original...')
        proc_state.copy_original(queued_filepath[-1])

    # Remove queued media file from storage and database
    proc_state.delete_queue_file()
コード例 #14
0
def process_audio(proc_state):
    """Code to process uploaded audio. Will be run by celery.

    Optionally keeps the original, transcodes a webm (vorbis) rendition,
    and — when configured — renders a spectrogram image plus a thumbnail
    derived from it.

    A Workbench() represents a local tempory dir. It is automatically
    cleaned up when this function exits.
    """
    entry = proc_state.entry
    workbench = proc_state.workbench
    audio_config = mgg.global_config[
        'media_type:mediagoblin.media_types.audio']

    # Localize the queued file into the workbench so we can read it.
    queued_filepath = entry.queued_media_file
    queued_filename = workbench.localized_file(mgg.queue_store,
                                               queued_filepath, 'source')
    name_builder = FilenameBuilder(queued_filename)

    # Public destination for the transcoded webm audio.
    webm_audio_filepath = create_pub_filepath(
        entry, '{original}.webm'.format(
            original=os.path.splitext(queued_filepath[-1])[0]))

    if audio_config['keep_original']:
        with open(queued_filename, 'rb') as queued_file:
            original_filepath = create_pub_filepath(
                entry, name_builder.fill('{basename}{ext}'))

            with mgg.public_store.get_file(original_filepath, 'wb') as \
                    original_file:
                _log.debug('Saving original...')
                original_file.write(queued_file.read())

            entry.media_files['original'] = original_filepath

    transcoder = AudioTranscoder()

    # Transcode into a workbench-local temp file, then copy the bytes
    # over to public storage.
    with NamedTemporaryFile(dir=workbench.dir) as webm_audio_tmp:
        progress_callback = ProgressCallback(entry)

        transcoder.transcode(queued_filename,
                             webm_audio_tmp.name,
                             quality=audio_config['quality'],
                             progress_callback=progress_callback)

        transcoder.discover(webm_audio_tmp.name)

        _log.debug('Saving medium...')
        # NOTE(review): the storage file object is never explicitly
        # closed here — presumably the backend flushes on gc; confirm.
        mgg.public_store.get_file(webm_audio_filepath,
                                  'wb').write(webm_audio_tmp.read())

        entry.media_files['webm_audio'] = webm_audio_filepath

        # entry.media_data_init(length=int(data.audiolength))

    if audio_config['create_spectrogram']:
        spectrogram_filepath = create_pub_filepath(
            entry, '{original}-spectrogram.jpg'.format(
                original=os.path.splitext(queued_filepath[-1])[0]))

        # First render an OGG copy for the spectrogram tool to read.
        with NamedTemporaryFile(dir=workbench.dir, suffix='.ogg') as wav_tmp:
            _log.info('Creating OGG source for spectrogram')
            transcoder.transcode(
                queued_filename,
                wav_tmp.name,
                mux_string='vorbisenc quality={0} ! oggmux'.format(
                    audio_config['quality']))

            thumbnailer = AudioThumbnailer()

            with NamedTemporaryFile(dir=workbench.dir,
                                    suffix='.jpg') as spectrogram_tmp:
                thumbnailer.spectrogram(
                    wav_tmp.name,
                    spectrogram_tmp.name,
                    width=mgg.global_config['media:medium']['max_width'],
                    fft_size=audio_config['spectrogram_fft_size'])

                _log.debug('Saving spectrogram...')
                mgg.public_store.get_file(spectrogram_filepath,
                                          'wb').write(spectrogram_tmp.read())

                entry.media_files['spectrogram'] = spectrogram_filepath

                # The thumbnail is derived from the spectrogram image.
                with NamedTemporaryFile(dir=workbench.dir,
                                        suffix='.jpg') as thumb_tmp:
                    thumbnailer.thumbnail_spectrogram(
                        spectrogram_tmp.name, thumb_tmp.name,
                        (mgg.global_config['media:thumb']['max_width'],
                         mgg.global_config['media:thumb']['max_height']))

                    thumb_filepath = create_pub_filepath(
                        entry, '{original}-thumbnail.jpg'.format(
                            original=os.path.splitext(queued_filepath[-1])[0]))

                    mgg.public_store.get_file(thumb_filepath,
                                              'wb').write(thumb_tmp.read())

                    entry.media_files['thumb'] = thumb_filepath
    else:
        # No spectrogram: point 'thumb' at a placeholder path.
        entry.media_files['thumb'] = ['fake', 'thumb', 'path.jpg']

    # Remove queued media file from storage and database.
    # queued_filepath is in the task_id directory which should
    # be removed too, but fail if the directory is not empty to be on
    # the super-safe side.
    mgg.queue_store.delete_file(queued_filepath)  # rm file
    mgg.queue_store.delete_dir(queued_filepath[:-1])  # rm dir
    entry.queued_media_file = []