def import_file(self, media):
        try:
            media_type, media_manager = sniff_media(media)
        except (InvalidFileType, FileTypeNotSupported) as e:
            print u"File error {0}: {1}".format(media.filename, repr(e)).encode("utf-8")
            return
        entry = self.db.MediaEntry()
        entry.media_type = unicode(media_type)
        entry.title = unicode(os.path.splitext(media.filename)[0])

        entry.uploader = 1
        # Process the user's folksonomy "tags"
        entry.tags = convert_to_tag_list_of_dicts("")
        # Generate a slug from the title
        entry.generate_slug()

        task_id = unicode(uuid.uuid4())

        entry.queued_media_file = media.filename.split("/")
        entry.queued_task_id = task_id

        entry.save()

        process_media = registry.tasks[ProcessMedia.name]
        try:
            process_media.apply_async([unicode(entry.id)], {}, task_id=task_id)
        except BaseException as exc:
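            # When Celery is run in "lazy" / always-eager mode with exceptions
            # propagated, the failure handling won't happen on the Celery end,
            # so mark the entry failed here before re-raising (see the longer
            # note in the post_entry example further down).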
            mark_entry_failed(entry.id, exc)
            raise
def test_garbage_collection_task(test_app):
    """ Test old media entry are removed by GC task """
    user = fixture_add_user()

    # Create a media entry that's unprocessed and over an hour old.
    entry_id = 72
    now = datetime.datetime.now(pytz.UTC)
    file_data = FileStorage(
        stream=open(GOOD_JPG, "rb"),
        filename="mah_test.jpg",
        content_type="image/jpeg"
    )

    # Find media manager
    media_type, media_manager = sniff_media(file_data, "mah_test.jpg")
    entry = new_upload_entry(user)
    entry.id = entry_id
    entry.title = "Mah Image"
    entry.slug = "slugy-slug-slug"
    entry.media_type = 'image'
    entry.created = now - datetime.timedelta(days=2)
    entry.save()

    # Validate the model exists
    assert MediaEntry.query.filter_by(id=entry_id).first() is not None

    # Call the garbage collection task
    collect_garbage()

    # Now validate the image has been deleted
    assert MediaEntry.query.filter_by(id=entry_id).first() is None
Example #4
def pwg_images_addSimple(request):
    form = AddSimpleForm(request.form)
    if not form.validate():
        _log.error("addSimple: form failed")
        raise BadRequest()
    dump = []
    for f in form:
        dump.append("%s=%r" % (f.name, f.data))
    _log.info("addSimple: %r %s %r", request.form, " ".join(dump), request.files)

    if not check_file_field(request, "image"):
        raise BadRequest()

    filename = request.files["image"].filename

    # Sniff the submitted media to determine which
    # media plugin should handle processing
    media_type, media_manager = sniff_media(request.files["image"])

    # create entry and save in database
    entry = new_upload_entry(request.user)
    entry.media_type = unicode(media_type)
    entry.title = unicode(form.name.data) or unicode(splitext(filename)[0])

    entry.description = unicode(form.comment.data)

    """
    # Process the user's folksonomy "tags"
    entry.tags = convert_to_tag_list_of_dicts(
        form.tags.data)
    """

    # Generate a slug from the title
    entry.generate_slug()

    queue_file = prepare_queue_task(request.app, entry, filename)

    with queue_file:
        shutil.copyfileobj(request.files["image"].stream, queue_file, length=4 * 1048576)

    # Save now so we have this data before kicking off processing
    entry.save()

    # Pass off to processing
    #
    # (... don't change entry after this point to avoid race
    # conditions with changes to the document via processing code)
    feed_url = request.urlgen("mediagoblin.user_pages.atom_feed", qualified=True, user=request.user.username)
    run_process_media(entry, feed_url)

    collection_id = form.category.data
    if collection_id > 0:
        collection = Collection.query.get(collection_id)
        if collection is not None and collection.creator == request.user.id:
            add_media_to_collection(collection, entry, "")

    return {"image_id": entry.id, "url": entry.url_for_self(request.urlgen, qualified=True)}
def post_entry(request):
    _log.debug('Posting entry')

    if request.method == 'OPTIONS':
        return json_response({'status': 200})

    if request.method != 'POST':
        _log.debug('Must POST against post_entry')
        raise BadRequest()

    if 'file' not in request.files \
            or not isinstance(request.files['file'], FileStorage) \
            or not request.files['file'].stream:
        _log.debug('File field not found')
        raise BadRequest()

    media_file = request.files['file']

    media_type, media_manager = sniff_media(media_file)

    entry = request.db.MediaEntry()
    entry.media_type = unicode(media_type)
    entry.title = unicode(request.form.get('title')
            or splitext(media_file.filename)[0])

    entry.description = unicode(request.form.get('description'))
    entry.license = unicode(request.form.get('license', ''))

    entry.uploader = request.user.id

    entry.generate_slug()

    # queue appropriately
    queue_file = prepare_queue_task(request.app, entry, media_file.filename)

    with queue_file:
        queue_file.write(request.files['file'].stream.read())

    # Save now so we have this data before kicking off processing
    entry.save()

    if request.form.get('callback_url'):
        metadata = request.db.ProcessingMetaData()
        metadata.media_entry = entry
        metadata.callback_url = unicode(request.form['callback_url'])
        metadata.save()

    # Pass off to processing
    #
    # (... don't change entry after this point to avoid race
    # conditions with changes to the document via processing code)
    run_process_media(entry)

    return json_response(get_entry_serializable(entry, request.urlgen))
def post_entry(request):
    _log.debug('Posting entry')

    if request.method == 'OPTIONS':
        return json_response({'status': 200})

    if request.method != 'POST':
        _log.debug('Must POST against post_entry')
        raise BadRequest()

    if not check_file_field(request, 'file'):
        _log.debug('File field not found')
        raise BadRequest()

    media_file = request.files['file']

    media_type, media_manager = sniff_media(media_file)

    entry = new_upload_entry(request.user)
    entry.media_type = unicode(media_type)
    entry.title = unicode(
        request.form.get('title') or splitext(media_file.filename)[0])

    entry.description = unicode(request.form.get('description'))
    entry.license = unicode(request.form.get('license', ''))

    entry.generate_slug()

    # queue appropriately
    queue_file = prepare_queue_task(request.app, entry, media_file.filename)

    with queue_file:
        queue_file.write(request.files['file'].stream.read())

    # Save now so we have this data before kicking off processing
    entry.save()

    if request.form.get('callback_url'):
        metadata = request.db.ProcessingMetaData()
        metadata.media_entry = entry
        metadata.callback_url = unicode(request.form['callback_url'])
        metadata.save()

    # Pass off to processing
    #
    # (... don't change entry after this point to avoid race
    # conditions with changes to the document via processing code)
    feed_url = request.urlgen('mediagoblin.user_pages.atom_feed',
                              qualified=True,
                              user=request.user.username)
    run_process_media(entry, feed_url)

    return json_response(get_entry_serializable(entry, request.urlgen))
Example #7
def post_entry(request):
    _log.debug("Posting entry")

    if request.method == "OPTIONS":
        return json_response({"status": 200})

    if request.method != "POST":
        _log.debug("Must POST against post_entry")
        raise BadRequest()

    if not check_file_field(request, "file"):
        _log.debug("File field not found")
        raise BadRequest()

    media_file = request.files["file"]

    media_type, media_manager = sniff_media(media_file)

    entry = request.db.MediaEntry()
    entry.media_type = unicode(media_type)
    entry.title = unicode(request.form.get("title") or splitext(media_file.filename)[0])

    entry.description = unicode(request.form.get("description"))
    entry.license = unicode(request.form.get("license", ""))

    entry.uploader = request.user.id

    entry.generate_slug()

    # queue appropriately
    queue_file = prepare_queue_task(request.app, entry, media_file.filename)

    with queue_file:
        queue_file.write(request.files["file"].stream.read())

    # Save now so we have this data before kicking off processing
    entry.save()

    if request.form.get("callback_url"):
        metadata = request.db.ProcessingMetaData()
        metadata.media_entry = entry
        metadata.callback_url = unicode(request.form["callback_url"])
        metadata.save()

    # Pass off to processing
    #
    # (... don't change entry after this point to avoid race
    # conditions with changes to the document via processing code)
    feed_url = request.urlgen("mediagoblin.user_pages.atom_feed", qualified=True, user=request.user.username)
    run_process_media(entry, feed_url)

    return json_response(get_entry_serializable(entry, request.urlgen))
Example #8
def submit_start(request):
    """
    First view for submitting a file.
    """
    user = request.user
    if user.upload_limit >= 0:
        upload_limit = user.upload_limit
    else:
        upload_limit = mg_globals.app_config.get('upload_limit', None)

    if upload_limit and user.uploaded >= upload_limit:
        messages.add_message(
            request,
            messages.WARNING,
            _('Sorry, you have reached your upload limit.'))
        return redirect(request, "mediagoblin.user_pages.user_home",
                        user=request.user.username)

    max_file_size = mg_globals.app_config.get('max_file_size', None)

    submit_form = submit_forms.get_submit_start_form(
        request.form,
        license=request.user.license_preference,
        max_file_size=max_file_size,
        upload_limit=upload_limit,
        uploaded=user.uploaded)

    if request.method == 'POST' and submit_form.validate():
        if not check_file_field(request, 'file'):
            submit_form.file.errors.append(
                _(u'You must provide a file.'))
        else:
            try:
                filename = request.files['file'].filename

                # If the filename contains non-ASCII characters, generate a unique name
                if not all(ord(c) < 128 for c in filename):
                    filename = unicode(uuid.uuid4()) + splitext(filename)[-1]

                # Sniff the submitted media to determine which
                # media plugin should handle processing
                media_type, media_manager = sniff_media(
                    request.files['file'])

                # create entry and save in database
                entry = new_upload_entry(request.user)
                entry.media_type = unicode(media_type)
                entry.title = (
                    unicode(submit_form.title.data)
                    or unicode(splitext(request.files['file'].filename)[0]))

                entry.description = unicode(submit_form.description.data)

                entry.license = unicode(submit_form.license.data) or None

                # Process the user's folksonomy "tags"
                entry.tags = convert_to_tag_list_of_dicts(
                    submit_form.tags.data)

                # Generate a slug from the title
                entry.generate_slug()

                queue_file = prepare_queue_task(request.app, entry, filename)

                with queue_file:
                    queue_file.write(request.files['file'].stream.read())

                # Get file size and round to 2 decimal places
                file_size = request.app.queue_store.get_file_size(
                    entry.queued_media_file) / (1024.0 * 1024)
                file_size = float('{0:.2f}'.format(file_size))

                error = False

                # Check if file size is over the limit
                if max_file_size and file_size >= max_file_size:
                    submit_form.file.errors.append(
                        _(u'Sorry, the file size is too big.'))
                    error = True

                # Check if user is over upload limit
                if upload_limit and (user.uploaded + file_size) >= upload_limit:
                    submit_form.file.errors.append(
                        _('Sorry, uploading this file will put you over your'
                          ' upload limit.'))
                    error = True

                if not error:
                    user.uploaded = user.uploaded + file_size
                    user.save()

                    entry.file_size = file_size

                    # Save now so we have this data before kicking off processing
                    entry.save()

                    # Pass off to processing
                    #
                    # (... don't change entry after this point to avoid race
                    # conditions with changes to the document via processing code)
                    feed_url = request.urlgen(
                        'mediagoblin.user_pages.atom_feed',
                        qualified=True, user=request.user.username)
                    run_process_media(entry, feed_url)
                    add_message(request, SUCCESS, _('Woohoo! Submitted!'))

                    add_comment_subscription(request.user, entry)

                    return redirect(request, "mediagoblin.user_pages.user_home",
                                    user=user.username)
            except Exception as e:
                '''
                This section is intended to catch exceptions raised in
                mediagoblin.media_types
                '''
                if isinstance(e, InvalidFileType) or \
                        isinstance(e, FileTypeNotSupported):
                    submit_form.file.errors.append(
                        e)
                else:
                    raise

    return render_to_response(
        request,
        'mediagoblin/submit/start.html',
        {'submit_form': submit_form,
         'app_config': mg_globals.app_config})
Example #9
def submit_media(mg_app, user, submitted_file, filename,
                 title=None, description=None,
                 license=None, tags_string=u"",
                 upload_limit=None, max_file_size=None,
                 callback_url=None,
                 # If provided we'll do the feed_url update, otherwise ignore
                 urlgen=None,):
    """
    Args:
     - mg_app: The MediaGoblinApp instantiated for this process
     - user: the user object this media entry should be associated with
     - submitted_file: the file-like object that has the
       being-submitted file data in it (this object should really have
       a .name attribute which is the filename on disk!)
     - filename: the *original* filename of this file.  Not necessarily the
       one on disk being referenced by submitted_file.
     - title: title for this media entry
     - description: description for this media entry
     - license: license for this media entry
     - tags_string: comma separated string of tags to be associated
       with this entry
     - upload_limit: the per-user upload limit, in megabytes
     - max_file_size: the maximum size allowed for each uploaded file
     - callback_url: possible post-hook to call after submission
     - urlgen: if provided, used to do the feed_url update
    """
    if upload_limit and user.uploaded >= upload_limit:
        raise UserPastUploadLimit()

    # If the filename contains non-ASCII characters, generate a unique name
    if not all(ord(c) < 128 for c in filename):
        filename = unicode(uuid.uuid4()) + splitext(filename)[-1]

    # Sniff the submitted media to determine which
    # media plugin should handle processing
    media_type, media_manager = sniff_media(submitted_file, filename)

    # create entry and save in database
    entry = new_upload_entry(user)
    entry.media_type = media_type
    entry.title = (title or unicode(splitext(filename)[0]))

    entry.description = description or u""

    entry.license = license or None

    # Process the user's folksonomy "tags"
    entry.tags = convert_to_tag_list_of_dicts(tags_string)

    # Generate a slug from the title
    entry.generate_slug()

    queue_file = prepare_queue_task(mg_app, entry, filename)

    with queue_file:
        queue_file.write(submitted_file.read())

    # Get file size and round to 2 decimal places
    file_size = mg_app.queue_store.get_file_size(
        entry.queued_media_file) / (1024.0 * 1024)
    file_size = float('{0:.2f}'.format(file_size))

    # Check if file size is over the limit
    if max_file_size and file_size >= max_file_size:
        raise FileUploadLimit()

    # Check if user is over upload limit
    if upload_limit and (user.uploaded + file_size) >= upload_limit:
        raise UserUploadLimit()

    user.uploaded = user.uploaded + file_size
    user.save()

    entry.file_size = file_size

    # Save now so we have this data before kicking off processing
    entry.save()

    # Various "submit to stuff" things, callbackurl and this silly urlgen
    # thing
    if callback_url:
        metadata = ProcessingMetaData()
        metadata.media_entry = entry
        metadata.callback_url = callback_url
        metadata.save()

    if urlgen:
        feed_url = urlgen(
            'mediagoblin.user_pages.atom_feed',
            qualified=True, user=user.username)
    else:
        feed_url = None

    # Pass off to processing
    #
    # (... don't change entry after this point to avoid race
    # conditions with changes to the document via processing code)
    run_process_media(entry, feed_url)

    add_comment_subscription(user, entry)

    return entry
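
# A minimal usage sketch (not part of the original listing): how submit_media
# might be called from a script.  The `mg_app` object, `user` row, and the
# path below are assumptions for illustration only.
def example_submit_photo(mg_app, user):
    # Open the file on disk so the object has a real .name attribute, as the
    # docstring above asks for, and so submitted_file.read() returns the data.
    with open('/tmp/photo.jpg', 'rb') as submitted:
        return submit_media(
            mg_app, user, submitted, u'photo.jpg',
            title=u'A photo', description=u'', tags_string=u'travel, beach',
            upload_limit=None, max_file_size=None,
            urlgen=None)  # without urlgen, no feed_url update is done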
Example #10
def post_entry(request):
    _log.debug('Posting entry')

    if request.method == 'OPTIONS':
        return json_response({'status': 200})

    if request.method != 'POST':
        _log.debug('Must POST against post_entry')
        return exc.HTTPBadRequest()

    if 'file' not in request.files \
            or not isinstance(request.files['file'], FileStorage) \
            or not request.files['file'].stream:
        _log.debug('File field not found')
        return exc.HTTPBadRequest()

    media_file = request.files['file']

    media_type, media_manager = sniff_media(media_file)

    entry = request.db.MediaEntry()
    entry.id = ObjectId()
    entry.media_type = unicode(media_type)
    entry.title = unicode(request.form.get('title')
            or splitext(media_file.filename)[0])

    entry.description = unicode(request.form.get('description'))
    entry.license = unicode(request.form.get('license', ''))

    entry.uploader = request.user.id

    entry.generate_slug()

    task_id = unicode(uuid.uuid4())

    # Now generate the queueing-related filename
    queue_filepath = request.app.queue_store.get_unique_filepath(
        ['media_entries',
            task_id,
            secure_filename(media_file.filename)])

    # queue appropriately
    queue_file = request.app.queue_store.get_file(
        queue_filepath, 'wb')

    with queue_file:
        queue_file.write(request.files['file'].stream.read())

    # Add queued filename to the entry
    entry.queued_media_file = queue_filepath

    entry.queued_task_id = task_id

    # Save now so we have this data before kicking off processing
    entry.save(validate=True)

    if request.form.get('callback_url'):
        metadata = request.db.ProcessingMetaData()
        metadata.media_entry = entry
        metadata.callback_url = unicode(request.form['callback_url'])
        metadata.save()

    # Pass off to processing
    #
    # (... don't change entry after this point to avoid race
    # conditions with changes to the document via processing code)
    process_media = registry.tasks[ProcessMedia.name]
    try:
        process_media.apply_async(
            [unicode(entry._id)], {},
            task_id=task_id)
    except BaseException as e:
        # The purpose of this section is because when running in "lazy"
        # or always-eager-with-exceptions-propagated celery mode that
        # the failure handling won't happen on Celery end.  Since we
        # expect a lot of users to run things in this way we have to
        # capture stuff here.
        #
        # ... not completely the diaper pattern because the
        # exception is re-raised :)
        mark_entry_failed(entry._id, e)
        # re-raise the exception
        raise

    return json_response(get_entry_serializable(entry, request.urlgen))
def multi_submit_start(request):
    """
    First view for submitting a file.
    """
    submit_form = submit_forms.get_submit_start_form(
        request.form, license=request.user.license_preference)
    users_collections = Collection.query.filter_by(
        actor=request.user.id,
        type=Collection.USER_DEFINED_TYPE).order_by(Collection.title)

    if users_collections.count() > 0:
        submit_form.collection.query = users_collections
    else:
        del submit_form.collection


#  Below is what was used for mediagoblin 0.5.0-dev. Above is the new way.
#  submit_form = submit_forms.SubmitStartForm(request.form, license=request.user.license_preference)
    filecount = 0
    if request.method == 'POST' and submit_form.validate():
        if not check_file_field(request, 'file'):
            submit_form.file.errors.append(
                _(u'You must provide at least one file.'))
        else:
            for submitted_file in request.files.getlist('file'):
                try:
                    if not submitted_file.filename:
                        # MOST likely an invalid file
                        continue  # Skip the rest of the loop for this file
                    else:
                        filename = submitted_file.filename
                        _log.info("html5-multi-upload: Got filename: %s" %
                                  filename)

                        # If the filename contains non-ASCII characters, generate a unique name
                        if not all(ord(c) < 128 for c in filename):
                            filename = str(
                                uuid.uuid4()) + splitext(filename)[-1]

                        # Sniff the submitted media to determine which
                        # media plugin should handle processing
                        media_type, media_manager = sniff_media(
                            submitted_file, filename)

                        # create entry and save in database
                        entry = new_upload_entry(request.user)
                        entry.media_type = str(media_type)
                        entry.title = (str(submit_form.title.data) or str(
                            splitext(submitted_file.filename)[0]))

                        entry.description = str(submit_form.description.data)

                        entry.license = str(submit_form.license.data) or None

                        # Process the user's folksonomy "tags"
                        entry.tags = convert_to_tag_list_of_dicts(
                            submit_form.tags.data)

                        # Generate a slug from the title
                        entry.generate_slug()

                        queue_file = prepare_queue_task(
                            request.app, entry, filename)

                        with queue_file:
                            queue_file.write(submitted_file.stream.read())

                        # Save now so we have this data before kicking off processing
                        entry.save()

                        # Pass off to async processing
                        #
                        # (... don't change entry after this point to avoid race
                        # conditions with changes to the document via processing code)
                        feed_url = request.urlgen(
                            'mediagoblin.user_pages.atom_feed',
                            qualified=True,
                            user=request.user.username)
                        run_process_media(entry, feed_url)
                        if submit_form.collection and submit_form.collection.data:
                            add_media_to_collection(
                                submit_form.collection.data, entry)
                            create_activity("add",
                                            entry,
                                            request.user,
                                            target=submit_form.collection.data)

                        add_comment_subscription(request.user, entry)
                        filecount = filecount + 1

                except Exception as e:
                    '''
                    This section is intended to catch exceptions raised in
                    mediagoblin.media_types
                    '''
                    if isinstance(e, TypeNotFound) or isinstance(
                            e, FileTypeNotSupported):
                        submit_form.file.errors.append(e)
                    else:
                        raise

            add_message(request, SUCCESS,
                        _('Woohoo! Submitted %d Files!') % filecount)
            return redirect(request,
                            "mediagoblin.user_pages.user_home",
                            user=request.user.username)

    return render_to_response(request, 'start.html',
                              {'multi_submit_form': submit_form})
Example #12
def submit_media(
    mg_app,
    user,
    submitted_file,
    filename,
    title=None,
    description=None,
    collection_slug=None,
    license=None,
    metadata=None,
    tags_string=u"",
    callback_url=None,
    urlgen=None,
):
    """
    Args:
     - mg_app: The MediaGoblinApp instantiated for this process
     - user: the user object this media entry should be associated with
     - submitted_file: the file-like object that has the
       being-submitted file data in it (this object should really have
       a .name attribute which is the filename on disk!)
     - filename: the *original* filename of this file.  Not necessarily the
       one on disk being referenced by submitted_file.
     - title: title for this media entry
     - description: description for this media entry
     - collection_slug: collection for this media entry
     - license: license for this media entry
     - tags_string: comma separated string of tags to be associated
       with this entry
     - callback_url: possible post-hook to call after submission
     - urlgen: if provided, used to do the feed_url update and assign a public
               ID used in the API (very important).
    """
    upload_limit, max_file_size = get_upload_file_limits(user)
    if upload_limit and user.uploaded >= upload_limit:
        raise UserPastUploadLimit()

    # If the filename contains non-ASCII characters, generate a unique name
    if not all(ord(c) < 128 for c in filename):
        filename = six.text_type(uuid.uuid4()) + splitext(filename)[-1]

    # Sniff the submitted media to determine which
    # media plugin should handle processing
    media_type, media_manager = sniff_media(submitted_file, filename)

    # create entry and save in database
    entry = new_upload_entry(user)
    entry.media_type = media_type
    entry.title = (title or six.text_type(splitext(filename)[0]))

    entry.description = description or u""

    entry.license = license or None

    entry.media_metadata = metadata or {}

    # Process the user's folksonomy "tags"
    entry.tags = convert_to_tag_list_of_dicts(tags_string)

    # Generate a slug from the title
    entry.generate_slug()

    queue_file = prepare_queue_task(mg_app, entry, filename)

    with queue_file:
        queue_file.write(submitted_file)

    # Get file size and round to 2 decimal places
    file_size = mg_app.queue_store.get_file_size(
        entry.queued_media_file) / (1024.0 * 1024)
    file_size = float('{0:.2f}'.format(file_size))

    # Check if file size is over the limit
    if max_file_size and file_size >= max_file_size:
        raise FileUploadLimit()

    # Check if user is over upload limit
    if upload_limit and (user.uploaded + file_size) >= upload_limit:
        raise UserUploadLimit()

    user.uploaded = user.uploaded + file_size
    user.save()

    entry.file_size = file_size

    # Save now so we have this data before kicking off processing
    entry.save()

    # Various "submit to stuff" things, callbackurl and this silly urlgen
    # thing
    if callback_url:
        metadata = ProcessingMetaData()
        metadata.media_entry = entry
        metadata.callback_url = callback_url
        metadata.save()

    if urlgen:
        # Generate the public_id; this is very important, especially relating
        # to deletion, as it allows the shell to be accessible post-delete!
        entry.get_public_id(urlgen)

        # Generate the feed URL
        feed_url = urlgen('mediagoblin.user_pages.atom_feed',
                          qualified=True,
                          user=user.username)
    else:
        feed_url = None

    add_comment_subscription(user, entry)

    # Create activity
    create_activity("post", entry, entry.actor)
    entry.save()

    # add to collection
    if collection_slug:
        collection = Collection.query.filter_by(slug=collection_slug,
                                                actor=user.id).first()
        if collection:
            add_media_to_collection(collection, entry)

    # Pass off to processing
    #
    # (... don't change entry after this point to avoid race
    # conditions with changes to the document via processing code)
    run_process_media(entry, feed_url)

    return entry
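
# A similarly hedged sketch for this newer submit_media variant (not in the
# original listing): note that here queue_file.write(submitted_file) is called
# directly, so raw bytes are passed rather than a file-like object, and the
# upload limits come from get_upload_file_limits() instead of arguments.  The
# slug and the photo_bytes argument below are illustrative assumptions.
def example_submit_to_collection(mg_app, user, photo_bytes):
    return submit_media(
        mg_app, user, photo_bytes, u'photo.jpg',
        title=u'A photo', collection_slug=u'holiday-snaps',
        tags_string=u'travel, beach')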
def submit_start(request):
    """
    First view for submitting a file.
    """
    submit_form = submit_forms.SubmitStartForm(request.form)

    if request.method == 'POST' and submit_form.validate():
        if not ('file' in request.files
                and isinstance(request.files['file'], FileStorage)
                and request.files['file'].stream):
            submit_form.file.errors.append(
                _(u'You must provide a file.'))
        else:
            try:
                filename = request.files['file'].filename

                # Sniff the submitted media to determine which
                # media plugin should handle processing
                media_type, media_manager = sniff_media(
                    request.files['file'])

                # create entry and save in database
                entry = request.db.MediaEntry()
                entry.media_type = unicode(media_type)
                entry.title = (
                    unicode(request.form['title'])
                    or unicode(splitext(filename)[0]))

                entry.description = unicode(request.form.get('description'))

                entry.license = unicode(request.form.get('license', "")) or None

                entry.uploader = request.user.id

                # Process the user's folksonomy "tags"
                entry.tags = convert_to_tag_list_of_dicts(
                    request.form.get('tags'))

                # Generate a slug from the title
                entry.generate_slug()

                queue_file = prepare_queue_task(request.app, entry, filename)

                with queue_file:
                    queue_file.write(request.files['file'].stream.read())

                # Save now so we have this data before kicking off processing
                entry.save()

                # Pass off to processing
                #
                # (... don't change entry after this point to avoid race
                # conditions with changes to the document via processing code)
                run_process_media(entry)

                handle_push_urls(request)

                add_message(request, SUCCESS, _('Woohoo! Submitted!'))

                return redirect(request, "mediagoblin.user_pages.user_home",
                                user=request.user.username)
            except Exception as e:
                '''
                This section is intended to catch exceptions raised in
                mediagoblin.media_types
                '''
                if isinstance(e, InvalidFileType) or \
                        isinstance(e, FileTypeNotSupported):
                    submit_form.file.errors.append(
                        e)
                else:
                    raise

    return render_to_response(
        request,
        'mediagoblin/submit/start.html',
        {'submit_form': submit_form,
         'app_config': mg_globals.app_config})
Example #14
def submit_start(request):
    """
    First view for submitting a file.
    """
    submit_form = submit_forms.SubmitStartForm(request.form,
        license=request.user.license_preference)

    if request.method == 'POST' and submit_form.validate():
        if not check_file_field(request, 'file'):
            submit_form.file.errors.append(
                _(u'You must provide a file.'))
        else:
            try:
                filename = request.files['file'].filename

                # If the filename contains non-ASCII characters, generate a unique name
                if not all(ord(c) < 128 for c in filename):
                    filename = unicode(uuid.uuid4()) + splitext(filename)[-1]

                # Sniff the submitted media to determine which
                # media plugin should handle processing
                media_type, media_manager = sniff_media(
                    request.files['file'])

                # create entry and save in database
                entry = new_upload_entry(request.user)
                entry.media_type = unicode(media_type)
                entry.title = (
                    unicode(submit_form.title.data)
                    or unicode(splitext(request.files['file'].filename)[0]))

                entry.description = unicode(submit_form.description.data)

                entry.license = unicode(submit_form.license.data) or None

                # Process the user's folksonomy "tags"
                entry.tags = convert_to_tag_list_of_dicts(
                    submit_form.tags.data)

                # Generate a slug from the title
                entry.generate_slug()

                queue_file = prepare_queue_task(request.app, entry, filename)

                with queue_file:
                    queue_file.write(request.files['file'].stream.read())

                # Save now so we have this data before kicking off processing
                entry.save()

                # Pass off to async processing
                #
                # (... don't change entry after this point to avoid race
                # conditions with changes to the document via processing code)
                feed_url = request.urlgen(
                    'mediagoblin.user_pages.atom_feed',
                    qualified=True, user=request.user.username)
                run_process_media(entry, feed_url)

                add_message(request, SUCCESS, _('Woohoo! Submitted!'))

                add_comment_subscription(request.user, entry)

                return redirect(request, "mediagoblin.user_pages.user_home",
                                user=request.user.username)
            except Exception as e:
                '''
                This section is intended to catch exceptions raised in
                mediagoblin.media_types
                '''
                if isinstance(e, InvalidFileType) or \
                        isinstance(e, FileTypeNotSupported):
                    submit_form.file.errors.append(
                        e)
                else:
                    raise

    return render_to_response(
        request,
        'mediagoblin/submit/start.html',
        {'submit_form': submit_form,
         'app_config': mg_globals.app_config})
def multi_submit_start(request):
  """
  First view for submitting a file.
  """
  submit_form = submit_forms.get_submit_start_form(request.form, license=request.user.license_preference)
  users_collections = Collection.query.filter_by(
    actor=request.user.id,
    type=Collection.USER_DEFINED_TYPE
  ).order_by(Collection.title)

  if users_collections.count() > 0:
    submit_form.collection.query = users_collections
  else:
    del submit_form.collection

#  Below is what was used for mediagoblin 0.5.0-dev. Above is the new way.
#  submit_form = submit_forms.SubmitStartForm(request.form, license=request.user.license_preference)
  filecount = 0
  if request.method == 'POST' and submit_form.validate():
    if not check_file_field(request, 'file'):
      submit_form.file.errors.append(_(u'You must provide at least one file.'))
    else:
      for submitted_file in request.files.getlist('file'):
        try:
          if not submitted_file.filename:
            # MOST likely an invalid file
            continue # Skip the rest of the loop for this file
          else:
            filename = submitted_file.filename
            _log.info("html5-multi-upload: Got filename: %s" % filename)

            # If the filename contains non-ASCII characters, generate a unique name
            if not all(ord(c) < 128 for c in filename):
              filename = unicode(uuid.uuid4()) + splitext(filename)[-1]

            # Sniff the submitted media to determine which
            # media plugin should handle processing
            media_type, media_manager = sniff_media(
              submitted_file, filename)

            # create entry and save in database
            entry = new_upload_entry(request.user)
            entry.media_type = unicode(media_type)
            entry.title = (
              unicode(submit_form.title.data)
              or unicode(splitext(submitted_file.filename)[0]))

            entry.description = unicode(submit_form.description.data)

            entry.license = unicode(submit_form.license.data) or None

            # Process the user's folksonomy "tags"
            entry.tags = convert_to_tag_list_of_dicts(
              submit_form.tags.data)

            # Generate a slug from the title
            entry.generate_slug()

            queue_file = prepare_queue_task(request.app, entry, filename)

            with queue_file:
              queue_file.write(submitted_file.stream.read())

            # Save now so we have this data before kicking off processing
            entry.save()

            # Pass off to async processing
            #
            # (... don't change entry after this point to avoid race
            # conditions with changes to the document via processing code)
            feed_url = request.urlgen(
              'mediagoblin.user_pages.atom_feed',
              qualified=True, user=request.user.username)
            run_process_media(entry, feed_url)
            if submit_form.collection and submit_form.collection.data:
              add_media_to_collection(
                submit_form.collection.data, entry)
              create_activity(
                "add", entry, request.user,
                target=submit_form.collection.data)

            add_comment_subscription(request.user, entry)
            filecount = filecount + 1

        except Exception as e:
          '''
          This section is intended to catch exceptions raised in
          mediagoblin.media_types
          '''
          if isinstance(e, TypeNotFound) or isinstance(e, FileTypeNotSupported):
            submit_form.file.errors.append(e)
          else:
            raise

      add_message(request, SUCCESS, _('Woohoo! Submitted %d Files!') % filecount)
      return redirect(request, "mediagoblin.user_pages.user_home",
              user=request.user.username)

  return render_to_response(
    request,
    'start.html',
    {'multi_submit_form': submit_form})
Example #16
def submit_start(request):
    """
    First view for submitting a file.
    """
    submit_form = submit_forms.SubmitStartForm(request.form,
        license=request.user.license_preference)

    if request.method == 'POST' and submit_form.validate():
        if not check_file_field(request, 'file'):
            submit_form.file.errors.append(
                _(u'You must provide a file.'))
        else:
            try:
                filename = request.files['file'].filename

                # If the filename contains non-ASCII characters, generate a unique name
                if not all(ord(c) < 128 for c in filename):
                    filename = unicode(uuid.uuid4()) + splitext(filename)[-1]


                # Sniff the submitted media to determine which
                # media plugin should handle processing
                # I've modified ../media_types/__init__.py so that a zip file
                # can pass the checking system; it will be seen as an image file
                media_type, media_manager = sniff_media(
                    request.files['file'])

                # Read the zip file and process each photo with its metadata
                zf = zipfile.ZipFile(request.files['file'], 'r')
                for name in zf.namelist():
                    try:
                        data = zf.read(name)
                        if imghdr.what(name, data):
                            metadata = ElementTree.ElementTree(
                                ElementTree.fromstring(
                                    zf.read(name.split(".")[0] + '.xml')))

                            # Fetch the data in the metadata that matches the
                            # format used by mediagoblin.  The checking is not
                            # yet complete; some data may not exist.
                            img_title = metadata.find('title').text
                            img_description = metadata.find('description').text
                            img_tags = ''
                            for tag in metadata.find('tags'):
                                img_tags = img_tags + ', ' + tag.text

                            upload_data = data
                            upload_filename = name.lstrip('dst/')

                            # create entry and save in database
                            entry = new_upload_entry(request.user)
                            entry.media_type = unicode(media_type)

                            # (was: submit_form.title.data or the filename)
                            entry.title = unicode(img_title)

                            # (was: submit_form.description.data)
                            entry.description = unicode(img_description)

                            # (was: submit_form.license.data or None)
                            entry.license = unicode(
                                'http://creativecommons.org/publicdomain/mark/1.0/')

                            # Process the user's folksonomy "tags"
                            entry.tags = convert_to_tag_list_of_dicts(img_tags)

                            # Generate a slug from the title
                            entry.generate_slug()

                            # (was: the submitted file's own filename)
                            queue_file = prepare_queue_task(
                                request.app, entry, upload_filename)

                            with queue_file:
                                # (was: request.files['file'].stream.read())
                                queue_file.write(upload_data)

                            # Save now so we have this data before kicking
                            # off processing
                            entry.save()

                            feed_url = request.urlgen(
                                'mediagoblin.user_pages.atom_feed',
                                qualified=True, user=request.user.username)
                            run_process_media(entry, feed_url)
                    except KeyError:
                        print 'ERROR: Did not find %s in zip file' % name

                # Pass off to processing
                #
                # (... don't change entry after this point to avoid race
                # conditions with changes to the document via processing code)
                add_message(request, SUCCESS, _('Woohoo! Submitted!'))

                add_comment_subscription(request.user, entry)

                return redirect(request, "mediagoblin.user_pages.user_home",
                                user=request.user.username)
            except Exception as e:
                '''
                This section is intended to catch exceptions raised in
                mediagoblin.media_types
                '''
                if isinstance(e, InvalidFileType) or \
                        isinstance(e, FileTypeNotSupported):
                    submit_form.file.errors.append(
                        e)
                else:
                    raise

    return render_to_response(
        request,
        'mediagoblin/import_flickr/start.html',
        {'submit_form': submit_form,
         'app_config': mg_globals.app_config})
Example #17
def submit_start(request):
    """
    First view for submitting a file.
    """
    submit_form = submit_forms.SubmitStartForm(request.form)

    if request.method == 'POST' and submit_form.validate():
        if not ('file' in request.files
                and isinstance(request.files['file'], FileStorage)
                and request.files['file'].stream):
            submit_form.file.errors.append(
                _(u'You must provide a file.'))
        else:
            try:
                filename = request.files['file'].filename

                # Sniff the submitted media to determine which
                # media plugin should handle processing
                media_type, media_manager = sniff_media(
                    request.files['file'])

                # create entry and save in database
                entry = request.db.MediaEntry()
                entry.id = ObjectId()
                entry.media_type = unicode(media_type)
                entry.title = (
                    unicode(request.form['title'])
                    or unicode(splitext(filename)[0]))

                entry.description = unicode(request.form.get('description'))

                entry.license = unicode(request.form.get('license', "")) or None

                entry.uploader = request.user._id

                # Process the user's folksonomy "tags"
                entry.tags = convert_to_tag_list_of_dicts(
                    request.form.get('tags'))

                # Generate a slug from the title
                entry.generate_slug()

                # We generate this ourselves so we know what the task id is for
                # retrieval later.

                # (If we got it off the task's auto-generation, there'd be
                # a risk of a race condition when we'd save after sending
                # off the task)
                task_id = unicode(uuid.uuid4())

                # Now generate the queueing-related filename
                queue_filepath = request.app.queue_store.get_unique_filepath(
                    ['media_entries',
                     task_id,
                     secure_filename(filename)])

                # queue appropriately
                queue_file = request.app.queue_store.get_file(
                    queue_filepath, 'wb')

                with queue_file:
                    queue_file.write(request.files['file'].stream.read())

                # Add queued filename to the entry
                entry.queued_media_file = queue_filepath

                entry.queued_task_id = task_id

                # Save now so we have this data before kicking off processing
                entry.save(validate=True)

                # Pass off to processing
                #
                # (... don't change entry after this point to avoid race
                # conditions with changes to the document via processing code)
                process_media = registry.tasks[ProcessMedia.name]
                try:
                    process_media.apply_async(
                        [unicode(entry._id)], {},
                        task_id=task_id)
                except BaseException as exc:
                    # The purpose of this section is because when running in "lazy"
                    # or always-eager-with-exceptions-propagated celery mode that
                    # the failure handling won't happen on Celery end.  Since we
                    # expect a lot of users to run things in this way we have to
                    # capture stuff here.
                    #
                    # ... not completely the diaper pattern because the
                    # exception is re-raised :)
                    mark_entry_failed(entry._id, exc)
                    # re-raise the exception
                    raise

                if mg_globals.app_config["push_urls"]:
                    feed_url = request.urlgen(
                                       'mediagoblin.user_pages.atom_feed',
                                       qualified=True,
                                       user=request.user.username)
                    hubparameters = {
                        'hub.mode': 'publish',
                        'hub.url': feed_url}
                    hubdata = urllib.urlencode(hubparameters)
                    hubheaders = {
                        "Content-type": "application/x-www-form-urlencoded",
                        "Connection": "close"}
                    for huburl in mg_globals.app_config["push_urls"]:
                        hubrequest = urllib2.Request(huburl, hubdata, hubheaders)
                        try:
                            hubresponse = urllib2.urlopen(hubrequest)
                        except urllib2.HTTPError as exc:
                            # This is not a big issue, the item will be fetched
                            # by the PuSH server next time we hit it
                            _log.warning(
                                "push url %r gave error %r", huburl, exc.code)
                        except urllib2.URLError as exc:
                            _log.warning(
                                "push url %r is unreachable %r", huburl, exc.reason)

                add_message(request, SUCCESS, _('Woohoo! Submitted!'))

                return redirect(request, "mediagoblin.user_pages.user_home",
                                user=request.user.username)
            except Exception as e:
                '''
                This section is intended to catch exceptions raised in
                mediagoblin.media_types
                '''
                if isinstance(e, InvalidFileType) or \
                        isinstance(e, FileTypeNotSupported):
                    submit_form.file.errors.append(
                        e)
                else:
                    raise

    return render_to_response(
        request,
        'mediagoblin/submit/start.html',
        {'submit_form': submit_form,
         'app_config': mg_globals.app_config})