Example #1
File: file.py  Project: mmmika/encoded
def post_upload(context, request):
    properties = context.upgrade_properties()
    if properties['status'] not in ('uploading', 'upload failed'):
        raise HTTPForbidden('status must be "uploading" to issue new credentials')

    accession_or_external = properties.get('accession') or properties['external_accession']
    file_upload_bucket = request.registry.settings['file_upload_bucket']
    external = context.propsheets.get('external', None)
    registry = request.registry
    if external is None:
        # Handle objects initially posted as another state.
        bucket = file_upload_bucket
        uuid = context.uuid
        mapping = context.schema['file_format_file_extension']
        file_extension = mapping[properties['file_format']]
        date = properties['date_created'].split('T')[0].replace('-', '/')
        key = '{date}/{uuid}/{accession_or_external}{file_extension}'.format(
            accession_or_external=accession_or_external,
            date=date, file_extension=file_extension, uuid=uuid, **properties)
    elif external.get('service') == 's3':
        bucket = external['bucket']
        # Must reset file to point to file_upload_bucket (keep AWS public dataset in sync).
        if bucket != file_upload_bucket:
            registry.notify(BeforeModified(context, request))
            context._set_external_sheet({'bucket': file_upload_bucket})
            registry.notify(AfterModified(context, request))
            bucket = file_upload_bucket
        key = external['key']
    else:
        raise HTTPNotFound(
            detail='External service {} not expected'.format(external.get('service'))
        )

    name = 'up{time:.6f}-{accession_or_external}'.format(
        accession_or_external=accession_or_external,
        time=time.time(), **properties)[:32]  # max 32 chars
    profile_name = request.registry.settings.get('file_upload_profile_name')
    upload_creds = UploadCredentials(bucket, key, name, profile_name=profile_name)
    s3_transfer_allow = request.registry.settings.get('external_aws_s3_transfer_allow', 'false')
    creds = upload_creds.external_creds(
        s3_transfer_allow=asbool(s3_transfer_allow),
        s3_transfer_buckets=request.registry.settings.get('external_aws_s3_transfer_buckets'),
    )
    new_properties = None
    if properties['status'] == 'upload failed':
        new_properties = properties.copy()
        new_properties['status'] = 'uploading'

    registry.notify(BeforeModified(context, request))
    context.update(new_properties, {'external': creds})
    registry.notify(AfterModified(context, request))

    rendered = request.embed('/%s/@@object' % context.uuid, as_user=True)
    result = {
        'status': 'success',
        '@type': ['result'],
        '@graph': [rendered],
    }
    return result
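
The upload key above is assembled from the creation date, the item UUID, and the accession plus the schema-mapped file extension. Below is a minimal standalone sketch of that key construction; the extension mapping and the sample properties are made-up illustrations, not the project's real data.

# Sketch: reproduce the S3 upload-key layout used in post_upload above.
# FILE_FORMAT_EXTENSIONS and the sample properties are illustrative only.
FILE_FORMAT_EXTENSIONS = {'fastq': '.fastq.gz', 'bam': '.bam'}

def build_upload_key(properties, uuid):
    file_extension = FILE_FORMAT_EXTENSIONS[properties['file_format']]
    # '2021-03-15T12:00:00...' -> '2021/03/15'
    date = properties['date_created'].split('T')[0].replace('-', '/')
    accession = properties.get('accession') or properties['external_accession']
    return '{date}/{uuid}/{accession}{ext}'.format(
        date=date, uuid=uuid, accession=accession, ext=file_extension)

# Example: prints '2021/03/15/0f3cb59c-.../ENCFF123ABC.fastq.gz'
print(build_upload_key(
    {'file_format': 'fastq',
     'date_created': '2021-03-15T12:00:00.000000+00:00',
     'accession': 'ENCFF123ABC'},
    '0f3cb59c-0000-4b9a-9f0b-000000000000'))
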
Example #2
File: file.py  Project: mmmika/encoded
def file_update_bucket(context, request):
    new_bucket = request.json_body.get('new_bucket')
    if not new_bucket:
        raise ValidationFailure('body', ['bucket'], 'New bucket not specified')
    force = asbool(request.params.get('force'))
    known_buckets = [
        request.registry.settings['file_upload_bucket'],
        request.registry.settings['pds_public_bucket'],
        request.registry.settings['pds_private_bucket'],
    ]
    # Try to validate input to a known bucket.
    if new_bucket not in known_buckets and not force:
        raise ValidationFailure('body', ['bucket'], 'Unknown bucket and force not specified')
    current_bucket = context._get_external_sheet().get('bucket')
    # Don't bother setting if already the same.
    if current_bucket != new_bucket:
        request.registry.notify(BeforeModified(context, request))
        context._set_external_sheet({'bucket': new_bucket})
        request.registry.notify(AfterModified(context, request))
    return {
        'status': 'success',
        '@type': ['result'],
        'old_bucket': current_bucket,
        'new_bucket': new_bucket
    }
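
The bucket change in example #2 is only accepted for a bucket listed in the registry settings unless `force=true` is passed. Here is a self-contained sketch of just that accept/reject decision; the bucket names are placeholders, not the project's actual settings.

# Sketch: the validation decision from file_update_bucket, isolated.
# Bucket names are placeholders for the registry settings.
KNOWN_BUCKETS = ['encoded-files', 'pds-public-files', 'pds-private-files']

def bucket_change_allowed(new_bucket, force=False):
    if not new_bucket:
        return False, 'New bucket not specified'
    if new_bucket not in KNOWN_BUCKETS and not force:
        return False, 'Unknown bucket and force not specified'
    return True, None

print(bucket_change_allowed('encoded-files'))                   # (True, None)
print(bucket_change_allowed('some-other-bucket'))               # (False, 'Unknown bucket ...')
print(bucket_change_allowed('some-other-bucket', force=True))   # (True, None)
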
Example #3
def _update_status(self,
                   new_status,
                   current_status,
                   current_properties,
                   schema,
                   request,
                   item_id,
                   update,
                   validate=True):
    new_properties = current_properties.copy()
    new_properties['status'] = new_status
    # Some release specific functionality.
    if new_status == 'released':
        # This won't be reassigned if you rerelease something.
        if ('date_released' in schema['properties']
                and 'date_released' not in new_properties):
            new_properties['date_released'] = str(datetime.now().date())
    if validate:
        self._validate_set_status_patch(request, schema, new_properties,
                                        current_properties)
    # Don't update if update parameter not true.
    if not update:
        return
    # Don't actually patch if the same.
    if new_status == current_status:
        return
    request.registry.notify(BeforeModified(self, request))
    self.update(new_properties)
    request.registry.notify(AfterModified(self, request))
    request._set_status_changed_paths.add(
        (item_id, current_status, new_status))
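
`_update_status` stamps `date_released` only the first time an item is released and leaves it untouched on a re-release. A small sketch of just that property-update step, with the request/registry plumbing removed and an illustrative schema:

from datetime import datetime

# Sketch: the property changes _update_status computes; schema and
# properties below are illustrative examples.
def next_properties(new_status, current_properties, schema):
    new_properties = current_properties.copy()
    new_properties['status'] = new_status
    if new_status == 'released':
        # Only set on the first release; re-releasing keeps the old date.
        if ('date_released' in schema['properties']
                and 'date_released' not in new_properties):
            new_properties['date_released'] = str(datetime.now().date())
    return new_properties

schema = {'properties': {'status': {}, 'date_released': {}}}
print(next_properties('released', {'status': 'in progress'}, schema))
# e.g. {'status': 'released', 'date_released': '2024-01-01'}
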
Example #4
def post_upload(context, request):
    properties = context.upgrade_properties()
    if properties['status'] not in ('uploading', 'upload failed'):
        raise HTTPForbidden(
            'status must be "uploading" to issue new credentials')

    accession_or_external = properties.get(
        'accession') or properties['external_accession']
    external = context.propsheets.get('external', None)

    if external is None:
        # Handle objects initially posted as another state.
        bucket = request.registry.settings['file_upload_bucket']
        uuid = context.uuid
        mapping = context.schema['file_format_file_extension']
        file_extension = mapping[properties['file_format']]
        date = properties['date_created'].split('T')[0].replace('-', '/')
        key = '{date}/{uuid}/{accession_or_external}{file_extension}'.format(
            accession_or_external=accession_or_external,
            date=date,
            file_extension=file_extension,
            uuid=uuid,
            **properties)
    elif external.get('service') == 's3':
        bucket = external['bucket']
        key = external['key']
    else:
        raise ValueError(external.get('service'))

    name = 'up{time:.6f}-{accession_or_external}'.format(
        accession_or_external=accession_or_external,
        time=time.time(),
        **properties)[:32]  # max 32 chars
    profile_name = request.registry.settings.get('file_upload_profile_name')
    creds = external_creds(bucket, key, name, profile_name)

    new_properties = None
    if properties['status'] == 'upload failed':
        new_properties = properties.copy()
        new_properties['status'] = 'uploading'

    registry = request.registry
    registry.notify(BeforeModified(context, request))
    context.update(new_properties, {'external': creds})
    registry.notify(AfterModified(context, request))

    rendered = request.embed('/%s/@@object' % context.uuid, as_user=True)
    result = {
        'status': 'success',
        '@type': ['result'],
        '@graph': [rendered],
    }
    return result
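
Both versions of post_upload derive the credential name from the current time and the accession, truncated to 32 characters per the "# max 32 chars" comment in the examples. A standalone sketch of that name construction; the accession is a made-up example.

import time

# Sketch: the credential-name construction shared by both post_upload
# versions above; the accession is an illustrative value.
def upload_credential_name(accession_or_external):
    name = 'up{time:.6f}-{accession}'.format(
        time=time.time(), accession=accession_or_external)
    return name[:32]  # max 32 chars, as in the examples above

print(upload_credential_name('ENCFF123ABC'))
# e.g. 'up1700000000.123456-ENCFF123ABC'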