Code example #1
File: api.py  Project: garbas/mozilla-releng
def download_file(digest, region):
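    # attach per-file context to the logger (a structlog-style bind)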
    log = logger.bind(tooltool_sha512=digest, tooltool_operation='download')
    if not is_valid_sha512(digest):
        raise BadRequest("Invalid sha512 digest")

    # see where the file is stored
    file_row = File.query.filter(File.sha512 == digest).first()
    if not file_row or not file_row.instances:
        raise NotFound

    # check visibility
    allow_pub_dl = current_app.config.get('TOOLTOOL_ALLOW_ANONYMOUS_PUBLIC_DOWNLOAD')  # noqa
    if file_row.visibility != 'public' or not allow_pub_dl:
        # TODO: check scope
        # if not p.get('tooltool.download.{}'.format(file_row.visibility)).can():  # noqa
        #     raise Forbidden
        pass

    # figure out which region to use, and from there which bucket
    cfg = current_app.config['TOOLTOOL_REGIONS']
    selected_region = None
    for inst in file_row.instances:
        if inst.region == region:
            selected_region = inst.region
            break
    else:
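        # (a for/else runs this branch only when the loop exited without
        # hitting `break`)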
        # preferred region not found, so pick one from the available set
        selected_region = random.choice(
            [inst.region for inst in file_row.instances])
    bucket = cfg[selected_region]

    key = keyname(digest)

    s3 = current_app.aws.connect_to('s3', selected_region)
    log.info("generating signed S3 GET URL for {}.. expiring in {}s".format(
        digest[:10], GET_EXPIRES_IN))
    signed_url = s3.generate_url(
        method='GET', expires_in=GET_EXPIRES_IN, bucket=bucket, key=key)

    return redirect(signed_url)
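
Both examples call two small helpers defined elsewhere in api.py, is_valid_sha512 and keyname, which this excerpt omits. A minimal sketch of what they might look like, assuming plain hex-digest validation and a digest-based key layout (the project's actual key scheme may differ):

import re

SHA512_RE = re.compile(r'^[0-9a-f]{128}$')

def is_valid_sha512(digest):
    # a SHA-512 hex digest is exactly 128 lowercase hex characters
    return bool(SHA512_RE.match(digest))

def keyname(digest):
    # hypothetical layout: objects are stored under a 'sha512/' prefix,
    # named by their digest; the real api.py may use a different scheme
    return 'sha512/' + digest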
Code example #2
File: api.py  Project: garbas/mozilla-releng
def upload_batch(region, body):
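    # resolve the requested region to a concrete region and its configured
    # S3 bucket (helper defined elsewhere in api.py)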
    region, bucket = get_region_and_bucket(region)

    if not body.message:
        raise BadRequest("message must be non-empty")

    if not body.files:
        raise BadRequest("a batch must include at least one file")

    if body.author:
        raise BadRequest("Author must not be specified for upload")
    try:
        body.author = current_user.authenticated_email
    except AttributeError:
        # no authenticated_email -> use the stringified user (probably a token
        # ID)
        body.author = str(current_user)

    # verify permissions based on visibilities
    visibilities = set(f.visibility for f in body.files.itervalues())
    for visibility in visibilities:
        # TODO: check for scope with visibility
        # prm = p.get('tooltool.upload.{}'.format(visibility))
        # if not prm or not prm.can():
        #     raise Forbidden(
        #         "no permission to upload {} files".format(visibility))
        pass

    session = g.db.session

    batch = Batch(
        uploaded=now(),
        author=body.author,
        message=body.message,
    )

    aws = AWS(current_app.config.get('AWS', {}))
    s3 = aws.connect_to('s3', region)

    for filename, info in body.files.iteritems():
        log = logger.bind(
            tooltool_sha512=info.digest,
            tooltool_operation='upload',
            tooltool_batch_id=batch.id,
            mozdef=True,
        )
        if info.algorithm != 'sha512':
            raise BadRequest("'sha512' is the only allowed digest algorithm")
        if not is_valid_sha512(info.digest):
            raise BadRequest("Invalid sha512 digest")
        digest = info.digest
        file = File.query.filter(File.sha512 == digest).first()
        if file and file.visibility != info.visibility:
            raise BadRequest("Cannot change a file's visibility level")
        if file and file.instances:
            if file.size != info.size:
                raise BadRequest("Size mismatch for {}".format(filename))
        else:
            if not file:
                file = File(
                    sha512=digest,
                    visibility=info.visibility,
                    size=info.size)
                session.add(file)
            log.info(
                "generating signed S3 PUT URL to {} for {}; expiring in "
                "{}s".format(
                    info.digest[:10],
                    current_user,
                    UPLOAD_EXPIRES_IN,
                )
            )
            info.put_url = s3.generate_url(
                method='PUT', expires_in=UPLOAD_EXPIRES_IN, bucket=bucket,
                key=keyname(info.digest),
                headers={'Content-Type': 'application/octet-stream'})

            # The PendingUpload row needs to reflect the updated expiration
            # time, even if there's an existing pending upload that expires
            # earlier.  The `merge` method does a SELECT and then either
            # UPDATEs or INSERTs the row.  However, merge needs the file_id,
            # rather than just a reference to the file object; and for that, we
            # need to flush the inserted file.
            session.flush()
            expires = now() + datetime.timedelta(seconds=UPLOAD_EXPIRES_IN)
            pu = PendingUpload(
                file_id=file.id,
                region=region,
                expires=expires,
            )
            session.merge(pu)

        session.add(BatchFile(filename=filename, file=file, batch=batch))

    session.add(batch)
    session.commit()

    body.id = batch.id
    return body
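
The returned body includes a signed put_url for each file that still needs its contents uploaded; the client is expected to PUT the bytes to that URL before it expires. A hypothetical client-side sketch using requests (upload_files, local_paths, and the dict-style field access are illustrative assumptions, not part of the project):

import requests

def upload_files(batch_body, local_paths):
    # batch_body: the response returned by upload_batch
    # local_paths: maps each batch filename to a path on disk
    for filename, info in batch_body['files'].items():
        put_url = info.get('put_url')
        if not put_url:
            # file content already known to the server; nothing to upload
            continue
        with open(local_paths[filename], 'rb') as f:
            resp = requests.put(
                put_url,
                data=f,
                # must match the Content-Type the URL was signed with
                headers={'Content-Type': 'application/octet-stream'},
            )
        resp.raise_for_status()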