Example #1
def permissions(self):
    token_permissions = [p.get(permissionstr)
                         for permissionstr in self._permissions.split(',')]
    # silently ignore any nonexistent permissions; this allows us to remove
    # unused permissions without causing tokens that grant those permissions
    # to fail completely
    return [a for a in token_permissions if a]
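A minimal, self-contained sketch of the lookup-and-filter pattern above, assuming ``p`` is a permission registry whose ``get()`` returns None for unknown names (the ``Registry`` class below is illustrative, not the real API):

class Registry(object):
    """Illustrative stand-in for the permission registry `p`."""

    def __init__(self):
        self._known = {}

    def add(self, name):
        # a real registry stores Permission objects; strings suffice here
        self._known[name] = name

    def get(self, name):
        # returns None for names that are no longer registered
        return self._known.get(name)

p = Registry()
p.add('base.tokens.prm.view')

# a token whose stored string names a since-removed permission
stored = 'base.tokens.prm.view,some.removed.permission'
token_permissions = [p.get(s) for s in stored.split(',')]
print([perm for perm in token_permissions if perm])
# -> ['base.tokens.prm.view']; the removed permission is dropped silently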
Example #3
def can_access_token(access, typ, user):
    # ensure the user can see this token; for non-user-associated
    # tokens, that's just a permission check
    if typ in ('prm',):
        if not p.get('base.tokens.{}.{}'.format(typ, access)).can():
            return False
    # for user-associated tokens, if the .all permission is set,
    # the access is fine; otherwise verify that the user matches and
    # the .my permission is set.
    elif typ in ('usr',):
        if not p.get('base.tokens.{}.{}.all'.format(typ, access)).can():
            email = get_user_email()
            if not email or not user or user != email:
                return False
            if not p.get('base.tokens.{}.{}.my'.format(typ, access)).can():
                return False

    return True
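The two branches above can be exercised with simple stubs; a hedged sketch, assuming the function above is in scope and stubbing out the permission registry ``p`` and ``get_user_email()`` (both illustrative, not the real objects):

GRANTED = {'base.tokens.usr.view.my'}

class _Perm(object):
    def __init__(self, name):
        self._name = name

    def can(self):
        return self._name in GRANTED

class _Registry(object):
    def get(self, name):
        return _Perm(name)

p = _Registry()

def get_user_email():
    return 'jane@example.com'

# with only the .my permission, a user may view their own 'usr' tokens...
print(can_access_token('view', 'usr', 'jane@example.com'))  # True
# ...but not tokens associated with someone else
print(can_access_token('view', 'usr', 'bob@example.com'))   # False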
Example #4
def can_access_token(access, typ, user):
    # ensure the user can see this token; for non-user-associated
    # tokens, that's just a permission check
    if typ in ('prm',):
        if not p.get('base.tokens.{}.{}'.format(typ, access)).can():
            return False
    # for user-associated tokens, if the .all permission is set,
    # the access is fine; otherwise verify that the user matches and
    # the .my permission is set.
    elif typ in ('usr',):
        if not p.get('base.tokens.{}.{}.all'.format(typ, access)).can():
            email = get_user_email()
            if not email or not user or user != email:
                return False
            if not p.get('base.tokens.{}.{}.my'.format(typ, access)).can():
                return False

    return True
Example #5
def issue_token(body):
    """Issue a new token.  The body should not include a ``token`` or ``id``,
    but should include a ``typ`` and the necessary fields for that type.  The
    response will contain both ``token`` and ``id``.  You must have permission
    to issue the given token type."""
    typ = body.typ

    # verify permission to issue this type
    perm = p.get('base.tokens.{}.issue'.format(typ))
    if not perm.can():
        raise Forbidden("You do not have permission to create this token type")

    # verify required parameters; any extras will be ignored
    for attr in required_token_attributes[typ]:
        if getattr(body, attr) is wsme.Unset:
            raise BadRequest("missing %s" % attr)

    # prohibit silly requests
    if body.disabled:
        raise BadRequest("can't issue disabled tokens")

    # All types have permissions, so handle those here -- ensure the request is
    # for a subset of the permissions the user can perform
    requested_permissions = [p.get(a) for a in body.permissions]
    if None in requested_permissions:
        raise BadRequest("bad permissions")
    if not set(requested_permissions) <= current_user.permissions:
        raise BadRequest("bad permissions")

    # Dispatch the rest to the per-type function.  Note that WSME has already
    # ensured `typ` is one of the recognized types.
    token = token_issuers[typ](body, requested_permissions)
    perms_str = ', '.join(str(p) for p in requested_permissions)
    log = logger.bind(token_typ=token.typ,
                      token_permissions=perms_str,
                      mozdef=True)
    if token.id:
        log = log.bind(token_id=token.id)
    log.info("Issuing {} token to {} with permissions {}".format(
        token.typ, current_user, perms_str))
    return token
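The permission check near the end leans on set comparison: ``<=`` is the subset test. A standalone illustration with made-up permission names:

user_permissions = {'tooltool.download.public', 'base.tokens.prm.issue'}

requested = {'tooltool.download.public'}
print(requested <= user_permissions)  # True: issuance proceeds

requested.add('tooltool.upload.internal')
print(requested <= user_permissions)  # False: would raise BadRequest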
Example #6
def issue_token(body):
    """Issue a new token.  The body should not include a ``token`` or ``id``,
    but should include a ``typ`` and the necessary fields for that type.  The
    response will contain both ``token`` and ``id``.  You must have permission
    to issue the given token type."""
    typ = body.typ

    # verify permission to issue this type
    perm = p.get('base.tokens.{}.issue'.format(typ))
    if not perm.can():
        raise Forbidden("You do not have permission to create this token type")

    # verify required parameters; any extras will be ignored
    for attr in required_token_attributes[typ]:
        if getattr(body, attr) is wsme.Unset:
            raise BadRequest("missing %s" % attr)

    # prohibit silly requests
    if body.disabled:
        raise BadRequest("can't issue disabled tokens")

    # All types have permissions, so handle those here -- ensure the request is
    # for a subset of the permissions the user can perform
    requested_permissions = [p.get(a) for a in body.permissions]
    if None in requested_permissions:
        raise BadRequest("bad permissions")
    if not set(requested_permissions) <= current_user.permissions:
        raise BadRequest("bad permissions")

    # Dispatch the rest to the per-type function.  Note that WSME has already
    # ensured `typ` is one of the recognized types.
    token = token_issuers[typ](body, requested_permissions)
    perms_str = ', '.join(str(p) for p in requested_permissions)
    log = logger.bind(token_typ=token.typ, token_permissions=perms_str)
    if token.id:
        log = log.bind(token_id=token.id)
    log.info("Issuing {} token to {} with permissions {}".format(
        token.typ, current_user, perms_str))
    return token
Example #7
def download_file(digest, region=None):
    """Fetch a link to the file with the given sha512 digest.  The response
    is a 302 redirect to a signed download URL.

    The query argument ``region=us-west-1`` indicates a preference for a URL in
    that region, although if the file is not available in that region then a URL
    from another region may be returned."""
    log = logger.bind(tooltool_sha512=digest, tooltool_operation='download')
    if not is_valid_sha512(digest):
        raise BadRequest("Invalid sha512 digest")

    # see where the file is..
    tbl = tables.File
    file_row = tbl.query.filter(tbl.sha512 == digest).first()
    if not file_row or not file_row.instances:
        raise NotFound

    # check visibility
    allow_pub_dl = current_app.config.get(
        'TOOLTOOL_ALLOW_ANONYMOUS_PUBLIC_DOWNLOAD')
    if file_row.visibility != 'public' or not allow_pub_dl:
        if not p.get('tooltool.download.{}'.format(file_row.visibility)).can():
            raise Forbidden

    # figure out which region to use, and from there which bucket
    cfg = current_app.config['TOOLTOOL_REGIONS']
    selected_region = None
    for inst in file_row.instances:
        if inst.region == region:
            selected_region = inst.region
            break
    else:
        # preferred region not found, so pick one from the available set
        selected_region = random.choice(
            [inst.region for inst in file_row.instances])
    bucket = cfg[selected_region]

    key = util.keyname(digest)

    s3 = current_app.aws.connect_to('s3', selected_region)
    log.info("generating signed S3 GET URL for {}.. expiring in {}s".format(
        digest[:10], GET_EXPIRES_IN))
    signed_url = s3.generate_url(method='GET',
                                 expires_in=GET_EXPIRES_IN,
                                 bucket=bucket,
                                 key=key)

    return redirect(signed_url)
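The region selection relies on Python's for/else: the ``else`` block runs only when the loop finishes without ``break``, i.e. no instance matched the preferred region. A standalone sketch with illustrative region names:

import random

available = ['us-east-1', 'eu-west-1']  # regions holding the file
preferred = 'us-west-2'                 # the caller's ?region= preference

for region in available:
    if region == preferred:
        selected = region
        break
else:
    # no break: fall back to any region that actually has the file
    selected = random.choice(available)

print(selected)  # 'us-east-1' or 'eu-west-1'; the preference is unmet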
Example #8
def download_file(digest, region=None):
    """Fetch a link to the file with the given sha512 digest.  The response
    is a 302 redirect to a signed download URL.

    The query argument ``region=us-west-1`` indicates a preference for a URL in
    that region, although if the file is not available in that region then a URL
    from another region may be returned."""
    log = logger.bind(tooltool_sha512=digest, tooltool_operation='download')
    if not is_valid_sha512(digest):
        raise BadRequest("Invalid sha512 digest")

    # see where the file is..
    tbl = tables.File
    file_row = tbl.query.filter(tbl.sha512 == digest).first()
    if not file_row or not file_row.instances:
        raise NotFound

    # check visibility
    allow_pub_dl = current_app.config.get('TOOLTOOL_ALLOW_ANONYMOUS_PUBLIC_DOWNLOAD')
    if file_row.visibility != 'public' or not allow_pub_dl:
        if not p.get('tooltool.download.{}'.format(file_row.visibility)).can():
            raise Forbidden

    # figure out which region to use, and from there which bucket
    cfg = current_app.config['TOOLTOOL_REGIONS']
    selected_region = None
    for inst in file_row.instances:
        if inst.region == region:
            selected_region = inst.region
            break
    else:
        # preferred region not found, so pick one from the available set
        selected_region = random.choice([inst.region for inst in file_row.instances])
    bucket = cfg[selected_region]

    key = util.keyname(digest)

    s3 = current_app.aws.connect_to('s3', selected_region)
    log.info("generating signed S3 GET URL for {}.. expiring in {}s".format(
        digest[:10], GET_EXPIRES_IN))
    signed_url = s3.generate_url(
        method='GET', expires_in=GET_EXPIRES_IN, bucket=bucket, key=key)

    return redirect(signed_url)
Example #9
def permlist_to_permissions(permlist):
    token_permissions = [p.get(s) for s in permlist]
    # silently ignore any nonexistent permissions; this allows us to remove
    # unused permissions without causing tokens that grant those permissions
    # to fail completely
    return [perm for perm in token_permissions if perm]
Example #10
def upload_batch(region=None, body=None):
    """Create a new upload batch.  The response object will contain a
    ``put_url`` for each file which needs to be uploaded -- which may not be
    all!  The caller is then responsible for uploading to those URLs.  The
    resulting signed URLs are valid for one hour, so uploads should begin
    within that timeframe.  Consider using Amazon's MD5-verification
    capabilities to ensure that the uploaded files are transferred correctly,
    although the tooltool server will verify the integrity anyway.  The
    upload must have the header ``Content-Type: application/octet-stream``.

    The query argument ``region=us-west-1`` indicates a preference for URLs
    in that region, although if the region is not available then URLs in
    other regions may be returned.

    The returned URLs are only valid for 60 seconds, so all upload requests
    must begin within that timeframe.  Clients should therefore perform all
    uploads in parallel, rather than sequentially.  This limitation is in
    place to prevent malicious modification of files after they have been
    verified."""
    region, bucket = get_region_and_bucket(region)

    if not body.message:
        raise BadRequest("message must be non-empty")

    if not body.files:
        raise BadRequest("a batch must include at least one file")

    if body.author:
        raise BadRequest("Author must not be specified for upload")
    try:
        body.author = current_user.authenticated_email
    except AttributeError:
        raise BadRequest("Could not determine authenticated username")

    # verify permissions based on visibilities
    visibilities = set(f.visibility for f in body.files.itervalues())
    for v in visibilities:
        prm = p.get('tooltool.upload.{}'.format(v))
        if not prm or not prm.can():
            raise Forbidden("no permission to upload {} files".format(v))

    session = g.db.session('relengapi')
    batch = tables.Batch(uploaded=time.now(),
                         author=body.author,
                         message=body.message)

    s3 = current_app.aws.connect_to('s3', region)
    for filename, info in body.files.iteritems():
        log = logger.bind(tooltool_sha512=info.digest,
                          tooltool_operation='upload',
                          tooltool_batch_id=batch.id,
                          mozdef=True)
        if info.algorithm != 'sha512':
            raise BadRequest("'sha512' is the only allowed digest algorithm")
        if not is_valid_sha512(info.digest):
            raise BadRequest("Invalid sha512 digest")
        digest = info.digest
        file = tables.File.query.filter(tables.File.sha512 == digest).first()
        if file and file.visibility != info.visibility:
            raise BadRequest("Cannot change a file's visibility level")
        if file and file.instances != []:
            if file.size != info.size:
                raise BadRequest("Size mismatch for {}".format(filename))
        else:
            if not file:
                file = tables.File(sha512=digest,
                                   visibility=info.visibility,
                                   size=info.size)
                session.add(file)
            log.info(
                "generating signed S3 PUT URL to {} for {}; expiring in {}s".
                format(info.digest[:10], current_user, UPLOAD_EXPIRES_IN))
            info.put_url = s3.generate_url(
                method='PUT',
                expires_in=UPLOAD_EXPIRES_IN,
                bucket=bucket,
                key=util.keyname(info.digest),
                headers={'Content-Type': 'application/octet-stream'})
            # The PendingUpload row needs to reflect the updated expiration
            # time, even if there's an existing pending upload that expires
            # earlier.  The `merge` method does a SELECT and then either UPDATEs
            # or INSERTs the row.  However, merge needs the file_id, rather than
            # just a reference to the file object; and for that, we need to flush
            # the inserted file.
            session.flush()
            pu = tables.PendingUpload(
                file_id=file.id,
                region=region,
                expires=time.now() +
                datetime.timedelta(seconds=UPLOAD_EXPIRES_IN))
            session.merge(pu)
        session.add(tables.BatchFile(filename=filename, file=file,
                                     batch=batch))
    session.add(batch)
    session.commit()

    body.id = batch.id
    return body
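On the client side, each returned ``put_url`` must be PUT to within the expiry window, with exactly the ``Content-Type`` header the URL was signed for. A hedged sketch using the third-party ``requests`` library (not something the server code above depends on):

import requests

def upload_to_put_url(put_url, path):
    # the Content-Type must match the header the URL was signed with,
    # or S3 rejects the request with a signature mismatch
    with open(path, 'rb') as f:
        resp = requests.put(
            put_url, data=f,
            headers={'Content-Type': 'application/octet-stream'})
    resp.raise_for_status()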
Example #12
def upload_batch(region=None, body=None):
    """Create a new upload batch.  The response object will contain a
    ``put_url`` for each file which needs to be uploaded -- which may not be
    all!  The caller is then responsible for uploading to those URLs.  The
    resulting signed URLs are valid for one hour, so uploads should begin
    within that timeframe.  Consider using Amazon's MD5-verification
    capabilities to ensure that the uploaded files are transferred correctly,
    although the tooltool server will verify the integrity anyway.  The
    upload must have the header ``Content-Type: application/octet-stream``.

    The query argument ``region=us-west-1`` indicates a preference for URLs
    in that region, although if the region is not available then URLs in
    other regions may be returned.

    The returned URLs are only valid for 60 seconds, so all upload requests
    must begin within that timeframe.  Clients should therefore perform all
    uploads in parallel, rather than sequentially.  This limitation is in
    place to prevent malicious modification of files after they have been
    verified."""
    region, bucket = get_region_and_bucket(region)

    if not body.message:
        raise BadRequest("message must be non-empty")

    if not body.files:
        raise BadRequest("a batch must include at least one file")

    if body.author:
        raise BadRequest("Author must not be specified for upload")
    try:
        body.author = current_user.authenticated_email
    except AttributeError:
        raise BadRequest("Could not determine authenticated username")

    # verify permissions based on visibilities
    visibilities = set(f.visibility for f in body.files.itervalues())
    for v in visibilities:
        prm = p.get('tooltool.upload.{}'.format(v))
        if not prm or not prm.can():
            raise Forbidden("no permission to upload {} files".format(v))

    session = g.db.session('relengapi')
    batch = tables.Batch(
        uploaded=time.now(),
        author=body.author,
        message=body.message)

    s3 = current_app.aws.connect_to('s3', region)
    for filename, info in body.files.iteritems():
        log = logger.bind(tooltool_sha512=info.digest, tooltool_operation='upload',
                          tooltool_batch_id=batch.id)
        if info.algorithm != 'sha512':
            raise BadRequest("'sha512' is the only allowed digest algorithm")
        if not is_valid_sha512(info.digest):
            raise BadRequest("Invalid sha512 digest")
        digest = info.digest
        file = tables.File.query.filter(tables.File.sha512 == digest).first()
        if file and file.visibility != info.visibility:
            raise BadRequest("Cannot change a file's visibility level")
        if file and file.instances != []:
            if file.size != info.size:
                raise BadRequest("Size mismatch for {}".format(filename))
        else:
            if not file:
                file = tables.File(
                    sha512=digest,
                    visibility=info.visibility,
                    size=info.size)
                session.add(file)
            log.info("generating signed S3 PUT URL to {} for {}; expiring in {}s".format(
                info.digest[:10], current_user, UPLOAD_EXPIRES_IN))
            info.put_url = s3.generate_url(
                method='PUT', expires_in=UPLOAD_EXPIRES_IN, bucket=bucket,
                key=util.keyname(info.digest),
                headers={'Content-Type': 'application/octet-stream'})
            # The PendingUpload row needs to reflect the updated expiration
            # time, even if there's an existing pending upload that expires
            # earlier.  The `merge` method does a SELECT and then either UPDATEs
            # or INSERTs the row.  However, merge needs the file_id, rather than
            # just a reference to the file object; and for that, we need to flush
            # the inserted file.
            session.flush()
            pu = tables.PendingUpload(
                file_id=file.id,
                region=region,
                expires=time.now() + datetime.timedelta(seconds=UPLOAD_EXPIRES_IN))
            session.merge(pu)
        session.add(tables.BatchFile(filename=filename, file=file, batch=batch))
    session.add(batch)
    session.commit()

    body.id = batch.id
    return body
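The ``session.merge(pu)`` upsert described in the comments can be seen in isolation. A minimal sketch with SQLAlchemy 1.4+; the model and engine are illustrative, not the real ``tables.PendingUpload``:

import datetime

from sqlalchemy import Column, DateTime, Integer, String, create_engine
from sqlalchemy.orm import Session, declarative_base

Base = declarative_base()

class PendingUpload(Base):
    __tablename__ = 'pending_upload'
    # merge() keys on the primary key: one row per (file_id, region)
    file_id = Column(Integer, primary_key=True)
    region = Column(String(32), primary_key=True)
    expires = Column(DateTime)

engine = create_engine('sqlite://')
Base.metadata.create_all(engine)

with Session(engine) as session:
    # the first merge SELECTs, finds nothing, and INSERTs...
    session.merge(PendingUpload(file_id=1, region='us-east-1',
                                expires=datetime.datetime(2015, 1, 1)))
    # ...the second finds the row and UPDATEs `expires` instead
    session.merge(PendingUpload(file_id=1, region='us-east-1',
                                expires=datetime.datetime(2015, 1, 2)))
    session.commit()
    assert session.query(PendingUpload).count() == 1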