Example No. 1
def check_pending_upload(session, pu, _test_shim=lambda: None):
    # we can check the upload any time between the expiration of the URL
    # (after which the user can't make any more changes, but the upload
    # may yet be incomplete) and 1 day afterward (ample time for the upload
    # to complete)
    sha512 = pu.file.sha512
    size = pu.file.size

    log = logger.bind(tooltool_sha512=sha512)

    if time.now() < pu.expires:
        # URL is not expired yet
        return
    elif time.now() > pu.expires + timedelta(days=1):
        # Upload will probably never complete
        log.info(
            "Deleting abandoned pending upload for {}".format(sha512))
        session.delete(pu)
        return

    # connect to S3 and see if the file exists there yet
    s3 = current_app.aws.connect_to('s3', pu.region)
    cfg = current_app.config.get('TOOLTOOL_REGIONS')
    if not cfg or pu.region not in cfg:
        log.warning("Pending upload for {} was to an un-configured "
                    "region".format(sha512))
        session.delete(pu)
        return

    bucket = s3.get_bucket(cfg[pu.region], validate=False)
    key = bucket.get_key(util.keyname(sha512))
    if not key:
        # not uploaded yet
        return

    # commit the session before verifying the file instance, since the
    # DB connection may otherwise go away while we're distracted.
    session.commit()
    _test_shim()

    if not verify_file_instance(sha512, size, key):
        log.warning(
            "Upload of {} was invalid; deleting key".format(sha512))
        key.delete()
        session.delete(pu)
        session.commit()
        return

    log.info("Upload of {} considered valid".format(sha512))
    # add a file instance, but it's OK if it already exists
    try:
        session.add(tables.FileInstance(file=pu.file, region=pu.region))
        session.commit()
    except sa.exc.IntegrityError:
        session.rollback()

    # and delete the pending upload
    session.delete(pu)
    session.commit()
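
The `_test_shim` hook exists so a test can interleave work at the exact point where the session has just been committed, right before the uploaded key is verified. A minimal sketch of such a test, assuming a Flask app configured as in Example No. 3, a `pending_upload` fixture, and a mocked S3 bucket (none of which appear in the original):

def test_concurrent_registration(app, pending_upload):
    # hypothetical test: another process registers the file instance
    # while check_pending_upload is busy verifying the uploaded key,
    # exercising the IntegrityError/rollback branch above
    def shim():
        session = app.db.session('relengapi')
        session.add(tables.FileInstance(file=pending_upload.file,
                                        region=pending_upload.region))
        session.commit()

    with app.app_context():
        session = app.db.session('relengapi')
        check_pending_upload(session, pending_upload, _test_shim=shim)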
Example No. 2
def add_file_row(size, sha512, instances=()):
    session = current_app.db.session('relengapi')
    file_row = tables.File(size=size, visibility='public', sha512=sha512)
    session.add(file_row)
    for region in instances:
        session.add(tables.FileInstance(file=file_row, region=region))
    session.commit()
    return file_row
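
For reference, a caller inside an application context might use it like this (a sketch; the content and region names are made up):

import hashlib

content = b'hello world'
file_row = add_file_row(
    size=len(content),
    sha512=hashlib.sha512(content).hexdigest(),
    instances=['us-east-1', 'us-west-2'])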
Example No. 3
def add_file_to_db(app, content, regions=('us-east-1',),
                   pending_regions=(), visibility='public'):
    with app.app_context():
        session = app.db.session('relengapi')
        file_row = tables.File(size=len(content),
                               visibility=visibility,
                               sha512=hashlib.sha512(content).hexdigest())
        session.add(file_row)
        session.commit()
        for region in regions:
            session.add(tables.FileInstance(
                file_id=file_row.id, region=region))
        for region in pending_regions:
            session.add(tables.PendingUpload(
                file=file_row, region=region,
                expires=relengapi_time.now() + datetime.timedelta(seconds=60)))
        session.commit()

        return file_row
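
Called like this, the helper sets up one live copy plus a pending upload that check_pending_upload (Example No. 1) will later groom (a sketch; the content and regions are made up):

file_row = add_file_to_db(app, b'some content',
                          regions=['us-east-1'],
                          pending_regions=['us-west-2'])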
Example No. 4
def replicate_file(session, file, _test_shim=lambda: None):
    log = logger.bind(tooltool_sha512=file.sha512, mozdef=True)
    config = current_app.config['TOOLTOOL_REGIONS']
    regions = set(config)
    file_regions = set([i.region for i in file.instances])
    # only use configured source regions; if a region is removed
    # from the configuration, we can't copy from it.
    source_regions = file_regions & regions
    if not source_regions:
        # this should only happen when the only region containing a
        # file is removed from the configuration
        log.warning("no source regions for {}".format(file.sha512))
        return
    source_region = source_regions.pop()
    source_bucket = config[source_region]
    target_regions = regions - file_regions
    log.info("replicating {} from {} to {}".format(file.sha512, source_region,
                                                   ', '.join(target_regions)))

    key_name = util.keyname(file.sha512)
    for target_region in target_regions:
        target_bucket = config[target_region]
        conn = current_app.aws.connect_to('s3', target_region)
        bucket = conn.get_bucket(target_bucket)

        # commit the session before replicating, since the DB connection may
        # otherwise go away while we're distracted.
        session.commit()
        _test_shim()
        bucket.copy_key(new_key_name=key_name,
                        src_key_name=key_name,
                        src_bucket_name=source_bucket,
                        storage_class='STANDARD',
                        preserve_acl=False)
        try:
            session.add(tables.FileInstance(file=file, region=target_region))
            session.commit()
        except sa.exc.IntegrityError:
            session.rollback()
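
A grooming pass might drive this function per file, roughly as below. This is a sketch under stated assumptions: the `replicate_all` driver and its query are illustrations, not part of the blueprint shown above.

def replicate_all(app):
    # hypothetical driver: try to replicate every known file; a file
    # already present in all configured regions copies nothing, since
    # replicate_file computes an empty target_regions set for it
    with app.app_context():
        session = app.db.session('relengapi')
        for file in session.query(tables.File):
            replicate_file(session, file)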
Example No. 5
def test_shim():
    # simulate a concurrent writer: register the instance through a
    # fresh session, so the caller's own insert hits IntegrityError
    session = app.db.session('relengapi')
    session.add(tables.FileInstance(file=file, region='us-west-2'))
    session.commit()
Example No. 6
def test_shim():
    # reuse the caller's session and insert the row the function under
    # test is about to add, forcing its IntegrityError/rollback path
    session.add(tables.FileInstance(file=file_row, region='us-west-2'))
    session.commit()

def test_shim():
    # same race, but through a fresh session on the declarative base
    session = app.db.session(tables.DB_DECLARATIVE_BASE)
    session.add(tables.FileInstance(file=file, region='us-west-2'))
    session.commit()
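
Wired together, a test could pass one of these shims into replicate_file to exercise its IntegrityError branch. A sketch, assuming the helpers above and a TOOLTOOL_REGIONS configuration that includes 'us-west-2' (the test function itself is an illustration):

def test_replication_race(app):
    with app.app_context():
        session = app.db.session('relengapi')
        file_row = add_file_to_db(app, b'content', regions=['us-east-1'])

        def test_shim():
            # runs after the pre-copy commit and before the S3 copy;
            # insert the row replicate_file will add afterwards,
            # forcing its IntegrityError path
            session.add(tables.FileInstance(file=file_row,
                                            region='us-west-2'))
            session.commit()

        replicate_file(session, file_row, _test_shim=test_shim)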