def upload_filter_to_kinto(generation_time, is_base=True, upload_stash=False):
    """Publish an MLBF bloomfilter (and optionally a stash) to kinto.

    A "base" upload wipes the collection first so the new filter is the only
    record; a non-base upload layers an update filter (and optionally a stash
    record) on top of the existing base.  After publishing, the generation
    time is persisted via set_config (and also recorded as the base id when
    this is a base upload).
    """
    server = KintoServer(
        KINTO_BUCKET, KINTO_COLLECTION_MLBF, kinto_sign_off_needed=False)
    mlbf = MLBF(generation_time)
    if is_base:
        # A base filter replaces everything already in the collection.
        server.delete_all_records()
    # A stash, when requested, is published ahead of the filter itself.
    if upload_stash:
        server.publish_record({
            'key_format': MLBF.KEY_FORMAT,
            'stash_time': generation_time,
            'stash': mlbf.stash_json,
        })
    record_data = {
        'key_format': MLBF.KEY_FORMAT,
        'generation_time': generation_time,
        'attachment_type': (
            BLOCKLIST_RECORD_MLBF_BASE if is_base
            else BLOCKLIST_RECORD_MLBF_UPDATE),
    }
    with storage.open(mlbf.filter_path, 'rb') as filter_file:
        server.publish_attachment(
            record_data,
            ('filter.bin', filter_file, 'application/octet-stream'))
    server.complete_session()
    set_config(MLBF_TIME_CONFIG_KEY, generation_time, json_value=True)
    if is_base:
        set_config(MLBF_BASE_ID_CONFIG_KEY, generation_time, json_value=True)
def legacy_delete_blocks(blocks):
    """Delete the legacy kinto records backing the given blocks.

    Only blocks that both have a kinto_id and are flagged for inclusion in
    the legacy collection are considered; blocks imported from a regex guid
    are skipped because their record cannot be safely deleted.  Successfully
    deleted blocks have their kinto_id cleared.
    """
    server = KintoServer(KINTO_BUCKET, KINTO_COLLECTION_LEGACY)
    for block in blocks:
        # Nothing to do unless this block owns a legacy record.
        if not (block.kinto_id and block.include_in_legacy):
            continue
        if block.is_imported_from_kinto_regex:
            log.debug(
                f'Block [{block.guid}] was imported from a regex guid so '
                'can\'t be safely deleted. Skipping.')
            continue
        server.delete_record(block.kinto_id)
        block.update(kinto_id='')
    server.complete_session()
def upload_filter_to_kinto(generation_time):
    """Publish the pre-generated MLBF attachment for `generation_time`.

    Reads the filter file produced by MLBF(generation_time), uploads it as a
    kinto attachment (no sign-off needed), then persists the generation time
    via set_config so subsequent runs know which filter is live.
    """
    server = KintoServer(
        KINTO_BUCKET, KINTO_COLLECTION_MLBF, kinto_sign_off_needed=False)
    data = {
        'key_format': MLBF.KEY_FORMAT,
        'generation_time': generation_time,
    }
    mlbf_path = MLBF(generation_time).filter_path
    # Open explicitly in binary mode - the filter is a binary blob, and the
    # sibling uploader passes 'rb' explicitly; relying on the storage
    # backend's default mode is fragile.
    with storage.open(mlbf_path, 'rb') as filter_file:
        attachment = ('filter.bin', filter_file, 'application/octet-stream')
        server.publish_attachment(data, attachment)
    server.complete_session()
    set_config(MLBF_TIME_CONFIG_KEY, generation_time, json_value=True)
def test_complete_session_no_kinto_signoff(self):
    """complete_session with sign-off disabled PATCHes status 'to-sign'."""
    server = KintoServer('foo', 'baa', kinto_sign_off_needed=False)
    server._setup_done = True
    # No pending changes yet, so this first call must be a no-op.
    server.complete_session()
    server._changes = True
    endpoint = settings.KINTO_API_URL + 'buckets/foo/collections/baa'
    responses.add(
        responses.PATCH, endpoint, content_type='application/json')
    server.complete_session()
    assert not server._changes
    expected_body = json.dumps({'data': {'status': 'to-sign'}}).encode()
    assert responses.calls[0].request.body == expected_body
def legacy_publish_blocks(blocks):
    """Sync the given blocks with the legacy remote-settings collection.

    For each block: create or update its legacy record when it should be in
    legacy; delete the stale record when it should not.  Blocks imported
    from a regex guid are never touched (their records can't be safely
    rewritten or deleted).  kinto_id is kept in sync on the block itself.
    """
    bucket = settings.REMOTE_SETTINGS_WRITER_BUCKET
    server = KintoServer(bucket, REMOTE_SETTINGS_COLLECTION_LEGACY)
    for block in blocks:
        if block.include_in_legacy:
            # Wanted in legacy: create a record, or update the existing one.
            if block.is_imported_from_kinto_regex:
                log.debug(
                    f'Block [{block.guid}] was imported from a regex guid so '
                    'can\'t be safely updated. Skipping.')
                continue
            data = {
                'guid': block.guid,
                'details': {
                    'bug': block.url,
                    'why': block.reason,
                    'name': str(block.reason).partition('.')[0],  # required
                },
                'enabled': True,
                'versionRange': [{
                    'severity': 3,  # Always high severity now.
                    'minVersion': block.min_version,
                    'maxVersion': block.max_version,
                }],
            }
            if block.kinto_id:
                server.publish_record(data, block.kinto_id)
            else:
                record = server.publish_record(data)
                block.update(kinto_id=record.get('id', ''))
        elif block.kinto_id:
            # Has a record but shouldn't be in legacy: remove it.
            if block.is_imported_from_kinto_regex:
                log.debug(
                    f'Block [{block.guid}] was imported from a regex guid so '
                    'can\'t be safely deleted. Skipping.')
            else:
                server.delete_record(block.kinto_id)
                block.update(kinto_id='')
        # else: no existing kinto record and it shouldn't be in legacy; skip.
    server.complete_session()
def test_complete_session(self):
    """complete_session with sign-off enabled PATCHes status 'to-review'."""
    server = KintoServer('foo', 'baa')
    server._setup_done = True
    # Nothing changed yet, so this first call must be a no-op.
    server.complete_session()
    server._changes = True
    endpoint = (
        settings.REMOTE_SETTINGS_WRITER_URL + 'buckets/foo/collections/baa')
    responses.add(
        responses.PATCH, endpoint, content_type='application/json')
    server.complete_session()
    assert not server._changes
    expected_body = json.dumps({'data': {'status': 'to-review'}}).encode()
    assert responses.calls[0].request.body == expected_body
def upload_mlbf_to_kinto():
    """Cron job: generate a fresh MLBF and publish it to kinto.

    Bails out early when the waffle switch is off, or when no Block has been
    modified since the last recorded generation time.  Otherwise generates
    the bloomfilter, uploads it as an attachment, records the new generation
    time via set_config and logs the generation stats.
    """
    if not waffle.switch_is_active('blocklist_mlbf_submit'):
        log.info('Upload MLBF to kinto cron job disabled.')
        return
    last_generation_time = get_config(
        MLBF_TIME_CONFIG_KEY, 0, json_value=True)
    if last_generation_time > _get_blocklist_last_modified_time():
        log.info(
            'No new/modified Blocks in database; skipping MLBF generation')
        return
    log.info('Starting Upload MLBF to kinto cron job.')
    server = KintoServer(
        KINTO_BUCKET, KINTO_COLLECTION_MLBF, kinto_sign_off_needed=False)
    stats = {}
    key_format = get_mlbf_key_format()
    # This timestamp represents the point in time when all previous addon
    # guid + versions and blocks were used to generate the bloomfilter.
    # An add-on version/file from before this time will definitely be
    # accounted for in the bloomfilter so we can reliably assert if it's
    # blocked or not.  An add-on version/file from after this time can't be
    # reliably asserted - there may be false positives or false negatives.
    # https://github.com/mozilla/addons-server/issues/13695
    generation_time = int(time.time() * 1000)
    bloomfilter = generate_mlbf(stats, key_format)
    with tempfile.NamedTemporaryFile() as filter_file:
        bloomfilter.tofile(filter_file)
        filter_file.seek(0)
        record = {
            'key_format': key_format,
            'generation_time': generation_time,
        }
        server.publish_attachment(
            record, ('filter.bin', filter_file, 'application/octet-stream'))
    server.complete_session()
    set_config(MLBF_TIME_CONFIG_KEY, generation_time, json_value=True)
    log.info(json.dumps(stats))