def RandomAUSTest(AUS, backgroundRate, force, mapping):
    with mock.patch('auslib.db.Rules.getRulesMatchingQuery') as m:
        m.return_value = [
            dict(backgroundRate=backgroundRate, priority=1, mapping=mapping, update_type='minor', whitelist=None)
        ]

        results = AUS.rand.getRange()
        resultsLength = len(results)

        def se(*args, **kwargs):
            return results.pop()

        with mock.patch('auslib.AUS.AUSRandom.getInt') as m2:
            m2.side_effect = se
            served = 0
            tested = 0
            while len(results) > 0:
                updateQuery = dict(channel='foo', force=force, buildTarget='a', buildID='0', locale='a', version='1.0')
                r, _ = AUS.evaluateRules(updateQuery)
                tested += 1
                if r:
                    served += 1
                # bail out if we're not asking for any randint's
                if resultsLength == len(results):
                    break
            return (served, tested)
def RandomAUSTestWithFallback(AUS, backgroundRate, force, mapping):
    with mock.patch('auslib.db.Rules.getRulesMatchingQuery') as m:
        m.return_value = [dict(backgroundRate=backgroundRate, priority=1, mapping=mapping, update_type='minor', fallbackMapping='fallback')]

        results = AUS.rand.getRange()
        resultsLength = len(results)

        def se(*args, **kwargs):
            return results.pop()

        with mock.patch('auslib.AUS.AUSRandom.getInt') as m2:
            m2.side_effect = se
            served_mapping = 0
            served_fallback = 0
            tested = 0
            while len(results) > 0:
                updateQuery = dict(
                    channel='foo', force=force, buildTarget='a', buildID='0', locale='a', version='1.0',
                )
                r, _ = AUS.evaluateRules(updateQuery)
                tested += 1
                if r['name'] == mapping:
                    served_mapping += 1
                elif r['name'] == "fallback":
                    served_fallback += 1
                # bail out if we're not asking for any randint's
                if resultsLength == len(results):
                    break
            return (served_mapping, served_fallback, tested)
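# A minimal sketch of how the two helpers above might be driven from a
# unittest-style test. The class name, the direct AUS() construction (import
# path inferred from the mock.patch targets above), and the assumption that
# releases named 'b' and 'fallback' exist in the test fixtures are
# illustrative, not taken from the original suite.
import unittest

from auslib.AUS import AUS


class TestBackgroundRateSampling(unittest.TestCase):
    def setUp(self):
        # assumes the surrounding module has already set up a test database
        # containing release blobs named 'b' and 'fallback'
        self.AUS = AUS()

    def testBackgroundRate100ServesEveryQuery(self):
        # a rule at backgroundRate=100 is never throttled
        served, tested = RandomAUSTest(self.AUS, backgroundRate=100, force=False, mapping='b')
        self.assertEqual(served, tested)

    def testBackgroundRate25ServesOnlySome(self):
        # a partial rate serves some, but not all, of the sampled range
        served, tested = RandomAUSTest(self.AUS, backgroundRate=25, force=False, mapping='b')
        self.assertTrue(0 < served < tested)

    def testFallbackCoversUnsampledQueries(self):
        # with a fallbackMapping every query resolves to either the mapping or the fallback
        served_mapping, served_fallback, tested = RandomAUSTestWithFallback(
            self.AUS, backgroundRate=25, force=False, mapping='b')
        self.assertEqual(served_mapping + served_fallback, tested)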
def random_aus_test(self, background_rate, force=None, fallback=False):
    mapping = "b"
    with mock.patch("auslib.db.Rules.getRulesMatchingQuery") as m:
        fallback = fallback and "fallback"  # convert True to string
        m.return_value = [dict(backgroundRate=background_rate, priority=1, mapping=mapping, update_type="minor", fallbackMapping=fallback)]

        results = list(ENTIRE_RANGE)
        resultsLength = len(results)

        def se(*args, **kwargs):
            return results.pop()

        aus = AUS()
        aus.rand = mock.Mock(side_effect=se)
        served_mapping = 0
        served_fallback = 0
        tested = 0
        while len(results) > 0:
            updateQuery = dict(channel="foo", force=force, buildTarget="a", buildID="0", locale="a", version="1.0", product="bar")
            r, _ = aus.evaluateRules(updateQuery)
            tested += 1
            if r and r["name"] == mapping:
                served_mapping += 1
            elif fallback and r["name"] == fallback:
                served_fallback += 1
            # bail out if we're not asking for any randint's
            if resultsLength == len(results):
                break
        return (served_mapping, served_fallback, tested)
def random_aus_test(self, background_rate, force=None, fallback=False):
    mapping = 'b'
    with mock.patch('auslib.db.Rules.getRulesMatchingQuery') as m:
        fallback = fallback and 'fallback'  # convert True to string
        m.return_value = [dict(backgroundRate=background_rate, priority=1, mapping=mapping, update_type='minor', fallbackMapping=fallback)]

        results = list(ENTIRE_RANGE)
        resultsLength = len(results)

        def se(*args, **kwargs):
            return results.pop()

        aus = AUS()
        aus.rand = mock.Mock(side_effect=se)
        served_mapping = 0
        served_fallback = 0
        tested = 0
        while len(results) > 0:
            updateQuery = dict(
                channel='foo', force=force, buildTarget='a', buildID='0', locale='a', version='1.0', product='bar'
            )
            r, _ = aus.evaluateRules(updateQuery)
            tested += 1
            if r and r['name'] == mapping:
                served_mapping += 1
            elif fallback and r['name'] == fallback:
                served_fallback += 1
            # bail out if we're not asking for any randint's
            if resultsLength == len(results):
                break
        return (served_mapping, served_fallback, tested)
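# Because random_aus_test is written as a method, it can live on (or be mixed
# into) a test class directly. A sketch of how the fallback path might be
# exercised; it assumes this class also defines the helper above, that the
# module defines ENTIRE_RANGE, and that releases named 'b' and 'fallback'
# exist in the fixtures. None of this is taken from the original suite.
import unittest


class TestRulesFallback(unittest.TestCase):
    # random_aus_test (defined above) is assumed to be part of this class

    def testNoFallbackWhenNotConfigured(self):
        # with fallback left as False the mocked rule carries fallbackMapping=False,
        # so unsampled queries are simply not served
        served, fallback_served, tested = self.random_aus_test(background_rate=25)
        self.assertEqual(fallback_served, 0)
        self.assertTrue(0 < served < tested)

    def testFallbackServedWhenConfigured(self):
        # with fallback=True every query resolves to either 'b' or 'fallback'
        served, fallback_served, tested = self.random_aus_test(background_rate=25, fallback=True)
        self.assertEqual(served + fallback_served, tested)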
def get_update_blob(**url):
    query = getQueryFromURL(url)
    LOG.debug("Got query: %s", query)
    release, update_type = AUS.evaluateRules(query)

    # passing {},None returns empty xml
    if release:
        response_products = release.getResponseProducts()
        response_blobs = []
        response_blob_names = release.getResponseBlobs()
        if response_products:
            # if we have a SuperBlob of gmp, we process the response products and
            # concatenate their inner XMLs
            for product in response_products:
                product_query = query.copy()
                product_query["product"] = product
                response_release, response_update_type = AUS.evaluateRules(product_query)
                if not response_release:
                    continue
                response_blobs.append({'product_query': product_query,
                                       'response_release': response_release,
                                       'response_update_type': response_update_type})
        elif response_blob_names:
            for blob_name in response_blob_names:
                # if we have a SuperBlob of systemaddons, we process the response products and
                # concatenate their inner XMLs
                product_query = query.copy()
                product = dbo.releases.getReleases(name=blob_name, limit=1)[0]['product']
                product_query["product"] = product
                response_release = dbo.releases.getReleaseBlob(name=blob_name)
                if not response_release:
                    LOG.warning("No release found with name: %s", blob_name)
                    continue
                response_blobs.append({'product_query': product_query,
                                       'response_release': response_release,
                                       'response_update_type': update_type})
        else:
            response_blobs.append({'product_query': query,
                                   'response_release': release,
                                   'response_update_type': update_type})

        # getHeaderXML() returns the outermost header for an update, which is
        # the same for all release types
        xml = release.getHeaderXML()
        # we assume that all blobs will have similar ones. We might want to
        # verify that all of them are indeed the same in the future.

        # Appending Header
        # In case of a superblob, extract the header from the parent release
        xml.append(release.getInnerHeaderXML(query, update_type, app.config["WHITELISTED_DOMAINS"],
                                             app.config["SPECIAL_FORCE_HOSTS"]))
        for response_blob in response_blobs:
            xml.extend(response_blob['response_release']
                       .getInnerXML(response_blob['product_query'],
                                    response_blob['response_update_type'],
                                    app.config["WHITELISTED_DOMAINS"],
                                    app.config["SPECIAL_FORCE_HOSTS"]))
        # Appending Footer
        # In case of a superblob, extract the footer from the parent release
        xml.append(release.getInnerFooterXML(query, update_type, app.config["WHITELISTED_DOMAINS"],
                                             app.config["SPECIAL_FORCE_HOSTS"]))
        xml.append(release.getFooterXML())

        # ensure valid xml by using the right entity for ampersand
        xml = re.sub('&(?!amp;)', '&amp;', '\n'.join(xml))
    else:
        xml = ['<?xml version="1.0"?>']
        xml.append('<updates>')
        xml.append('</updates>')
        xml = "\n".join(xml)
    LOG.debug("Sending XML: %s", xml)
    response = make_response(xml)
    response.headers["Cache-Control"] = app.cacheControl
    response.mimetype = "text/xml"
    return response
def get_update_blob(transaction, **url):
    url['queryVersion'] = extract_query_version(request.url)
    # Underlying code depends on osVersion being set. Since this route only
    # exists to support ancient queries, and all newer versions have osVersion
    # in them, it's easier to set this here than make all of the underlying
    # code support queries without it.
    if url['queryVersion'] == 1:
        url['osVersion'] = ''

    # Bug 1517743 - two Firefox nightlies can't parse update.xml when it
    # contains the usual newlines or indentations
    squash_response = False

    query = getQueryFromURL(url)
    LOG.debug("Got query: %s", query)
    release, update_type = AUS.evaluateRules(query, transaction=transaction)

    # passing {},None returns empty xml
    if release:
        response_products = release.getResponseProducts()
        response_blobs = []
        response_blob_names = release.getResponseBlobs()
        if response_products:
            # if we have a SuperBlob of gmp, we process the response products and
            # concatenate their inner XMLs
            for product in response_products:
                product_query = query.copy()
                product_query["product"] = product
                response_release, response_update_type = AUS.evaluateRules(
                    product_query, transaction=transaction)
                if not response_release:
                    continue
                response_blobs.append({
                    'product_query': product_query,
                    'response_release': response_release,
                    'response_update_type': response_update_type
                })
        elif response_blob_names:
            for blob_name in response_blob_names:
                # if we have a SuperBlob of systemaddons, we process the response products and
                # concatenate their inner XMLs
                product_query = query.copy()
                product = dbo.releases.getReleases(
                    name=blob_name, limit=1, transaction=transaction)[0]['product']
                product_query["product"] = product
                response_release = dbo.releases.getReleaseBlob(
                    name=blob_name, transaction=transaction)
                if not response_release:
                    LOG.warning("No release found with name: %s", blob_name)
                    continue
                response_blobs.append({
                    'product_query': product_query,
                    'response_release': response_release,
                    'response_update_type': update_type
                })
        else:
            response_blobs.append({
                'product_query': query,
                'response_release': release,
                'response_update_type': update_type
            })

        # Bug 1517743 - we want a cheap test because this will be run on each request
        if release['name'] == 'Firefox-mozilla-central-nightly-latest' and \
                query['buildID'] in ('20190103220533', '20190104093221'):
            squash_response = True
            LOG.debug('Busted nightly detected, will squash xml response')

        # getHeaderXML() returns the outermost header for an update, which is
        # the same for all release types
        xml = release.getHeaderXML()
        # we assume that all blobs will have similar ones. We might want to
        # verify that all of them are indeed the same in the future.

        # Appending Header
        # In case of a superblob, extract the header from the parent release
        xml.append(
            release.getInnerHeaderXML(query, update_type, app.config["WHITELISTED_DOMAINS"],
                                      app.config["SPECIAL_FORCE_HOSTS"]))
        for response_blob in response_blobs:
            xml.extend(response_blob['response_release'].getInnerXML(
                response_blob['product_query'],
                response_blob['response_update_type'],
                app.config["WHITELISTED_DOMAINS"],
                app.config["SPECIAL_FORCE_HOSTS"]))
        # Appending Footer
        # In case of a superblob, extract the footer from the parent release
        xml.append(
            release.getInnerFooterXML(query, update_type, app.config["WHITELISTED_DOMAINS"],
                                      app.config["SPECIAL_FORCE_HOSTS"]))
        xml.append(release.getFooterXML())

        # ensure valid xml by using the right entity for ampersand
        xml = re.sub('&(?!amp;)', '&amp;', '\n'.join(xml))
    else:
        xml = ['<?xml version="1.0"?>']
        xml.append('<updates>')
        xml.append('</updates>')
        xml = "\n".join(xml)

    # Bug 1517743 - remove newlines and 4 space indents
    if squash_response:
        xml = xml.replace('\n', '').replace('    ', '')

    LOG.debug("Sending XML: %s", xml)
    response = make_response(xml)
    response.headers["Cache-Control"] = app.cacheControl
    response.mimetype = "text/xml"
    return response
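# extract_query_version is referenced above but not shown here. A purely
# illustrative sketch of what such a helper could look like, assuming the
# version is the integer path segment right after /update/ in the request URL;
# the regex and the 0 fallback are assumptions, not the real implementation.
import re
from urllib.parse import urlparse


def extract_query_version(request_url):
    """Hypothetical: pull N out of paths like /update/3/Firefox/..., else 0."""
    match = re.match(r"^/update/(\d+)/", urlparse(request_url).path)
    return int(match.group(1)) if match else 0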
def get_update_blob(**url):
    url['queryVersion'] = extract_query_version(request.url)
    # Underlying code depends on osVersion being set. Since this route only
    # exists to support ancient queries, and all newer versions have osVersion
    # in them, it's easier to set this here than make all of the underlying
    # code support queries without it.
    if url['queryVersion'] == 1:
        url['osVersion'] = ''

    query = getQueryFromURL(url)
    LOG.debug("Got query: %s", query)
    release, update_type = AUS.evaluateRules(query)

    # passing {},None returns empty xml
    if release:
        response_products = release.getResponseProducts()
        response_blobs = []
        response_blob_names = release.getResponseBlobs()
        if response_products:
            # if we have a SuperBlob of gmp, we process the response products and
            # concatenate their inner XMLs
            for product in response_products:
                product_query = query.copy()
                product_query["product"] = product
                response_release, response_update_type = AUS.evaluateRules(product_query)
                if not response_release:
                    continue
                response_blobs.append({
                    'product_query': product_query,
                    'response_release': response_release,
                    'response_update_type': response_update_type
                })
        elif response_blob_names:
            for blob_name in response_blob_names:
                # if we have a SuperBlob of systemaddons, we process the response products and
                # concatenate their inner XMLs
                product_query = query.copy()
                product = dbo.releases.getReleases(name=blob_name, limit=1)[0]['product']
                product_query["product"] = product
                response_release = dbo.releases.getReleaseBlob(name=blob_name)
                if not response_release:
                    LOG.warning("No release found with name: %s", blob_name)
                    continue
                response_blobs.append({
                    'product_query': product_query,
                    'response_release': response_release,
                    'response_update_type': update_type
                })
        else:
            response_blobs.append({
                'product_query': query,
                'response_release': release,
                'response_update_type': update_type
            })

        # getHeaderXML() returns the outermost header for an update, which is
        # the same for all release types
        xml = release.getHeaderXML()
        # we assume that all blobs will have similar ones. We might want to
        # verify that all of them are indeed the same in the future.

        # Appending Header
        # In case of a superblob, extract the header from the parent release
        xml.append(
            release.getInnerHeaderXML(query, update_type, app.config["WHITELISTED_DOMAINS"],
                                      app.config["SPECIAL_FORCE_HOSTS"]))
        for response_blob in response_blobs:
            xml.extend(response_blob['response_release'].getInnerXML(
                response_blob['product_query'],
                response_blob['response_update_type'],
                app.config["WHITELISTED_DOMAINS"],
                app.config["SPECIAL_FORCE_HOSTS"]))
        # Appending Footer
        # In case of a superblob, extract the footer from the parent release
        xml.append(
            release.getInnerFooterXML(query, update_type, app.config["WHITELISTED_DOMAINS"],
                                      app.config["SPECIAL_FORCE_HOSTS"]))
        xml.append(release.getFooterXML())

        # ensure valid xml by using the right entity for ampersand
        xml = re.sub('&(?!amp;)', '&amp;', '\n'.join(xml))
    else:
        xml = ['<?xml version="1.0"?>']
        xml.append('<updates>')
        xml.append('</updates>')
        xml = "\n".join(xml)
    LOG.debug("Sending XML: %s", xml)
    response = make_response(xml)
    response.headers["Cache-Control"] = app.cacheControl
    response.mimetype = "text/xml"
    return response
def get_update_blob(transaction, **url):
    url["queryVersion"] = extract_query_version(request.url)
    # Underlying code depends on osVersion being set. Since this route only
    # exists to support ancient queries, and all newer versions have osVersion
    # in them, it's easier to set this here than make all of the underlying
    # code support queries without it.
    if url["queryVersion"] == 1:
        url["osVersion"] = ""

    # Bug 1517743 - two Firefox nightlies can't parse update.xml when it contains the usual newlines or indentations
    squash_response = False

    query = getQueryFromURL(url)
    LOG.debug("Got query: %s", query)
    release, update_type = AUS.evaluateRules(query, transaction=transaction)

    # passing {},None returns empty xml
    if release:
        response_products = release.getResponseProducts()
        response_blobs = []
        response_blob_names = release.getResponseBlobs()
        if response_products:
            # if we have a SuperBlob of gmp, we process the response products and
            # concatenate their inner XMLs
            for product in response_products:
                product_query = query.copy()
                product_query["product"] = product
                response_release, response_update_type = AUS.evaluateRules(product_query, transaction=transaction)
                if not response_release:
                    continue
                response_blobs.append(
                    {"product_query": product_query, "response_release": response_release, "response_update_type": response_update_type}
                )
        elif response_blob_names:
            for blob_name in response_blob_names:
                # if we have a SuperBlob of systemaddons, we process the response products and
                # concatenate their inner XMLs
                product_query = query.copy()
                product = dbo.releases.getReleases(name=blob_name, limit=1, transaction=transaction)[0]["product"]
                product_query["product"] = product
                response_release = dbo.releases.getReleaseBlob(name=blob_name, transaction=transaction)
                if not response_release:
                    LOG.warning("No release found with name: %s", blob_name)
                    continue
                response_blobs.append(
                    {"product_query": product_query, "response_release": response_release, "response_update_type": update_type}
                )
        else:
            response_blobs.append({"product_query": query, "response_release": release, "response_update_type": update_type})

        # Bug 1517743 - we want a cheap test because this will be run on each request
        if release["name"] == "Firefox-mozilla-central-nightly-latest" and query["buildID"] in ("20190103220533", "20190104093221"):
            squash_response = True
            LOG.debug("Busted nightly detected, will squash xml response")

        # getHeaderXML() returns the outermost header for an update, which is
        # the same for all release types
        xml = release.getHeaderXML()
        # we assume that all blobs will have similar ones. We might want to
        # verify that all of them are indeed the same in the future.

        # Appending Header
        # In case of a superblob, extract the header from the parent release
        xml.append(release.getInnerHeaderXML(query, update_type, app.config["WHITELISTED_DOMAINS"], app.config["SPECIAL_FORCE_HOSTS"]))
        for response_blob in response_blobs:
            xml.extend(
                response_blob["response_release"].getInnerXML(
                    response_blob["product_query"],
                    response_blob["response_update_type"],
                    app.config["WHITELISTED_DOMAINS"],
                    app.config["SPECIAL_FORCE_HOSTS"],
                )
            )
        # Appending Footer
        # In case of a superblob, extract the footer from the parent release
        xml.append(release.getInnerFooterXML(query, update_type, app.config["WHITELISTED_DOMAINS"], app.config["SPECIAL_FORCE_HOSTS"]))
        xml.append(release.getFooterXML())

        # ensure valid xml by using the right entity for ampersand
        xml = re.sub("&(?!amp;)", "&amp;", "\n".join(xml))
    else:
        xml = ['<?xml version="1.0"?>']
        xml.append("<updates>")
        xml.append("</updates>")
        xml = "\n".join(xml)

    # Bug 1517743 - remove newlines and 4 space indents
    if squash_response:
        xml = xml.replace("\n", "").replace("    ", "")

    LOG.debug("Sending XML: %s", xml)
    response = make_response(xml)
    response.headers["Cache-Control"] = app.cacheControl
    response.mimetype = "text/xml"
    return response
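# The two string transforms at the end of the function are easy to sanity
# check in isolation. A standalone sketch of what they do to a fragment of
# update XML; the sample fragment is invented for illustration, real responses
# come from the release blobs.
import re

fragment = '<updates>\n    <update URL="https://example.com/a?x=1&y=2&amp;z=3"/>\n</updates>'

# bare ampersands become &amp;; ones already part of &amp; are left untouched
escaped = re.sub("&(?!amp;)", "&amp;", fragment)
assert '?x=1&amp;y=2&amp;z=3' in escaped

# Bug 1517743 squash: drop newlines and 4-space indents, but keep single spaces
# so attributes inside a tag stay separated
squashed = escaped.replace("\n", "").replace("    ", "")
assert squashed == '<updates><update URL="https://example.com/a?x=1&amp;y=2&amp;z=3"/></updates>'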