def get_overall_result(data, whitelist=[], verbose=False):
    """
    Get human-readable representation of the result; partitioned by ami

    returns a tuple of an overall result and a list of tuples
    overall_result, [(ami_result, ami_log), ...]
    """
    ret = RESULT_PASSED
    log = []
    if not data:
        return ret, log
    agg_data = aggregate.nested(data, 'region', 'arch', 'itype', 'ami',
                                'cloudhwname')
    for region in agg_data:
        for arch in agg_data[region]:
            for itype in agg_data[region][arch]:
                for ami in agg_data[region][arch][itype]:
                    sub_result, sub_log = get_ami_result(
                        agg_data[region][arch][itype][ami], whitelist, verbose)
                    # the first non-passed ami result becomes the overall result
                    if sub_result != RESULT_PASSED and ret == RESULT_PASSED:
                        ret = sub_result
                    ami_header = '%s %s %s %s: %s' % (region, arch, itype, ami,
                                                      sub_result)
                    sub_log.insert(0, ami_header)
                    sub_log.insert(0, '-' * len(ami_header))
                    sub_log.insert(0, '')
                    log.append((sub_result, sub_log))
    return ret, log
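
# A minimal usage sketch (not part of the original module), assuming `data`
# is the same list of test-result records the function above consumes; the
# whitelist entry below is a made-up example value.
def _print_overall_report(data):
    overall, ami_logs = get_overall_result(data, whitelist=['example_stage'])
    print('overall result: %s' % overall)
    for ami_result, ami_log in ami_logs:
        # each ami_log starts with a blank line, a dashed rule and the header
        print('\n'.join(ami_log))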
def process_ami_record_debug(ami_key, ami_data, whitelist=[], user=None,
                             password=None, url=DEFAULT_URL,
                             component=DEFAULT_COMPONENT,
                             bugzilla_product=DEFAULT_PRODUCT, verbose=False):
    '''debug variant of process_ami_record: log the per-hwp comments and the
    resulting status instead of creating a Bugzilla bug'''
    summary = " ".join(ami_key)
    ami = ami_key[-1]
    logger.debug('*** Summary for bug: %s', summary)
    logger.debug('*** Ami data: %s', ami_data)
    bug = 'not created'
    ami_result = RESULT_PASSED
    data = aggregate.nested(ami_data, 'cloudhwname')
    for hwp in data:
        sub_result, sub_log = get_hwp_result(data[hwp], whitelist, verbose)
        if sub_result not in [RESULT_PASSED, RESULT_SKIP] and \
                ami_result == RESULT_PASSED:
            ami_result = sub_result
        logger.debug('.....adding comment: # %s: %s\n%s' %
                     (hwp, sub_result, '\n'.join(sub_log)))
    logger.debug('.....setting status: %s',
                 ami_result == RESULT_PASSED and 'VERIFIED' or 'ON_QA')
    return bug, ami, ami_result
def process_ami_record(ami_key, ami_data, whitelist=[], user=None,
                       password=None, url=DEFAULT_URL,
                       component=DEFAULT_COMPONENT,
                       bugzilla_product=DEFAULT_PRODUCT, verbose=False):
    '''process one ami record, creating a bug with a comment per hwp'''
    connection = connect(url, user, password)
    summary = " ".join(ami_key)
    region, platform, product, version, arch, itype, ami = ami_key
    bug = create_bug(connection, summary, version, arch, component,
                     bugzilla_product)
    create_bug_log_attachment(connection, bug, ami, ami_data)
    ami_result = RESULT_PASSED
    data = aggregate.nested(ami_data, 'cloudhwname')
    for hwp in data:
        sub_result, sub_log = get_hwp_result(data[hwp], whitelist, verbose)
        if sub_result not in [RESULT_PASSED, RESULT_SKIP] and \
                ami_result == RESULT_PASSED:
            ami_result = sub_result
        # one comment per hardware profile, regardless of its result
        bug.addcomment('# %s: %s\n%s' % (hwp, sub_result, '\n'.join(sub_log)))
    bug.setstatus(ami_result == RESULT_PASSED and 'VERIFIED' or 'ON_QA')
    return bug.bug_id, ami, ami_result
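
# Hypothetical single-record example (not in the original module): it shows
# the seven-field ami_key shape that process_ami_record() unpacks.  All
# values, the credentials and the ami_data argument are placeholders.
def _example_process_single_record(ami_data):
    example_key = ('us-east-1', 'HVM', 'RHEL', '6.5', 'x86_64',
                   'hourly', 'ami-00000000')
    return process_ami_record(example_key, ami_data,
                              user='someone@example.com',
                              password='not-a-real-password')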
def main(config, istream, ostream, user=None, password=None, url=DEFAULT_URL,
         component=DEFAULT_COMPONENT, product=DEFAULT_PRODUCT, verbose=False,
         pool_size=128, debug_mode=False):
    user, password = bugzilla_credentials(config)
    logger.debug('got credentials: %s, %s', user, password)
    statuses = []
    data = load_yaml(istream)
    agg_data = aggregate.nested(data, 'region', 'version', 'arch', 'itype',
                                'ami', 'cloudhwname')
    for region in agg_data:
        logger.debug(region)
        for version in agg_data[region]:
            logger.debug(version)
            for arch in agg_data[region][version]:
                logger.debug(arch)
                for itype in agg_data[region][version][arch]:
                    logger.debug(itype)
                    for ami in agg_data[region][version][arch][itype]:
                        logger.debug(ami)
                        # one work item per ami; processed by the pool below
                        statuses.append(
                            (ami, version, arch, region, itype, user, password,
                             agg_data[region][version][arch][itype][ami],
                             url, component, product))
    pool = Pool(size=pool_size)
    if debug_mode:
        # future enhancement: write per-ami output to the file
        statuses = pool.map(lambda args: process_ami_record_debug(*args),
                            statuses)
    else:
        statuses = pool.map(lambda args: process_ami_record(*args), statuses)
    for bug, ami, status in statuses:
        save_result(ostream, dict(bug=bug, id=ami, status=status))
    # each status tuple is (bug, ami, result); exit 0 only if all passed
    return all([status == RESULT_PASSED for _, _, status in statuses]) and 0 or 1
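
# Hypothetical command-line wrapper (not part of the original module): a
# sketch of how main() could be wired up.  The option names, defaults and
# the config path are illustrative assumptions, not the real CLI.
if __name__ == '__main__':
    import argparse
    import sys

    parser = argparse.ArgumentParser(
        description='report AMI validation results to Bugzilla')
    parser.add_argument('--config', default='/etc/validation.yaml',
                        help='config file with bugzilla credentials (assumed path)')
    parser.add_argument('--input', default='-',
                        help='YAML result stream, "-" for stdin')
    parser.add_argument('--output', default='-',
                        help='result output stream, "-" for stdout')
    parser.add_argument('--debug-mode', action='store_true',
                        help='log what would be filed instead of creating bugs')
    args = parser.parse_args()
    istream = sys.stdin if args.input == '-' else open(args.input)
    ostream = sys.stdout if args.output == '-' else open(args.output, 'w')
    sys.exit(main(args.config, istream, ostream,
                  debug_mode=args.debug_mode))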