Ejemplo n.º 1
0
def main():
    """Read XUnit test results from the current working directory and
    upload them to an Azure DevOps test run using a pool of daemon
    worker threads.

    Exits when every queued batch has been processed by the workers.
    """
    collection_uri, team_project, test_run_id, access_token = process_args()

    # Python 3 print() calls — the original used Python 2 print
    # statements, which are a SyntaxError on Python 3.
    print("Got args", collection_uri, team_project, test_run_id, access_token)

    worker_count = 10
    q = Queue()

    print("Main thread starting workers")

    # Daemon threads: a hung upload cannot keep the process alive
    # after the main thread finishes.
    for i in range(worker_count):
        worker = UploadWorker(q, i, collection_uri, team_project, test_run_id, access_token)
        worker.daemon = True
        worker.start()

    print("Beginning reading of test results.")

    all_results = read_results(os.getcwd())
    batch_size = 1000
    batches = batch(all_results, batch_size)

    print("Uploading results in batches of size {}".format(batch_size))

    for b in batches:
        q.put(b)

    print("Main thread finished queueing batches")

    # Block until each queued batch has been marked done by a worker.
    q.join()

    print("Main thread exiting")
Ejemplo n.º 2
0
def main():
    """Upload test results to an Azure DevOps test run via a pool of
    daemon worker threads.

    If a worker reported a failure (or an unhandled exception occurs),
    fall back to counting the local XUnit XML: exit 0 if results exist
    with no failures, otherwise exit with a distinctive non-zero code
    (1337 for worker failure, 1138 for an unhandled exception).
    """
    global workerFailed, workerFailedLock

    # Configure logging BEFORE the try block so `log` is always bound
    # when the except handler runs. Previously, a failure raised before
    # `log = logging.getLogger(...)` executed would trigger a NameError
    # inside the handler, masking the original exception.
    logging.basicConfig(
        format='%(asctime)s: %(levelname)s: %(thread)d: %(module)s(%(lineno)d): %(funcName)s: %(message)s',
        level=logging.INFO,
        handlers=[
            logging.StreamHandler()
        ]
    )
    log = logging.getLogger(__name__)

    # In case the user puts the results in HELIX_WORKITEM_UPLOAD_ROOT
    # for upload, check there too. Built once instead of repeating the
    # same list literal at each of its three use sites.
    result_dirs = [os.getcwd(), get_env("HELIX_WORKITEM_UPLOAD_ROOT")]

    try:
        collection_uri, team_project, test_run_id, access_token = process_args()

        worker_count = 10
        q = Queue()

        log.info("Main thread starting {0} workers".format(worker_count))

        # Daemon threads: a hung upload cannot keep the process alive.
        for i in range(worker_count):
            worker = UploadWorker(q, i, collection_uri, team_project, test_run_id, access_token)
            worker.daemon = True
            worker.start()

        log.info("Beginning to read test results...")

        all_results = read_results(result_dirs)

        batch_size = 1000
        batches = batch(all_results, batch_size)

        log.info("Uploading results in batches of size {}".format(batch_size))

        for b in batches:
            q.put(b)

        log.info("Main thread finished queueing batches")

        q.join()

        log.info("Main thread exiting")

        # Workers set workerFailed under this lock; propagate via exit code.
        # Note: sys.exit raises SystemExit, which is not an Exception
        # subclass, so it is not swallowed by the handler below.
        with workerFailedLock:
            if workerFailed:
                if check_passed_to_workaround_ado_api_failure(result_dirs):
                    sys.exit(0)
                else:
                    sys.exit(1337)
    except Exception as anything:
        log.warning("Unhandled exception trying to report to ADO: {}".format(str(anything)))
        log.warning("We'll attempt to count the XUnit results and if XML is present and no failures, return 0")
        if check_passed_to_workaround_ado_api_failure(result_dirs):
            sys.exit(0)
        else:
            sys.exit(1138)
Ejemplo n.º 3
0
def main():
    """Upload test results to an Azure DevOps test run via a pool of
    daemon worker threads, exiting with code 1337 if any worker
    reported a failure.
    """
    global workerFailed, workerFailedLock

    logging.basicConfig(
        format='%(asctime)s: %(levelname)s: %(thread)d: %(module)s(%(lineno)d): %(funcName)s: %(message)s',
        level=logging.INFO,
        handlers=[logging.StreamHandler()])
    log = logging.getLogger(__name__)

    collection_uri, team_project, test_run_id, access_token = process_args()

    worker_count = 10
    work_queue = Queue()

    log.info("Main thread starting {0} workers".format(worker_count))

    # Daemon threads: a hung upload cannot keep the process alive.
    for worker_id in range(worker_count):
        uploader = UploadWorker(work_queue, worker_id, collection_uri,
                                team_project, test_run_id, access_token)
        uploader.daemon = True
        uploader.start()

    log.info("Beginning reading of test results.")

    # In case the user puts the results in HELIX_WORKITEM_UPLOAD_ROOT for upload, check there too.
    search_dirs = [os.getcwd(), get_env("HELIX_WORKITEM_UPLOAD_ROOT")]
    all_results = read_results(search_dirs)

    batch_size = 1000
    batches = batch(all_results, batch_size)

    log.info("Uploading results in batches of size {}".format(batch_size))

    for result_batch in batches:
        work_queue.put(result_batch)

    log.info("Main thread finished queueing batches")

    # Block until every queued batch has been marked done by a worker.
    work_queue.join()

    log.info("Main thread exiting")

    # Workers record failures under this lock; propagate via exit code.
    with workerFailedLock:
        if workerFailed:
            sys.exit(1337)
Ejemplo n.º 4
0
def main():
    """Read local test results and report them to an Azure DevOps test
    run through the default synchronous reporter.
    """
    logging.basicConfig(
        format=
        '%(asctime)s: %(levelname)s: %(thread)d: %(module)s(%(lineno)d): %(funcName)s: %(message)s',
        level=logging.INFO,
        handlers=[logging.StreamHandler()])
    log = logging.getLogger(__name__)

    collection_uri, team_project, test_run_id, access_token = process_args()

    log.info("Beginning reading of test results.")

    # In case the user puts the results in HELIX_WORKITEM_UPLOAD_ROOT for upload, check there too.
    search_dirs = [
        os.getcwd(),
        get_env("HELIX_WORKITEM_UPLOAD_ROOT"),
    ]
    all_results = read_results(search_dirs)

    reporting_params = AzureDevOpsReportingParameters(collection_uri,
                                                      team_project,
                                                      test_run_id,
                                                      access_token)
    reporter = DefaultTestReporter(reporting_params)
    reporter.report_results(all_results)
Ejemplo n.º 5
0
def main():
    """Read test results from the current working directory and upload
    them to an Azure DevOps test run via a pool of daemon worker
    threads.
    """
    logging.basicConfig(
        format=
        '%(asctime)s: %(levelname)s: %(thread)d: %(module)s(%(lineno)d): %(funcName)s: %(message)s',
        level=logging.INFO,
        handlers=[logging.StreamHandler()])
    log = logging.getLogger(__name__)

    collection_uri, team_project, test_run_id, access_token = process_args()

    worker_count = 10
    work_queue = Queue()

    log.info("Main thread starting {0} workers".format(worker_count))

    # Daemon threads: a hung upload cannot keep the process alive.
    for worker_id in range(worker_count):
        uploader = UploadWorker(work_queue, worker_id, collection_uri,
                                team_project, test_run_id, access_token)
        uploader.daemon = True
        uploader.start()

    log.info("Beginning reading of test results.")

    all_results = read_results(os.getcwd())
    batch_size = 1000
    batches = batch(all_results, batch_size)

    log.info("Uploading results in batches of size {}".format(batch_size))

    for result_batch in batches:
        work_queue.put(result_batch)

    log.info("Main thread finished queueing batches")

    # Block until every queued batch has been marked done by a worker.
    work_queue.join()

    log.info("Main thread exiting")