Example #1
def prepare_linux_for_perf():
    settings = settings_from_env()
    correlation_dir = fix_path(settings.correlation_payload_dir)
    dotnet_cli_dir = os.path.join(correlation_dir, "perf.dotnetcli")
    dotnet_cli = os.path.join(dotnet_cli_dir, "dotnet")
    # if local dotnet cli is already installed, skip
    if not os.path.exists(dotnet_cli):
        # install dotnet cli locally
        log.info(
            'Local dotnet cli install not found, launching the installation script'
        )
        dotnet_installer = os.path.join(correlation_dir, "RunnerScripts",
                                        "xunitrunner-perf",
                                        "ubuntu-dotnet-local-install.sh")
        try:
            log.info('Setting dotnet cli installation script at ' +
                     dotnet_installer + ' as executable')
            helix.proc.run_and_log_output(
                ("chmod 777 " + dotnet_installer).split(" "))
            log.info('Running script ' + dotnet_installer)
            helix.proc.run_and_log_output(
                (dotnet_installer + " -d " +
                 dotnet_cli_dir + " -v " + os.path.join(
                     correlation_dir, "RunnerScripts", "xunitrunner-perf",
                     "DotNetCliVersion.txt")).split(" "))
        except Exception:
            log.error("Exception when running the installation script: " +
                      traceback.format_exc())
    else:
        log.info('Local dotnet cli install found')
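Shelling out to "chmod 777" works, but it grants wider permissions than the script needs and spawns an extra process. A minimal sketch of the same step using only the standard library (an alternative, not what the Helix runner actually does):

import os
import stat

def make_executable(path):
    # Add the execute bits to the file's existing mode,
    # mirroring "chmod +x" without spawning a process.
    mode = os.stat(path).st_mode
    os.chmod(path, mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)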
def post_process_perf_results():
    settings = settings_from_env()
    perf_settings_json = ""
    perf_settings_json_file = os.path.join(
        fix_path(settings.correlation_payload_dir), "RunnerScripts", "xunitrunner-perf", "xunitrunner-perf.json"
    )
    with open(perf_settings_json_file) as perf_settings_json:
        # read the perf-specific settings
        perf_settings_json = json.loads(perf_settings_json.read())

    json_file = os.path.join(
        settings.workitem_working_dir, perf_settings_json["TestProduct"] + "-" + settings.workitem_id + ".json"
    )
    # Python 2 idiom: strip any non-ASCII characters from the file name
    json_file = json_file.encode("ascii", "ignore")
    csv_file = os.path.join(settings.workitem_working_dir, "execution", "testResults.csv")
    json_cmd = (
        sys.executable
        + " "
        + os.path.join(
            fix_path(settings.correlation_payload_dir), "RunnerScripts", "xunitrunner-perf", "csvjsonconvertor.py"
        )
        + " --jobName "
        + settings.correlation_id
        + " --csvFile "
        + csv_file
        + " --jsonFile "
        + json_file
        + " --perfSettingsJson "
        + perf_settings_json_file
    )
    try:
        return_code = helix.proc.run_and_log_output(
            json_cmd.split(" "), cwd=os.path.join(fix_path(settings.workitem_working_dir), "execution"), env=None
        )
    except Exception:
        log.error("Exception when running the conversion script: " + traceback.format_exc())

    log.info("Uploading {}".format(csv_file))
    result_url = _upload_file(csv_file, settings)
    log.info("Location {}".format(result_url))

    # Upload json with the rest of the results
    log.info("Uploading {} to results container".format(json_file))
    result_url = _upload_file(json_file, settings)
    log.info("Location {}".format(result_url))

    # create deep copy and set perf container keys

    perf_settings = copy.deepcopy(settings)
    perf_settings.output_uri = perf_settings_json["RootURI"]
    perf_settings.output_write_token = perf_settings_json["WriteToken"]
    perf_settings.output_read_token = perf_settings_json["ReadToken"]
    log.info("Uploaded {} to perf container".format(json_file))
    # Upload json to the perf specific container
    result_url = _upload_file(json_file, perf_settings)
    log.info("Location {}".format(result_url))
def post_process_perf_results(opts):
    csvs_dir = opts['--csvDir']
    if not os.path.exists(csvs_dir):
        log.error('Not able to find csvs directory {}'.format(csvs_dir))
        return -1

    settings = settings_from_env()
    perf_settings_json_file = os.path.join(
        fix_path(settings.correlation_payload_dir), 'RunnerScripts',
        'xunitrunner-perf', 'xunitrunner-perf.json')
    with open(perf_settings_json_file) as settings_file:
        # read the perf-specific settings
        perf_settings_json = json.load(settings_file)

    json_file = os.path.join(
        settings.workitem_working_dir, perf_settings_json['TestProduct'] +
        '-' + settings.workitem_id + '.json')
    json_file = json_file.encode('ascii', 'ignore')

    json_cmd = (sys.executable + ' ' +
                os.path.join(fix_path(settings.correlation_payload_dir),
                             'RunnerScripts', 'xunitrunner-perf',
                             'csvjsonconvertor.py') +
                ' --jobName ' + settings.correlation_id +
                ' --csvDir ' + csvs_dir +
                ' --jsonFile ' + json_file +
                ' --perfSettingsJson ' + perf_settings_json_file)
    try:
        return_code = helix.proc.run_and_log_output(
            json_cmd.split(' '),
            cwd=os.path.join(fix_path(settings.workitem_working_dir),
                             'execution'),
            env=None)
    except Exception:
        log.error("Exception when running the conversion script: " +
                  traceback.format_exc())

    for item in os.listdir(csvs_dir):
        if item.endswith('.csv'):
            csv_file = os.path.join(csvs_dir, item)
            log.info('Uploading {}'.format(csv_file))
            result_url = _upload_file(csv_file, settings)
            log.info('Location {}'.format(result_url))

    # Upload json with the rest of the results
    log.info('Uploading {} to results container'.format(json_file))
    result_url = _upload_file(json_file, settings)
    log.info('Location {}'.format(result_url))

    # create deep copy and set perf container keys

    perf_settings = copy.deepcopy(settings)
    perf_settings.output_uri = perf_settings_json['RootURI']
    perf_settings.output_write_token = perf_settings_json['WriteToken']
    perf_settings.output_read_token = perf_settings_json['ReadToken']
    log.info('Uploading {} to perf container'.format(json_file))
    # Upload json to the perf specific container
    result_url = _upload_file(json_file, perf_settings)
    log.info('Location {}'.format(result_url))
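One caveat with building json_cmd as a single string: json_cmd.split(' ') breaks as soon as any path contains a space. Since run_and_log_output already accepts an argument list, a sketch that builds the list directly (same flags and paths as above) avoids both the concatenation and the fragile split:

    convertor = os.path.join(fix_path(settings.correlation_payload_dir),
                             'RunnerScripts', 'xunitrunner-perf',
                             'csvjsonconvertor.py')
    json_args = [sys.executable, convertor,
                 '--jobName', settings.correlation_id,
                 '--csvDir', csvs_dir,
                 '--jsonFile', json_file,
                 '--perfSettingsJson', perf_settings_json_file]
    # Pass the list straight through; no string splitting required.
    return_code = helix.proc.run_and_log_output(
        json_args,
        cwd=os.path.join(fix_path(settings.workitem_working_dir), 'execution'),
        env=None)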
Example #6
    def _main(settings, optlist, args):
        """
        Usage::
            continuationrunner
                [--config config.json]
                [--setting name=value]
                --script
                [--args arg1 arg2...]
        """
        optdict = dict(optlist)
        log.info("BuildTools Helix Continuation Runner starting")

        if '--args' in optdict:
            script_arguments = optdict['--args']
            log.info("Script Arguments: " + script_arguments)

        if '--script' in optdict:
            script_to_execute = optdict['--script']
        else:
            log.error("Value for parameter '--script' is required")
            return -1

        if '--next_queue' in optdict:
            next_queue = optdict['--next_queue']
        else:
            log.error("Need a secondary queue id to continue execution.")
            return -1
        if '--next_payload_dir' in optdict:
            next_payload_dir = optdict['--next_payload_dir']
        else:
            log.error("Need a secondary payload to continue execution.")
            return -1

        unpack_dir = fix_path(settings.workitem_payload_dir)

        execution_args = [os.path.join(unpack_dir, script_to_execute)] + args

        return_code = helix.proc.run_and_log_output(execution_args,
                                                    cwd=unpack_dir,
                                                    env=None)

        if return_code == 0:
            # currently there's no use for it, but here's where we'd choose to send out XUnit results
            # if desired at some point.
            log.info(
                "First stage of execution succeeded. Sending a new work item to "
                + next_queue)
            log.info("Will include contents of " + next_payload_dir)

            settings = settings_from_env()
            # load Client-specific settings
            config_path = os.path.join(settings.config_root,
                                       "ClientSettings.json")
            with open(config_path) as config_file:
                settings.__dict__.update(json.load(config_file))
            service_bus_repository = ServiceBusRepository(
                settings.ServiceBusRoot, settings.QueueId,
                settings.LongPollTimeout, settings.SAS,
                settings.servicebus_retry_count,
                settings.servicebus_retry_delay)
            # For now, we'll use ScriptRunner for this step. Eventually we'll want to either combine functionality
            # of the two into scriptrunner.py, OR parameterize which script is used (for the 2+ re-queue scenario)
            call_runcontinuation = "/RunnerScripts/scriptrunner/scriptrunner.py --script RunContinuation"
            if is_windows():
                continuation_command = "%HELIX_PYTHONPATH% %HELIX_CORRELATION_PAYLOAD%" + call_runcontinuation + ".cmd"
            else:
                continuation_command = "$HELIX_PYTHONPATH% $HELIX_CORRELATION_PAYLOAD" + call_runcontinuation + ".sh"

            # Prep the follow-up work item ...
            new_work_item = HelixWorkItem(
                correlation_id=settings.correlation_id,
                work_item_friendly_name=settings.workitem_friendly_name +
                ".Execution",
                command=continuation_command,
                results_output_uri=settings.output_uri + "/continuation",
                results_output_write_token=settings.output_write_token,
                results_output_read_token=settings.output_read_token)

            # This may eventually cause trouble if zips with identical names are somehow included inside
            # other payload zips. Chained continuation will be OK as there will be a new results
            # directory to upload to for each leg.
            new_workitem_payload_name = settings.workitem_friendly_name + ".continuation.zip"
            secondary_zip_path = os.path.join(settings.workitem_working_dir,
                                              new_workitem_payload_name)

            zip_directory(secondary_zip_path, next_payload_dir)
            log.info("Zipped into " + secondary_zip_path)

            # Upload the payloads for the job
            upload_client = helix.azure_storage.BlobUploadClient(
                settings.output_uri, settings.output_write_token,
                settings.output_read_token)
            new_payload_uri = upload_client.upload(secondary_zip_path,
                                                   new_workitem_payload_name)
            new_work_item.WorkItemPayloadUris.append(new_payload_uri)

            # Current assumption: No need to reuse correlation payload, but bring supplemental (for scripts)
            # NOTE: We don't currently have a way to access the existing Uri, so reusing the payload from
            #       storage will involve plumbing that through or re-uploading it (can be huge)
            supplemental_payload_path = os.path.join(
                settings.work_root, settings.correlation_id, "work",
                "SupplementalPayload.zip")

            supplemental_payload_uri = upload_client.upload(
                supplemental_payload_path, "SupplementalPayload.zip")
            log.info("Uploaded " + secondary_zip_path + " to " +
                     new_payload_uri)
            log.info("Uploaded SupplementalPayload.zip to " +
                     supplemental_payload_uri)
            new_work_item.CorrelationPayloadUris.append(
                supplemental_payload_uri)

            if service_bus_repository.post_new_workitem(
                    queue_id=next_queue, work_item=new_work_item):
                log.info("Successfully queued new work item.")
            else:
                log.error("Failure to send to Service bus.")
                return -1

        else:
            log.error(
                "Got non-zero exit code for first stage of execution. Skipping further processing."
            )

        return return_code
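For context, _main expects optlist as a sequence of (option, value) pairs, since it is passed straight to dict(). A minimal sketch of an entry point that produces it with the standard getopt module (the actual wiring inside the Helix runner may differ):

import getopt
import sys

def main(argv=sys.argv[1:]):
    # Long options mirror the flags _main looks up in optdict.
    optlist, args = getopt.getopt(
        argv, '',
        ['config=', 'setting=', 'script=', 'args=',
         'next_queue=', 'next_payload_dir='])
    settings = settings_from_env()
    return _main(settings, optlist, args)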