def submit_job(self, batch_service_client: batch.BatchExtensionsClient,
               template: str, parameters: str):
        """
        Submits a Job against the batch service.

        Expands the job template with the given parameters, converts the
        resulting JSON into job parameters and adds the job. All failures
        are logged and re-raised to the caller.

        :param batch_service_client: The batch client to use.
        :type batch_service_client: `azure.batch.BatchExtensionsClient`
        :param template: The json description of the job
        :type template: str
        :param parameters: The job parameters of the job
        :type parameters: str
        :raises: any exception raised by the Batch service calls.
        """
        try:
            job_json = batch_service_client.job.expand_template(
                template, parameters)
            job_parameters = batch_service_client.job.jobparameter_from_json(
                job_json)
            batch_service_client.job.add(job_parameters)
        except batchmodels.batch_error.BatchErrorException as err:
            # A failed submission is an error, not an informational event;
            # use logger.error to match the sibling implementations.
            logger.error("Failed to submit job\n{}\n with params\n{}".format(
                template, parameters))
            traceback.print_exc()
            utils.print_batch_exception(err)
            raise
        except batch.errors.MissingParameterValue as mpv:
            logger.error(
                "Job {}, failed to submit, because of the error: {}".format(
                    self.raw_job_id, mpv))
            raise
        except Exception as exc:
            # Narrowed from a bare 'except:' so SystemExit/KeyboardInterrupt
            # pass through untouched; anything else is logged and re-raised.
            logger.error(
                "Job {}, failed to submit, because of the error: {}".format(
                    self.raw_job_id, exc))
            raise
    def delete_pool(self, batch_service_client: batch.BatchExtensionsClient):
        """
        Deletes the pool backing this job.

        Pools are shared between jobs, so this delete can race with another
        job's cleanup: "marked for deletion" and "does not exist" errors from
        the Batch service are expected and only logged as warnings. Any other
        Batch error is printed for diagnosis but not raised.

        :param batch_service_client: A Batch service client.
        :type batch_service_client: `azure.batch.BatchExtensionsClient`
        """
        logger.info("Deleting pool: {}.".format(self.pool_id))
        try:
            batch_service_client.pool.delete(self.pool_id)
        except batchmodels.batch_error.BatchErrorException as err:
            # Expected races with other jobs cleaning up the shared pool.
            if utils.expected_exception(
                    err, "The specified pool has been marked for deletion"):
                logger.warning(
                    "The specified pool [{}] has been marked for deletion.".
                    format(self.pool_id))
                return
            if utils.expected_exception(
                    err, "The specified pool does not exist"):
                logger.warning(
                    "The specified pool [{}] has been deleted.".format(
                        self.pool_id))
                return
            # Anything else is unexpected; dump the details.
            traceback.print_exc()
            utils.print_batch_exception(err)
    def submit_job(self, batch_service_client: batch.BatchExtensionsClient, template: str, parameters: str):
        """
        Submits a Job against the batch service.

        Expands the job template with the given parameters, converts the
        resulting JSON into job parameters and adds the job. All failures
        are logged and re-raised to the caller.

        :param batch_service_client: The batch client to use.
        :type batch_service_client: `azure.batch.BatchExtensionsClient`
        :param template: The json description of the job
        :type template: str
        :param parameters: The job parameters of the job
        :type parameters: str
        :raises: any exception raised by the Batch service calls.
        """
        try:
            job_json = batch_service_client.job.expand_template(
                template, parameters)
            job_parameters = batch_service_client.job.jobparameter_from_json(
                job_json)
            batch_service_client.job.add(job_parameters)
        except batchmodels.batch_error.BatchErrorException as err:
            logger.error(
                "Failed to submit job\n{}\n with params\n{}".format(
                    template, parameters))
            traceback.print_exc()
            # Pass the exception object itself, matching every other
            # print_batch_exception call site in this runner (a formatted
            # string hid the structured error details).
            utils.print_batch_exception(err)
            raise
        except batch.errors.MissingParameterValue as mpv:
            logger.error("Job {}, failed to submit, because of the error: {}".format(self.raw_job_id, mpv))
            raise
        except Exception as exc:
            # Narrowed from a bare 'except:' so SystemExit/KeyboardInterrupt
            # pass through untouched; anything else is logged and re-raised.
            logger.error("Job {}, failed to submit, because of the error: {}".format(self.raw_job_id, exc))
            raise
    def submit_pool(self, batch_service_client: batch.BatchExtensionsClient,
                    template: str):
        """
        Submits a batch pool based on the template.

        Loads the parameters file, resolves any keyVault placeholders,
        expands the pool template and adds the pool. An "already exists"
        error is expected (pools are shared) and logged as a warning.

        :param batch_service_client: The batch client used for making batch operations
        :type batch_service_client: `azure.batch.BatchExtensionsClient`
        :param template: The in memory version of the template used to create a the job.
        :type template: str
        """
        parameters = ctm.load_file(self.parameters_file)

        # Updates any placeholder parameter values with the values from
        # keyVault, if required.
        utils.update_params_with_values_from_keyvault(
            parameters, self.keyvault_client_with_url)
        pool_json = batch_service_client.pool.expand_template(
            template, parameters)
        # NOTE(review): the pool id is stamped on the template only after
        # expand_template has already run — confirm expansion does not
        # depend on the pool id being present.
        ctm.set_template_pool_id(template, self.pool_id)
        pool = batch_service_client.pool.poolparameter_from_json(pool_json)
        # Log the pool id, not the whole PoolParameter object, consistent
        # with the other submit_pool implementations.
        logger.info('Creating pool [{}]...'.format(self.pool_id))
        try:
            batch_service_client.pool.add(pool)
        except batchmodels.BatchErrorException as err:
            if utils.expected_exception(err,
                                        "The specified pool already exists"):
                logger.warning("Pool [{}] is already being created.".format(
                    self.pool_id))
            else:
                logger.info("Create pool error: {}".format(err))
                traceback.print_exc()
                utils.print_batch_exception(err)
    def delete_pool(self, batch_service_client: batch.BatchExtensionsClient):
        """
        Deletes this job's pool, tolerating races with other jobs.

        Pools are a shared resource: by the time this cleanup runs, the pool
        may already be gone or marked for deletion by another job. Those
        Batch errors are expected and downgraded to warnings; anything else
        is printed for diagnosis.

        :param batch_service_client: A Batch service client.
        :type batch_service_client: `azure.batch.BatchExtensionsClient`
        """
        logger.info("Deleting pool: {}.".format(self.pool_id))
        try:
            batch_service_client.pool.delete(self.pool_id)
        except batchmodels.batch_error.BatchErrorException as pool_err:
            # Map each expected service-error fragment to the warning we log.
            expected = {
                "The specified pool has been marked for deletion":
                    "The specified pool [{}] has been marked for deletion.",
                "The specified pool does not exist":
                    "The specified pool [{}] has been deleted.",
            }
            for fragment, message in expected.items():
                if utils.expected_exception(pool_err, fragment):
                    logger.warning(message.format(self.pool_id))
                    break
            else:
                traceback.print_exc()
                utils.print_batch_exception(pool_err)
    def submit_pool(self, batch_service_client: batch.BatchExtensionsClient, template: str):
        """
        Submits a batch pool based on the template.

        Expands the pool template with the loaded parameters file, stamps
        the pool id on the template and adds the pool. An "already exists"
        error is expected (pools are shared) and logged as a warning.

        :param batch_service_client: The batch client used for making batch operations
        :type batch_service_client: `azure.batch.BatchExtensionsClient`
        :param template: The in memory version of the template used to create a the job.
        :type template: str
        """
        params = ctm.load_file(self.parameters_file)
        expanded_json = batch_service_client.pool.expand_template(template, params)
        ctm.set_template_pool_id(template, self.pool_id)
        new_pool = batch_service_client.pool.poolparameter_from_json(expanded_json)
        logger.info('Creating pool [{}]...'.format(self.pool_id))
        try:
            batch_service_client.pool.add(new_pool)
        except batchmodels.batch_error.BatchErrorException as err:
            if not utils.expected_exception(err, "The specified pool already exists"):
                # Unexpected failure: dump the full error for diagnosis.
                logger.info("Create pool error: {}".format(err))
                traceback.print_exc()
                utils.print_batch_exception(err)
            else:
                logger.warning("Pool [{}] is already being created.".format(
                    self.pool_id))
def main():
    """
    Entry point for the template test runner.

    Parses arguments, builds the blob and batch clients, loads the test
    config, constructs one JobManager per configured test, runs them, and
    always (via ``finally``) retries failed jobs, deletes job resources and
    pools, and reports results.
    """
    args = runner_arguments()
    logger.account_info(args)
    start_time = datetime.datetime.now().replace(microsecond=0)
    logger.info('Template runner start time: [{}]'.format(start_time))

    # Create the blob client, for use in obtaining references to
    # blob storage containers and uploading files to containers.
    blob_client = azureblob.BlockBlobService(
        account_name=args.StorageAccountName,
        account_key=args.StorageAccountKey)

    # Create a batch account client using AAD.
    batch_client = create_batch_client(args)

    # Clean up any storage container that is older than 7 days old.
    utils.cleanup_old_resources(blob_client)

    try:
        images_refs = []  # type: List[utils.ImageReference]
        with open(args.TestConfig) as f:
            template = json.load(f)

            # One JobManager per test entry; 'applicationLicense' is optional.
            for jobSetting in template["tests"]:
                application_licenses = None
                if 'applicationLicense' in jobSetting:
                    application_licenses = jobSetting["applicationLicense"]

                _job_managers.append(job_manager.JobManager(
                    jobSetting["template"],
                    jobSetting["poolTemplate"],
                    jobSetting["parameters"],
                    jobSetting["expectedOutput"],
                    application_licenses))

            for image in template["images"]:
                images_refs.append(utils.ImageReference(image["osType"], image["offer"], image["version"]))

        run_job_manager_tests(blob_client, batch_client, images_refs, args.VMImageURL, args.VMImageType)

    except batchmodels.batch_error.BatchErrorException as err:
        traceback.print_exc()
        utils.print_batch_exception(err)
        raise
    finally:
        # Delete all the jobs and containers needed for the job.
        # Retries any jobs that failed.
        utils.execute_parallel_jobmanagers("retry", _job_managers, batch_client, blob_client, _timeout / 2)
        utils.execute_parallel_jobmanagers("delete_resources", _job_managers, batch_client, blob_client)
        utils.execute_parallel_jobmanagers("delete_pool", _job_managers, batch_client)
        end_time = datetime.datetime.now().replace(microsecond=0)
        logger.print_result(_job_managers)
        logger.export_result(_job_managers, (end_time - start_time))
    # NOTE: these lines only run when no exception escaped the try/finally
    # above; end_time is assigned inside the finally block.
    logger.info('Sample end: {}'.format(end_time))
    logger.info('Elapsed time: {}'.format(end_time - start_time))
    def delete_resources(self,
                         batch_service_client: batch.BatchExtensionsClient,
                         blob_client: azureblob.BlockBlobService,
                         force_delete: bool = None):
        """
        Deletes the job and the storage containers used for the job.

        When the job did not finish cleanly (and force_delete is not set),
        the containers are kept so their contents can be used for debugging.

        :param batch_service_client: A Batch service client.
        :type batch_service_client: `azure.batch.BatchExtensionsClient`
        :param blob_client: A blob service client.
        :type blob_client: `azure.storage.blob.BlockBlobService`
        :param force_delete: Forces the deletion of all the containers related this job.
        :type force_delete: bool
        """
        # Delete the job; a missing job just means it was never created.
        try:
            batch_service_client.job.delete(self.job_id)
        except batchmodels.batch_error.BatchErrorException as job_err:
            if utils.expected_exception(job_err,
                                        "The specified job does not exist"):
                logger.error("The specified Job [{}] was not created.".format(
                    self.job_id))
            else:
                traceback.print_exc()
                utils.print_batch_exception(job_err)

        cleanup_states = (utils.JobState.COMPLETE,
                          utils.JobState.POOL_FAILED,
                          utils.JobState.NOT_STARTED)
        if force_delete or self.status.job_state in cleanup_states:
            for container in (self.storage_info.input_container,
                              self.storage_info.output_container):
                logger.info('Deleting container [{}]...'.format(container))
                blob_client.delete_container(container)
        else:
            # Keep the output container around for debugging the failure.
            logger.info("Did not delete the output container")
            logger.info(
                "Job: {}. did not complete successfully, Container {} was not deleted."
                .format(self.job_id, self.storage_info.output_container))
# Example #9
    def submit_pool(self, batch_service_client: batch.BatchExtensionsClient, template: str, UseLowPriorityVMs: bool):
        """
        Submits a batch pool based on the template.

        Optionally rewrites the template/parameters to use only low-priority
        VMs, resolves keyVault placeholders, expands the template and adds
        the pool with jittered retries. An "already exists" error is
        expected (pools are shared) and logged as a warning.

        :param batch_service_client: The batch client used for making batch operations
        :type batch_service_client: `azure.batch.BatchExtensionsClient`
        :param template: The in memory version of the template used to create a the job.
        :type template: str
        :param UseLowPriorityVMs: True: Use low priority nodes, False: will use the type in the template file
        :type UseLowPriorityVMs: bool
        """
        parameters = ctm.load_file(self.parameters_file)
        if UseLowPriorityVMs:  # idiomatic truth test instead of '== True'
            # Swap the dedicated VM count to low-priority and zero out the
            # dedicated count (to reduce testing COGS).
            ctm.set_low_priority_vm_count(template, str(self.min_required_vms))

            # Some tests set the dedicated VM count in the test parameters
            # file and some use the default in the template, so override
            # both to 0.
            ctm.set_dedicated_vm_count(parameters, 0)
            ctm.set_dedicated_vm_count(template, 0)

        # Updates any placeholder parameter values with the values from
        # keyVault, if required.
        utils.update_params_with_values_from_keyvault(
            parameters, self.keyvault_client_with_url)
        pool_json = batch_service_client.pool.expand_template(
            template, parameters)
        ctm.set_template_pool_id(template, self.pool_id)
        pool = batch_service_client.pool.poolparameter_from_json(pool_json)
        # Log the pool id, not the whole PoolParameter object, consistent
        # with the other submit_pool implementations.
        logger.info('Creating pool [{}]...'.format(self.pool_id))
        try:
            utils.run_with_jitter_retry(batch_service_client.pool.add, pool)
        except batchmodels.BatchErrorException as err:
            if utils.expected_exception(
                    err, "The specified pool already exists"):
                logger.warning(
                    "Pool [{}] is already being created.".format(
                        self.pool_id))
            else:
                logger.info("Create pool error: {}".format(err))
                traceback.print_exc()
                utils.print_batch_exception(err)
    def delete_resources(self, batch_service_client: batch.BatchExtensionsClient,
                         blob_client: azureblob.BlockBlobService, force_delete: bool = None):
        """
        Deletes the job and the storage containers used for the job.

        When the job did not finish cleanly (and force_delete is not set),
        the output container is kept so it can be inspected for debugging.

        :param batch_service_client: A Batch service client.
        :type batch_service_client: `azure.batch.BatchExtensionsClient`
        :param blob_client: A blob service client.
        :type blob_client: `azure.storage.blob.BlockBlobService`
        :param force_delete: Forces the deletion of all the containers related this job.
        :type force_delete: bool
        """
        # Delete the job; a missing job just means it was never created.
        try:
            batch_service_client.job.delete(self.job_id)
        except batchmodels.batch_error.BatchErrorException as job_err:
            if utils.expected_exception(
                    job_err, "The specified job does not exist"):
                logger.error(
                    "The specified Job [{}] was not created.".format(
                        self.job_id))
            else:
                traceback.print_exc()
                utils.print_batch_exception(job_err)

        finished_states = (utils.JobState.COMPLETE,
                           utils.JobState.POOL_FAILED,
                           utils.JobState.NOT_STARTED)
        if force_delete or self.status.job_state in finished_states:
            for container in (self.storage_info.input_container,
                              self.storage_info.output_container):
                logger.info('Deleting container [{}]...'.format(container))
                blob_client.delete_container(container)
        else:
            # Keep the output container around for debugging the failure.
            logger.info("Did not delete the output container")
            logger.info(
                "Job: {}. did not complete successfully, Container {} was not deleted.".format(
                    self.job_id, self.storage_info.output_container))
    def submit_pool(self, batch_service_client: batch.BatchExtensionsClient,
                    template: str):
        """
        Submits a batch pool based on the template.

        Expands the pool template and adds the pool. An "already exists"
        error is expected (pools are shared) and logged as a warning.

        :param batch_service_client: The batch client used for making batch operations
        :type batch_service_client: `azure.batch.BatchExtensionsClient`
        :param template: The in memory version of the template used to create a the job.
        :type template: str
        """
        # NOTE(review): unlike the other submit_pool variants, no parameters
        # file is loaded here and expand_template is called with the template
        # alone — confirm that overload is intended.
        expanded_json = batch_service_client.pool.expand_template(template)
        new_pool = batch_service_client.pool.poolparameter_from_json(expanded_json)
        logger.info('Creating pool [{}]...'.format(self.pool_id))
        try:
            batch_service_client.pool.add(new_pool)
        except batchmodels.batch_error.BatchErrorException as err:
            if not utils.expected_exception(
                    err, "The specified pool already exists"):
                # Unexpected failure: dump the full error for diagnosis.
                logger.info("Create pool error: {}".format(err))
                traceback.print_exc()
                utils.print_batch_exception(err)
            else:
                logger.warning("Pool [{}] is already being created.".format(
                    self.pool_id))
# Example #12
def main():
    """
    Entry point for the template test runner.

    Parses arguments, builds the blob and batch clients, loads the test
    config, constructs one JobManager per configured test, runs them, and
    always (via ``finally``) retries failed jobs, deletes job resources and
    pools, and reports results.
    """
    args = runner_arguments()
    logger.account_info(args)
    start_time = datetime.datetime.now().replace(microsecond=0)
    logger.info('Template runner start time: [{}]'.format(start_time))

    # Create the blob client, for use in obtaining references to
    # blob storage containers and uploading files to containers.
    blob_client = azureblob.BlockBlobService(
        account_name=args.StorageAccountName,
        account_key=args.StorageAccountKey)

    # Create a batch account client using AAD.
    batch_client = create_batch_client(args)

    # Clean up any storage container that is older than 7 days old.
    utils.cleanup_old_resources(blob_client)

    try:
        images_refs = []  # type: List[utils.ImageReference]
        with open(args.TestConfig) as f:
            template = json.load(f)

            # One JobManager per test entry; 'applicationLicense' is optional.
            for jobSetting in template["tests"]:
                application_licenses = None
                if 'applicationLicense' in jobSetting:
                    application_licenses = jobSetting["applicationLicense"]

                _job_managers.append(
                    job_manager.JobManager(jobSetting["template"],
                                           jobSetting["poolTemplate"],
                                           jobSetting["parameters"],
                                           jobSetting["expectedOutput"],
                                           application_licenses))

            for image in template["images"]:
                images_refs.append(
                    utils.ImageReference(image["osType"], image["offer"],
                                         image["version"]))

        run_job_manager_tests(blob_client, batch_client, images_refs,
                              args.VMImageURL, args.VMImageType)

    except batchmodels.batch_error.BatchErrorException as err:
        traceback.print_exc()
        utils.print_batch_exception(err)
        raise
    finally:
        # Delete all the jobs and containers needed for the job.
        # Retries any jobs that failed.
        utils.execute_parallel_jobmanagers("retry", _job_managers,
                                           batch_client, blob_client,
                                           _timeout / 2)
        utils.execute_parallel_jobmanagers("delete_resources", _job_managers,
                                           batch_client, blob_client)
        utils.execute_parallel_jobmanagers("delete_pool", _job_managers,
                                           batch_client)
        end_time = datetime.datetime.now().replace(microsecond=0)
        logger.print_result(_job_managers)
        logger.export_result(_job_managers, (end_time - start_time))
    # NOTE: these lines only run when no exception escaped the try/finally
    # above; end_time is assigned inside the finally block.
    logger.info('Sample end: {}'.format(end_time))
    logger.info('Elapsed time: {}'.format(end_time - start_time))
# Example #13
def main():
    """
    Entry point for the template test runner.

    Parses arguments, builds the blob, batch and keyVault clients, resolves
    the repository branch to pull resource files from, loads the test
    config, constructs one JobManager per configured test, runs them, and
    (when CleanUpResources is set) tears down jobs, resources and pools
    before reporting results.
    """
    args = runner_arguments()
    logger.account_info(args)
    start_time = datetime.datetime.now().replace(microsecond=0)
    logger.info('Template runner start time: [{}]'.format(start_time))

    # Create the blob client, for use in obtaining references to
    # blob storage containers and uploading files to containers.
    blob_client = azureblob.BlockBlobService(
        account_name=args.StorageAccountName,
        account_key=args.StorageAccountKey)

    # Create a batch account client using AAD.
    batch_client = create_batch_client(args)

    # Create a keyvault client using AAD.
    keyvault_client_with_url = create_keyvault_client(args)

    # Clean up any storage container that is older than 7 days old.
    utils.cleanup_old_resources(blob_client)

    repository_branch_name = args.RepositoryBranchName
    if repository_branch_name == "current":
        repository_branch_name = Repository('../').head.shorthand

    logger.info('Pulling resource files from the branch: {}'.format(
        repository_branch_name))

    try:
        images_refs = []  # type: List[utils.ImageReference]
        with open(args.TestConfig) as f:
            try:
                template = json.load(f)
            except ValueError as e:
                # 'logger.err' does not exist anywhere else in this runner;
                # use logger.error with an explicit format string, matching
                # the other config-parse handlers.
                logger.error(
                    "Failed to read test config file due to the following error: {}"
                    .format(e))
                # Bare re-raise preserves the original traceback.
                raise

            # One JobManager per test entry; 'applicationLicense' is optional.
            for jobSetting in template["tests"]:
                application_licenses = None
                if 'applicationLicense' in jobSetting:
                    application_licenses = jobSetting["applicationLicense"]

                _job_managers.append(
                    job_manager.JobManager(
                        jobSetting["template"], jobSetting["poolTemplate"],
                        jobSetting["parameters"], keyvault_client_with_url,
                        jobSetting["expectedOutput"], application_licenses,
                        repository_branch_name))

            for image in template["images"]:
                images_refs.append(
                    utils.ImageReference(image["osType"], image["offer"],
                                         image["version"]))

        run_job_manager_tests(blob_client, batch_client, images_refs,
                              args.VMImageURL)

    except batchmodels.BatchErrorException as err:
        utils.print_batch_exception(err)
        raise
    finally:
        # Delete all the jobs and containers needed for the job.
        # Retries any jobs that failed.
        if args.CleanUpResources:
            utils.execute_parallel_jobmanagers("retry", _job_managers,
                                               batch_client, blob_client,
                                               _timeout / 2)
            utils.execute_parallel_jobmanagers("delete_resources",
                                               _job_managers, batch_client,
                                               blob_client)
            utils.execute_parallel_jobmanagers("delete_pool", _job_managers,
                                               batch_client)
        end_time = datetime.datetime.now().replace(microsecond=0)
        logger.print_result(_job_managers)
        logger.export_result(_job_managers, (end_time - start_time))
    # NOTE: these lines only run when no exception escaped the try/finally
    # above; end_time is assigned inside the finally block.
    logger.info('Sample end: {}'.format(end_time))
    logger.info('Elapsed time: {}'.format(end_time - start_time))
# Example #14
def main():
    """
    Entry point for the template test runner.

    Parses arguments, builds the blob, batch and keyVault clients, resolves
    the repository commit/branch to pull resource files from, loads the
    test config, constructs one TestManager per configured test (tagged
    with a per-run unique id), runs them, and always (via ``finally``)
    deletes the tests' resources before reporting results.
    """
    args = runner_arguments()
    logger.account_info(args)
    start_time = datetime.now(timezone.utc).replace(microsecond=0)
    logger.info('Template runner start time: [{}]'.format(start_time))

    # Generate a unique id for this run to prevent collisions between
    # concurrent runs sharing the same accounts.
    run_unique_id = str(uuid.uuid4())[0:7]

    # Create the blob client, for use in obtaining references to
    # blob storage containers and uploading files to containers.
    blob_client = azureblob.BlockBlobService(
        account_name=args.StorageAccountName,
        account_key=args.StorageAccountKey)

    # Create a batch client using AAD.
    batch_client = create_batch_client(args)

    # Create a keyvault client using AAD.
    keyvault_client_with_url = create_keyvault_client(args)

    repository_branch_name = args.RepositoryBranchName
    if repository_branch_name == "current":
        # "current" resolves to the short name of the checked-out branch.
        repository_branch_name = Repository('../').head.shorthand

    logger.info('Pulling resource files from commit: {}'.format(
        repository_branch_name))

    # Clean up any storage container, pool or jobs older than some threshold.
    utils.cleanup_old_resources(blob_client, batch_client)

    try:
        images_refs = []  # type: List[utils.ImageReference]
        with open(args.TestConfig) as f:
            try:
                template = json.load(f)
            except ValueError as e:
                logger.error(
                    "Failed to read test config file due to the following error: {}"
                    .format(e))
                raise e

            # One TestManager per test entry; 'applicationLicense' is optional.
            for jobSetting in template["tests"]:
                application_licenses = None
                if 'applicationLicense' in jobSetting:
                    application_licenses = jobSetting["applicationLicense"]

                _test_managers.append(
                    test_manager.TestManager(
                        jobSetting["template"], jobSetting["poolTemplate"],
                        jobSetting["parameters"], keyvault_client_with_url,
                        jobSetting["expectedOutput"], application_licenses,
                        repository_branch_name, run_unique_id))

            for image in template["images"]:
                images_refs.append(
                    utils.ImageReference(image["osType"], image["offer"],
                                         image["version"]))

        run_test_manager_tests(blob_client, batch_client, images_refs,
                               args.VMImageURL, args.VMImageOS,
                               args.UseLowPriorityVMs)

    except batchmodels.BatchErrorException as err:
        utils.print_batch_exception(err)
        raise
    finally:
        # Try running delete on pools / jobs again single-threaded in case
        # the thread-per-test cleanup failed (which it still does sometimes).
        for test in _test_managers:
            test.delete_resources(batch_client, blob_client, False)

        end_time = datetime.now(timezone.utc).replace(microsecond=0)
        logger.print_result(_test_managers)
        logger.export_result(_test_managers, (end_time - start_time))
    # NOTE: these lines only run when no exception escaped the try/finally
    # above; end_time is assigned inside the finally block.
    logger.info('Sample end: {}'.format(end_time))
    logger.info('Elapsed time: {}'.format(end_time - start_time))