def __init__(self, job_info):
    """Initialize notification state from the queued job metadata.

    :param job_info: job dict off the queue; the keys read here are
        ``test_tag``, ``build_status``, ``weekly``, ``output_image``
        and ``project_name``.
    """
    self.send_mail_command = "/opt/cccp-service/mail_service/send_mail.sh"
    self.job_info = job_info

    # per-job logs directory plus the status files other workers write there
    self.logs_dir = os.path.join(LOGS_DIR_PARENT, self.job_info["test_tag"])
    self.linter_status_file = os.path.join(self.logs_dir, LINTER_STATUS_FILE)
    self.scanners_status_file = os.path.join(
        self.logs_dir, SCANNERS_STATUS_FILE)

    if self.job_info.get("build_status"):
        # successful build: image name carries the registry prefix,
        # e.g. registry.centos.org/nshaikh/scanner-rpm-verify:latest
        logger.debug("Processing mail for SUCCESS build.")
        image = job_info.get("output_image")
        project = image.replace("registry.centos.org/", "")
    elif self.job_info.get("weekly"):
        # weekly scan: same naming scheme as a successful build
        logger.debug("Processing mail for Weekly scan.")
        image = job_info.get("output_image")
        project = image.replace("registry.centos.org/", "")
    else:
        # failed build: project_name doubles as both image and project
        logger.debug("Processing mail for failed build.")
        image = job_info.get("project_name")
        project = image

    self.image_under_test = image
    self.project = project

    # URL where the build logs for this test tag are published
    self.build_logs = urljoin(LOGS_URL_BASE, self.job_info["test_tag"],
                              BUILD_LOGS_FILENAME)

    self.openshift = Openshift(logger=logger)
    def __init__(self, job_info):

        self.send_mail_command = "/opt/cccp-service/mail_service/send_mail.sh"
        self.job_info = job_info

        # the logs directory
        self.logs_dir = os.path.join(
            LOGS_DIR_PARENT,
            self.job_info["test_tag"])

        # linter execution status file
        self.linter_status_file = os.path.join(
            self.logs_dir, LINTER_STATUS_FILE)

        # scanners execution status file
        self.scanners_status_file = os.path.join(
            self.logs_dir, SCANNERS_STATUS_FILE)

        # if image has successful build
        if self.job_info.get("build_status"):
            logger.debug("Processing mail for SUCCESS build.")
            self.image_under_test = job_info.get("output_image")

            # for eg: the value would be
            # registry.centos.org/nshaikh/scanner-rpm-verify:latest
            self.project = self.image_under_test.replace(
                "registry.centos.org/", "")

        # if it is weekly scan job
        elif self.job_info.get("weekly"):
            logger.debug("Processing mail for Weekly scan.")
            self.image_under_test = job_info.get("output_image")
            # for eg: the value would be
            # registry.centos.org/nshaikh/scanner-rpm-verify:latest
            self.project = self.image_under_test.replace(
                "registry.centos.org/", "")

        # if it is a failed build
        else:
            logger.debug("Processing mail for failed build.")
            self.image_under_test = job_info.get("project_name")
            # projet_name / self.image_under_test and self.project are same
            self.project = job_info.get("project_name")

        # build_logs filename
        self.build_logs = urljoin(
            LOGS_URL_BASE,
            self.job_info["test_tag"],
            BUILD_LOGS_FILENAME
        )

        self.openshift = Openshift(logger=logger)
class NotifyUser(object):
    """Compose and send build status, linter and scanners results."""

    def __init__(self, job_info):
        """Set up paths, image/project names and an Openshift client.

        :param job_info: job dict from the queue; uses keys ``test_tag``,
            ``build_status``, ``weekly``, ``output_image``,
            ``project_name`` and ``notify_email``.
        """
        self.send_mail_command = "/opt/cccp-service/mail_service/send_mail.sh"
        self.job_info = job_info

        # the logs directory for this job's test tag
        self.logs_dir = os.path.join(
            LOGS_DIR_PARENT,
            self.job_info["test_tag"])

        # linter execution status file
        self.linter_status_file = os.path.join(
            self.logs_dir, LINTER_STATUS_FILE)

        # scanners execution status file
        self.scanners_status_file = os.path.join(
            self.logs_dir, SCANNERS_STATUS_FILE)

        # if image has successful build
        if self.job_info.get("build_status"):
            logger.debug("Processing mail for SUCCESS build.")
            self.image_under_test = job_info.get("output_image")

            # e.g. registry.centos.org/nshaikh/scanner-rpm-verify:latest
            self.project = self.image_under_test.replace(
                "registry.centos.org/", "")

        # if it is weekly scan job
        elif self.job_info.get("weekly"):
            logger.debug("Processing mail for Weekly scan.")
            self.image_under_test = job_info.get("output_image")
            self.project = self.image_under_test.replace(
                "registry.centos.org/", "")

        # failed build: project_name doubles as image name and project
        else:
            logger.debug("Processing mail for failed build.")
            self.image_under_test = job_info.get("project_name")
            self.project = job_info.get("project_name")

        # URL at which the build logs can be browsed
        self.build_logs = urljoin(
            LOGS_URL_BASE,
            self.job_info["test_tag"],
            BUILD_LOGS_FILENAME
        )

        self.openshift = Openshift(logger=logger)

    def _escape_text_(self, text):
        r"""Escape \n and \t as literal \\n, \\t for rendering in email body."""
        return text.replace("\n", "\\n").replace("\t", "\\t")

    def update_subject_of_email(self, subject):
        """
        Mail server container is created with a environment variable
        "ENVIRONMENT", its value should be among [production,pre-prod,test].
        If given production as value, the subject is kept intact, else
        the value is pre-pended to the subject, like: [test] SUCCESS [..]
        """
        deployment = os.environ.get("DEPLOYMENT", False)

        logger.debug("Got environment variable DEPLOYMENT=%s", deployment)
        # if environment variable is not found, consider production
        if not deployment:
            return subject

        # case insensitive check for string 'production'
        if deployment.strip().lower() == "production":
            # default is production environment
            return subject
        return "[" + deployment + "] " + subject

    def send_email(self, subject, contents):
        """Send email to the user via the send_mail.sh helper script."""
        # process subject of email based on if it is production or not
        subject = self.update_subject_of_email(subject)

        # list-form argv (shell=False): arguments are passed verbatim
        subprocess.call([
            self.send_mail_command,
            subject,
            self.job_info["notify_email"],
            self._escape_text_(contents)])

    def _read_status(self, filepath):
        """Read a status JSON file; return parsed data or None on I/O error."""
        try:
            # FIX: use a context manager so the handle is always closed
            # (the previous implementation leaked the open file object)
            with open(filepath) as fin:
                return json.load(fin)
        except IOError as e:
            logger.warning("Failed to read %s file, error: %s" %
                           (filepath, str(e)))
            return None

    def _read_text_file(self, text_file):
        """Read a text file; return its contents or None on I/O error."""
        try:
            # FIX: close the handle deterministically (was leaked before)
            with open(text_file) as fin:
                return fin.read()
        except IOError as e:
            logger.warning("Failed to read %s file, error: %s" %
                           (text_file, str(e)))
            return None

    def _dump_logs(self, logs, logfile):
        """Append *logs* to *logfile*; failures are logged, not raised."""
        try:
            # open in append mode, if there are more logs already;
            # FIX: the context manager also guarantees close/flush
            with open(logfile, "a+") as fout:
                fout.write(logs)
        except IOError as e:
            logger.warning("Failed to open %s file in append mode. Error: %s"
                           % (logfile, str(e)))

    def _separate_section(self, char="-", count=99):
        """Return a section-separator string: *char* repeated *count* times."""
        return char * count

    def compose_email_subject(self):
        """Compose the email subject based on build status."""
        if self.job_info.get("build_status"):
            return SUCCESS_EMAIL_SUBJECT % self.project
        else:
            return FAILURE_EMAIL_SUBJECT % self.project

    def compose_success_build_contents(self):
        """Compose email contents for completed builds."""
        # need output image name, build logs and the cause of build
        return SUCCESS_EMAIL_MSG % (
            self.job_info["output_image"],
            self.build_logs,
            self.job_info["cause_of_build"])

    def compose_failed_build_contents(self):
        """Compose email contents for email of failed build."""
        # need project name and build logs
        return FAILURE_EMAIL_MSG % (
            self.project,
            self.build_logs)

    def compose_scanners_summary(self):
        """Compose scanners result summary from the scanners status file."""
        scanners_status = self._read_status(self.scanners_status_file)
        if not scanners_status:
            # TODO: Better handling and reporting here
            return ""

        text = ""
        for scanner in scanners_status["logs_file_path"]:
            text += scanner + ":\n"
            text += scanners_status["msg"][scanner] + "\n"
            text += "Detailed logs link: "
            text += scanners_status["logs_URL"][scanner]
            text += "\n\n"

        return SCANNERS_RESULTS % (self.image_under_test, text)

    def compose_linter_summary(self):
        """Compose Dockerfile Linter results summary."""
        linter_status = self._read_status(self.linter_status_file)
        if not linter_status:
            # TODO: Better handling and reporting here
            return ""

        if not linter_status["lint_status"]:
            # TODO: Better handling and reporting here
            return ""

        linter_results = self._read_text_file(
            linter_status["linter_results_path"])

        if not linter_results:
            # TODO: Better handling and reporting here
            return ""

        return LINTER_RESULTS % linter_results

    def compose_email_contents(self):
        """Aggregate contents from different modules and compose one email."""
        text = EMAIL_HEADER

        text += "\n"

        # if build has failed
        if not self.job_info.get("build_status"):
            text += self.compose_failed_build_contents()
            # see if job_info has logs keyword and append those logs to
            # build_logs
            if self.job_info.get("logs"):
                # build_logs.txt file path on the disk
                logfile = os.path.join(self.logs_dir, BUILD_LOGS_FILENAME)
                self._dump_logs(str(self.job_info.get("logs")), logfile)

        else:
            text += self.compose_success_build_contents()

            # scanners will run only on success builds
            # new line and separate section with hyphens
            text += "\n" + self._separate_section()

            # scanners results
            text += self.compose_scanners_summary()

        # linter has already run for project irrespective of
        # build failure or success

        # new line and separate section with hyphens
        text += "\n" + self._separate_section()

        # linter results
        text += self.compose_linter_summary()

        # put email footer
        text += EMAIL_FOOTER

        return text

    def compose_weekly_email(self):
        """Compose weekly scanning email artifacts: (subject, body)."""
        subject = WEEKLY_EMAIL_SUBJECT % self.image_under_test
        text = EMAIL_HEADER + "\n" + self.compose_scanners_summary() +\
            EMAIL_FOOTER
        return subject, text

    def notify_user(self):
        """
        Main method to orchestrate the email body composition
        and sending email.

        Returns True when the caller may delete service_debug_log.txt
        (weekly scan, or successful build), False otherwise.
        """
        if self.job_info.get("weekly"):
            subject, email_contents = self.compose_weekly_email()
            self.remove_status_files([self.scanners_status_file])
        else:
            subject = self.compose_email_subject()
            email_contents = self.compose_email_contents()
            self.remove_status_files([
                self.linter_status_file,
                self.scanners_status_file])
        # send email
        logger.info("Sending email to user %s" %
                    self.job_info["notify_email"])
        self.send_email(subject, email_contents)

        # Clean up the OpenShift environment after the build is over.
        # Delay a bit so the built image is pushed to the registry
        # properly and the deletion does not error out.
        time.sleep(50)
        try:
            self.openshift.delete(self.job_info['project_hash_key'])
        except Exception as e:
            logger.critical("Failed to delete OpenShift project: {} error: {}"
                            .format(self.job_info['project_name'], e))

        # if it is a weekly scan, return True to delete service_debug_log.txt
        if self.job_info.get("weekly", False):
            return True
        # if build status is False, do not delete service_debug_log.txt
        return self.job_info.get("build_status", False)

    def remove_status_files(self, status_files):
        """Remove the given status files, logging (not raising) failures."""
        logger.debug("Cleaning statuses files %s" % str(status_files))
        for each in status_files:
            try:
                os.remove(each)
            except OSError as e:
                logger.info("Failed to remove file: %s , error: %s" %
                            (each, str(e)))
# Example #4
 def __init__(self, logger=None, sub=None, pub=None):
     """Initialize the worker; delegates logger/sub/pub to BaseWorker."""
     super(DeliveryWorker, self).__init__(logger, sub, pub)
     # phase label — presumably consumed by BaseWorker's phase tracking; confirm
     self.build_phase_name = 'delivery'
     self.openshift = Openshift(logger=self.logger)
# Example #5
class DeliveryWorker(BaseWorker):
    """Worker that retags the image produced by the Build Worker.

    The new tag comes from the `desired-tag` field in the index entry;
    the retagging itself runs as an OpenShift 'delivery' build.
    """
    NAME = 'Delivery worker'

    def __init__(self, logger=None, sub=None, pub=None):
        """Initialize the worker and its OpenShift client."""
        super(DeliveryWorker, self).__init__(logger, sub, pub)
        self.build_phase_name = 'delivery'
        self.openshift = Openshift(logger=self.logger)

    def handle_job(self, job):
        """Handles a job meant for delivery worker"""
        # TODO: this needs to be addressed after addressing CentOS#278
        self.job = job
        self.setup_data()
        # record that the delivery phase has started
        self.set_buildphase_data(build_phase_status='processing',
                                 build_phase_start_time=timezone.now())
        self.logger.info('Starting delivery for job: {}'.format(self.job))

        if self.deliver_build():
            self.handle_delivery_success()
        else:
            self.handle_delivery_failure()

    def deliver_build(self):
        """
        Runs an `oc build` with the `run_delivery.sh` script as a part of build
        template. It mainly changes the tag of the image from a test tag
        generated by build process to the tag desired by user as mentioned in
        `desired-tag` field in cccp.yml
        """
        hash_key = self.job["project_hash_key"]

        try:
            self.openshift.login()
            # start the 'delivery' build
            build_id = self.openshift.build(hash_key, 'delivery')
        except OpenshiftError as e:
            self.logger.error(e)
            return False
        # no build id means the build never started
        if not build_id:
            return False

        # wait for the build to reach 'Complete', then archive its logs
        status = self.openshift.wait_for_build_status(
            hash_key, build_id, 'Complete', status_index=2)
        build_logs = self.openshift.get_build_logs(
            hash_key, build_id, "delivery")
        logs_file = os.path.join(self.job['logs_dir'], 'delivery_logs.txt')
        self.set_buildphase_data(build_phase_log_file=logs_file)
        self.export_logs(build_logs, logs_file)
        return status

    def handle_delivery_success(self):
        """Mark the build as complete and hand the job to the tracker."""
        # Mark project build as complete
        BuildTracker(self.job['namespace'], logger=self.logger).complete()
        self.logger.debug('Marked project build: {} as complete.'.format(
            self.job['namespace']))
        self.logger.debug(
            "Putting job details to master_tube for tracker's consumption")

        self.set_buildphase_data(build_phase_status='complete',
                                 build_phase_end_time=timezone.now())
        self.set_build_data(build_status='complete',
                            build_end_time=timezone.now())
        # notify the user first...
        self.job['action'] = 'notify_user'
        self.queue.put(json.dumps(self.job), 'master_tube')

        # ...then, after a short delay to avoid a race while uploading job
        # details to master_tube, hand the job to the tracker
        time.sleep(10)
        self.job['action'] = 'tracking'
        self.queue.put(json.dumps(self.job), 'master_tube')

    def handle_delivery_failure(self):
        """Flag the job as failed and request a user notification."""
        self.job["build_status"] = False
        self.job['action'] = "notify_user"
        self.queue.put(json.dumps(self.job), 'master_tube')
        self.logger.warning("Delivery is not successful. Notifying the user.")
def create_project(queue, job, logger):
    """Create an OpenShift project for *job* and queue the build start.

    Waits (best-effort, ~10 x 50s) for a previous project with the same
    hash to disappear, creates a fresh project, uploads the build
    template, then puts the job on 'master_tube' with action
    'start_build'. On any OpenShift error the half-created project is
    deleted (best-effort) and the job is dropped.

    :param queue: beanstalkd queue on which the job is to be put
    :param job: job dict to be put on the queue
    :param logger: logger used for reporting
    """
    job_name = job.get("job_name")
    project_name_hash = utils.get_job_hash(job_name)
    openshift = Openshift(logger=logger)
    try:
        openshift.login("test-admin", "test")
        max_retry = 10
        retry = 0
        # waiting for delivery get completed before next job for the same
        # project overrides the job parameters
        while openshift.get_project(project_name_hash) and (retry < max_retry):
            time.sleep(50)
            retry += 1

        if openshift.get_project(project_name_hash):
            logger.error(
                "OpenShift is not able to delete project: {}".format(job_name))
            # BUG FIX: this was a bare `raise` with no active exception,
            # which raises RuntimeError instead of anything meaningful and
            # escapes the OpenshiftError handler below. Log and give up,
            # consistent with the other error paths in this function.
            return
        else:
            openshift.create(project_name_hash)
    except OpenshiftError:
        # clean up whatever was created before bailing out
        try:
            openshift.delete(project_name_hash)
        except OpenshiftError as e:
            logger.error(e)
        return

    try:
        template_path = os.path.join(os.path.dirname(__file__),
                                     'template.json')
        # parametrize the build template with per-job values
        openshift.upload_template(
            project_name_hash, template_path, {
                'SOURCE_REPOSITORY_URL': job.get("repo_url"),
                'REPO_BRANCH': job.get("repo_branch"),
                'APPID': job.get("appid"),
                'JOBID': job.get("jobid"),
                'REPO_BUILD_PATH': job.get("repo_build_path"),
                'TARGET_FILE': job.get("target_file"),
                'NOTIFY_EMAIL': job.get("notify_email"),
                'DESIRED_TAG': job.get("desired_tag"),
                'TEST_TAG': job.get("test_tag")
            })
    except OpenshiftError:
        try:
            openshift.delete(project_name_hash)
        except OpenshiftError as e:
            logger.error(e)
        return

    job["action"] = "start_build"
    queue.put(json.dumps(job), 'master_tube')
class TestWorker(BaseWorker):
    """
    Test Worker.

    Runs the user defined tests on a built container in the pipeline.
    """

    NAME = 'Test worker'

    def __init__(self, logger=None, sub=None, pub=None):
        """Initialize the worker and its OpenShift client."""
        super(TestWorker, self).__init__(logger, sub, pub)
        self.build_phase_name = 'test'
        self.openshift = Openshift(logger=self.logger)

    def run_test(self):
        """Run the OpenShift 'test' build for the current job.

        Returns the final build status (truthy on 'Complete'), or False
        when the build could not be started.
        """
        namespace = self.job["namespace"]
        project = self.job["project_hash_key"]
        self.setup_data()
        # record that the test phase has started
        self.set_buildphase_data(build_phase_status='processing',
                                 build_phase_start_time=timezone.now())

        try:
            self.openshift.login()

            # TODO: This needs to be addressed after addressing Issue #276
            build_id = self.openshift.build(project, 'test')
            if not build_id:
                return False
        except OpenshiftError as e:
            self.logger.error(e)
            return False

        BuildTracker(namespace).start()
        # wait for the test build to finish, then archive its logs
        test_status = self.openshift.wait_for_build_status(project,
                                                           build_id,
                                                           'Complete',
                                                           status_index=2)
        logs = self.openshift.get_build_logs(project, build_id, "test")
        test_logs_file = os.path.join(self.job['logs_dir'], 'test_logs.txt')
        self.set_buildphase_data(build_phase_log_file=test_logs_file)
        self.export_logs(logs, test_logs_file)
        return test_status

    def handle_test_success(self):
        """Record phase completion and queue the scan phase."""
        self.set_buildphase_data(build_phase_status='complete',
                                 build_phase_end_time=timezone.now())
        self.init_next_phase_data('scan')
        self.job['action'] = "start_scan"
        self.queue.put(json.dumps(self.job), 'master_tube')
        self.logger.debug("Test is successful going for next job")

    def handle_test_failure(self):
        """Mark the build failed and request a user notification."""
        # CLEANUP: removed a stale commented-out notification payload that
        # duplicated what the 'notify_user' action already carries.
        self.job["build_status"] = False
        self.set_buildphase_data(build_phase_status='failed',
                                 build_phase_end_time=timezone.now())
        self.job['action'] = "notify_user"
        self.queue.put(json.dumps(self.job), 'master_tube')
        self.logger.warning("Test is not successful. Notifying the user.")

    def handle_job(self, job):
        """Entry point: run the tests and dispatch on the outcome."""
        self.job = job

        success = self.run_test()
        if success:
            self.handle_test_success()
        else:
            self.handle_test_failure()
class DeliveryWorker(BaseWorker):
    """
    Delivery Worker tags the image built by Build Worker using the
    `desired-tag` field in index entry
    """
    NAME = 'Delivery worker'

    def __init__(self, logger=None, sub=None, pub=None):
        """Initialize the worker and its OpenShift client."""
        super(DeliveryWorker, self).__init__(logger, sub, pub)
        self.build_phase_name = 'delivery'
        self.openshift = Openshift(logger=self.logger)

    def handle_job(self, job):
        """Handles a job meant for delivery worker"""
        # TODO: this needs to be addressed after addressing CentOS#278
        self.job = job
        self.setup_data()
        # record that the delivery phase has started
        self.set_buildphase_data(
            build_phase_status='processing',
            build_phase_start_time=timezone.now()
        )
        self.logger.info('Starting delivery for job: {}'.format(self.job))

        success = self.deliver_build()

        if success:
            self.handle_delivery_success()
        else:
            self.handle_delivery_failure()

    def deliver_build(self):
        """
        Runs an `oc build` with the `run_delivery.sh` script as a part of build
        template. It mainly changes the tag of the image from a test tag
        generated by build process to the tag desired by user as mentioned in
        `desired-tag` field in cccp.yml

        Returns the final build status (truthy on 'Complete'), or False
        when the build could not be started.
        """
        project_hash_key = self.job["project_hash_key"]

        try:
            self.openshift.login()
            # start the 'delivery' build
            delivery_id = self.openshift.build(project_hash_key, 'delivery')
        except OpenshiftError as e:
            self.logger.error(e)
            return False
        else:
            # no build id means the build never started
            if not delivery_id:
                return False

        # wait for the build to reach 'Complete', then archive its logs
        delivery_status = self.openshift.wait_for_build_status(
            project_hash_key, delivery_id, 'Complete', status_index=2)
        logs = self.openshift.get_build_logs(
            project_hash_key, delivery_id, "delivery")
        delivery_logs_file = os.path.join(
            self.job['logs_dir'], 'delivery_logs.txt')
        self.set_buildphase_data(build_phase_log_file=delivery_logs_file)
        self.export_logs(logs, delivery_logs_file)
        return delivery_status

    def handle_delivery_success(self):
        """
        - Marks project build as complete
        - Sends job details to RPM tracking piece and deletes the job from the
        tube
        """
        # Mark project build as complete
        BuildTracker(self.job['namespace'], logger=self.logger).complete()
        self.logger.debug('Marked project build: {} as complete.'.format(
            self.job['namespace']))
        self.logger.debug('Putting job details to master_tube for tracker\'s'
                          ' consumption')

        self.set_buildphase_data(
            build_phase_status='complete',
            build_phase_end_time=timezone.now()
        )
        self.set_build_data(
            build_status='complete',
            build_end_time=timezone.now()
        )
        # sending notification as delivery complete and also adding this into
        # tracker.
        self.job['action'] = 'notify_user'
        self.queue.put(json.dumps(self.job), 'master_tube')

        # Put some delay to avoid mismatch in uploading job details to
        # master_tube
        time.sleep(10)
        self.job['action'] = 'tracking'
        self.queue.put(json.dumps(self.job), 'master_tube')

    def handle_delivery_failure(self):
        """
        Puts the job back to the delivery tube for later attempt at delivery
        and requests to notify the user about failure to deliver
        """
        self.job["build_status"] = False
        self.job['action'] = "notify_user"
        self.queue.put(json.dumps(self.job), 'master_tube')
        self.logger.warning(
            "Delivery is not successful. Notifying the user.")
def create_project(queue, job, logger):
    """
    Creates a new project in OpenShift. This function expects a queue on which
    the job is to be put, the job itself and logger to be used.

    Returns True when the project is created and the build template is
    uploaded; returns False on any OpenShift error (after best-effort
    cleanup of the project).

    :param queue: beanstalkd queue on which job is to be put
    :param job: job to be put on the beanstalkd queue
    :param logger: logger to be used to log things
    :type queue: str
    :type job: dict
    :type logger: logging.Logger
    """
    job_name = job.get("job_name")
    project_name_hash = utils.get_job_hash(job_name)
    openshift = Openshift(logger=logger)

    try:
        openshift.login("test-admin", "test")
        max_retry = 10
        retry = 0
        # waiting for delivery get completed before next job for the same
        # project overrides the job parameters
        while openshift.get_project(project_name_hash) and (retry < max_retry):
            time.sleep(50)
            retry += 1

        if openshift.get_project(project_name_hash):
            # the old project is still present after max_retry waits; give up
            logger.error(
                "OpenShift is not able to delete project: {}".format(job_name))
            return False
        else:
            openshift.create(project_name_hash)
    except OpenshiftError:
        # clean up whatever was created before bailing out
        try:
            openshift.delete(project_name_hash)
        except OpenshiftError as e:
            logger.error(e)
        return False

    try:
        template_path = os.path.join(os.path.dirname(__file__),
                                     'template.json')
        # parametrize the build template with per-job values
        openshift.upload_template(
            project_name_hash, template_path, {
                'SOURCE_REPOSITORY_URL': job.get("repo_url"),
                'REPO_BRANCH': job.get("repo_branch"),
                'APPID': job.get("appid"),
                'JOBID': job.get("jobid"),
                'REPO_BUILD_PATH': job.get("repo_build_path"),
                'TARGET_FILE': job.get("target_file"),
                'NOTIFY_EMAIL': job.get("notify_email"),
                'DESIRED_TAG': job.get("desired_tag"),
                'TEST_TAG': job.get("test_tag")
            })
    except OpenshiftError:
        try:
            openshift.delete(project_name_hash)
        except OpenshiftError as e:
            logger.error(e)
        return False

    return True
 def __init__(self, logger=None, sub=None, pub=None):
     """Initialize the worker; delegates logger/sub/pub to BaseWorker."""
     super(BuildWorker, self).__init__(logger, sub, pub)
     # phase label — presumably consumed by BaseWorker's phase tracking; confirm
     self.build_phase_name = "build"
     self.openshift = Openshift(logger=self.logger)
class BuildWorker(BaseWorker):
    """Build worker.

    Waits until no parent image build is in progress, then triggers the
    OpenShift 'build' phase for the job and routes the job onward: to the
    test phase on success, or to user notification on failure.
    """
    NAME = 'BUILD WORKER'

    def __init__(self, logger=None, sub=None, pub=None):
        super(BuildWorker, self).__init__(logger, sub, pub)
        self.build_phase_name = "build"
        self.openshift = Openshift(logger=self.logger)

    def handle_job(self, job):
        """
        This checks if parents for the current project are being built.
        If any parent build is in progress, it pushes the job back to the
        queue to be processed later. Else, it goes ahead with running
        build for the job.
        """
        self.job = job
        self.setup_data()
        self.set_buildphase_data(
            build_phase_status='processing',
            build_phase_start_time=timezone.now()
        )
        cause_of_build = get_cause_of_build(
            os.environ.get('JENKINS_MASTER'),
            self.job["job_name"],
            self.job["jenkins_build_number"]
        )
        self.job["cause_of_build"] = cause_of_build
        self.set_build_data(build_trigger=cause_of_build)

        # Fix: 'depends_on' may be absent or None -- `or ''` avoids an
        # AttributeError on .split(). Empty entries are filtered out so
        # BuildTracker is never queried with an empty project name
        # (''.split(',') would otherwise yield ['']).
        parents = [p.strip()
                   for p in (self.job.get('depends_on') or '').split(',')
                   if p.strip()]
        parents_in_build = []

        # Reset retry params so a requeued job starts with a clean slate
        self.job['retry'] = None
        self.job['retry_delay'] = None
        self.job['last_run_timestamp'] = None

        for parent in parents:
            if BuildTracker(parent, logger=self.logger).is_running():
                parents_in_build.append(parent)

        if parents_in_build:
            self.logger.info('Parents in build: {}, pushing job: {} back '
                             'to queue'.format(parents_in_build, self.job))
            self.set_buildphase_data(
                build_phase_status='requeuedparent'
            )
            # Retry delay in seconds
            self.job['retry'] = True
            self.job['retry_delay'] = settings.BUILD_RETRY_DELAY
            self.job['last_run_timestamp'] = time.time()
            self.queue.put(json.dumps(self.job), 'master_tube')
        else:
            self.logger.info('Starting build for job: {}'.format(self.job))
            success = self.build_container()
            if success:
                self.job["build_status"] = True
                self.handle_build_success()
            else:
                self.job["build_status"] = False
                self.handle_build_failure()

    def build_container(self):
        """Run the OpenShift 'build' for the job.

        Returns the final build status from OpenShift (truthy on
        'Complete'), or False when the build could not be started.
        """
        namespace = self.job["namespace"]
        project_hash_key = self.job["project_hash_key"]

        try:
            self.openshift.login()
            build_id = self.openshift.build(project_hash_key, 'build')
            if not build_id:
                return False
        except OpenshiftError as e:
            self.logger.error(e)
            return False

        # Record that a build for this namespace is in progress so that
        # dependent (child) projects wait for it.
        BuildTracker(namespace).start()
        build_status = self.openshift.wait_for_build_status(
            project_hash_key, build_id, 'Complete')
        logs = self.openshift.get_build_logs(project_hash_key, build_id)
        build_logs_file = os.path.join(self.job['logs_dir'], 'build_logs.txt')
        self.set_buildphase_data(build_phase_log_file=build_logs_file)
        self.export_logs(logs, build_logs_file)
        return build_status

    def handle_build_success(self):
        """Handle build success for job: queue it for the test phase."""
        self.job['action'] = 'start_test'
        self.set_buildphase_data(
            build_phase_status='complete',
            build_phase_end_time=timezone.now()
        )
        self.queue.put(json.dumps(self.job), 'master_tube')
        self.init_next_phase_data('test')
        self.logger.debug("Build is successful going for next job")

    def handle_build_failure(self):
        """Handle build failure for job: request user notification."""
        self.job['action'] = "notify_user"
        self.set_buildphase_data(
            build_phase_status='failed',
            build_phase_end_time=timezone.now()
        )
        self.queue.put(json.dumps(self.job), 'master_tube')
        self.logger.warning(
            "Build is not successful. Notifying the user.")
 # NOTE(review): orphaned method fragment at broken (1-space) indentation --
 # a scrape artifact duplicating DeliveryWorker.__init__ defined below.
 def __init__(self, logger=None, sub=None, pub=None):
     super(DeliveryWorker, self).__init__(logger, sub, pub)
     self.openshift = Openshift(logger=self.logger)
class DeliveryWorker(BaseWorker):
    """
    Delivery Worker tags the image built by Build Worker using the
    `desired-tag` field in index entry
    """
    NAME = 'Delivery worker'

    def __init__(self, logger=None, sub=None, pub=None):
        super(DeliveryWorker, self).__init__(logger, sub, pub)
        self.openshift = Openshift(logger=self.logger)

    def handle_job(self, job):
        """Handles a job meant for delivery worker"""
        # TODO: this needs to be addressed after addressing CentOS#278
        self.job = job
        self.logger.info('Starting delivery for job: {}'.format(self.job))

        success = self.deliver_build()

        if success:
            self.handle_delivery_success()
        else:
            self.handle_delivery_failure()

    def deliver_build(self):
        """
        Runs an `oc build` with the `run_delivery.sh` script as a part of build
        template. It mainly changes the tag of the image from a test tag
        generated by build process to the tag desired by user as mentioned in
        `desired-tag` field in cccp.yml

        Returns the delivery build status, or False when the build could
        not be started.
        """
        project_hash_key = self.job["project_hash_key"]

        try:
            self.openshift.login()
            # start the 'delivery' build
            delivery_id = self.openshift.build(project_hash_key, 'delivery')
        except OpenshiftError as e:
            self.logger.error(e)
            return False
        else:
            # login/build succeeded but OpenShift returned no build id
            if not delivery_id:
                return False

        delivery_status = self.openshift.wait_for_build_status(
            project_hash_key, delivery_id, 'Complete', status_index=2)
        logs = self.openshift.get_build_logs(project_hash_key, delivery_id)
        delivery_logs_file = os.path.join(self.job['logs_dir'],
                                          'delivery_logs.txt')
        self.export_logs(logs, delivery_logs_file)
        return delivery_status

    def handle_delivery_success(self):
        """
        - Marks project build as complete
        - Sends job details to RPM tracking piece and deletes the job from the
        tube
        """
        # Mark project build as complete
        Build(self.job['namespace'], logger=self.logger).complete()
        self.logger.debug('Marked project build: {} as complete.'.format(
            self.job['namespace']))
        self.logger.debug('Putting job details to master_tube for tracker\'s'
                          ' consumption')
        project_hash_key = self.job["project_hash_key"]
        # This is for cleaning up the openshift environment after the build
        # is over. We are putting some delay so that the built image is
        # pushed to the registry properly and it does not give error while
        # deleting
        time.sleep(50)
        self.openshift.delete(project_hash_key)

        # sending notification as delivery complete and also adding this into
        # tracker.
        self.job['action'] = 'notify_user'
        self.queue.put(json.dumps(self.job), 'master_tube')

        # Put some delay to avoid mismatch in uploading job details to
        # master_tube
        time.sleep(10)
        self.job['action'] = 'tracking'
        self.queue.put(json.dumps(self.job), 'master_tube')

    def handle_delivery_failure(self):
        """
        Puts the job back to the delivery tube for later attempt at delivery
        and requests to notify the user about failure to deliver
        """
        # NOTE(review): the pop is redundant -- 'action' is overwritten on
        # the very next line
        self.job.pop('action', None)
        self.job['action'] = "delivery_failure"
        self.queue.put(json.dumps(self.job), 'master_tube')
        self.logger.warning(
            "Delivery is not successful putting it to failed delivery tube")
        # payload handed to the notification worker
        data = {
            'action':
            'notify_user',
            'namespace':
            self.job["namespace"],
            'build_status':
            False,
            'notify_email':
            self.job['notify_email'],
            'delivery_logs_file':
            os.path.join(self.job['logs_dir'], 'delivery_logs.txt'),
            'logs_dir':
            self.job['logs_dir'],
            'project_name':
            self.job["project_name"],
            'job_name':
            self.job['jobid'],
            'test_tag':
            self.job['test_tag']
        }
        self.notify(data)
class TestWorker(BaseWorker):
    """
    Test Worker.

    Executes the project's user-defined tests against the image produced
    by the build phase, then routes the job to the scan phase on success
    or to user notification on failure.
    """

    NAME = 'Test worker'

    def __init__(self, logger=None, sub=None, pub=None):
        super(TestWorker, self).__init__(logger, sub, pub)
        self.build_phase_name = 'test'
        self.openshift = Openshift(logger=self.logger)

    def run_test(self):
        """Kick off the OpenShift 'test' build and wait for its result.

        Returns the final build status, or False when the test build
        could not be started.
        """
        tracked_namespace = self.job["namespace"]
        project_key = self.job["project_hash_key"]
        self.setup_data()
        self.set_buildphase_data(build_phase_status='processing',
                                 build_phase_start_time=timezone.now())

        try:
            self.openshift.login()
            # TODO: This needs to be addressed after addressing Issue #276
            test_build_id = self.openshift.build(project_key, 'test')
            if not test_build_id:
                return False
        except OpenshiftError as err:
            self.logger.error(err)
            return False

        BuildTracker(tracked_namespace).start()
        outcome = self.openshift.wait_for_build_status(
            project_key, test_build_id, 'Complete', status_index=2)
        test_logs = self.openshift.get_build_logs(
            project_key, test_build_id, "test")
        logfile = os.path.join(self.job['logs_dir'], 'test_logs.txt')
        self.set_buildphase_data(build_phase_log_file=logfile)
        self.export_logs(test_logs, logfile)
        return outcome

    def handle_test_success(self):
        """Mark the test phase complete and queue the job for scanning."""
        self.set_buildphase_data(build_phase_status='complete',
                                 build_phase_end_time=timezone.now())
        self.init_next_phase_data('scan')
        self.job['action'] = "start_scan"
        self.queue.put(json.dumps(self.job), 'master_tube')
        self.logger.debug("Test is successful going for next job")

    def handle_test_failure(self):
        """Mark the test phase failed and request user notification."""
        self.job["build_status"] = False
        self.set_buildphase_data(build_phase_status='failed',
                                 build_phase_end_time=timezone.now())
        self.job['action'] = "notify_user"
        self.queue.put(json.dumps(self.job), 'master_tube')
        self.logger.warning(
            "Test is not successful. Notifying the user.")

    def handle_job(self, job):
        """Entry point: run the tests and dispatch on the outcome."""
        self.job = job
        if self.run_test():
            self.handle_test_success()
        else:
            self.handle_test_failure()
class NotifyUser(object):
    """Compose and send build status, linter and scanners results."""

    def __init__(self, job_info):
        # shell script that actually fires the mail
        self.send_mail_command = "/opt/cccp-service/mail_service/send_mail.sh"
        self.job_info = job_info

        # the logs directory
        self.logs_dir = os.path.join(LOGS_DIR_PARENT,
                                     self.job_info["test_tag"])

        # linter execution status file
        self.linter_status_file = os.path.join(self.logs_dir,
                                               LINTER_STATUS_FILE)

        # scanners execution status file
        self.scanners_status_file = os.path.join(self.logs_dir,
                                                 SCANNERS_STATUS_FILE)

        # if image has successful build
        if self.job_info.get("build_status"):
            logger.debug("Processing mail for SUCCESS build.")
            self.image_under_test = job_info.get("output_image")

            # for eg: the value would be
            # registry.centos.org/nshaikh/scanner-rpm-verify:latest
            self.project = self.image_under_test.replace(
                "registry.centos.org/", "")

        # if it is weekly scan job
        elif self.job_info.get("weekly"):
            logger.debug("Processing mail for Weekly scan.")
            self.image_under_test = job_info.get("output_image")
            # for eg: the value would be
            # registry.centos.org/nshaikh/scanner-rpm-verify:latest
            self.project = self.image_under_test.replace(
                "registry.centos.org/", "")

        # if it is a failed build
        else:
            logger.debug("Processing mail for failed build.")
            self.image_under_test = job_info.get("project_name")
            # project_name / self.image_under_test and self.project are same
            self.project = job_info.get("project_name")

        # build_logs URL. NOTE(review): urljoin here takes three arguments,
        # so it is presumably a project helper, not stdlib urllib's two-arg
        # urljoin -- do not swap in the stdlib function.
        self.build_logs = urljoin(LOGS_URL_BASE, self.job_info["test_tag"],
                                  BUILD_LOGS_FILENAME)

        self.openshift = Openshift(logger=logger)

    def _escape_text_(self, text):
        r"""Escape \n and \t as \\n and \\t for rendering in email body."""

        return text.replace("\n", "\\n").replace("\t", "\\t")

    def update_subject_of_email(self, subject):
        """
        Mail server container is created with an environment variable
        "DEPLOYMENT"; its value should be among [production, pre-prod, test].
        For "production" the subject is kept intact, otherwise the value is
        prepended to the subject, e.g. "[test] SUCCESS [..]".
        """
        deployment = os.environ.get("DEPLOYMENT", False)

        logger.debug("Got environment variable DEPLOYMENT=%s", deployment)
        # if environment variable is not found, consider production
        if not deployment:
            return subject

        # case insensitive check for string 'production'
        elif deployment.strip().lower() == "production":
            # default is production environment
            return subject
        else:
            return "[" + deployment + "] " + subject

    def send_email(self, subject, contents):
        """Send the email to the user via the external mail script."""

        # process subject of email based on if it is production or not
        subject = self.update_subject_of_email(subject)

        subprocess.call([
            self.send_mail_command, subject, self.job_info["notify_email"],
            self._escape_text_(contents)
        ])

    def _read_status(self, filepath):
        """Read a JSON status file; return parsed data, or None on IO error."""
        try:
            # fix: use a context manager so the handle is closed
            # (the previous implementation leaked the open file)
            with open(filepath) as fin:
                return json.load(fin)
        except IOError as e:
            logger.warning("Failed to read %s file, error: %s" %
                           (filepath, str(e)))
            return None

    def _read_text_file(self, text_file):
        """Read a text file; return its contents, or None on IO error."""

        try:
            # fix: context manager closes the handle (was leaked before)
            with open(text_file) as fin:
                return fin.read()
        except IOError as e:
            logger.warning("Failed to read %s file, error: %s" %
                           (text_file, str(e)))
            return None

    def _dump_logs(self, logs, logfile):
        """Append logs into logfile; failures are logged, not raised."""

        try:
            # open in append mode, if there are more logs already;
            # fix: context manager flushes and closes (was leaked before)
            with open(logfile, "a+") as fout:
                fout.write(logs)
        except IOError as e:
            logger.warning("Failed to open %s file in append mode. Error: %s" %
                           (logfile, str(e)))

    def _separate_section(self, char="-", count=99):
        """Return a separator string made of `char` repeated `count` times."""

        return char * count

    def compose_email_subject(self):
        """Compose the email subject based on build status."""

        if self.job_info.get("build_status"):
            return SUCCESS_EMAIL_SUBJECT % self.project
        else:
            return FAILURE_EMAIL_SUBJECT % self.project

    def compose_success_build_contents(self):
        """Compose email contents for completed builds."""

        # needs output image name, build logs link and build cause
        return SUCCESS_EMAIL_MSG % (self.job_info["output_image"],
                                    self.build_logs,
                                    self.job_info["cause_of_build"])

    def compose_failed_build_contents(self):
        """Compose email contents for a failed build."""

        # needs project name and build logs link
        return FAILURE_EMAIL_MSG % (self.project, self.build_logs)

    def compose_scanners_summary(self):
        """Compose the scanners result summary section."""

        scanners_status = self._read_status(self.scanners_status_file)
        if not scanners_status:
            # TODO: Better handling and reporting here
            return ""

        text = ""
        for scanner in scanners_status["logs_file_path"]:
            text += scanner + ":\n"
            text += scanners_status["msg"][scanner] + "\n"
            text += "Detailed logs link: "
            text += scanners_status["logs_URL"][scanner]
            text += "\n\n"

        return SCANNERS_RESULTS % (self.image_under_test, text)

    def compose_linter_summary(self):
        """Compose the Dockerfile linter results summary section."""

        linter_status = self._read_status(self.linter_status_file)
        if not linter_status:
            # TODO: Better handling and reporting here
            return ""

        if not linter_status["lint_status"]:
            # TODO: Better handling and reporting here
            return ""

        linter_results = self._read_text_file(
            linter_status["linter_results_path"])

        if not linter_results:
            # TODO: Better handling and reporting here
            return ""

        return LINTER_RESULTS % linter_results

    def compose_email_contents(self):
        """Aggregate contents from different modules into one email body."""

        text = EMAIL_HEADER

        text += "\n"

        # if build has failed
        if not self.job_info.get("build_status"):
            text += self.compose_failed_build_contents()
            # see if job_info has logs keyword and append those logs to
            # build_logs
            if self.job_info.get("logs"):
                # build_logs.txt file path on the disk
                logfile = os.path.join(self.logs_dir, BUILD_LOGS_FILENAME)
                self._dump_logs(str(self.job_info.get("logs")), logfile)

        else:
            text += self.compose_success_build_contents()

            # scanners will run only on success builds
            # new line and separate section with hyphens
            text += "\n" + self._separate_section()

            # scanners results
            text += self.compose_scanners_summary()

        # linter has already run for project irrespective of
        # build failure or success

        # new line and separate section with hyphens
        text += "\n" + self._separate_section()

        # linter results
        text += self.compose_linter_summary()

        # put email footer
        text += EMAIL_FOOTER

        return text

    def compose_weekly_email(self):
        """Compose the weekly scanning email artifacts (subject, body)."""

        subject = WEEKLY_EMAIL_SUBJECT % self.image_under_test
        text = EMAIL_HEADER + "\n" + self.compose_scanners_summary() +\
            EMAIL_FOOTER
        return subject, text

    def notify_user(self):
        """
        Main method to orchestrate the email body composition
        and sending email.

        Returns True when the caller may delete service_debug_log.txt
        (weekly scans and successful builds), False otherwise.
        """
        if self.job_info.get("weekly"):
            subject, email_contents = self.compose_weekly_email()
            self.remove_status_files([self.scanners_status_file])
        else:
            subject = self.compose_email_subject()
            email_contents = self.compose_email_contents()
            self.remove_status_files(
                [self.linter_status_file, self.scanners_status_file])
        # send email
        logger.info("Sending email to user %s" % self.job_info["notify_email"])
        self.send_email(subject, email_contents)

        # Clean up the OpenShift environment after the build is over.
        # The delay gives the registry time to receive the pushed image so
        # project deletion does not race with the image push.
        time.sleep(50)
        try:
            self.openshift.delete(self.job_info['project_hash_key'])
        except Exception as e:
            logger.critical(
                "Failed to delete OpenShift project: {} error: {}".format(
                    self.job_info['project_name'], e))

        # if it is a weekly scan, return True to delete service_debug_log.txt
        if self.job_info.get("weekly", False):
            return True
        # if build status is False, do not delete service_debug_log.txt
        return self.job_info.get("build_status", False)

    def remove_status_files(self, status_files):
        """Remove the given status files, logging (not raising) failures."""
        logger.debug("Cleaning statuses files %s" % str(status_files))
        for each in status_files:
            try:
                os.remove(each)
            except OSError as e:
                logger.info("Failed to remove file: %s , error: %s" %
                            (each, str(e)))
# Example #16 (original marker: "示例#16", vote count: 0) -- scrape artifact
class BuildWorker(BaseWorker):
    """Build worker"""
    # NOTE(review): this appears to be a reformatted duplicate of the
    # BuildWorker class defined earlier in this file (scrape artifact);
    # whichever definition executes last shadows the other.
    NAME = 'BUILD WORKER'

    def __init__(self, logger=None, sub=None, pub=None):
        super(BuildWorker, self).__init__(logger, sub, pub)
        self.build_phase_name = "build"
        self.openshift = Openshift(logger=self.logger)

    def handle_job(self, job):
        """
        This checks if parents for the current project are being built.
        If any parent build is in progress, it pushes the job back to the
        queue to be processed later. Else, it goes ahead with running
        build for the job.
        """
        self.job = job
        self.setup_data()
        self.set_buildphase_data(build_phase_status='processing',
                                 build_phase_start_time=timezone.now())
        cause_of_build = get_cause_of_build(os.environ.get('JENKINS_MASTER'),
                                            self.job["job_name"],
                                            self.job["jenkins_build_number"])
        self.job["cause_of_build"] = cause_of_build
        self.set_build_data(build_trigger=cause_of_build)

        parent_build_running = False
        # NOTE(review): an empty 'depends_on' yields [''] here, so
        # BuildTracker gets queried with an empty project name -- confirm
        # whether that is intended
        parents = self.job.get('depends_on', '').split(',')
        parents_in_build = []

        # Reset retry params
        self.job['retry'] = None
        self.job['retry_delay'] = None
        self.job['last_run_timestamp'] = None

        for parent in parents:
            is_build_running = BuildTracker(parent,
                                            logger=self.logger).is_running()
            if is_build_running:
                parents_in_build.append(parent)
            parent_build_running = parent_build_running or \
                is_build_running

        if parent_build_running:
            self.logger.info('Parents in build: {}, pushing job: {} back '
                             'to queue'.format(parents_in_build, self.job))
            self.set_buildphase_data(build_phase_status='requeuedparent')
            # Retry delay in seconds
            self.job['retry'] = True
            self.job['retry_delay'] = settings.BUILD_RETRY_DELAY
            self.job['last_run_timestamp'] = time.time()
            self.queue.put(json.dumps(self.job), 'master_tube')
        else:
            self.logger.info('Starting build for job: {}'.format(self.job))
            success = self.build_container()
            if success:
                self.job["build_status"] = True
                self.handle_build_success()
            else:
                self.job["build_status"] = False
                self.handle_build_failure()

    def build_container(self):
        """Run Openshift build for job"""
        namespace = self.job["namespace"]
        # project_name = self.job["project_name"]
        project_hash_key = self.job["project_hash_key"]

        try:
            self.openshift.login()
            build_id = self.openshift.build(project_hash_key, 'build')
            if not build_id:
                return False
        except OpenshiftError as e:
            self.logger.error(e)
            return False

        # record that a build for this namespace is in progress
        BuildTracker(namespace).start()
        build_status = self.openshift.wait_for_build_status(
            project_hash_key, build_id, 'Complete')
        logs = self.openshift.get_build_logs(project_hash_key, build_id)
        build_logs_file = os.path.join(self.job['logs_dir'], 'build_logs.txt')
        self.set_buildphase_data(build_phase_log_file=build_logs_file)
        self.export_logs(logs, build_logs_file)
        return build_status

    def handle_build_success(self):
        """Handle build success for job."""
        self.job['action'] = 'start_test'
        self.set_buildphase_data(build_phase_status='complete',
                                 build_phase_end_time=timezone.now())
        self.queue.put(json.dumps(self.job), 'master_tube')
        self.init_next_phase_data('test')
        self.logger.debug("Build is successful going for next job")

    def handle_build_failure(self):
        """Handle build failure for job"""
        self.job['action'] = "notify_user"
        self.set_buildphase_data(build_phase_status='failed',
                                 build_phase_end_time=timezone.now())
        self.queue.put(json.dumps(self.job), 'master_tube')
        self.logger.warning("Build is not successful. Notifying the user.")
# Example #17 (original marker: "示例#17", vote count: 0) -- scrape artifact
 # NOTE(review): two orphaned method fragments at broken (1-space)
 # indentation -- scrape artifacts. The first duplicates
 # BuildWorker.__init__; the second resembles DeliveryWorker.__init__ but,
 # unlike the class above, also sets build_phase_name = 'delivery'.
 def __init__(self, logger=None, sub=None, pub=None):
     super(BuildWorker, self).__init__(logger, sub, pub)
     self.build_phase_name = "build"
     self.openshift = Openshift(logger=self.logger)
 def __init__(self, logger=None, sub=None, pub=None):
     super(DeliveryWorker, self).__init__(logger, sub, pub)
     self.build_phase_name = 'delivery'
     self.openshift = Openshift(logger=self.logger)
# Example #19 (original marker: "示例#19", vote count: 0) -- scrape artifact
class BuildWorker(BaseWorker):
    """Build worker"""
    # NOTE(review): an older variant of BuildWorker (scrape artifact):
    # it tracks running builds with `Build` instead of `BuildTracker`,
    # does no build-phase bookkeeping (no setup_data/set_buildphase_data),
    # and on failure posts a 'build_failure' action plus a direct
    # notification payload instead of 'notify_user'.
    NAME = 'BUILD WORKER'

    def __init__(self, logger=None, sub=None, pub=None):
        super(BuildWorker, self).__init__(logger, sub, pub)
        self.openshift = Openshift(logger=self.logger)

    def handle_job(self, job):
        """
        This checks if parents for the current project are being built.
        If any parent build is in progress, it pushes the job back to the
        queue to be processed later. Else, it goes ahead with running
        build for the job.
        """
        self.job = job

        parent_build_running = False
        # NOTE(review): an empty 'depends_on' yields [''] here, so Build
        # gets queried with an empty project name -- confirm intent
        parents = self.job.get('depends_on', '').split(',')
        parents_in_build = []

        # Reset retry params
        self.job['retry'] = None
        self.job['retry_delay'] = None
        self.job['last_run_timestamp'] = None

        for parent in parents:
            is_build_running = Build(parent, logger=self.logger).is_running()
            if is_build_running:
                parents_in_build.append(parent)
            parent_build_running = parent_build_running or \
                is_build_running

        if parent_build_running:
            self.logger.info('Parents in build: {}, pushing job: {} back '
                             'to queue'.format(parents_in_build, self.job))
            # Retry delay in seconds
            self.job['retry'] = True
            self.job['retry_delay'] = settings.BUILD_RETRY_DELAY
            self.job['last_run_timestamp'] = time.time()
            self.queue.put(json.dumps(self.job), 'master_tube')
        else:
            self.logger.info('Starting build for job: {}'.format(self.job))
            success = self.build()
            if success:
                self.job["build_status"] = True
                self.handle_build_success()
            else:
                self.job["build_status"] = False
                self.handle_build_failure()

    def build(self):
        """Run Openshift build for job"""
        namespace = self.job["namespace"]
        # project_name = self.job["project_name"]
        project_hash_key = self.job["project_hash_key"]

        try:
            self.openshift.login()
            build_id = self.openshift.build(project_hash_key, 'build')
            if not build_id:
                return False
        except OpenshiftError as e:
            self.logger.error(e)
            return False

        # record that a build for this namespace is in progress
        Build(namespace).start()
        build_status = self.openshift.wait_for_build_status(
            project_hash_key, build_id, 'Complete')
        logs = self.openshift.get_build_logs(project_hash_key, build_id)
        build_logs_file = os.path.join(self.job['logs_dir'], 'build_logs.txt')
        self.export_logs(logs, build_logs_file)
        return build_status

    def handle_build_success(self):
        """Handle build success for job."""
        self.job['action'] = 'start_test'
        self.queue.put(json.dumps(self.job), 'master_tube')
        self.logger.debug("Build is successful going for next job")

    def handle_build_failure(self):
        """Handle build failure for job"""
        # NOTE(review): the pop is redundant -- 'action' is overwritten on
        # the very next line
        self.job.pop('action', None)
        self.job['action'] = "build_failure"
        self.queue.put(json.dumps(self.job), 'master_tube')
        self.logger.warning(
            "Build is not successful putting it to failed build tube")
        # payload handed directly to the notification worker
        data = {
            'action': 'notify_user',
            'namespace': self.job["namespace"],
            'build_status': False,
            'notify_email': self.job['notify_email'],
            'build_logs_file': os.path.join(self.job['logs_dir'],
                                            'build_logs.txt'),
            'logs_dir': self.job['logs_dir'],
            'project_name': self.job['project_name'],
            'job_name': self.job['job_name'],
            'test_tag': self.job['test_tag']
        }
        self.notify(data)