Example #1
 def _parse_fp(self, fp):
     # wrap provided file streams to ensure correct encoding used
     data = local_yaml.load(utils.wrap_stream(fp), search_path=self.path)
     if data:
         if not isinstance(data, list):
             raise JenkinsJobsException(
                 "The topmost collection in file '{fname}' must be a list,"
                 " not a {cls}".format(fname=getattr(fp, 'name', fp),
                                       cls=type(data)))
         for item in data:
             cls, dfn = next(iter(item.items()))
             group = self.data.get(cls, {})
             if len(item.items()) > 1:
                 n = None
                 for k, v in item.items():
                     if k == "name":
                         n = v
                         break
                 # Syntax error: an item must map exactly one
                 # type name to its definition
                 raise JenkinsJobsException("Syntax error, for item "
                                            "named '{0}'. Missing indent?"
                                            .format(n))
             # allow any entry to specify an id that can also be used
             _id = dfn.get('id', dfn['name'])
             if _id in group:
                 self._handle_dups(
                     "Duplicate entry found in '{0}: '{1}' already "
                     "defined".format(fp.name, _id))
             group[_id] = dfn
             self.data[cls] = group
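
Each of these examples routes its stream through utils.wrap_stream before reading or writing. The library's real implementation is not shown on this page; the following is a minimal sketch of what such a helper could look like, assuming its job is to return a byte-oriented stream that accepts UTF-8 data. The names and fallbacks here are illustrative assumptions, not the actual code.

import codecs
import locale


def wrap_stream(stream, encoding='utf-8'):
    # Hypothetical sketch, not the library's actual implementation.
    # Fall back to the locale encoding when the stream declares none.
    stream_enc = (getattr(stream, 'encoding', None)
                  or locale.getpreferredencoding())

    # Prefer the raw byte buffer when one is exposed
    # (e.g. sys.stdout.buffer on Python 3).
    if hasattr(stream, 'buffer'):
        stream = stream.buffer

    if str(stream_enc).lower() == encoding.lower():
        return stream

    # Transparently transcode written data into the stream's own encoding.
    return codecs.EncodedFile(stream, encoding, stream_enc)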
Example #2
    def update_job(self, input_fn, jobs_glob=None, output=None):
        self.load_files(input_fn)
        self.parser.expandYaml(jobs_glob)
        self.parser.generateXML()

        logger.info("Number of jobs generated:  %d", len(self.parser.xml_jobs))
        self.parser.xml_jobs.sort(key=operator.attrgetter('name'))

        if (output and not hasattr(output, 'write')
                and not os.path.isdir(output)):
            logger.info("Creating directory %s" % output)
            try:
                os.makedirs(output)
            except OSError:
                if not os.path.isdir(output):
                    raise

        updated_jobs = 0
        if output and hasattr(output, 'write'):
            # wrap the stream once up front so it is not re-wrapped on
            # every pass through the loop
            output = utils.wrap_stream(output)

        for job in self.parser.xml_jobs:
            if output:
                if hasattr(output, 'write'):
                    # `output` is a file-like object
                    logger.info("Job name:  %s", job.name)
                    logger.debug("Writing XML to '{0}'".format(output))
                    try:
                        output.write(job.output())
                    except IOError as exc:
                        if exc.errno == errno.EPIPE:
                            # EPIPE could happen if piping output to something
                            # that doesn't read the whole input (e.g.: the UNIX
                            # `head` command)
                            return
                        raise
                    continue

                output_fn = os.path.join(output, job.name)
                logger.debug("Writing XML to '{0}'".format(output_fn))
                with io.open(output_fn, 'w', encoding='utf-8') as f:
                    f.write(job.output().decode('utf-8'))
                continue
            md5 = job.md5()
            if (self.jenkins.is_job(job.name)
                    and not self.cache.is_cached(job.name)):
                old_md5 = self.jenkins.get_job_md5(job.name)
                self.cache.set(job.name, old_md5)

            if self.cache.has_changed(job.name, md5) or self.ignore_cache:
                self.jenkins.update_job(job.name, job.output().decode('utf-8'))
                updated_jobs += 1
                self.cache.set(job.name, md5)
            else:
                logger.debug("'{0}' has not changed".format(job.name))
        return self.parser.xml_jobs, updated_jobs
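
The caching logic above uploads a job only when its MD5 differs from the cached value. A self-contained sketch of the same idea, using a hypothetical in-memory Cache in place of the library's on-disk one:

import hashlib


class Cache(object):
    # hypothetical stand-in for the library's job cache
    def __init__(self):
        self.data = {}

    def set(self, name, md5):
        self.data[name] = md5

    def has_changed(self, name, md5):
        # unseen jobs and jobs whose checksum differs count as changed
        return self.data.get(name) != md5


cache = Cache()
xml = b'<project>...</project>'
md5 = hashlib.md5(xml).hexdigest()
if cache.has_changed('my-job', md5):
    # the upload to Jenkins would happen here
    cache.set('my-job', md5)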
Example #3
    def execute(self, options, jjb_config):
        self.jjb_config = jjb_config
        self.jenkins = builder.JenkinsManager(jjb_config)

        jobs = self.get_jobs(options.names, options.path)

        logging.info("Matching jobs: %d", len(jobs))
        stdout = utils.wrap_stream(sys.stdout)

        for job in jobs:
            stdout.write((job + '\n').encode('utf-8'))
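
A runnable reduction of the same stdout pattern; sys.stdout.buffer stands in for the byte-accepting stream that utils.wrap_stream is assumed to return:

import sys

# byte-accepting stand-in for utils.wrap_stream(sys.stdout)
stdout = sys.stdout.buffer

for job in ['job-a', 'job-b']:
    stdout.write((job + '\n').encode('utf-8'))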
Example #4
    def update_jobs(self, xml_jobs, output=None, n_workers=None):
        orig = time.time()

        logger.info("Number of jobs generated:  %d", len(xml_jobs))
        xml_jobs.sort(key=operator.attrgetter('name'))

        if (output and not hasattr(output, 'write')
                and not os.path.isdir(output)):
            logger.info("Creating directory %s" % output)
            try:
                os.makedirs(output)
            except OSError:
                if not os.path.isdir(output):
                    raise

        if output:
            # ensure only wrapped once
            if hasattr(output, 'write'):
                output = utils.wrap_stream(output)

            for job in xml_jobs:
                if hasattr(output, 'write'):
                    # `output` is a file-like object
                    logger.info("Job name:  %s", job.name)
                    logger.debug("Writing XML to '{0}'".format(output))
                    try:
                        output.write(job.output())
                    except IOError as exc:
                        if exc.errno == errno.EPIPE:
                            # EPIPE could happen if piping output to something
                            # that doesn't read the whole input (e.g.: the UNIX
                            # `head` command)
                            return
                        raise
                    continue

                output_fn = os.path.join(output, job.name)
                logger.debug("Writing XML to '{0}'".format(output_fn))
                with io.open(output_fn, 'w', encoding='utf-8') as f:
                    f.write(job.output().decode('utf-8'))
            return xml_jobs, len(xml_jobs)

        # Filter out the jobs that did not change
        logging.debug('Filtering %d jobs for changed jobs', len(xml_jobs))
        step = time.time()
        jobs = [job for job in xml_jobs if self.changed(job)]
        logging.debug("Filtered for changed jobs in %ss", (time.time() - step))

        if not jobs:
            return [], 0

        # Update the jobs
        logging.debug('Updating jobs')
        step = time.time()
        p_params = [{'job': job} for job in jobs]
        results = self.parallel_update_job(n_workers=n_workers,
                                           concurrent=p_params)
        logging.debug("Parsing results")
        # generalize the result parsing, as a concurrent job always returns a
        # list
        if len(p_params) in (1, 0):
            results = [results]
        for result in results:
            if isinstance(result, Exception):
                raise result
            else:
                # update in-memory cache
                j_name, j_md5 = result
                self.cache.set(j_name, j_md5)
        # write cache to disk
        self.cache.save()
        logging.debug("Updated %d jobs in %ss", len(jobs), time.time() - step)
        logging.debug("Total run took %ss", (time.time() - orig))
        return jobs, len(jobs)
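
Every writer above guards against EPIPE so that piping the output into a command such as `head` exits cleanly instead of raising. The guard in isolation:

import errno
import sys


def write_all(chunks, stream):
    for chunk in chunks:
        try:
            stream.write(chunk)
        except IOError as exc:
            if exc.errno == errno.EPIPE:
                # the reader closed the pipe (e.g. `... | head`);
                # stop writing quietly instead of crashing
                return
            raise


write_all([b'line 1\n', b'line 2\n'], sys.stdout.buffer)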
Example #5
    def update_views(self, xml_views, output=None, n_workers=None,
                     config_xml=False):
        orig = time.time()

        logger.info("Number of views generated:  %d", len(xml_views))
        xml_views.sort(key=operator.attrgetter('name'))

        if output:
            # ensure only wrapped once
            if hasattr(output, 'write'):
                output = utils.wrap_stream(output)

            for view in xml_views:
                if hasattr(output, 'write'):
                    # `output` is a file-like object
                    logger.info("View name:  %s", view.name)
                    logger.debug("Writing XML to '{0}'".format(output))
                    try:
                        output.write(view.output())
                    except IOError as exc:
                        if exc.errno == errno.EPIPE:
                            # EPIPE could happen if piping output to something
                            # that doesn't read the whole input (e.g.: the UNIX
                            # `head` command)
                            return
                        raise
                    continue

                if config_xml:
                    output_dir = os.path.join(output, view.name)
                    logger.info("Creating directory %s" % output_dir)
                    try:
                        os.makedirs(output_dir)
                    except OSError:
                        if not os.path.isdir(output_dir):
                            raise
                    output_fn = os.path.join(output_dir, 'config.xml')
                else:
                    output_fn = os.path.join(output, view.name)
                logger.debug("Writing XML to '{0}'".format(output_fn))
                with io.open(output_fn, 'w', encoding='utf-8') as f:
                    f.write(view.output().decode('utf-8'))
            return xml_views, len(xml_views)

        # Filter out the views that did not change
        logging.debug('Filtering %d views for changed views',
                      len(xml_views))
        step = time.time()
        views = [view for view in xml_views
                 if self.changed(view)]
        logging.debug("Filtered for changed views in %ss",
                      (time.time() - step))

        if not views:
            return [], 0

        # Update the views
        logging.debug('Updating views')
        step = time.time()
        p_params = [{'view': view} for view in views]
        results = self.parallel_update_view(
            n_workers=n_workers,
            concurrent=p_params)
        logging.debug("Parsing results")
        # generalize the result parsing, as a concurrent view always returns a
        # list
        if len(p_params) in (1, 0):
            results = [results]
        for result in results:
            if isinstance(result, Exception):
                raise result
            else:
                # update in-memory cache
                v_name, v_md5 = result
                self.cache.set(v_name, v_md5)
        # write cache to disk
        self.cache.save()
        logging.debug("Updated %d views in %ss",
                      len(views),
                      time.time() - step)
        logging.debug("Total run took %ss", (time.time() - orig))
        return views, len(views)
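
The try/except OSError around os.makedirs in these examples is the idiom for tolerating a directory that already exists or is created concurrently; on Python 3.2 and later a keyword argument achieves the same:

import os


def ensure_dir(path):
    # create the directory, tolerating it already existing
    try:
        os.makedirs(path)
    except OSError:
        if not os.path.isdir(path):
            raise

# equivalent on Python >= 3.2:
# os.makedirs(path, exist_ok=True)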
Example #6
    def update_views(self,
                     xml_views,
                     output=None,
                     n_workers=None,
                     config_xml=False):
        orig = time.time()

        logger.info("Number of views generated:  %d", len(xml_views))
        xml_views.sort(key=AlphanumSort)

        if output:
            # ensure only wrapped once
            if hasattr(output, 'write'):
                output = utils.wrap_stream(output)

            for view in xml_views:
                if hasattr(output, 'write'):
                    # `output` is a file-like object
                    logger.info("View name:  %s", view.name)
                    logger.debug("Writing XML to '{0}'".format(output))
                    try:
                        output.write(view.output())
                    except IOError as exc:
                        if exc.errno == errno.EPIPE:
                            # EPIPE could happen if piping output to something
                            # that doesn't read the whole input (e.g.: the UNIX
                            # `head` command)
                            return
                        raise
                    continue

                output_fn = self._setup_output(output, view.name, config_xml)

                logger.debug("Writing XML to '{0}'".format(output_fn))
                with io.open(output_fn, 'w', encoding='utf-8') as f:
                    f.write(view.output().decode('utf-8'))
            return xml_views, len(xml_views)

        # Filter out the views that did not change
        logging.debug('Filtering %d views for changed views', len(xml_views))
        step = time.time()
        views = [view for view in xml_views if self.changed(view)]
        logging.debug("Filtered for changed views in %ss",
                      (time.time() - step))

        if not views:
            return [], 0

        # Update the views
        logging.debug('Updating views')
        step = time.time()
        p_params = [{'view': view} for view in views]
        results = self.parallel_update_view(n_workers=n_workers,
                                            concurrent=p_params)
        logging.debug("Parsing results")
        # generalize the result parsing, as a concurrent view always returns a
        # list
        if len(p_params) in (1, 0):
            results = [results]
        for result in results:
            if isinstance(result, Exception):
                raise result
            else:
                # update in-memory cache
                v_name, v_md5 = result
                self.cache.set(v_name, v_md5)
        # write cache to disk
        self.cache.save()
        logging.debug("Updated %d views in %ss", len(views),
                      time.time() - step)
        logging.debug("Total run took %ss", (time.time() - orig))
        return views, len(views)
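
This version sorts views with an AlphanumSort key that the snippet does not define. A common natural-sort key with the same intent splits names into digit and non-digit runs; the helper below is an illustrative assumption, not the library's exact class:

import re


def alphanum_key(obj):
    # sort 'view2' before 'view10' by comparing digit runs numerically
    name = getattr(obj, 'name', obj)
    return [int(part) if part.isdigit() else part
            for part in re.split(r'(\d+)', name)]


print(sorted(['view10', 'view2', 'view1'], key=alphanum_key))
# ['view1', 'view2', 'view10']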
Example #7
    def update_jobs(self, input_fn, jobs_glob=None, output=None,
                    n_workers=None):
        orig = time.time()
        self.load_files(input_fn)
        self.parser.expandYaml(jobs_glob)
        self.parser.generateXML()
        step = time.time()
        logging.debug('%d XML files generated in %ss',
                      len(self.parser.jobs), str(step - orig))

        logger.info("Number of jobs generated:  %d", len(self.parser.xml_jobs))
        self.parser.xml_jobs.sort(key=operator.attrgetter('name'))

        if (output and not hasattr(output, 'write')
                and not os.path.isdir(output)):
            logger.info("Creating directory %s" % output)
            try:
                os.makedirs(output)
            except OSError:
                if not os.path.isdir(output):
                    raise

        if output:
            # wrap the stream once up front so it is not re-wrapped on
            # every pass through the loop
            if hasattr(output, 'write'):
                output = utils.wrap_stream(output)

            for job in self.parser.xml_jobs:
                if hasattr(output, 'write'):
                    # `output` is a file-like object
                    logger.info("Job name:  %s", job.name)
                    logger.debug("Writing XML to '{0}'".format(output))
                    try:
                        output.write(job.output())
                    except IOError as exc:
                        if exc.errno == errno.EPIPE:
                            # EPIPE could happen if piping output to something
                            # that doesn't read the whole input (e.g.: the UNIX
                            # `head` command)
                            return
                        raise
                    continue

                output_fn = os.path.join(output, job.name)
                logger.debug("Writing XML to '{0}'".format(output_fn))
                with io.open(output_fn, 'w', encoding='utf-8') as f:
                    f.write(job.output().decode('utf-8'))
            return self.parser.xml_jobs, len(self.parser.xml_jobs)

        # Filter out the jobs that did not change
        logging.debug('Filtering %d jobs for changed jobs',
                      len(self.parser.xml_jobs))
        step = time.time()
        jobs = [job for job in self.parser.xml_jobs
                if self.changed(job)]
        logging.debug("Filtered for changed jobs in %ss",
                      (time.time() - step))

        if not jobs:
            return [], 0

        # Update the jobs
        logging.debug('Updating jobs')
        step = time.time()
        p_params = [{'job': job} for job in jobs]
        results = self.parallel_update_job(
            n_workers=n_workers,
            parallelize=p_params)
        logging.debug("Parsing results")
        # generalize the result parsing, as a parallelized job always returns a
        # list
        if len(p_params) in (1, 0):
            results = [results]
        for result in results:
            if isinstance(result, Exception):
                raise result
            else:
                # update in-memory cache
                j_name, j_md5 = result
                self.cache.set(j_name, j_md5)
        # write cache to disk
        self.cache.save()
        logging.debug("Updated %d jobs in %ss",
                      len(jobs),
                      time.time() - step)
        logging.debug("Total run took %ss", (time.time() - orig))
        return jobs, len(jobs)
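
The "generalize the result parsing" step exists because the concurrent helper is assumed to return a bare result for zero or one work items and a list otherwise; wrapping the bare case lets a single loop handle both. A standalone sketch, with a trivial stand-in for the parallel call:

def run_parallel(params):
    # stand-in for parallel_update_job: returns a bare result for 0 or 1
    # work items and a list of results otherwise
    results = [(p['name'], 'md5-of-' + p['name']) for p in params]
    if len(results) > 1:
        return results
    return results[0] if results else None


p_params = [{'name': 'job-a'}]
results = run_parallel(p_params)
if len(p_params) in (1, 0):
    # normalize so the loop below always iterates over a list
    results = [results]
for result in results:
    if isinstance(result, Exception):
        raise result
    name, md5 = result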