Example #1
    def run_args(cls, config, args):
        # Open a database session and fetch the current queue, applying the
        # project/branch filters and pagination options from the parsed
        # command line arguments.
        session_factory = orm.init_sql(config['db_url'])
        _, queue = functions.get_queue(session_factory(), args.project_filter,
                                       args.branch_filter, args.offset,
                                       args.limit)
        # Serialize the ChangeInfo records as a JSON list on stdout.
        queue = [item.as_dict() for item in queue]
        json.dump(queue, sys.stdout, indent=2, separators=(',', ': '))
        sys.stdout.write('\n')
Example #2
    def get_queue(self):
        """
        Return a json-encoded list of ChangeInfo objects for the current
        queue.

        Query params:
          `project` : SQL `LIKE` expression for projects to match
          `branch` : SQL `LIKE` expression for branches to match
          `offset` : start offset for pagination
          `limit` : maximum number of records to return
        """

        project_filter, branch_filter, offset, limit \
            = extract_common_args(flask.request.args)

        # Query the database for the current queue, close the session, and
        # let flask serialize the result as the JSON response body.
        sql = self.sql_factory()
        result = functions.get_queue(sql, project_filter, branch_filter,
                                     offset, limit)
        sql.close()
        return flask.jsonify(result)
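
The docstring above describes the endpoint's query parameters. Below is a
minimal client-side sketch of calling it, assuming the handler is exposed at
a hypothetical `/get_queue` route on a locally running instance; the route
name, host, and port are illustrative, not taken from the project:

import requests

# Hypothetical URL; the real mount point depends on how the flask app
# registers this handler.
response = requests.get(
    'http://localhost:8080/get_queue',
    params={
        'project': 'my-project%',  # SQL LIKE expression for projects
        'branch': 'release/%',     # SQL LIKE expression for branches
        'offset': 0,               # start offset for pagination
        'limit': 25,               # maximum number of records to return
    })
print(response.json())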
Example #3
    def run_args(cls, config, args):
        # Open a database session and dump the queue for the given
        # project/branch filters as JSON on stdout (offset 0, limit -1).
        session_factory = orm.init_sql(config['db_url'])
        queue = functions.get_queue(session_factory(), args.project_filter,
                                    args.branch_filter, 0, -1)
        json.dump(queue, sys.stdout, indent=2, separators=(',', ': '))
        sys.stdout.write('\n')
Example #4
    def run(self, watch_manifest):
        pidfile_path = self.config.get('daemon.pidfile_path', './pid')
        handle_pid_file(pidfile_path)
        poll_period = self.config.get('daemon.poll_period', 60)
        offline_sentinel_path = self.config.get('daemon.offline_sentinel_path',
                                                './pause')

        mark_old_changes_as_failed(self.sql_session)
        last_poll_time = 0

        while True:
            functions.restart_if_modified(watch_manifest, pidfile_path)

            try:
                if os.path.exists(offline_sentinel_path):
                    logging.info('Offline sentinel exists, bypassing merges')
                    while os.path.exists(offline_sentinel_path):
                        functions.restart_if_modified(watch_manifest,
                                                      pidfile_path)
                        time.sleep(1)
                    logging.info('Offline sentinel removed, continuing')
                    continue

                # If the loop was faster than poll period, then wait for
                # the remainder of the period to prevent spamming gerrit
                loop_duration = time.time() - last_poll_time
                backoff_duration = poll_period - loop_duration
                if backoff_duration > 0:
                    logging.info(
                        'Loop was very fast, waiting for '
                        '%6.2f seconds', backoff_duration)
                    time.sleep(backoff_duration)

                last_poll_time = time.time()
                poll_id = functions.get_next_poll_id(self.sql_session)
                functions.poll_gerrit(self.gerrit, self.sql_session, poll_id)
                _, global_queue = functions.get_queue(self.sql_session)

                queue_spec, request_queue = \
                    get_requests_from_single_queue(global_queue, self.queues)

                if queue_spec is None or not request_queue:
                    # If there are no changes to any of the queues that this
                    # daemon is monitoring then we have nothing to do here.
                    continue

                if queue_spec.coalesce_count > 0 and len(request_queue) > 1:
                    # NOTE(josh): Only coalesce changes that have never failed
                    # verification before.
                    coalesce_queue = []
                    for changeinfo in request_queue:
                        if changeinfo.change_id in queue_spec.dirty_changes:
                            logging.info(
                                'ceasing merge coalition since %s is dirty',
                                changeinfo.change_id)
                            break
                        else:
                            coalesce_queue.append(changeinfo)
                        if len(coalesce_queue) >= queue_spec.coalesce_count:
                            break

                    if len(coalesce_queue) > 1:
                        result = self.coalesce_merge(queue_spec,
                                                     coalesce_queue)
                        if result == 0:
                            # The coalition of changes was verified together
                            # and all of them have been merged, so we can
                            # poll gerrit and move on to more changes.
                            continue
                        else:
                            for changeinfo in coalesce_queue:
                                queue_spec.dirty_changes.add(
                                    changeinfo.change_id)
                    else:
                        logging.info(
                            'falling back to single-merge since coalition '
                            'contains only one clean change')
                else:
                    logging.info(
                        'skipping merge coalition, coalesce_count: %d, '
                        'len(request_queue): %d', queue_spec.coalesce_count,
                        len(request_queue))

                # NOTE(josh): only do one merge per request to gerrit so that
                # any changes to the queue (i.e. gerrit state through review
                # updates or priority changes) are reflected in the merge order,
                # as well as allowing us to pick-up on the pause sentinel
                self.coalesce_merge(queue_spec, request_queue[:1])
                queue_spec.dirty_changes.discard(request_queue[0].change_id)

            except (httplib2.HttpLib2Error, requests.RequestException):
                logging.exception(
                    'Error retrieving merge requests from gerrit')
                continue

            except KeyboardInterrupt:
                break

        logging.info('Exiting main loop')

        return 0
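
Taken together, these call sites suggest that functions.get_queue takes a SQL
session, SQL LIKE filters for project and branch, and offset/limit pagination
arguments, and (in the first and fourth examples) returns a count alongside
the list of ChangeInfo rows. Below is a minimal illustrative stub under those
assumptions, written against SQLAlchemy-style queries; the model name, the
ordering, and the negative-limit semantics are guesses, not the project's
actual implementation:

def get_queue(sql, project_filter='%', branch_filter='%', offset=0, limit=-1):
    """Illustrative stub: return (total_count, changes) for the merge queue,
    filtered by SQL LIKE expressions and optionally paginated."""
    query = (sql.query(orm.ChangeInfo)  # hypothetical ORM model name
             .filter(orm.ChangeInfo.project.like(project_filter))
             .filter(orm.ChangeInfo.branch.like(branch_filter)))
    total_count = query.count()
    if limit >= 0:  # assume a negative limit means "return everything"
        query = query.offset(offset).limit(limit)
    return total_count, query.all()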