Example #1
 def _create_job(self, job_id, data):
     query = Job.query.options(load_only('id', 'job_id'))
     query = query.filter_by(job_id=job_id, run=self.run)
     if query.first():
         error('/errors/invalid/',
               "job with job_id %s already exists" % job_id)
     else:
         log.info("Creating job: %s/%s", data.get('name', '<no name!>'),
                  job_id)
         self.job = Job(data, self.run)
         Session.commit()
         return self.job
Example #2
 def index_post(self):
     """
     We update a job here; it should obviously already exist, but most
     likely the data is empty.
     """
     if not self.job:
         error('/errors/not_found/',
               'attempted to update a non-existent job')
     old_job_status = self.job.status
     self.job.update(request.json)
     Session.commit()
     if self.job.status != old_job_status:
         log.info("Job %s/%s status changed from %s to %s", self.job.name,
                  self.job.job_id, old_job_status, self.job.status)
     return dict()
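A hedged client-side sketch of exercising this handler with the requests library; the base URL and route below are illustrative assumptions, not taken from the example:

import requests

# Hypothetical paddles deployment and job route; adjust to the real service.
url = 'http://paddles.example.com/runs/my-run/jobs/12345/'
resp = requests.post(url, json={'status': 'running'})
resp.raise_for_status()
print(resp.json())  # the handler above returns an empty dict on success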
Example #3
    def job_stats(self, machine_type="", since_days=14):
        since_days = int(since_days)
        if since_days < 1:
            error("/errors/invalid/", "since_days must be a positive integer")

        now = datetime.utcnow()
        past = now - timedelta(days=since_days)
        recent_jobs = Job.query.filter(Job.posted.between(past, now)).subquery()
        RecentJob = aliased(Job, recent_jobs)

        query = Session.query(Node.name, RecentJob.status, func.count("*"))

        if machine_type:
            # Note: filtering by Job.machine_type (as below) greatly improves
            # performance but could lead to slightly incorrect values if many
            # jobs are being scheduled using mixed machine types. We work
            # around this by including the 'multi' machine type (which is the
            # name of the queue Inktank uses for such jobs).
            query = query.filter(RecentJob.machine_type.in_((machine_type, "multi")))
            query = query.filter(Node.machine_type == machine_type)

        query = query.join(RecentJob.target_nodes).group_by(Node).group_by(RecentJob.status)

        all_stats = {}
        results = query.all()
        for (name, status, count) in results:
            node_stats = all_stats.get(name, {})
            node_stats[status] = count
            all_stats[name] = node_stats

        stats_sorter = lambda t: sum(t[1].values())
        ordered_stats = OrderedDict(sorted(all_stats.items(), key=stats_sorter))
        return ordered_stats
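The aggregation and ordering at the end of job_stats() can be sketched standalone; the (name, status, count) rows below are hypothetical stand-ins for what query.all() returns:

from collections import OrderedDict

# Hypothetical rows in the shape produced by query.all()
results = [
    ('smithi001', 'pass', 40),
    ('smithi001', 'fail', 2),
    ('smithi002', 'pass', 10),
]

all_stats = {}
for name, status, count in results:
    node_stats = all_stats.get(name, {})
    node_stats[status] = count
    all_stats[name] = node_stats

# Nodes with the fewest recent jobs sort first, as in job_stats() above.
ordered = OrderedDict(sorted(all_stats.items(), key=lambda t: sum(t[1].values())))
print(ordered)
# OrderedDict([('smithi002', {'pass': 10}), ('smithi001', {'pass': 40, 'fail': 2})])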
Example #4
    def update(self, values):
        """
        :param values: a dict.
        """
        self._check_for_update(values)
        was_locked = self.locked

        for k, v in values.items():
            if k in self.allowed_update_keys:
                if k == 'vm_host':
                    vm_host_name = v
                    query = self.query.filter(Node.name == vm_host_name)
                    v = query.one()
                setattr(self, k, v)

        if 'locked' in values:
            if self.locked != was_locked:
                self.locked_since = datetime.utcnow() if self.locked else None
            if not self.locked:
                self.locked_by = None
        Session.flush()
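The lock bookkeeping at the end of update() is easiest to see in isolation; FakeNode below is a hypothetical stand-in that only reproduces the locked/locked_by/locked_since handling shown above, not the real Node model:

from datetime import datetime

class FakeNode:
    # Hypothetical stand-in for the Node model; only the lock fields matter here.
    def __init__(self):
        self.locked = False
        self.locked_by = None
        self.locked_since = None

    def update(self, values):
        was_locked = self.locked
        for k, v in values.items():
            setattr(self, k, v)
        # Same bookkeeping as Node.update() above: stamp locked_since when the
        # lock state flips, and clear locked_by on unlock.
        if 'locked' in values:
            if self.locked != was_locked:
                self.locked_since = datetime.utcnow() if self.locked else None
            if not self.locked:
                self.locked_by = None

node = FakeNode()
node.update({'locked': True, 'locked_by': 'user@host'})
print(node.locked, node.locked_since is not None)   # True True
node.update({'locked': False})
print(node.locked_by, node.locked_since)            # None None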
Example #5
    def job_stats(self, machine_type='', since_days=14):
        since_days = int(since_days)
        if since_days < 1:
            error('/errors/invalid/', "since_days must be a positive integer")

        now = datetime.utcnow()
        past = now - timedelta(days=since_days)
        recent_jobs = Job.query.filter(Job.posted.between(past,
                                                          now)).subquery()
        RecentJob = aliased(Job, recent_jobs)

        query = Session.query(Node.name, RecentJob.status, func.count('*'))

        if machine_type:
            # Note: filtering by Job.machine_type (as below) greatly improves
            # performance but could lead to slightly incorrect values if many
            # jobs are being scheduled using mixed machine types. We work
            # around this by including the 'multi' machine type (which is the
            # name of the queue Inktank uses for such jobs).
            query = query.filter(
                RecentJob.machine_type.in_((machine_type, 'multi')))
            query = query.filter(Node.machine_type == machine_type)

        query = query.join(RecentJob.target_nodes).group_by(Node)\
            .group_by(RecentJob.status)

        all_stats = {}
        results = query.all()
        for (name, status, count) in results:
            node_stats = all_stats.get(name, {})
            node_stats[status] = count
            all_stats[name] = node_stats

        stats_sorter = lambda t: sum(t[1].values())
        ordered_stats = OrderedDict(sorted(all_stats.items(),
                                           key=stats_sorter))
        return ordered_stats
Example #6
 def index(self):
     query = Session.query(Run).join(Job).filter(Job.status == 'queued')\
         .group_by(Run).order_by(Run.scheduled)
     return query.all()
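This query pattern (runs that have at least one queued job, oldest scheduled first) can be tried end to end with throwaway models; everything below (the Run/Job columns, table names, and sample data) is a hypothetical sketch, not the real paddles schema:

from datetime import datetime, timedelta

from sqlalchemy import Column, DateTime, ForeignKey, Integer, String, create_engine
from sqlalchemy.orm import declarative_base, relationship, sessionmaker

Base = declarative_base()

class Run(Base):
    __tablename__ = 'runs'
    id = Column(Integer, primary_key=True)
    name = Column(String)
    scheduled = Column(DateTime)
    jobs = relationship('Job', backref='run')

class Job(Base):
    __tablename__ = 'jobs'
    id = Column(Integer, primary_key=True)
    run_id = Column(Integer, ForeignKey('runs.id'))
    status = Column(String)

engine = create_engine('sqlite://')
Base.metadata.create_all(engine)
Session = sessionmaker(bind=engine)()

now = datetime.utcnow()
r1 = Run(name='run-a', scheduled=now - timedelta(hours=2))
r2 = Run(name='run-b', scheduled=now - timedelta(hours=1))
Session.add_all([r1, r2,
                 Job(run=r1, status='queued'),
                 Job(run=r2, status='pass')])
Session.commit()

# Same query shape as index() above: only runs with queued jobs come back.
queued_runs = Session.query(Run).join(Job).filter(Job.status == 'queued')\
    .group_by(Run).order_by(Run.scheduled).all()
print([r.name for r in queued_runs])  # ['run-a']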
Example #7
 def _create_run(cls, name):
     log.info("Creating run: %s", name)
     Session.flush()
     return Run(name)
Example #9
 def update(self, json_data):
     self.set_or_update(json_data)
     Session.flush()