Example #1
    def reload_nginx(self, cr, uid, context=None):
        """
        completely override the method
        """
        settings = {}
        settings['port'] = config['xmlrpc_port']
        nginx_dir = os.path.join(self.root(cr, uid), 'nginx')
        settings['nginx_dir'] = nginx_dir
        ids = self.search(cr, uid, [('nginx', '=', True)], order='id')
        if ids:
            build_ids = self.pool['runbot.build'].search(cr, uid, [('repo_id', 'in', ids), ('state', '=', 'running')])
            settings['builds'] = self.pool['runbot.build'].browse(cr, uid, build_ids)

            nginx_config = self.pool['ir.ui.view'].render(cr, uid, "runbot.nginx_config", settings)
            mkdirs([nginx_dir])
            with open(os.path.join(nginx_dir, 'nginx.conf'), 'w') as f:
                f.write(nginx_config)
            _logger.debug('reload nginx')
            run(['sudo', '/bin/systemctl', 'reload', 'nginx'])
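
The override above follows a simple pattern: render a config from a settings dict, write it to disk, then reload nginx. Below is a minimal, self-contained sketch of that pattern, with Python's string.Template standing in for the QWeb rendering done by ir.ui.view.render; the template text and paths are illustrative assumptions, not runbot's actual "runbot.nginx_config" template:

    import os
    import subprocess
    from string import Template

    # Hypothetical stand-in for the QWeb "runbot.nginx_config" template.
    NGINX_TEMPLATE = Template(
        "server {\n"
        "    listen $port;\n"
        "    root $nginx_dir;\n"
        "}\n")

    def write_and_reload_nginx(settings):
        # Render the config from the settings dict, mirroring how the
        # overridden method feeds 'port' and 'nginx_dir' to the view.
        conf = NGINX_TEMPLATE.substitute(settings)
        os.makedirs(settings['nginx_dir'], exist_ok=True)
        with open(os.path.join(settings['nginx_dir'], 'nginx.conf'), 'w') as f:
            f.write(conf)
        # check=True surfaces a failed reload instead of silently ignoring it.
        subprocess.run(['sudo', '/bin/systemctl', 'reload', 'nginx'], check=True)

    # e.g. write_and_reload_nginx({'port': 8069, 'nginx_dir': '/tmp/runbot/nginx'})
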
Example #2
    def job_21_coverage(self, build, lock_path, log_path):
        if not build.repo_id.use_coverage:
            return
        output = build.path('logs/job_21_coverage')
        mkdirs([output])
        result = None
        with build._chdir():
            result = run(
                build.repo_id._coverage_command('html', '--directory', output,
                                                '--title', build.name))
        if result:
            build.write({
                'result': 'ko',
            })
            build.github_status()
        output = os.path.join(output, 'index.html')
        if os.path.exists(output):
            with open(output) as f:
                doc = etree.fromstring(f.read(), etree.HTMLParser())
            coverage = 0.0
            for node in doc.xpath("//tr[@class='total']/td[@data-ratio]"):
                covered_lines, all_lines = node.get('data-ratio').split()
                coverage = float(covered_lines or 0) / float(all_lines or 1)
                coverage *= 100
            build.write({
                'coverage': coverage,
            })
            version = (build.branch_id.branch_name or '').split('-')[0]
            target_build = self.env['runbot.build'].search(
                [('id', 'not in', build.ids),
                 ('repo_id', 'in', build.repo_id.ids),
                 ('branch_id.branch_name', '=', version)],
                limit=1)
            if target_build.coverage and target_build.coverage > coverage:
                build._log(
                    'coverage', 'coverage dropped from %.2f in %s to %.2f' %
                    (target_build.coverage, target_build.branch_id.branch_name,
                     coverage))
                build.write({
                    'result': 'ko',
                })
                build.github_status()
        return result
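
The coverage number comes from the data-ratio attribute on the totals row of coverage.py's HTML report. The extraction logic is easy to isolate and test on a snippet; the sample markup below is an assumption based on the XPath used above:

    from lxml import etree

    # Sample of the totals-row markup the XPath above expects.
    SAMPLE_HTML = (
        "<html><body><table><tr class='total'>"
        "<td data-ratio='180 200'>90%</td>"
        "</tr></table></body></html>")

    def extract_coverage(html_text):
        """Return the total coverage percentage found in a coverage.py
        HTML report, or 0.0 if the totals row is missing."""
        doc = etree.fromstring(html_text, etree.HTMLParser())
        coverage = 0.0
        for node in doc.xpath("//tr[@class='total']/td[@data-ratio]"):
            covered, total = node.get('data-ratio').split()
            coverage = 100.0 * float(covered or 0) / float(total or 1)
        return coverage

    assert extract_coverage(SAMPLE_HTML) == 90.0
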
Example #3
    def checkout(self, cr, uid, ids, context=None):
        """Checkout in custom build directories if they are specified
        Do same as superclass except for git_export path.
        """
        for build in self.browse(cr, uid, ids, context=context):
            if build.prebuilt:
                continue
            # starts from scratch
            if os.path.isdir(build.path()):
                shutil.rmtree(build.path())

            # runbot log path
            mkdirs([build.path("logs")])

            # checkout branch
            build_path = build.path()
            custom_build_dir = build.repo_id.custom_build_dir
            if custom_build_dir:
                mkdirs([build.path(custom_build_dir)])
                build_path = os.path.join(build_path, custom_build_dir)
            build.repo_id.git_export(build.name, build_path)
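
For context, git_export materializes a git ref into a plain directory with no .git metadata, which runbot does roughly by piping "git archive" into tar. A hedged standalone equivalent (repo path handling and argument names here are placeholders, not runbot's exact helper):

    import os
    import subprocess

    def git_export(repo_dir, ref, dest):
        # Snapshot `ref` into `dest` without any .git metadata, roughly what
        # build.repo_id.git_export(build.name, build_path) does above.
        os.makedirs(dest, exist_ok=True)
        archive = subprocess.Popen(['git', 'archive', ref],
                                   cwd=repo_dir, stdout=subprocess.PIPE)
        subprocess.check_call(['tar', '-x', '-C', dest], stdin=archive.stdout)
        archive.stdout.close()
        archive.wait()
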
Example #4
    def reload_nginx(self, cr, uid, context=None):
        """
        completely override the method
        """
        settings = {}
        settings['port'] = config['xmlrpc_port']
        nginx_dir = os.path.join(self.root(cr, uid), 'nginx')
        settings['nginx_dir'] = nginx_dir
        ids = self.search(cr, uid, [('nginx', '=', True)], order='id')
        if ids:
            build_ids = self.pool['runbot.build'].search(
                cr, uid, [('repo_id', 'in', ids), ('state', '=', 'running')])
            settings['builds'] = self.pool['runbot.build'].browse(
                cr, uid, build_ids)

            nginx_config = self.pool['ir.ui.view'].render(
                cr, uid, "runbot.nginx_config", settings)
            mkdirs([nginx_dir])
            with open(os.path.join(nginx_dir, 'nginx.conf'), 'w') as f:
                f.write(nginx_config)
            _logger.debug('reload nginx')
            run(['sudo', '/usr/sbin/service', 'nginx', 'reload'])
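
The only substantive difference from Example #1 is the reload command: systemctl versus the SysV service wrapper. If a deployment needs to work on both kinds of hosts, a small helper can pick whichever is available; this is a sketch, not part of runbot:

    import shutil
    import subprocess

    def reload_nginx_cmd():
        # Prefer systemd when present, fall back to the SysV service wrapper;
        # the two paths mirror the variants shown in Examples #1 and #4.
        if shutil.which('systemctl'):
            return ['sudo', '/bin/systemctl', 'reload', 'nginx']
        return ['sudo', '/usr/sbin/service', 'nginx', 'reload']

    subprocess.check_call(reload_nginx_cmd())
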
Example #5
    def schedule(self, cr, uid, ids, context=None):
        all_jobs = self.list_jobs()
        icp = self.pool['ir.config_parameter']
        timeout = int(icp.get_param(cr, uid, 'runbot.timeout', default=1800))

        for build in self.browse(cr, uid, ids, context=context):
            # remove skipped jobs
            jobs = all_jobs[:]
            for job_to_skip in build.repo_id.skip_job_ids:
                jobs.remove(job_to_skip.name)
            if build.state == 'pending':
                # allocate port and schedule first job
                port = self.find_port(cr, uid)
                values = {
                    'host': fqdn(),
                    'port': port,
                    'state': 'testing',
                    'job': jobs[0],
                    'job_start': now(),
                    'job_end': False,
                }
                build.write(values)
                cr.commit()
            else:
                # check if current job is finished
                lock_path = build.path('logs', '%s.lock' % build.job)
                if locked(lock_path):
                    # kill if overpassed
                    if build.job != jobs[-1] and build.job_time > timeout:
                        build.logger('%s time exceeded (%ss)', build.job, build.job_time)
                        build.write({'job_end': now()})
                        build.kill(result='killed')
                    continue
                build.logger('%s finished', build.job)
                # schedule
                v = {}
                # testing -> running
                if build.job == jobs[-2]:
                    v['state'] = 'running'
                    v['job'] = jobs[-1]
                    v['job_end'] = now()
                # running -> done
                elif build.job == jobs[-1]:
                    v['state'] = 'done'
                    v['job'] = ''
                # testing
                else:
                    v['job'] = jobs[jobs.index(build.job) + 1]
                build.write(v)
            build.refresh()

            # run job
            pid = None
            if build.state != 'done':
                build.logger('running %s', build.job)
                job_method = getattr(self, build.job)
                mkdirs([build.path('logs')])
                lock_path = build.path('logs', '%s.lock' % build.job)
                log_path = build.path('logs', '%s.txt' % build.job)
                pid = job_method(cr, uid, build, lock_path, log_path)
                build.write({'pid': pid})
            # needed to prevent losing pids if multiple jobs are started and one of them raises an exception
            cr.commit()

            if pid == -2:
                # no process to wait, directly call next job
                # FIXME find a better way than this recursive call
                build.schedule()

            # cleanup only needed if it was not killed
            if build.state == 'done':
                build._local_cleanup()
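
The transition block buried in the scheduler reduces to a pure function over the ordered job list, which makes the testing -> running -> done lifecycle easier to see and to test. A sketch using runbot's historical job names; next_state is a hypothetical helper, not part of runbot:

    def next_state(jobs, current_job):
        """Return the (state, next_job) pair for a job that just finished,
        mirroring the transition logic in schedule() above."""
        if current_job == jobs[-2]:   # last test job finished -> start running
            return 'running', jobs[-1]
        if current_job == jobs[-1]:   # running job finished -> build is done
            return 'done', ''
        # still testing: advance to the next job in the list
        return 'testing', jobs[jobs.index(current_job) + 1]

    jobs = ['job_10_test_base', 'job_20_test_all', 'job_30_run']
    assert next_state(jobs, 'job_10_test_base') == ('testing', 'job_20_test_all')
    assert next_state(jobs, 'job_20_test_all') == ('running', 'job_30_run')
    assert next_state(jobs, 'job_30_run') == ('done', '')

The _schedule variant below applies the same transitions but uses underscore-prefixed helpers and wraps the job method in a try/except, so a crashing job marks the build 'ko' instead of aborting the whole scheduler loop.
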
    def _schedule(self, cr, uid, ids, context=None):
        all_jobs = self._list_jobs()
        icp = self.pool['ir.config_parameter']
        timeout = int(icp.get_param(cr, uid, 'runbot.timeout', default=1800))

        for build in self.browse(cr, uid, ids, context=context):
            # remove skipped jobs
            jobs = all_jobs[:]
            for job_to_skip in build.repo_id.skip_job_ids:
                jobs.remove(job_to_skip.name)
            if build.state == 'pending':
                # allocate port and schedule first job
                port = self._find_port(cr, uid)
                values = {
                    'host': fqdn(),
                    'port': port,
                    'state': 'testing',
                    'job': jobs[0],
                    'job_start': now(),
                    'job_end': False,
                }
                build.write(values)
                cr.commit()
            else:
                # check if current job is finished
                lock_path = build._path('logs', '%s.lock' % build.job)
                if locked(lock_path):
                    # kill if overpassed
                    if build.job != jobs[-1] and build.job_time > timeout:
                        build._logger('%s time exceeded (%ss)', build.job,
                                      build.job_time)
                        build.write({'job_end': now()})
                        build._kill(result='killed')
                    continue
                build._logger('%s finished', build.job)
                # schedule
                v = {}
                # testing -> running
                if build.job == jobs[-2]:
                    v['state'] = 'running'
                    v['job'] = jobs[-1]
                    v['job_end'] = now()
                # running -> done
                elif build.job == jobs[-1]:
                    v['state'] = 'done'
                    v['job'] = ''
                # testing
                else:
                    v['job'] = jobs[jobs.index(build.job) + 1]
                build.write(v)
            build.refresh()

            # run job
            pid = None
            if build.state != 'done':
                build._logger('running %s', build.job)
                job_method = getattr(self, '_' + build.job)
                mkdirs([build._path('logs')])
                lock_path = build._path('logs', '%s.lock' % build.job)
                log_path = build._path('logs', '%s.txt' % build.job)
                try:
                    pid = job_method(cr, uid, build, lock_path, log_path)
                    build.write({'pid': pid})
                except Exception:
                    _logger.exception('%s failed running method %s',
                                      build.dest, build.job)
                    build._log(build.job,
                               "failed running job method, see runbot log")
                    build._kill(result='ko')
                    continue
            # needed to prevent losing pids if multiple jobs are started and one of them raises an exception
            cr.commit()

            if pid == -2:
                # no process to wait, directly call next job
                # FIXME find a better way than this recursive call
                build._schedule()

            # cleanup only needed if it was not killed
            if build.state == 'done':
                build._local_cleanup()
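
Both schedulers lean on the locked() helper to decide whether the current job's subprocess is still alive. A minimal Unix-only sketch of how such a check can work, assuming each job process holds a non-blocking exclusive lock on its lock file for its whole lifetime (this is an assumption about the helper, not its verbatim source):

    import fcntl

    def locked(path):
        """Return True while the job that owns `path` is still running."""
        try:
            with open(path, 'a') as f:
                # If the owning process still holds the lock, this raises.
                fcntl.lockf(f, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except OSError:
            return True   # lock held elsewhere: job still running
        return False      # we acquired the lock: the job has finished
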