def schedule(self, cr, uid, ids, context=None):
    """Extend build scheduling with docker SSH access.

    After the standard scheduling pass, every build that has just reached
    the running stage (`job_30_run`, result ok/warn) of a travis2docker
    repo gets sshd started inside its container and the collected SSH
    public keys appended to the `odoo` user's authorized_keys.  On the
    build's own host, the build URL is opened in a background thread.
    """
    res = super(RunbotBuild, self).schedule(cr, uid, ids, context=context)
    host_name = fqdn()
    for build in self.browse(cr, uid, ids, context=context):
        is_ready = (
            build.state == 'running'
            and build.job == 'job_30_run'
            and build.result in ['ok', 'warn']
            and not build.docker_executed_commands
            and build.repo_id.is_travis2docker_build
        )
        if not is_ready:
            continue
        # Give the container a moment before exec'ing into it.
        time.sleep(20)
        # Mark first so the commands run at most once per build.
        build.write({'docker_executed_commands': True})
        run(['docker', 'exec', '-d', '--user', 'root',
             build.docker_container, '/etc/init.d/ssh', 'start'])
        keys = self.get_ssh_keys(cr, uid, build, context=context) or ''
        extra_keys_path = os.path.expanduser('~/.ssh/runbot_authorized_keys')
        if os.path.isfile(extra_keys_path):
            with open(extra_keys_path) as extra_keys_file:
                keys = keys + "\n" + extra_keys_file.read()
        keys = keys.strip(" \n")
        if keys:
            # NOTE(review): keys are %-interpolated into a bash string; a
            # key containing a single quote would break/inject the shell
            # command -- confirm key material is trusted.
            run(['docker', 'exec', '-d', '--user', 'odoo',
                 build.docker_container, "bash", "-c",
                 "echo '%(keys)s' | tee -a '%(dir)s'" % dict(
                     keys=keys, dir="/home/odoo/.ssh/authorized_keys")])
        if host_name == build.host:
            opener = threading.Thread(
                target=RunbotBuild._open_url, args=(build.port,))
            opener.start()
    return res
def cron(self, cr, uid, ids=None, context=None):
    """Run the standard repo cron, except on the phased-out main server.

    On the main host builds are being phased out, so the cron is a no-op
    there; everywhere else it defers to the parent implementation.
    """
    # NOTE(review): 'odoo-communty.org' looks like a typo for
    # 'odoo-community.org' -- confirm against the real server FQDN
    # before changing it.
    current = fqdn()
    if current != 'runbot.odoo-communty.org':
        return super(RunbotRepo, self).cron(cr, uid, ids, context=context)
    return
def schedule(self, cr, uid, ids, context=None):
    """Advance each build through its job pipeline.

    Pending builds get a host/port allocated and their first job started.
    For other builds, once the current job's lock file is released the
    build moves to the next job (testing -> running -> done); a build that
    exceeds the ``runbot.timeout`` config parameter is killed.

    Bug fixed: ``v['job_end'] = now(),`` had a trailing comma, so the
    testing->running transition stored a 1-tuple instead of the timestamp
    string returned by ``now()``.
    """
    all_jobs = self.list_jobs()
    icp = self.pool['ir.config_parameter']
    timeout = int(icp.get_param(cr, uid, 'runbot.timeout', default=1800))
    for build in self.browse(cr, uid, ids, context=context):
        # Remove jobs this repository asked to skip.
        jobs = all_jobs[:]
        for job_to_skip in build.repo_id.skip_job_ids:
            jobs.remove(job_to_skip.name)
        if build.state == 'pending':
            # Allocate a port and schedule the first job.
            port = self.find_port(cr, uid)
            values = {
                'host': fqdn(),
                'port': port,
                'state': 'testing',
                'job': jobs[0],
                'job_start': now(),
                'job_end': False,
            }
            build.write(values)
            cr.commit()
        else:
            # Check whether the current job is finished (lock released).
            lock_path = build.path('logs', '%s.lock' % build.job)
            if locked(lock_path):
                # Still running: kill it if it overpassed the timeout.
                if build.job != jobs[-1] and build.job_time > timeout:
                    build.logger('%s time exceded (%ss)',
                                 build.job, build.job_time)
                    build.write({'job_end': now()})
                    build.kill(result='killed')
                continue
            build.logger('%s finished', build.job)
            # Schedule the next job.
            v = {}
            if build.job == jobs[-2]:
                # testing -> running
                v['state'] = 'running'
                v['job'] = jobs[-1]
                v['job_end'] = now()  # fixed: was a 1-tuple (trailing comma)
            elif build.job == jobs[-1]:
                # running -> done
                v['state'] = 'done'
                v['job'] = ''
            else:
                # Still testing: advance to the next job in the pipeline.
                v['job'] = jobs[jobs.index(build.job) + 1]
            build.write(v)
        build.refresh()
        # Run the (possibly new) current job.
        pid = None
        if build.state != 'done':
            build.logger('running %s', build.job)
            job_method = getattr(self, build.job)
            mkdirs([build.path('logs')])
            lock_path = build.path('logs', '%s.lock' % build.job)
            log_path = build.path('logs', '%s.txt' % build.job)
            pid = job_method(cr, uid, build, lock_path, log_path)
            build.write({'pid': pid})
        # Needed to prevent losing pids if multiple jobs are started and
        # one of them raises an exception.
        cr.commit()
        if pid == -2:
            # No process to wait for, directly call the next job.
            # FIXME find a better way than this recursive call.
            build.schedule()
        # Cleanup is only needed if the build was not killed.
        if build.state == 'done':
            build._local_cleanup()
def _schedule(self, cr, uid, ids, context=None):
    """Advance each build through its job pipeline (underscore API).

    Pending builds get a host/port allocated and their first job started.
    For other builds, once the current job's lock file is released the
    build moves to the next job (testing -> running -> done); a build that
    exceeds the ``runbot.timeout`` config parameter is killed.  A job
    method that raises is logged and the build is killed with result 'ko'.

    Bug fixed: ``v['job_end'] = now(),`` had a trailing comma, so the
    testing->running transition stored a 1-tuple instead of the timestamp
    string returned by ``now()``.
    """
    all_jobs = self._list_jobs()
    icp = self.pool['ir.config_parameter']
    timeout = int(icp.get_param(cr, uid, 'runbot.timeout', default=1800))
    for build in self.browse(cr, uid, ids, context=context):
        # Remove jobs this repository asked to skip.
        jobs = all_jobs[:]
        for job_to_skip in build.repo_id.skip_job_ids:
            jobs.remove(job_to_skip.name)
        if build.state == 'pending':
            # Allocate a port and schedule the first job.
            port = self._find_port(cr, uid)
            values = {
                'host': fqdn(),
                'port': port,
                'state': 'testing',
                'job': jobs[0],
                'job_start': now(),
                'job_end': False,
            }
            build.write(values)
            cr.commit()
        else:
            # Check whether the current job is finished (lock released).
            lock_path = build._path('logs', '%s.lock' % build.job)
            if locked(lock_path):
                # Still running: kill it if it overpassed the timeout.
                if build.job != jobs[-1] and build.job_time > timeout:
                    build._logger('%s time exceded (%ss)',
                                  build.job, build.job_time)
                    build.write({'job_end': now()})
                    build._kill(result='killed')
                continue
            build._logger('%s finished', build.job)
            # Schedule the next job.
            v = {}
            if build.job == jobs[-2]:
                # testing -> running
                v['state'] = 'running'
                v['job'] = jobs[-1]
                v['job_end'] = now()  # fixed: was a 1-tuple (trailing comma)
            elif build.job == jobs[-1]:
                # running -> done
                v['state'] = 'done'
                v['job'] = ''
            else:
                # Still testing: advance to the next job in the pipeline.
                v['job'] = jobs[jobs.index(build.job) + 1]
            build.write(v)
        build.refresh()
        # Run the (possibly new) current job.
        pid = None
        if build.state != 'done':
            build._logger('running %s', build.job)
            job_method = getattr(self, '_' + build.job)
            mkdirs([build._path('logs')])
            lock_path = build._path('logs', '%s.lock' % build.job)
            log_path = build._path('logs', '%s.txt' % build.job)
            try:
                pid = job_method(cr, uid, build, lock_path, log_path)
                build.write({'pid': pid})
            except Exception:
                _logger.exception('%s failed running method %s',
                                  build.dest, build.job)
                build._log(build.job,
                           "failed running job method, see runbot log")
                build._kill(result='ko')
                continue
        # Needed to prevent losing pids if multiple jobs are started and
        # one of them raises an exception.
        cr.commit()
        if pid == -2:
            # No process to wait for, directly call the next job.
            # FIXME find a better way than this recursive call.
            build._schedule()
        # Cleanup is only needed if the build was not killed.
        if build.state == 'done':
            build._local_cleanup()
def cron(self, cr, uid, ids=None, context=None):
    """Run the standard repo cron, except on the phased-out main server.

    On the main host builds are being phased out, so the cron is a no-op
    there; everywhere else it defers to the parent implementation.
    """
    # NOTE(review): 'odoo-communty.org' looks like a typo for
    # 'odoo-community.org' -- confirm against the real server FQDN
    # before changing it.
    current = fqdn()
    if current != 'runbot.odoo-communty.org':
        return super(runbot_repo, self).cron(cr, uid, ids, context=context)
    return