def run(self, args):
    """Backfill Job.target_nodes for jobs that have none, then backfill
    Node.machine_type for nodes missing one.  Commits once at the end."""
    super(SetTargetsCommand, self).run(args)
    out("LOADING ENVIRONMENT")
    self.load_app()
    out("STARTING A TRANSACTION...")
    start()
    # Jobs with no target nodes associated yet
    count = Job.query.filter(~Job.target_nodes.any()).count()
    jobs_q = Job.query.filter(~Job.target_nodes.any())
    n = 0
    # yield_per(10) streams rows in small batches instead of loading all jobs
    for job in jobs_q.yield_per(10):
        try:
            n += 1
            # Trailing comma suppresses the newline (Python 2) so the '\r'
            # redraws the progress counter in place
            print "Processing Job {n}/{t}\r".format(n=n, t=count),
            self._populate(job)
        except:
            # Bare except is deliberate: always roll back before re-raising
            print
            rollback()
            out("ROLLING BACK... ")
            raise
        else:
            flush()
    print
    # Second pass: parse a machine_type from the node name where missing
    nodes = Node.query.filter(Node.machine_type.is_(None)).all()
    for node in nodes:
        node.machine_type = self.parse_machine_type(node.name)
    commit()
def test_basic_deletion(self):
    """A deleted node can no longer be found by name."""
    node = Node('test_basic_deletion')
    models.commit()
    node.delete()
    models.commit()
    remaining = Node.query.filter(Node.name == 'test_basic_deletion')
    assert not remaining.count()
def run(self, args):
    """Import node records from the lock server into the local database."""
    super(ImportNodesCommand, self).run(args)
    response = requests.get(self.lockserver)
    nodes_json = response.json()
    print "Found {count} nodes to import".format(count=len(nodes_json))
    out("LOADING ENVIRONMENT")
    self.load_app()
    try:
        out("STARTING A TRANSACTION...")
        start()
        # Collects VM host associations during update_node(); consumed by
        # set_vm_hosts() below — presumably host-name -> node, confirm there
        self.vm_hosts = {}
        count = len(nodes_json)
        for i in range(count):
            node_json = nodes_json[i]
            verb = self.update_node(node_json)
            # Trailing comma suppresses the newline (Python 2) so the '\r'
            # redraws the progress counter in place
            print "{verb} {n}/{count}\r".format(verb=verb, n=i+1, count=count),
        print
        self.set_vm_hosts()
    except:
        # Bare except is deliberate: always roll back before re-raising
        rollback()
        out("ROLLING BACK... ")
        raise
    else:
        out("COMMITING... ")
        commit()
def test_basic_deletion(self):
    """A deleted job can no longer be found by job_id."""
    run = Run('test_basic_deletion')
    job = Job({'job_id': '42'}, run)
    models.commit()
    job.delete()
    models.commit()
    assert not Job.filter_by(job_id='42').first()
def lock_many(cls, count, locked_by, machine_type, description=None,
              os_type=None, os_version=None, arch=None):
    """Atomically lock `count` available nodes matching the given filters.

    Raises ResourceUnavailableError when too few nodes match, and
    RaceConditionError when a concurrent update wins the commit.
    """
    updates = dict(locked=True, locked_by=locked_by, description=description)
    q = cls.query
    # A '|'-separated machine_type means "any of these types"
    if "|" in machine_type:
        q = q.filter(Node.machine_type.in_(machine_type.split("|")))
    else:
        q = q.filter(Node.machine_type == machine_type)
    if os_type:
        q = q.filter(Node.os_type == os_type)
    if os_version:
        q = q.filter(Node.os_version == os_version)
    if arch:
        q = q.filter(Node.arch == arch)
    # Candidates must be up and not already locked
    q = q.filter(Node.up.is_(True))
    q = q.filter(Node.locked.is_(False))
    nodes = q.limit(count).all()
    available = len(nodes)
    if available < count:
        raise ResourceUnavailableError(
            "only {count} nodes available".format(count=available))
    for node in nodes:
        node.update(updates)
    try:
        commit()
    except (sqlalchemy.exc.DBAPIError, sqlalchemy.exc.InvalidRequestError):
        rollback()
        raise RaceConditionError("error locking nodes. please retry request.")
    return nodes
def run(self, args):
    """Import node records from the lock server into the local database."""
    super(ImportNodesCommand, self).run(args)
    response = requests.get(self.lockserver)
    nodes_json = response.json()
    print("Found {count} nodes to import".format(count=len(nodes_json)))
    out("LOADING ENVIRONMENT")
    self.load_app()
    try:
        out("STARTING A TRANSACTION...")
        start()
        # Collects VM host associations during update_node(); consumed by
        # set_vm_hosts() below
        self.vm_hosts = {}
        count = len(nodes_json)
        for i in range(count):
            node_json = nodes_json[i]
            verb = self.update_node(node_json)
            # end='' suppresses the newline so the '\r' redraws the progress
            # counter in place.  The previous code had a stray trailing comma
            # AFTER the print() call — a leftover from the Python 2 form —
            # which built a throwaway tuple and did NOT suppress the newline,
            # printing one line per node instead of updating in place.
            print("{verb} {n}/{count}\r".format(verb=verb, n=i + 1,
                                                count=count), end='')
        print()
        self.set_vm_hosts()
    except:
        # Bare except is deliberate: always roll back before re-raising
        rollback()
        out("ROLLING BACK... ")
        raise
    else:
        out("COMMITING... ")
        commit()
def commit(self):
    """Commit the session; on any failure, roll back and re-raise.

    The bare except is deliberate: every exception type (including
    BaseException subclasses) must trigger a rollback before propagating.
    """
    try:
        models.commit()
    except:
        print("Rolling back")
        models.rollback()
        raise
def test_empty_run_deletion(self):
    """A run with no jobs can be deleted cleanly."""
    name = 'test_empty_run_deletion'
    run = Run(name)
    models.commit()
    run.delete()
    models.commit()
    assert not Run.filter_by(name=name).first()
def commit(self):
    """Commit the session; on any failure, roll back and re-raise.

    The bare except is deliberate: every exception type (including
    BaseException subclasses) must trigger a rollback before propagating.
    """
    try:
        models.commit()
    except:
        # print() form is valid under both Python 2 and 3, matching the
        # print-function style used elsewhere in the codebase
        print("Rolling back")
        models.rollback()
        raise
def test_run_multi_machine_type(self):
    """A comma-separated machine_type on a job propagates to its run."""
    run_name = 'teuthology-2014-10-06_19:30:01-upgrade:dumpling-firefly-x:stress-split-giant-distro-basic-multi'
    mtype = 'plana,mira,burnupi'
    run = Run(run_name)
    Job(dict(job_id=1, machine_type=mtype), run)
    models.commit()
    assert run.machine_type == mtype
def test_init(self):
    """Creating a Node persists its name and machine_type."""
    name, mtype = 'test_init', 'vps'
    Node(name=name, machine_type=mtype)
    models.commit()
    found = (Node.query
             .filter(Node.name == name)
             .filter(Node.machine_type == mtype))
    assert found.one()
def test_updated(self):
    """Run.updated matches the timestamp of its most recent job."""
    name = 'test_updated'
    run = Run(name)
    for job_id in range(1, 5):
        Job(dict(job_id=job_id), run)
    models.commit()
    fetched = Run.filter_by(name=name).first()
    assert fetched.updated == fetched.get_jobs()[-1].updated
def test_run_suite_gets_corrected(self):
    """Suite/branch posted with a job override the values parsed from the run name."""
    run_name = 'teuthology-2014-05-01_07:54:18-new-suite-name:new-subsuite:new-subsub-new-branch-testing-basic-plana'  # noqa
    suite = 'new-suite-name:new-subsuite:new-subsub'
    branch = 'new-branch'
    run = Run(run_name)
    Job(dict(job_id='11', suite=suite, branch=branch), run)
    models.commit()
    assert run.suite == suite
    assert run.branch == branch
def test_job_deletion(self):
    """Deleting a run also deletes the jobs attached to it."""
    run = Run('test_job_deletion')
    for job_id in ('42', '9999'):
        Job({'job_id': job_id}, run)
    models.commit()
    run.delete()
    models.commit()
    assert not Job.filter_by(job_id='9999').first()
def test_delete_empties_run(self):
    """Deleting a run's only job should leave the run with status 'empty'."""
    new_run = Run('test_delete_empties_run')
    new_job = Job(dict(job_id='42', status='queued'), new_run)
    models.commit()
    assert new_run.status == 'queued'
    new_job.delete()
    models.commit()
    new_run_copy = Run.query.filter(Run.name == new_run.name).one()
    # Was "assert not ... == 'empty'", which contradicts the test's name:
    # removing the last job is expected to mark the run empty.
    assert new_run_copy.status == 'empty'
def test_jobs_count(self):
    """__json__ reports how many jobs are attached to the run."""
    # Clear existing rows so the new run is retrievable via Run.get(1)
    Run.query.delete()
    Job.query.delete()
    run = Run('test_jobs_count')
    Job({}, run)
    Job({}, run)
    models.commit()
    as_json = Run.get(1).__json__()
    assert as_json['jobs_count'] == 2
def test_success_updates_status(self):
    """Setting success=True flips a running job's status to 'pass'."""
    run_name = 'test_success_updates_status'
    job = Job(dict(name=run_name, job_id='27', status='running'),
              Run(run_name))
    models.commit()
    job.update(dict(success=True))
    models.commit()
    assert job.status == 'pass'
def test_run_deletion(self):
    """Deleting a run removes the run record itself."""
    name = 'test_run_deletion'
    run = Run(name)
    for job_id in ('42', '120', '4'):
        Job({'job_id': job_id}, run)
    models.commit()
    run.delete()
    models.commit()
    assert not Run.filter_by(name=name).first()
def test_force_updated_time(self):
    """An explicitly supplied 'updated' timestamp is stored converted to UTC."""
    run = Run('test_force_updated_time')
    stamp = '2014-03-31 21:25:43'
    Job(dict(updated=stamp), run)
    models.commit()
    expected = local_datetime_to_utc(
        datetime.strptime(stamp, '%Y-%m-%d %H:%M:%S'))
    stored_job = Run.query.filter(Run.name == run.name).one().jobs[0]
    assert str(stored_job.updated) == str(expected)
def run(self, args):
    """Create all database tables defined by the model metadata."""
    super(PopulateCommand, self).run(args)
    out("LOADING ENVIRONMENT")
    self.load_app()
    out("BUILDING SCHEMA")
    try:
        out("STARTING A TRANSACTION...")
        models.start()
        # create_all is a no-op for tables that already exist
        models.Base.metadata.create_all(conf.sqlalchemy.engine)
    except:
        # Bare except is deliberate: always roll back before re-raising
        models.rollback()
        out("ROLLING BACK... ")
        raise
    else:
        out("COMMITING... ")
        models.commit()
def test_statsd_update(self, m_get_client):
    """A job status change increments the matching statsd counter."""
    m_client = Mock()
    m_counter = Mock()
    m_client.get_counter.return_value = m_counter
    m_get_client.return_value = m_client
    run_name = 'test_statsd_update'
    run = Run(run_name)
    job_id = '27'
    job = Job(dict(name=run_name, job_id=job_id, status='running'), run)
    models.commit()
    job.update({'status': 'pass'})
    models.commit()
    assert job.status == 'pass'
    # The previous assertions used "assert m.called_once_with(...)", which
    # ALWAYS passes: attribute access on a Mock returns a truthy child Mock.
    # assert_called_once_with() actually verifies the calls.
    m_get_client.assert_called_once_with()
    m_client.get_counter.assert_called_once_with('jobs.status')
    m_counter.increment.assert_called_once_with('pass')
def setup_class(cls):
    """Bind the model layer to a fresh test database and build the schema."""
    # Bind and create the database tables
    pmodels.clear()
    # NullPool: no connection pooling, so each test class starts clean
    db_engine = create_engine(
        cls.engine_url,
        encoding='utf-8',
        poolclass=NullPool)
    # AKA models.start()
    # Drop any tables left over from a previous run before rebinding
    pmodels.Base.metadata.drop_all(db_engine)
    pmodels.Session.bind = db_engine
    pmodels.metadata.bind = pmodels.Session.bind
    pmodels.Base.metadata.create_all(db_engine)
    pmodels.commit()
    pmodels.clear()
def run(self, args):
    """Re-parse every Run in the database inside a single transaction."""
    super(ReparseCommand, self).run(args)
    out("LOADING ENVIRONMENT")
    self.load_app()
    try:
        out("STARTING A TRANSACTION...")
        models.start()
        for run in models.Run.query.all():
            self._reparse(run)
    except:
        # Bare except is deliberate: always roll back before re-raising
        models.rollback()
        out("ROLLING BACK... ")
        raise
    else:
        out("COMMITING... ")
        models.commit()
def lock_many(cls, count, locked_by, machine_type, description=None,
              os_type=None, os_version=None, arch=None):
    """Atomically lock `count` available nodes of the requested machine_type.

    Optional os_type/os_version/arch narrow the candidate set.  Raises
    ResourceUnavailableError when too few nodes match, and
    RaceConditionError when a concurrent locker wins the commit.
    """
    update_dict = dict(
        locked=True,
        locked_by=locked_by,
        description=description,
    )
    # A '|'-separated machine_type means "any of these types"
    if '|' in machine_type:
        type_filter = Node.machine_type.in_(machine_type.split('|'))
    else:
        type_filter = Node.machine_type == machine_type
    query = cls.query.filter(type_filter)
    for column, value in ((Node.os_type, os_type),
                          (Node.os_version, os_version),
                          (Node.arch, arch)):
        if value:
            query = query.filter(column == value)
    query = query.filter(Node.up.is_(True))
    # Find unlocked nodes
    query = query.filter(Node.locked.is_(False))
    nodes = query.limit(count).all()
    nodes_avail = len(nodes)
    if nodes_avail < count:
        raise ResourceUnavailableError(
            "only {count} nodes available".format(count=nodes_avail))
    for node in nodes:
        node.update(update_dict)
    try:
        commit()
    except (sqlalchemy.exc.DBAPIError, sqlalchemy.exc.InvalidRequestError):
        rollback()
        raise RaceConditionError(
            "error locking nodes. please retry request.")
    return nodes
def run(self, args):
    """Delete the run whose name exactly matches args.name."""
    super(DeleteCommand, self).run(args)
    out("LOADING ENVIRONMENT")
    self.load_app()
    try:
        out("STARTING A TRANSACTION...")
        models.start()
        # .one() raises if zero or multiple runs match the name
        run = Run.query.filter(Run.name == args.name).one()
        out("Deleting run named %s" % run.name)
        run.delete()
    except:
        # Bare except is deliberate: always roll back before re-raising
        models.rollback()
        out("ROLLING BACK... ")
        raise
    else:
        out("COMMITING... ")
        models.commit()
def test_vm_host(self):
    """The vm_host relationship and its vm_guests backref both resolve."""
    host = Node(name='vm_host')
    guests = []
    for guest_name in ('vm_guest_1', 'vm_guest_2'):
        guest = Node(name=guest_name)
        guest.vm_host = host
        guests.append(guest)
    models.commit()
    assert Node.query.filter(Node.vm_host == host).count() == len(guests)
    # Test that the backref 'vm_guests' works as well. I am intentionally
    # testing two things here.
    backref_query = Node.query
    for guest in guests:
        backref_query = backref_query.filter(Node.vm_guests.contains(guest))
    assert backref_query.one() == host
def setup_class(cls):
    """Point TestModel at the test DB, then bind and rebuild the schema."""
    # Default the test database name if a subclass did not set one
    if TestModel.__db__ is None:
        TestModel.__db__ = 'paddles_test'
    # Bind and create the database tables
    pmodels.clear()
    # NullPool: no connection pooling, so each test class starts clean
    db_engine = create_engine(
        cls.engine_url,
        encoding='utf-8',
        poolclass=NullPool)
    # AKA models.start()
    # Drop any tables left over from a previous run before rebinding
    pmodels.Base.metadata.drop_all(db_engine)
    pmodels.Session.bind = db_engine
    pmodels.metadata.bind = pmodels.Session.bind
    pmodels.Base.metadata.create_all(db_engine)
    pmodels.commit()
    pmodels.clear()
def run(self, args):
    """De-duplicate runs and jobs for every run name matching args.pattern."""
    super(DedupeCommand, self).run(args)
    out("LOADING ENVIRONMENT")
    self.load_app()
    try:
        out("STARTING A TRANSACTION...")
        models.start()
        matching = Run.query.filter(Run.name.like(args.pattern))
        names = [row[0] for row in matching.values(Run.name)]
        out("Found {count} runs to process".format(count=len(names)))
        for name in names:
            self._fix_dupe_runs(name)
            self._fix_dupe_jobs(name)
    except:
        # Bare except is deliberate: always roll back before re-raising
        models.rollback()
        out("ROLLING BACK... ")
        raise
    else:
        out("COMMITING... ")
        models.commit()
def lock_many(cls, count, locked_by, machine_type, description=None,
              os_type=None, os_version=None, arch=None):
    """Atomically lock `count` available nodes of the requested machine_type.

    Optional os_type/os_version/arch narrow the candidate set.  Raises
    ResourceUnavailableError when too few nodes match, and
    RaceConditionError when a concurrent locker wins the commit.
    """
    update_dict = dict(
        locked=True,
        locked_by=locked_by,
        description=description,
    )
    query = cls.query
    # A '|'-separated machine_type means "any of these types"
    if '|' in machine_type:
        machine_types = machine_type.split('|')
        query = query.filter(Node.machine_type.in_(machine_types))
    else:
        query = query.filter(Node.machine_type == machine_type)
    if os_type:
        query = query.filter(Node.os_type == os_type)
    if os_version:
        query = query.filter(Node.os_version == os_version)
    if arch:
        query = query.filter(Node.arch == arch)
    query = query.filter(Node.up.is_(True))
    # Find unlocked nodes
    query = query.filter(Node.locked.is_(False))
    query = query.limit(count)
    nodes = query.all()
    nodes_avail = len(nodes)
    if nodes_avail < count:
        raise ResourceUnavailableError(
            "only {count} nodes available".format(count=nodes_avail))
    for node in nodes:
        node.update(update_dict)
    # The commit was previously unguarded: a concurrent locker made it raise
    # a raw DB exception and left the session dirty with no rollback.  Handle
    # it the way the sibling lock_many implementation does.
    try:
        commit()
    except (sqlalchemy.exc.DBAPIError, sqlalchemy.exc.InvalidRequestError):
        rollback()
        raise RaceConditionError(
            "error locking nodes. please retry request.")
    return nodes
def run(self, args):
    """Recompute the status of runs stuck in 'running' with no running jobs."""
    super(SetStatusCommand, self).run(args)
    out("LOADING ENVIRONMENT")
    self.load_app()
    models.start()
    try:
        out("SETTING RUN STATUSES...")
        running = Run.query.filter(Run.status == 'running')
        to_fix = []
        for run in running:
            # Only touch runs that no longer have any running jobs
            if run.jobs.filter(Job.status == 'running').count() == 0:
                to_fix.append(run)
                self._set_run_status(run)
        # print("") is valid in both Python 2 and 3; the previous bare
        # 'print ""' statement was Python-2-only, inconsistent with the
        # print-function style used elsewhere in the codebase
        print("")
        out("Updated {count} runs...".format(count=len(to_fix)))
    except:
        # Bare except is deliberate: always roll back before re-raising
        models.rollback()
        out("ROLLING BACK...")
        raise
    else:
        out("COMMITTING...")
        models.commit()
def test_status_dead_ignored_when_success_true(self):
    """Once success=True marks a job 'pass', a later 'dead' status is ignored."""
    run_name = 'test_status_dead_ignored_when_success_set'
    job = Job(dict(name=run_name, job_id='27', status='running'),
              Run(run_name))
    models.commit()
    job.update(dict(success=True))
    models.commit()
    job.update(dict(status='dead'))
    models.commit()
    assert job.status == 'pass'
def teardown_method(self, meth):
    """Wipe all Job and Run rows created by the test that just ran."""
    start()
    # Delete jobs before runs: each job references a run
    Job.query.delete()
    Run.query.delete()
    commit()
def test_basic_creation(self):
    """A job attached to a run is persisted and retrievable by id."""
    Job({}, Run('test_basic_creation'))
    models.commit()
    assert Job.get(1)
def test_job_updated(self):
    """A freshly created job carries a datetime in its 'updated' field."""
    Job({}, Run('test_job_updated'))
    models.commit()
    assert isinstance(Job.get(1).updated, datetime)
def teardown_method(self, meth):
    """Wipe all Node rows created by the test that just ran."""
    # After each test in this class, delete all the Nodes we created
    start()
    Node.query.delete()
    commit()
def test_relationship_works(self):
    """Job.run resolves back to the run the job was created under."""
    run = Run('test_relationship_works')
    job_id = Job({}, run).id
    models.commit()
    assert Job.get(job_id).run.name == 'test_relationship_works'