def test_basic_deletion(self):
    new_node = Node('test_basic_deletion')
    models.commit()
    new_node.delete()
    models.commit()
    query = Node.query.filter(Node.name == 'test_basic_deletion')
    assert not query.count()
def test_double_lock(self):
    node_name = 'goldfish'
    user = '******'
    node = Node(name=node_name)
    node.update(dict(locked=True, locked_by=user))
    with pytest.raises(ForbiddenRequestError):
        node.update(dict(locked=True, locked_by=user))
def index_post(self):
    """
    Create a new node
    """
    try:
        data = request.json
        name = data.get('name')
    except ValueError:
        rollback()
        error('/errors/invalid/', 'could not decode JSON body')

    # we allow empty data to be pushed
    if not name:
        error('/errors/invalid/', "could not find required key: 'name'")

    if Node.filter_by(name=name).first():
        error('/errors/invalid/',
              "Node with name %s already exists" % name)
    else:
        self.node = Node(name=name)
        try:
            self.node.update(data)
        except PaddlesError as exc:
            error(exc.url, str(exc))
        log.info("Created {node}: {data}".format(
            node=self.node,
            data=data,
        ))
    return dict()
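# Illustrative only: a minimal sketch of how a client might exercise the
# node-creation handler above. The base URL, port, and mount point are
# assumptions, as is the extra 'machine_type' field; index_post() only
# requires 'name' and hands the remaining keys to Node.update().
import requests

resp = requests.post(
    'http://localhost:8080/nodes/',
    json={'name': 'example-node-01.example.com', 'machine_type': 'vps'},
)
resp.raise_for_status()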
def test_locked_since_locked(self):
    node_name = 'cats'
    user = '******'
    node = Node(name=node_name)
    node.update(dict(locked=True, locked_by=user))
    # This used to take <100us; since we started flushing on node updates,
    # it takes around 2-3ms.
    assert (datetime.utcnow() - node.locked_since) < timedelta(milliseconds=5)
def test_job_creates_node(self):
    run_name = 'test_job_creates_node'
    node_name = 'node.name'
    targets = {'foo@' + node_name: ''}
    new_run = Run(run_name)
    Job(dict(targets=targets), new_run)
    assert Node.get(1).name == node_name
def test_init(self):
    name = 'test_init'
    mtype = 'vps'
    Node(name=name, machine_type=mtype)
    models.commit()
    query = Node.query.filter(Node.name == name)\
        .filter(Node.machine_type == mtype)
    assert query.one()
def lock_many_post(self):
    req = request.json
    fields = set(('count', 'locked_by', 'machine_type', 'description'))
    if not fields.issubset(set(req.keys())):
        error('/errors/invalid/',
              "must pass these fields: %s" % ', '.join(fields))
    req['locked'] = True
    count = req.pop('count', 0)
    if count < 1:
        error('/errors/invalid/', "cannot lock less than 1 node")
    machine_type = req.pop('machine_type', None)
    if not machine_type:
        error('/errors/invalid/', "must specify machine_type")
    locked_by = req.get('locked_by')
    description = req.get('description')
    os_type = req.get('os_type')
    os_version = req.get('os_version')
    arch = req.get('arch')
    if os_version is not None:
        os_version = str(os_version)
    attempts = 2
    log.debug("Locking {count} {mtype} nodes for {locked_by}".format(
        count=count, mtype=machine_type, locked_by=locked_by))
    while attempts > 0:
        try:
            result = Node.lock_many(count=count,
                                    locked_by=locked_by,
                                    machine_type=machine_type,
                                    description=description,
                                    os_type=os_type,
                                    os_version=os_version,
                                    arch=arch)
            if description:
                desc_str = " with description %s" % description
            else:
                desc_str = ""
            log.info("Locked {names} for {locked_by}{desc_str}".format(
                names=" ".join([str(node) for node in result]),
                locked_by=locked_by,
                desc_str=desc_str,
            ))
            return result
        except RaceConditionError as exc:
            log.warn("lock_many() detected race condition")
            attempts -= 1
            if attempts > 0:
                log.info("retrying after race avoidance (%s tries left)",
                         attempts)
            else:
                error(exc.url, str(exc))
        except PaddlesError as exc:
            error(exc.url, str(exc))
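# Illustrative only: the request body lock_many_post() above expects. The
# field-subset check requires at least 'count', 'locked_by', 'machine_type',
# and 'description'; 'os_type', 'os_version', and 'arch' are optional. The
# URL, port, and all values are assumptions for the sake of the sketch.
import requests

payload = {
    'count': 2,
    'locked_by': 'example_user@example_host',
    'machine_type': 'vps',
    'description': 'example-run/1',
    'os_type': 'ubuntu',
    'os_version': 22.04,   # non-string versions are coerced with str() above
    'arch': 'x86_64',
}
resp = requests.post('http://localhost:8080/nodes/lock_many/', json=payload)
locked_nodes = resp.json()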
def test_job_adds_node(self):
    run_name = 'test_job_adds_node'
    node_name = 'added_node'
    assert Node.query.filter(Node.name == node_name).all() == []
    node = Node(name=node_name)
    targets = {'foo@' + node_name: ''}
    new_run = Run(run_name)
    job = Job(dict(targets=targets), new_run)
    assert Node.query.filter(Node.name == node_name).one()
    assert Job.query.filter(Job.target_nodes.contains(node)).one() == job
def test_vm_host(self):
    vm_host_name = 'vm_host'
    vm_guest_names = ['vm_guest_1', 'vm_guest_2']
    host_node = Node(name=vm_host_name)
    guest_nodes = []
    for name in vm_guest_names:
        node = Node(name=name)
        node.vm_host = host_node
        guest_nodes.append(node)
    models.commit()
    query = Node.query.filter(Node.vm_host == host_node)
    assert query.count() == len(vm_guest_names)
    # Test that the backref 'vm_guests' works as well. I am intentionally
    # testing two things here.
    query = Node.query
    for guest in guest_nodes:
        query = query.filter(Node.vm_guests.contains(guest))
    assert host_node == query.one()
def test_locked_since_unlocked(self):
    node_name = 'cats'
    user = '******'
    old_locked_since = datetime(2000, 1, 1, 0, 0)
    node = Node(name=node_name)
    node.update(dict(locked=True, locked_by=user))
    node.locked_since = old_locked_since
    node.update(dict(locked=False, locked_by=user))
    assert node.locked_since is None
def _populate(self, job):
    #print "Job: %s/%s" % (job.name, job.job_id)
    if not job.targets:
        return
    for key in job.targets.keys():
        name = key.split('@')[1]
        mtype = self.parse_machine_type(name)
        node_q = Node.query.filter(Node.name == name)
        #print "  node: exists={count}, name={name}".format(
        #    count=node_q.count(),
        #    name=name,
        #)
        if node_q.count() == 0:
            #print "  Creating Node with name: %s" % name
            node = Node(name=name)
        else:
            node = node_q.one()
        if mtype:
            node.machine_type = mtype
        if node not in job.target_nodes:
            job.target_nodes.append(node)
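# Illustrative only: how a teuthology-style targets dict maps onto node names
# in _populate() above. Each key looks like 'user@hostname', and the part
# after the '@' becomes the Node name. The hostnames and keys are made up.
targets = {
    'ubuntu@example-node-01.example.com': 'ssh-rsa AAAA...',
    'ubuntu@example-node-02.example.com': 'ssh-rsa AAAA...',
}
node_names = [key.split('@')[1] for key in targets]
assert node_names == ['example-node-01.example.com',
                      'example-node-02.example.com']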
def test_basic_creation(self):
    Node(name='new_node')
    models.commit()
    assert Node.get(1).name == 'new_node'
class NodesController(object):
    @expose(generic=True, template='json')
    def index(self, locked=None, machine_type='', os_type=None,
              os_version=None, locked_by=None, up=None, count=None):
        query = Node.query
        if locked is not None:
            query = query.filter(Node.locked == locked)
        if machine_type:
            if '|' in machine_type:
                machine_types = machine_type.split('|')
                query = query.filter(Node.machine_type.in_(machine_types))
            else:
                query = query.filter(Node.machine_type == machine_type)
        if os_type:
            query = query.filter(Node.os_type == os_type)
        if os_version:
            query = query.filter(Node.os_version == os_version)
        if locked_by:
            query = query.filter(Node.locked_by == locked_by)
        if up is not None:
            query = query.filter(Node.up == up)
        if count is not None:
            if not count.isdigit() or isinstance(count, int):
                error('/errors/invalid/', 'count must be an integer')
            query = query.limit(count)
        return [node.__json__() for node in query.all()]

    @index.when(method='POST', template='json')
    def index_post(self):
        """
        Create a new node
        """
        try:
            data = request.json
            name = data.get('name')
        except ValueError:
            rollback()
            error('/errors/invalid/', 'could not decode JSON body')

        # we allow empty data to be pushed
        if not name:
            error('/errors/invalid/', "could not find required key: 'name'")

        if Node.filter_by(name=name).first():
            error('/errors/invalid/',
                  "Node with name %s already exists" % name)
        else:
            self.node = Node(name=name)
            try:
                self.node.update(data)
            except PaddlesError as exc:
                error(exc.url, str(exc))
            log.info("Created {node}: {data}".format(
                node=self.node,
                data=data,
            ))
        return dict()

    @expose(generic=True, template='json')
    def lock_many(self):
        error('/errors/invalid/', "this URI only supports POST requests")

    @isolation_level('SERIALIZABLE')
    @lock_many.when(method='POST', template='json')
    def lock_many_post(self):
        req = request.json
        fields = set(('count', 'locked_by', 'machine_type', 'description'))
        if not fields.issubset(set(req.keys())):
            error('/errors/invalid/',
                  "must pass these fields: %s" % ', '.join(fields))
        req['locked'] = True
        count = req.pop('count', 0)
        if count < 1:
            error('/errors/invalid/', "cannot lock less than 1 node")
        machine_type = req.pop('machine_type', None)
        if not machine_type:
            error('/errors/invalid/', "must specify machine_type")
        locked_by = req.get('locked_by')
        description = req.get('description')
        os_type = req.get('os_type')
        os_version = req.get('os_version')
        arch = req.get('arch')
        if os_version is not None:
            os_version = str(os_version)
        attempts = 2
        log.debug("Locking {count} {mtype} nodes for {locked_by}".format(
            count=count, mtype=machine_type, locked_by=locked_by))
        while attempts > 0:
            try:
                result = Node.lock_many(count=count,
                                        locked_by=locked_by,
                                        machine_type=machine_type,
                                        description=description,
                                        os_type=os_type,
                                        os_version=os_version,
                                        arch=arch)
                if description:
                    desc_str = " with description %s" % description
                else:
                    desc_str = ""
                log.info("Locked {names} for {locked_by}{desc_str}".format(
                    names=" ".join([str(node) for node in result]),
                    locked_by=locked_by,
                    desc_str=desc_str,
                ))
                return result
            except RaceConditionError as exc:
                log.warn("lock_many() detected race condition")
                attempts -= 1
                if attempts > 0:
                    log.info("retrying after race avoidance (%s tries left)",
                             attempts)
                else:
                    error(exc.url, str(exc))
            except PaddlesError as exc:
                error(exc.url, str(exc))

    @expose(generic=True, template='json')
    def unlock_many(self):
        error('/errors/invalid/', "this URI only supports POST requests")

    @unlock_many.when(method='POST', template='json')
    def unlock_many_post(self):
        req = request.json
        fields = ['names', 'locked_by']
        if sorted(req.keys()) != sorted(fields):
            error('/errors/invalid/',
                  "must pass these fields: %s" % ', '.join(fields))
        locked_by = req.get('locked_by')
        names = req.get('names')
        if not isinstance(names, list):
            error('/errors/invalid/',
                  "'names' must be a list; got: %s" % str(type(names)))
        base_query = Node.query
        query = base_query.filter(Node.name.in_(names))
        if query.count() != len(names):
            error('/errors/invalid/', "Could not find all nodes!")
        log.info("Unlocking {count} nodes for {locked_by}".format(
            count=len(names), locked_by=locked_by))
        result = []
        for node in query.all():
            result.append(
                NodeController._lock(node,
                                     dict(locked=False, locked_by=locked_by),
                                     'unlock'))
        return result

    @expose('json')
    def job_stats(self, machine_type='', since_days=14):
        since_days = int(since_days)
        if since_days < 1:
            error('/errors/invalid/', "since_days must be a positive integer")
        now = datetime.utcnow()
        past = now - timedelta(days=since_days)
        recent_jobs = Job.query.filter(Job.posted.between(past, now))\
            .subquery()
        RecentJob = aliased(Job, recent_jobs)
        query = Session.query(Node.name, RecentJob.status, func.count('*'))
        if machine_type:
            # Note: filtering by Job.machine_type (as below) greatly improves
            # performance but could lead to slightly incorrect values if many
            # jobs are being scheduled using mixed machine types. We work
            # around this by including the 'multi' machine type (which is the
            # name of the queue Inktank uses for such jobs).
            query = query.filter(
                RecentJob.machine_type.in_((machine_type, 'multi')))
            query = query.filter(Node.machine_type == machine_type)
        query = query.join(RecentJob.target_nodes).group_by(Node)\
            .group_by(RecentJob.status)
        all_stats = {}
        results = query.all()
        for (name, status, count) in results:
            node_stats = all_stats.get(name, {})
            node_stats[status] = count
            all_stats[name] = node_stats
        stats_sorter = lambda t: sum(t[1].values())
        ordered_stats = OrderedDict(sorted(all_stats.items(),
                                           key=stats_sorter))
        return ordered_stats

    @expose('json')
    def machine_types(self):
        query = Node.query.values(Node.machine_type)
        return sorted(list(set([item[0] for item in query if item[0]])))

    @expose('json')
    def _lookup(self, name, *remainder):
        return NodeController(name), remainder
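# Illustrative only: the shape of the data job_stats() above returns -- an
# OrderedDict keyed by node name, ordered by total recent-job count, whose
# values map a job status to the number of recent jobs with that status on
# that node. The node names, statuses, and counts here are invented.
from collections import OrderedDict

example_stats = OrderedDict([
    ('example-node-02', {'pass': 3}),
    ('example-node-01', {'pass': 10, 'fail': 2, 'dead': 1}),
])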
def update_node(self, node_json):
    name = node_json['name'].split('@')[1]
    query = Node.query.filter(Node.name == name)
    if query.count():
        node = query.one()
        verb = "Updated"
    else:
        node = Node(name)
        verb = "Created"

    vm_host_name = node_json.get('vpshost', '')
    is_vm = vm_host_name not in (None, '')
    if is_vm:
        self.vm_hosts[name] = node_json['vpshost']

    locked_since_local = datetime.strptime(node_json['locked_since'],
                                           '%Y-%m-%dT%H:%M:%S')
    locked_since = local_datetime_to_utc(locked_since_local)

    node.machine_type = node_json.get('type')
    node.arch = node_json.get('arch')
    node.distro = node_json.get('distro')
    node.up = bool(node_json.get('up', 0))
    node.is_vm = is_vm
    node.mac_address = node_json.get('mac').lower()
    node.ssh_pub_key = node_json.get('sshpubkey')
    node.locked = node_json.get('locked') == 1
    node.locked_by = node_json.get('locked_by')
    node.locked_since = locked_since
    node.description = node_json.get('description')
    return verb
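# Illustrative only: a sketch of the legacy lock-server record that
# update_node() above consumes, built from the keys it reads. All values are
# invented; 'locked_since' must match the '%Y-%m-%dT%H:%M:%S' format and
# 'name' must contain an '@', since the hostname after it becomes Node.name.
node_json = {
    'name': 'ubuntu@example-node-01.example.com',
    'vpshost': '',               # non-empty marks the node as a VM guest
    'locked_since': '2014-01-01T00:00:00',
    'type': 'example_type',
    'arch': 'x86_64',
    'distro': 'ubuntu',
    'up': 1,
    'mac': 'AA:BB:CC:DD:EE:FF',
    'sshpubkey': 'ssh-rsa AAAA...',
    'locked': 0,
    'locked_by': None,
    'description': None,
}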
def test_invalid(self):
    name = 'test_invalid'
    Node(name=name, is_vm='invalid')
    with pytest.raises(StatementError):
        models.commit()
def test_locked_since_locked(self):
    node_name = 'cats'
    user = '******'
    node = Node(name=node_name)
    node.update(dict(locked=True, locked_by=user))
    assert (datetime.utcnow() - node.locked_since) < timedelta(0, 0, 100)