def test_assign_by_weight(self):
    """
    Queues whose weights sum to 100 should each receive a share of 100
    agents proportional to their weight (within a one-agent tolerance).
    """
    jobtype_version = self.create_jobtype_version()

    high_queue = self.create_queue_with_job("heavyweight", jobtype_version)
    mid_queue = self.create_queue_with_job("mediumweight", jobtype_version)
    low_queue = self.create_queue_with_job("lightweight", jobtype_version)
    high_queue.weight = 60
    mid_queue.weight = 30
    low_queue.weight = 10
    db.session.add_all([high_queue, mid_queue, low_queue])
    db.session.commit()

    agents = []
    for index in range(100):
        new_agent = Agent(hostname="agent%s" % index, id=uuid.uuid4(),
                          ram=32, free_ram=32, cpus=1, port=50000)
        db.session.add(new_agent)
        agents.append(new_agent)
    db.session.commit()

    for agent in agents:
        assign_tasks_to_agent(agent.id)

    # Each queue should land within +/- 1 agent of its weight.
    for queue, expected in ((high_queue, 60), (mid_queue, 30),
                            (low_queue, 10)):
        assigned = queue.num_assigned_agents()
        self.assertGreaterEqual(assigned, expected - 1)
        self.assertLessEqual(assigned, expected + 1)
def test_basic_insert_nonunique(self):
    """
    Committing two agents with identical hostname/ip/port must raise a
    DatabaseError because of the model's uniqueness constraints.
    """
    for (hostname, ip, port, cpus, ram, state, ram_allocation,
         cpu_allocation) in self.modelArguments(limit=1):
        first = Agent()
        second = Agent()

        # Give both models the exact same identifying fields.
        for duplicate in (first, second):
            duplicate.hostname = hostname
            duplicate.ip = ip
            duplicate.port = port

        db.session.add(first)
        db.session.add(second)

        with self.assertRaises(DatabaseError):
            db.session.commit()

        db.session.rollback()
def test_api_url_passive(self):
    """api_url() must raise ValueError for an agent in PASSIVE mode."""
    passive_agent = Agent(
        hostname="foo", port=12345, remote_ip="10.56.0.1",
        ram=1024, free_ram=128, cpus=4,
        use_address=UseAgentAddress.PASSIVE)

    # A passive agent cannot be contacted, so no URL may be produced.
    with self.assertRaises(ValueError):
        passive_agent.api_url()
def test_api_url_hostname(self):
    """With HOSTNAME addressing, api_url() uses hostname and port."""
    hostname_agent = Agent(
        hostname="foo", port=12345, remote_ip="10.56.0.1",
        ram=1024, free_ram=128, cpus=4,
        use_address=UseAgentAddress.HOSTNAME)

    expected = hostname_agent.URL_TEMPLATE.format(
        host=hostname_agent.hostname, port=hostname_agent.port)
    self.assertEqual(hostname_agent.api_url(), expected)
def models(self, limit=None):
    """
    Iterates over the class level variables and produces an agent model.
    This is done so that we test endpoints in the extreme ranges.
    """
    for arguments in self.modelArguments(limit=limit):
        (hostname, ip, port, cpus, ram, state,
         ram_allocation, cpu_allocation) = arguments

        agent = Agent()
        agent.hostname = hostname
        agent.remote_ip = ip
        agent.port = port
        agent.cpus = cpus
        # A fresh agent starts with all of its ram free.
        agent.ram = ram
        agent.free_ram = ram
        agent.state = state
        agent.ram_allocation = ram_allocation
        agent.cpu_allocation = cpu_allocation
        yield agent
def test_assign_by_weight_additional_queues(self):
    """
    Weighted assignment (6:3:1 over 100 agents) must be unaffected by
    other queues that exist but hold no jobs drawing agents.
    """
    jobtype_version = self.create_jobtype_version()

    high_queue = self.create_queue_with_job("heavyweight", jobtype_version)
    mid_queue = self.create_queue_with_job("mediumweight", jobtype_version)
    low_queue = self.create_queue_with_job("lightweight", jobtype_version)
    high_queue.weight = 6
    mid_queue.weight = 3
    low_queue.weight = 1
    db.session.add_all([high_queue, mid_queue, low_queue])

    # The presence of additional queues with arbitrary weights should not
    # make any difference if they aren't drawing any agents
    extra_queues = [JobQueue(name="additional%s" % n, weight=10)
                    for n in (1, 2, 3)]
    db.session.add_all(extra_queues)
    db.session.commit()

    agents = []
    for index in range(100):
        new_agent = Agent(hostname="agent%s" % index, id=uuid.uuid4(),
                          ram=32, free_ram=32, cpus=1, port=50000)
        db.session.add(new_agent)
        agents.append(new_agent)
    db.session.commit()

    for agent in agents:
        assign_tasks_to_agent(agent.id)

    # The 6:3:1 weights over 100 agents yield roughly 60/30/10, each
    # within a one-agent tolerance.
    for queue, expected in ((high_queue, 60), (mid_queue, 30),
                            (low_queue, 10)):
        assigned = queue.num_assigned_agents()
        self.assertGreaterEqual(assigned, expected - 1)
        self.assertLessEqual(assigned, expected + 1)
def post(self):
    """
    A ``POST`` to this endpoint will either create or update an existing
    agent.  The ``port`` and ``id`` columns will determine if an agent
    already exists.

        * If an agent is found matching the ``port`` and ``id`` columns
          from the request the existing model will be updated and the
          resulting data and the ``OK`` code will be returned.

        * If we don't find an agent matching the ``port`` and ``id``
          however a new agent will be created and the resulting data and
          the ``CREATED`` code will be returned.

    .. note::
        The ``remote_ip`` field is not required and should typically not
        be included in a request.  When not provided ``remote_ip`` is
        populated by the server based off of the ip of the incoming
        request.  Providing ``remote_ip`` in your request however will
        override this behavior.

    .. http:post:: /api/v1/agents/ HTTP/1.1

        **Request**

        .. sourcecode:: http

            POST /api/v1/agents/ HTTP/1.1
            Accept: application/json

            {
                "cpu_allocation": 1.0,
                "cpus": 14,
                "free_ram": 133,
                "hostname": "agent1",
                "id": "6a0c11df-660f-4c1e-9fb4-5fe2b8cd2437",
                "remote_ip": "10.196.200.115",
                "port": 64994,
                "ram": 2157,
                "ram_allocation": 0.8,
                "state": 8
            }

        **Response (agent created)**

        .. sourcecode:: http

            HTTP/1.1 201 CREATED
            Content-Type: application/json

            {
                "cpu_allocation": 1.0,
                "cpus": 14,
                "use_address": "remote",
                "free_ram": 133,
                "time_offset": 0,
                "hostname": "agent1",
                "id": "6a0c11df-660f-4c1e-9fb4-5fe2b8cd2437",
                "port": 64994,
                "ram": 2157,
                "ram_allocation": 0.8,
                "state": "online",
                "remote_ip": "10.196.200.115"
            }

        **Response (existing agent updated)**

        .. sourcecode:: http

            HTTP/1.1 200 OK
            Content-Type: application/json

            {
                "cpu_allocation": 1.0,
                "cpus": 14,
                "use_address": "remote",
                "free_ram": 133,
                "time_offset": 0,
                "hostname": "agent1",
                "id": "6a0c11df-660f-4c1e-9fb4-5fe2b8cd2437",
                "port": 64994,
                "ram": 2157,
                "ram_allocation": 0.8,
                "state": "online",
                "remote_ip": "10.196.200.115"
            }

    :statuscode 201: a new agent was created
    :statuscode 200: an existing agent is updated with data from the request
    :statuscode 400: there was something wrong with the request (such as
                     invalid columns being included)
    """
    # Read in and convert the id field.  Agents are keyed on (port, id)
    # below, so a missing id is a hard error.
    try:
        g.json["id"] = uuid.UUID(g.json["id"])
    except KeyError:
        return jsonify(error="`id` not provided"), BAD_REQUEST

    # If the remote user agent is not "PyFarm/1.0 (agent)", this is not an
    # announce by the agent itself.  It could be some other client editing
    # the agent.
    if request.headers.get("User-Agent", "") == "PyFarm/1.0 (agent)":
        # Set remote_ip if it did not come in with the request
        g.json.setdefault("remote_ip", request.remote_addr)

    farm_name = g.json.pop("farm_name", None)
    if farm_name and farm_name != OUR_FARM_NAME:
        return jsonify(error="Wrong farm name"), BAD_REQUEST

    current_assignments = g.json.pop("current_assignments", None)

    mac_addresses = g.json.pop("mac_addresses", None)
    # TODO return BAD_REQUEST on bad mac addresses
    if mac_addresses is not None:
        # Normalize to lowercase; malformed addresses are silently dropped.
        mac_addresses = [
            x.lower() for x in mac_addresses if MAC_RE.match(x)]

    gpus = g.json.pop("gpus", None)
    disks = g.json.pop("disks", None)
    state = g.json.pop("state", None)

    agent = Agent.query.filter_by(port=g.json["port"],
                                  id=g.json["id"]).first()

    if agent is None:
        # No (port, id) match: create a brand new agent.
        try:
            agent = Agent(**g.json)

        # There may be something wrong with one of the fields
        # that's causing our sqlalchemy model raise a ValueError.
        except ValueError as e:
            return jsonify(error=str(e)), BAD_REQUEST

        # New agents receive every software version flagged as a default.
        default_versions = SoftwareVersion.query.filter_by(default=True)
        for version in default_versions:
            agent.software_versions.append(version)

        if mac_addresses is not None:
            for address in mac_addresses:
                mac_address = AgentMacAddress(
                    agent=agent, mac_address=address)
                db.session.add(mac_address)

        if gpus is not None:
            for gpu_name in gpus:
                # Reuse an existing GPU row when one exists, otherwise
                # create it.
                gpu = GPU.query.filter_by(fullname=gpu_name).first()
                if not gpu:
                    gpu = GPU(fullname=gpu_name)
                    db.session.add(gpu)
                agent.gpus.append(gpu)

        if disks is not None:
            for disk_dict in disks:
                disk = AgentDisk(agent=agent,
                                 mountpoint=disk_dict["mountpoint"],
                                 size=disk_dict["size"],
                                 free=disk_dict["free"])
                db.session.add(disk)

        if state is not None:
            agent.state = state

        db.session.add(agent)

        try:
            db.session.commit()
        except Exception as e:
            # NOTE(review): e.args[0] may not exist or may not be a
            # string for every driver exception — confirm this does not
            # itself raise inside the error handler.
            e = e.args[0].lower()
            error = "Unhandled error: %s. This is often an issue " \
                    "with the agent's data for `ip`, `hostname` and/or " \
                    "`port` not being unique enough. In other cases " \
                    "this can sometimes happen if the underlying " \
                    "database driver is either non-compliant with " \
                    "expectations or we've encountered a database error " \
                    "that we don't know how to handle yet. If the " \
                    "latter is the case, please report this as a bug." % e
            return jsonify(error=error), INTERNAL_SERVER_ERROR
        else:
            agent_data = agent.to_dict(unpack_relationships=["tags"])
            logger.info("Created agent %r: %r", agent.id, agent_data)
            assign_tasks.delay()
            return jsonify(agent_data), CREATED

    else:
        # Existing agent: apply only the columns that actually changed.
        updated = False

        for key in g.json.copy():
            value = g.json.pop(key)

            if not hasattr(agent, key):
                return jsonify(
                    error="Agent has no such column `%s`" % key), \
                    BAD_REQUEST

            if getattr(agent, key) != value:
                try:
                    setattr(agent, key, value)
                except Exception as e:
                    return jsonify(
                        error="Error while setting `%s`: %s" % (key, e)), \
                        BAD_REQUEST
                else:
                    updated = True

        if mac_addresses is not None:
            updated = True
            # Remove addresses the agent no longer reports; whatever
            # remains in `mac_addresses` afterwards is new and gets added.
            for existing_address in agent.mac_addresses:
                if existing_address.mac_address.lower() not in \
                        mac_addresses:
                    logger.debug(
                        "Existing address %s is not in supplied "
                        "mac addresses, for agent %s, removing it.",
                        existing_address.mac_address, agent.hostname)
                    agent.mac_addresses.remove(existing_address)
                else:
                    mac_addresses.remove(
                        existing_address.mac_address.lower())

            for new_address in mac_addresses:
                mac_address = AgentMacAddress(
                    agent=agent, mac_address=new_address)
                db.session.add(mac_address)

        if gpus is not None:
            updated = True
            # Same add/remove reconciliation as mac addresses above.
            for existing_gpu in agent.gpus:
                if existing_gpu.fullname not in gpus:
                    # Fixed: this log call previously referenced
                    # `existing_address.mac_address`, a variable from the
                    # mac-address loop that may be undefined here.
                    logger.debug(
                        "Existing gpu %s is not in supplied "
                        "gpus, for agent %s, removing it.",
                        existing_gpu.fullname, agent.hostname)
                    agent.gpus.remove(existing_gpu)
                else:
                    gpus.remove(existing_gpu.fullname)

            for gpu_name in gpus:
                gpu = GPU.query.filter_by(fullname=gpu_name).first()
                if not gpu:
                    gpu = GPU(fullname=gpu_name)
                    db.session.add(gpu)
                agent.gpus.append(gpu)

        if disks is not None:
            # Disks are replaced wholesale rather than reconciled.
            # NOTE(review): `updated` is not set here, so a disks-only
            # change does not trigger the commit below — confirm whether
            # that is intentional.
            for old_disk in agent.disks:
                db.session.delete(old_disk)
            for disk_dict in disks:
                disk = AgentDisk(agent=agent,
                                 mountpoint=disk_dict["mountpoint"],
                                 size=disk_dict["size"],
                                 free=disk_dict["free"])
                db.session.add(disk)

        # A disabled agent keeps its state regardless of the request.
        if state is not None and agent.state != _AgentState.DISABLED:
            agent.state = state

        # TODO Only do that if this is really the agent speaking to us.
        failed_tasks = []
        # NOTE(review): the return value of fail_missing_assignments is
        # discarded, so `failed_tasks` is always empty — confirm whether
        # it should be captured here.  Also note the `_AgentState` vs
        # `AgentState` naming above; both appear to be in scope.
        if (current_assignments is not None
                and agent.state != AgentState.OFFLINE):
            fail_missing_assignments(agent, current_assignments)

        if updated or failed_tasks:
            agent.last_heard_from = datetime.utcnow()
            db.session.add(agent)

            try:
                db.session.commit()
            except Exception as e:
                return jsonify(error="Unhandled error: %s" % e), \
                    INTERNAL_SERVER_ERROR
            else:
                agent_data = agent.to_dict(unpack_relationships=["tags"])
                logger.info("Updated agent %r: %r", agent.id, agent_data)

                for task in failed_tasks:
                    task.job.update_state()
                db.session.commit()

                assign_tasks.delay()
        else:
            # Fixed: previously `agent_data` was never bound when no
            # column changed, so the return below raised NameError
            # (surfacing as a 500) on a no-op update.
            agent_data = agent.to_dict(unpack_relationships=["tags"])

        return jsonify(agent_data), OK