def delete(self, agent_id):
    """
    Delete a single agent

    .. http:delete:: /api/v1/agents/(uuid:agent_id) HTTP/1.1

        **Request (agent exists)**

        .. sourcecode:: http

            DELETE /api/v1/agents/b25ee7eb-9586-439a-b131-f5d022e0d403 HTTP/1.1
            Accept: application/json

        **Response**

        .. sourcecode:: http

            HTTP/1.1 204 NO CONTENT
            Content-Type: application/json

    :statuscode 204: the agent was deleted or did not exist
    """
    agent = Agent.query.filter_by(id=agent_id).first()

    # Deletion is idempotent: a missing agent still answers 204.
    if agent is not None:
        db.session.delete(agent)
        db.session.commit()
        # The agent's capacity is gone, let the scheduler redistribute work.
        assign_tasks.delay()

    return jsonify(None), NO_CONTENT
def pause_multiple_jobs():
    """
    Pause every job whose id was submitted in the ``job_id`` form field.

    All requested jobs are resolved before anything is modified, so an
    unknown id yields a NOT_FOUND error page without leaving some jobs
    paused and others untouched (the original committed each job before
    validating the remaining ids).  Running tasks belonging to the paused
    jobs are stopped asynchronously after the state change is committed.
    """
    job_ids = request.form.getlist("job_id")

    # Validate every id up front so the request is applied all-or-nothing.
    jobs = []
    for job_id in job_ids:
        job = Job.query.filter_by(id=job_id).first()
        if not job:
            return (render_template("pyfarm/error.html",
                                    error="Job %s not found" % job_id),
                    NOT_FOUND)
        jobs.append(job)

    task_ids_to_stop = []
    for job in jobs:
        # Remember which tasks must be stopped once the pause is committed.
        task_ids_to_stop.extend(
            task.id for task in job.tasks
            if task.state == WorkState.RUNNING)
        job.state = WorkState.PAUSED
        db.session.add(job)
    # One commit covering all jobs instead of a commit per job.
    db.session.commit()

    for task_id in task_ids_to_stop:
        stop_task.delay(task_id)
    assign_tasks.delay()

    flash("Selected jobs will be paused.")
    if "next" in request.args:
        return redirect(request.args.get("next"), SEE_OTHER)
    else:
        return redirect(url_for("jobs_index_ui"), SEE_OTHER)
def pause_multiple_jobs():
    """Pause each job listed in the ``job_id`` form field.

    Marks every selected job as paused, then asks the workers to stop any
    of that job's tasks that were running when the request arrived.
    """
    running_task_ids = []
    for job_id in request.form.getlist("job_id"):
        job = Job.query.filter_by(id=job_id).first()
        if job is None:
            return (render_template("pyfarm/error.html",
                                    error="Job %s not found" % job_id),
                    NOT_FOUND)

        # Collect the tasks that will need a stop signal.
        running_task_ids.extend(
            task.id for task in job.tasks
            if task.state == WorkState.RUNNING)

        job.state = WorkState.PAUSED
        db.session.add(job)
        db.session.commit()

    for task_id in running_task_ids:
        stop_task.delay(task_id)
    assign_tasks.delay()

    flash("Selected jobs will be paused.")
    if "next" in request.args:
        return redirect(request.args.get("next"), SEE_OTHER)
    return redirect(url_for("jobs_index_ui"), SEE_OTHER)
def rerun_single_job(job_id):
    """Queue every task of one job to run again, then redirect the client."""
    job = Job.query.filter_by(id=job_id).first()
    if job is None:
        return (render_template("pyfarm/error.html",
                                error="Job %s not found" % job_id),
                NOT_FOUND)

    job.rerun()
    db.session.commit()
    assign_tasks.delay()

    logger.info("Job %s (job id: %s) is being rerun by request from %s",
                job.title, job.id, request.remote_addr)
    flash("Job %s will be run again." % job.title)

    if "next" in request.args:
        return redirect(request.args.get("next"), SEE_OTHER)
    return redirect(url_for("jobs_index_ui"), SEE_OTHER)
def unpause_single_job(job_id):
    """Clear a job's paused state and let it derive its real state again."""
    job = Job.query.filter_by(id=job_id).first()
    if job is None:
        return (render_template("pyfarm/error.html",
                                error="Job %s not found" % job_id),
                NOT_FOUND)

    # Reset then recompute: update_state() derives the correct state from
    # the job's tasks.
    job.state = None
    job.update_state()
    db.session.add(job)
    db.session.commit()
    assign_tasks.delay()

    flash("Job %s is unpaused." % job.title)
    if "next" in request.args:
        return redirect(request.args.get("next"), SEE_OTHER)
    return redirect(url_for("jobs_index_ui"), SEE_OTHER)
def alter_frames_in_single_job(job_id):
    """
    Change a job's frame range (start, end, step) from form input.

    Returns BAD_REQUEST instead of crashing with a 500 when the ``start``,
    ``end`` or ``by`` form fields are missing or not valid decimal numbers,
    and when the model rejects the new range with a ValueError.
    """
    job = Job.query.filter_by(id=job_id).first()
    if not job:
        return (render_template("pyfarm/error.html",
                                error="Job %s not found" % job_id),
                NOT_FOUND)

    # Malformed input is a client error: a missing field raises KeyError
    # and a non-numeric value raises decimal.InvalidOperation, which is a
    # subclass of ArithmeticError.
    try:
        start = Decimal(request.form['start'])
        end = Decimal(request.form['end'])
        by = Decimal(request.form['by'])
    except KeyError as e:
        return (render_template("pyfarm/error.html",
                                error="Missing form field %s" % e),
                BAD_REQUEST)
    except ArithmeticError:
        return (render_template("pyfarm/error.html",
                                error="start, end and by must be numbers"),
                BAD_REQUEST)

    try:
        job.alter_frame_range(start, end, by)
    except ValueError as e:
        return (render_template("pyfarm/error.html", error=e), BAD_REQUEST)

    db.session.commit()
    assign_tasks.delay()
    flash("Frame selection for job %s has been changed." % job.title)
    return redirect(url_for("single_job_ui", job_id=job.id), SEE_OTHER)
def rerun_single_job(job_id):
    """Schedule all tasks of the given job for execution again.

    Renders a NOT_FOUND error page for unknown job ids, otherwise reruns
    the job, pokes the scheduler and redirects the browser.
    """
    job = Job.query.filter_by(id=job_id).first()
    if not job:
        error_page = render_template("pyfarm/error.html",
                                     error="Job %s not found" % job_id)
        return error_page, NOT_FOUND

    job.rerun()
    db.session.commit()
    assign_tasks.delay()
    logger.info("Job %s (job id: %s) is being rerun by request from %s",
                job.title, job.id, request.remote_addr)
    flash("Job %s will be run again." % job.title)

    destination = (request.args.get("next") if "next" in request.args
                   else url_for("jobs_index_ui"))
    return redirect(destination, SEE_OTHER)
def unpause_single_job(job_id):
    """Remove a job's paused state and recompute its actual state."""
    job = Job.query.filter_by(id=job_id).first()
    if not job:
        error_page = render_template("pyfarm/error.html",
                                     error="Job %s not found" % job_id)
        return error_page, NOT_FOUND

    job.state = None
    job.update_state()
    db.session.add(job)
    db.session.commit()
    assign_tasks.delay()

    flash("Job %s is unpaused." % job.title)
    destination = (request.args.get("next") if "next" in request.args
                   else url_for("jobs_index_ui"))
    return redirect(destination, SEE_OTHER)
def pause_single_job(job_id):
    """Pause one job and stop any of its tasks that are currently running."""
    job = Job.query.filter_by(id=job_id).first()
    if job is None:
        return (render_template("pyfarm/error.html",
                                error="Job %s not found" % job_id),
                NOT_FOUND)

    job.state = WorkState.PAUSED
    db.session.add(job)
    db.session.commit()

    # Stop running tasks only after the paused state has been committed.
    for task in job.tasks:
        if task.state == WorkState.RUNNING:
            stop_task.delay(task.id)
    assign_tasks.delay()

    flash("Job %s will be paused." % job.title)
    if "next" in request.args:
        return redirect(request.args.get("next"), SEE_OTHER)
    return redirect(url_for("jobs_index_ui"), SEE_OTHER)
def alter_frames_in_single_job(job_id):
    """
    Alter the frame selection (start, end, by) of one job.

    Bad form input (missing fields, non-numeric values) now produces a
    BAD_REQUEST error page instead of an unhandled 500; a range rejected by
    the model is also reported as BAD_REQUEST.
    """
    job = Job.query.filter_by(id=job_id).first()
    if not job:
        return (render_template("pyfarm/error.html",
                                error="Job %s not found" % job_id),
                NOT_FOUND)

    # Treat malformed input as a client error.  Decimal("garbage") raises
    # decimal.InvalidOperation (an ArithmeticError subclass); a missing
    # field raises KeyError.
    try:
        start = Decimal(request.form['start'])
        end = Decimal(request.form['end'])
        by = Decimal(request.form['by'])
    except KeyError as e:
        return (render_template("pyfarm/error.html",
                                error="Missing form field %s" % e),
                BAD_REQUEST)
    except ArithmeticError:
        return (render_template("pyfarm/error.html",
                                error="start, end and by must be numbers"),
                BAD_REQUEST)

    try:
        job.alter_frame_range(start, end, by)
    except ValueError as e:
        return (render_template("pyfarm/error.html", error=e),
                BAD_REQUEST)

    db.session.commit()
    assign_tasks.delay()
    flash("Frame selection for job %s has been changed." % job.title)
    return redirect(url_for("single_job_ui", job_id=job.id), SEE_OTHER)
def rerun_single_task(job_id, task_id):
    """
    Reset one task of a job so the scheduler will run it again.

    Returns NOT_FOUND if the job or task does not exist and BAD_REQUEST if
    the task is still running.  When the owning job is no longer running,
    its state is reset as well so the job is picked up again.
    """
    job = Job.query.filter_by(id=job_id).first()
    if not job:
        return (render_template("pyfarm/error.html",
                                error="Job %s not found" % job_id),
                NOT_FOUND)

    task = Task.query.filter_by(id=task_id, job=job).first()
    if not task:
        return (render_template("pyfarm/error.html",
                                error="Task %s not found" % task_id),
                NOT_FOUND)

    if task.state == WorkState.RUNNING:
        return (render_template("pyfarm/error.html",
                                error="Cannot rerun task while it is "
                                      "still running"),
                BAD_REQUEST)

    task.state = None
    task.agent = None
    task.failures = 0

    # BUG FIX: the original tested the *class* attribute ``Job.state`` (an
    # SQLAlchemy column expression) against WorkState.RUNNING instead of
    # this job instance's state; the comparison never reflected the actual
    # job, so use the instance attribute.
    if job.state != WorkState.RUNNING:
        job.state = None
        job.completion_notify_sent = False
        db.session.add(job)

    db.session.add(task)
    db.session.commit()
    assign_tasks.delay()

    flash("Task %s (frame %s) in job %s will be run again." %
          (task.id, task.frame, job.title))
    if "next" in request.args:
        return redirect(request.args.get("next"), SEE_OTHER)
    else:
        return redirect(url_for("single_job_ui", job_id=job.id), SEE_OTHER)
def pause_single_job(job_id):
    """Mark one job as paused, then ask agents to stop its running tasks."""
    job = Job.query.filter_by(id=job_id).first()
    if not job:
        error_page = render_template("pyfarm/error.html",
                                     error="Job %s not found" % job_id)
        return error_page, NOT_FOUND

    job.state = WorkState.PAUSED
    db.session.add(job)
    db.session.commit()

    # Signal every running task after the pause has been persisted.
    running = (task for task in job.tasks
               if task.state == WorkState.RUNNING)
    for task in running:
        stop_task.delay(task.id)
    assign_tasks.delay()

    flash("Job %s will be paused." % job.title)
    destination = (request.args.get("next") if "next" in request.args
                   else url_for("jobs_index_ui"))
    return redirect(destination, SEE_OTHER)
def rerun_single_task(job_id, task_id):
    """
    Clear one task's state so it will be scheduled and executed again.

    NOT_FOUND is returned for an unknown job or task; a task that is still
    running cannot be rerun and yields BAD_REQUEST.  If the parent job is
    not running anymore its own state is cleared too.
    """
    job = Job.query.filter_by(id=job_id).first()
    if not job:
        return (render_template("pyfarm/error.html",
                                error="Job %s not found" % job_id),
                NOT_FOUND)

    task = Task.query.filter_by(id=task_id, job=job).first()
    if not task:
        return (render_template("pyfarm/error.html",
                                error="Task %s not found" % task_id),
                NOT_FOUND)

    if task.state == WorkState.RUNNING:
        return (render_template("pyfarm/error.html",
                                error="Cannot rerun task while it is "
                                      "still running"),
                BAD_REQUEST)

    task.state = None
    task.agent = None
    task.failures = 0

    # BUG FIX: previously this compared ``Job.state`` -- the SQLAlchemy
    # column on the class -- rather than ``job.state`` on the instance,
    # so the condition did not depend on this job at all.
    if job.state != WorkState.RUNNING:
        job.state = None
        job.completion_notify_sent = False
        db.session.add(job)

    db.session.add(task)
    db.session.commit()
    assign_tasks.delay()

    flash("Task %s (frame %s) in job %s will be run again." %
          (task.id, task.frame, job.title))
    if "next" in request.args:
        return redirect(request.args.get("next"), SEE_OTHER)
    else:
        return redirect(url_for("single_job_ui", job_id=job.id), SEE_OTHER)
def unpause_multiple_jobs():
    """
    Unpause every job listed in the ``job_id`` form field.

    All ids are validated before any job is modified, so an unknown id
    cannot leave the request half-applied, and the changes are committed in
    one transaction instead of once per job.
    """
    jobs = []
    for job_id in request.form.getlist("job_id"):
        job = Job.query.filter_by(id=job_id).first()
        if not job:
            return (render_template("pyfarm/error.html",
                                    error="Job %s not found" % job_id),
                    NOT_FOUND)
        jobs.append(job)

    for job in jobs:
        # Clear the paused state, then let the model derive the real state
        # from its tasks.
        job.state = None
        job.update_state()
        db.session.add(job)
    db.session.commit()
    assign_tasks.delay()

    flash("Selected jobs are unpaused")
    if "next" in request.args:
        return redirect(request.args.get("next"), SEE_OTHER)
    else:
        return redirect(url_for("jobs_index_ui"), SEE_OTHER)
def rerun_failed_in_multiple_jobs():
    """
    Rerun the failed tasks of every job listed in the ``job_id`` field.

    All ids are resolved before anything is modified, so an unknown id
    returns NOT_FOUND without some of the jobs having been rerun already.
    """
    jobs = []
    for job_id in request.form.getlist("job_id"):
        job = Job.query.filter_by(id=job_id).first()
        if not job:
            return (render_template("pyfarm/error.html",
                                    error="Job %s not found" % job_id),
                    NOT_FOUND)
        jobs.append(job)

    for job in jobs:
        logger.info("Failed tasks from job %s (job id: %s) are being rerun by "
                    "request from %s",
                    job.title, job.id, request.remote_addr)
        job.rerun_failed()
    db.session.commit()
    assign_tasks.delay()

    flash("Failed tasks in selected jobs will be run again.")
    if "next" in request.args:
        return redirect(request.args.get("next"), SEE_OTHER)
    else:
        return redirect(url_for("jobs_index_ui"), SEE_OTHER)
def unpause_multiple_jobs():
    """Unpause each job named by a ``job_id`` form value."""
    for job_id in request.form.getlist("job_id"):
        job = Job.query.filter_by(id=job_id).first()
        if job is None:
            return (render_template("pyfarm/error.html",
                                    error="Job %s not found" % job_id),
                    NOT_FOUND)
        # Drop the paused state and let the model recompute the real one.
        job.state = None
        job.update_state()
        db.session.add(job)
        db.session.commit()

    assign_tasks.delay()
    flash("Selected jobs are unpaused")
    if "next" in request.args:
        return redirect(request.args.get("next"), SEE_OTHER)
    return redirect(url_for("jobs_index_ui"), SEE_OTHER)
def rerun_failed_in_multiple_jobs():
    """Rerun only the failed tasks of every selected job."""
    for job_id in request.form.getlist("job_id"):
        job = Job.query.filter_by(id=job_id).first()
        if job is None:
            return (render_template("pyfarm/error.html",
                                    error="Job %s not found" % job_id),
                    NOT_FOUND)
        logger.info(
            "Failed tasks from job %s (job id: %s) are being rerun by "
            "request from %s", job.title, job.id, request.remote_addr)
        job.rerun_failed()
        db.session.commit()

    assign_tasks.delay()
    flash("Failed tasks in selected jobs will be run again.")
    if "next" in request.args:
        return redirect(request.args.get("next"), SEE_OTHER)
    return redirect(url_for("jobs_index_ui"), SEE_OTHER)
def post(self):
    """
    A ``POST`` to this endpoint will either create or update an existing
    agent.  The ``port`` and ``id`` columns will determine if an agent
    already exists.

        * If an agent is found matching the ``port`` and ``id`` columns
          from the request the existing model will be updated and the
          resulting data and the ``OK`` code will be returned.

        * If we don't find an agent matching the ``port`` and ``id``
          however a new agent will be created and the resulting data and
          the ``CREATED`` code will be returned.

    .. note::
        The ``remote_ip`` field is not required and should typically not
        be included in a request.  When not provided ``remote_ip`` is be
        populated by the server based off of the ip of the incoming
        request.  Providing ``remote_ip`` in your request however will
        override this behavior.

    .. http:post:: /api/v1/agents/ HTTP/1.1

        **Request**

        .. sourcecode:: http

            POST /api/v1/agents/ HTTP/1.1
            Accept: application/json

            {
                "cpu_allocation": 1.0,
                "cpus": 14,
                "free_ram": 133,
                "hostname": "agent1",
                "id": "6a0c11df-660f-4c1e-9fb4-5fe2b8cd2437",
                "remote_ip": "10.196.200.115",
                "port": 64994,
                "ram": 2157,
                "ram_allocation": 0.8,
                "state": 8
            }

        **Response (agent created)**

        .. sourcecode:: http

            HTTP/1.1 201 CREATED
            Content-Type: application/json

            {
                "cpu_allocation": 1.0,
                "cpus": 14,
                "use_address": "remote",
                "free_ram": 133,
                "time_offset": 0,
                "hostname": "agent1",
                "id": "6a0c11df-660f-4c1e-9fb4-5fe2b8cd2437",
                "port": 64994,
                "ram": 2157,
                "ram_allocation": 0.8,
                "state": "online",
                "remote_ip": "10.196.200.115"
            }

        **Response (existing agent updated)**

        .. sourcecode:: http

            HTTP/1.1 200 OK
            Content-Type: application/json

            {
                "cpu_allocation": 1.0,
                "cpus": 14,
                "use_address": "remote",
                "free_ram": 133,
                "time_offset": 0,
                "hostname": "agent1",
                "id": "6a0c11df-660f-4c1e-9fb4-5fe2b8cd2437",
                "port": 64994,
                "ram": 2157,
                "ram_allocation": 0.8,
                "state": "online",
                "remote_ip": "10.196.200.115"
            }

    :statuscode 201: a new agent was created
    :statuscode 200: an existing agent is updated with data from the
                     request
    :statuscode 400: there was something wrong with the request (such as
                     invalid columns being included)
    """
    # Read in and convert the id field
    try:
        g.json["id"] = uuid.UUID(g.json["id"])
    except KeyError:
        return jsonify(error="`id` not provided"), BAD_REQUEST

    # If the remote user agent is not "PyFarm/1.0 (agent)", this is not an
    # announce by the agent itself.  It could be some other client editing
    # the agent.
    if request.headers.get("User-Agent", "") == "PyFarm/1.0 (agent)":
        # Set remote_ip if it did not come in with the request
        g.json.setdefault("remote_ip", request.remote_addr)

    farm_name = g.json.pop("farm_name", None)
    if farm_name and farm_name != OUR_FARM_NAME:
        return jsonify(error="Wrong farm name"), BAD_REQUEST

    current_assignments = g.json.pop("current_assignments", None)

    mac_addresses = g.json.pop("mac_addresses", None)
    # TODO return BAD_REQUEST on bad mac addresses
    if mac_addresses is not None:
        mac_addresses = [x.lower() for x in mac_addresses if MAC_RE.match(x)]

    gpus = g.json.pop("gpus", None)
    disks = g.json.pop("disks", None)
    state = g.json.pop("state", None)

    agent = Agent.query.filter_by(port=g.json["port"],
                                  id=g.json["id"]).first()
    if agent is None:
        try:
            agent = Agent(**g.json)
        # There may be something wrong with one of the fields that's
        # causing our sqlalchemy model to raise a ValueError.
        except ValueError as e:
            return jsonify(error=str(e)), BAD_REQUEST

        default_versions = SoftwareVersion.query.filter_by(default=True)
        for version in default_versions:
            agent.software_versions.append(version)

        if mac_addresses is not None:
            for address in mac_addresses:
                mac_address = AgentMacAddress(agent=agent,
                                              mac_address=address)
                db.session.add(mac_address)

        if gpus is not None:
            for gpu_name in gpus:
                gpu = GPU.query.filter_by(fullname=gpu_name).first()
                if not gpu:
                    gpu = GPU(fullname=gpu_name)
                    db.session.add(gpu)
                agent.gpus.append(gpu)

        if disks is not None:
            for disk_dict in disks:
                disk = AgentDisk(agent=agent,
                                 mountpoint=disk_dict["mountpoint"],
                                 size=disk_dict["size"],
                                 free=disk_dict["free"])
                db.session.add(disk)

        if state is not None:
            agent.state = state

        db.session.add(agent)

        try:
            db.session.commit()
        except Exception as e:
            # BUG FIX: the original used ``e.args[0].lower()`` which can
            # itself raise (empty args, non-string payload) and mask the
            # real database error; str(e) is always safe.
            error = "Unhandled error: %s. This is often an issue " \
                    "with the agent's data for `ip`, `hostname` and/or " \
                    "`port` not being unique enough. In other cases " \
                    "this can sometimes happen if the underlying " \
                    "database driver is either non-compliant with " \
                    "expectations or we've encountered a database error " \
                    "that we don't know how to handle yet. If the " \
                    "latter is the case, please report this as a bug." \
                    % str(e).lower()
            return jsonify(error=error), INTERNAL_SERVER_ERROR
        else:
            agent_data = agent.to_dict(unpack_relationships=["tags"])
            logger.info("Created agent %r: %r", agent.id, agent_data)
            assign_tasks.delay()
            return jsonify(agent_data), CREATED

    else:
        updated = False

        for key in g.json.copy():
            value = g.json.pop(key)
            if not hasattr(agent, key):
                return jsonify(
                    error="Agent has no such column `%s`" % key), \
                    BAD_REQUEST
            if getattr(agent, key) != value:
                try:
                    setattr(agent, key, value)
                except Exception as e:
                    return jsonify(
                        error="Error while setting `%s`: %s" % (key, e)), \
                        BAD_REQUEST
                else:
                    updated = True

        if mac_addresses is not None:
            updated = True
            for existing_address in agent.mac_addresses:
                if existing_address.mac_address.lower() not in mac_addresses:
                    logger.debug("Existing address %s is not in supplied "
                                 "mac addresses, for agent %s, removing it.",
                                 existing_address.mac_address,
                                 agent.hostname)
                    agent.mac_addresses.remove(existing_address)
                else:
                    mac_addresses.remove(
                        existing_address.mac_address.lower())
            for new_address in mac_addresses:
                mac_address = AgentMacAddress(agent=agent,
                                              mac_address=new_address)
                db.session.add(mac_address)

        if gpus is not None:
            updated = True
            for existing_gpu in agent.gpus:
                if existing_gpu.fullname not in gpus:
                    # BUG FIX: this log call previously referenced
                    # ``existing_address.mac_address`` -- a leftover from
                    # the mac address loop above -- which could raise
                    # NameError or log an unrelated value; log the gpu
                    # actually being removed.
                    logger.debug("Existing gpu %s is not in supplied "
                                 "gpus, for agent %s, removing it.",
                                 existing_gpu.fullname, agent.hostname)
                    agent.gpus.remove(existing_gpu)
                else:
                    gpus.remove(existing_gpu.fullname)
            for gpu_name in gpus:
                gpu = GPU.query.filter_by(fullname=gpu_name).first()
                if not gpu:
                    gpu = GPU(fullname=gpu_name)
                    db.session.add(gpu)
                agent.gpus.append(gpu)

        if disks is not None:
            # Disks are replaced wholesale with the supplied list.
            for old_disk in agent.disks:
                db.session.delete(old_disk)
            for disk_dict in disks:
                disk = AgentDisk(agent=agent,
                                 mountpoint=disk_dict["mountpoint"],
                                 size=disk_dict["size"],
                                 free=disk_dict["free"])
                db.session.add(disk)

        # NOTE(review): ``_AgentState`` here vs ``AgentState`` below --
        # confirm both names are imported; they look like the same enum.
        if state is not None and agent.state != _AgentState.DISABLED:
            agent.state = state

        # TODO Only do that if this is really the agent speaking to us.
        failed_tasks = []
        if (current_assignments is not None and
                agent.state != AgentState.OFFLINE):
            # NOTE(review): the return value is discarded, so
            # ``failed_tasks`` always stays empty and the update_state()
            # loop below never runs -- confirm whether this should be
            # ``failed_tasks = fail_missing_assignments(...)``.
            fail_missing_assignments(agent, current_assignments)

        # Computed unconditionally so the final return cannot hit a
        # NameError when nothing was updated.
        agent_data = agent.to_dict(unpack_relationships=["tags"])

        if updated or failed_tasks:
            agent.last_heard_from = datetime.utcnow()
            db.session.add(agent)
            try:
                db.session.commit()
            except Exception as e:
                return jsonify(error="Unhandled error: %s" % e), \
                    INTERNAL_SERVER_ERROR
            else:
                agent_data = agent.to_dict(unpack_relationships=["tags"])
                logger.info("Updated agent %r: %r", agent.id, agent_data)

        for task in failed_tasks:
            task.job.update_state()
        db.session.commit()
        assign_tasks.delay()

        return jsonify(agent_data), OK
def post(self, agent_id):
    """
    A ``POST`` to this endpoint will assign an existing task to the agent.

    .. http:post:: /api/v1/agents/<str:agent_id>/tasks/ HTTP/1.1

        **Request**

        .. sourcecode:: http

            POST /api/v1/agents/238d7334-8ca5-4469-9f54-e76c66614a43/tasks/ HTTP/1.1
            Accept: application/json

            {
                "id": 2
            }

        **Response**

        .. sourcecode:: http

            HTTP/1.1 200 OK
            Content-Type: application/json

            {
                "agent_id": 1,
                "parents": [],
                "attempts": 2,
                "children": [],
                "job": {
                    "title": "Test Job",
                    "id": 1
                },
                "project_id": null,
                "agent": {
                    "ip": null,
                    "hostname": "agent1",
                    "port": 50000,
                    "id": "238d7334-8ca5-4469-9f54-e76c66614a43"
                },
                "hidden": false,
                "job_id": 1,
                "time_submitted": "2014-03-06T15:40:58.338904",
                "frame": 2.0,
                "priority": 0,
                "state": "assign",
                "time_finished": null,
                "id": 2,
                "project": null,
                "time_started": null
            }

    :statuscode 200: no error
    :statuscode 404: agent not found
    """
    # Validate the payload: exactly one key, and it must be "id".
    if "id" not in g.json:
        return jsonify(error="No id given for task"), BAD_REQUEST
    if len(g.json) > 1:
        return jsonify(error="Unknown keys in request"), BAD_REQUEST

    agent = Agent.query.filter_by(id=agent_id).first()
    if agent is None:
        return jsonify(error="Agent %r not found" % agent_id), NOT_FOUND

    task = Task.query.filter_by(id=g.json["id"]).first()
    if not task:
        return jsonify(error="Task not found"), NOT_FOUND

    task.agent = agent
    db.session.add(task)
    db.session.commit()
    logger.info("Assigned task %s (frame %s, job %s) to agent %s (%s)",
                task.id, task.frame, task.job.title,
                agent.id, agent.hostname)
    assign_tasks.delay()
    return jsonify(task.to_dict()), OK
def post(self):
    """
    A ``POST`` to this endpoint will either create or update an existing
    agent.  The ``port`` and ``id`` columns will determine if an agent
    already exists.

        * If an agent is found matching the ``port`` and ``id`` columns
          from the request the existing model will be updated and the
          resulting data and the ``OK`` code will be returned.

        * If we don't find an agent matching the ``port`` and ``id``
          however a new agent will be created and the resulting data and
          the ``CREATED`` code will be returned.

    .. note::
        The ``remote_ip`` field is not required and should typically not
        be included in a request.  When not provided ``remote_ip`` is be
        populated by the server based off of the ip of the incoming
        request.  Providing ``remote_ip`` in your request however will
        override this behavior.

    .. http:post:: /api/v1/agents/ HTTP/1.1

        **Request**

        .. sourcecode:: http

            POST /api/v1/agents/ HTTP/1.1
            Accept: application/json

            {
                "cpu_allocation": 1.0,
                "cpus": 14,
                "free_ram": 133,
                "hostname": "agent1",
                "id": "6a0c11df-660f-4c1e-9fb4-5fe2b8cd2437",
                "remote_ip": "10.196.200.115",
                "port": 64994,
                "ram": 2157,
                "ram_allocation": 0.8,
                "state": 8
            }

        **Response (agent created)**

        .. sourcecode:: http

            HTTP/1.1 201 CREATED
            Content-Type: application/json

            {
                "cpu_allocation": 1.0,
                "cpus": 14,
                "use_address": "remote",
                "free_ram": 133,
                "time_offset": 0,
                "hostname": "agent1",
                "id": "6a0c11df-660f-4c1e-9fb4-5fe2b8cd2437",
                "port": 64994,
                "ram": 2157,
                "ram_allocation": 0.8,
                "state": "online",
                "remote_ip": "10.196.200.115"
            }

        **Response (existing agent updated)**

        .. sourcecode:: http

            HTTP/1.1 200 OK
            Content-Type: application/json

            {
                "cpu_allocation": 1.0,
                "cpus": 14,
                "use_address": "remote",
                "free_ram": 133,
                "time_offset": 0,
                "hostname": "agent1",
                "id": "6a0c11df-660f-4c1e-9fb4-5fe2b8cd2437",
                "port": 64994,
                "ram": 2157,
                "ram_allocation": 0.8,
                "state": "online",
                "remote_ip": "10.196.200.115"
            }

    :statuscode 201: a new agent was created
    :statuscode 200: an existing agent is updated with data from the
                     request
    :statuscode 400: there was something wrong with the request (such as
                     invalid columns being included)
    """
    # Read in and convert the id field
    try:
        g.json["id"] = uuid.UUID(g.json["id"])
    except KeyError:
        return jsonify(error="`id` not provided"), BAD_REQUEST

    # If the remote user agent is not "PyFarm/1.0 (agent)", this is not an
    # announce by the agent itself.  It could be some other client editing
    # the agent.
    if request.headers.get("User-Agent", "") == "PyFarm/1.0 (agent)":
        # Set remote_ip if it did not come in with the request
        g.json.setdefault("remote_ip", request.remote_addr)

    farm_name = g.json.pop("farm_name", None)
    if farm_name and farm_name != OUR_FARM_NAME:
        return jsonify(error="Wrong farm name"), BAD_REQUEST

    current_assignments = g.json.pop("current_assignments", None)

    mac_addresses = g.json.pop("mac_addresses", None)
    # TODO return BAD_REQUEST on bad mac addresses
    if mac_addresses is not None:
        mac_addresses = [x.lower() for x in mac_addresses
                         if MAC_RE.match(x)]

    gpus = g.json.pop("gpus", None)
    disks = g.json.pop("disks", None)
    state = g.json.pop("state", None)

    agent = Agent.query.filter_by(
        port=g.json["port"], id=g.json["id"]).first()
    if agent is None:
        try:
            agent = Agent(**g.json)
        # There may be something wrong with one of the fields that's
        # causing our sqlalchemy model to raise a ValueError.
        except ValueError as e:
            return jsonify(error=str(e)), BAD_REQUEST

        default_versions = SoftwareVersion.query.filter_by(default=True)
        for version in default_versions:
            agent.software_versions.append(version)

        if mac_addresses is not None:
            for address in mac_addresses:
                mac_address = AgentMacAddress(
                    agent=agent, mac_address=address)
                db.session.add(mac_address)

        if gpus is not None:
            for gpu_name in gpus:
                gpu = GPU.query.filter_by(fullname=gpu_name).first()
                if not gpu:
                    gpu = GPU(fullname=gpu_name)
                    db.session.add(gpu)
                agent.gpus.append(gpu)

        if disks is not None:
            for disk_dict in disks:
                disk = AgentDisk(agent=agent,
                                 mountpoint=disk_dict["mountpoint"],
                                 size=disk_dict["size"],
                                 free=disk_dict["free"])
                db.session.add(disk)

        if state is not None:
            agent.state = state

        db.session.add(agent)

        try:
            db.session.commit()
        except Exception as e:
            # BUG FIX: ``e.args[0].lower()`` can itself raise when args is
            # empty or holds a non-string, hiding the real failure;
            # ``str(e)`` never raises.
            error = "Unhandled error: %s. This is often an issue " \
                    "with the agent's data for `ip`, `hostname` and/or " \
                    "`port` not being unique enough. In other cases " \
                    "this can sometimes happen if the underlying " \
                    "database driver is either non-compliant with " \
                    "expectations or we've encountered a database error " \
                    "that we don't know how to handle yet. If the " \
                    "latter is the case, please report this as a bug." \
                    % str(e).lower()
            return jsonify(error=error), INTERNAL_SERVER_ERROR
        else:
            agent_data = agent.to_dict(unpack_relationships=["tags"])
            logger.info("Created agent %r: %r", agent.id, agent_data)
            assign_tasks.delay()
            return jsonify(agent_data), CREATED

    else:
        updated = False

        for key in g.json.copy():
            value = g.json.pop(key)
            if not hasattr(agent, key):
                return jsonify(
                    error="Agent has no such column `%s`" % key), \
                    BAD_REQUEST
            if getattr(agent, key) != value:
                try:
                    setattr(agent, key, value)
                except Exception as e:
                    return jsonify(
                        error="Error while setting `%s`: %s" % (key, e)), \
                        BAD_REQUEST
                else:
                    updated = True

        if mac_addresses is not None:
            updated = True
            for existing_address in agent.mac_addresses:
                if existing_address.mac_address.lower() not in mac_addresses:
                    logger.debug("Existing address %s is not in supplied "
                                 "mac addresses, for agent %s, removing it.",
                                 existing_address.mac_address,
                                 agent.hostname)
                    agent.mac_addresses.remove(existing_address)
                else:
                    mac_addresses.remove(
                        existing_address.mac_address.lower())
            for new_address in mac_addresses:
                mac_address = AgentMacAddress(
                    agent=agent, mac_address=new_address)
                db.session.add(mac_address)

        if gpus is not None:
            updated = True
            for existing_gpu in agent.gpus:
                if existing_gpu.fullname not in gpus:
                    # BUG FIX: the original logged
                    # ``existing_address.mac_address`` here -- copied from
                    # the mac address branch -- which may be undefined
                    # (NameError) or simply the wrong value; log the gpu
                    # being removed instead.
                    logger.debug("Existing gpu %s is not in supplied "
                                 "gpus, for agent %s, removing it.",
                                 existing_gpu.fullname, agent.hostname)
                    agent.gpus.remove(existing_gpu)
                else:
                    gpus.remove(existing_gpu.fullname)
            for gpu_name in gpus:
                gpu = GPU.query.filter_by(fullname=gpu_name).first()
                if not gpu:
                    gpu = GPU(fullname=gpu_name)
                    db.session.add(gpu)
                agent.gpus.append(gpu)

        if disks is not None:
            # Replace the stored disk list with the supplied one.
            for old_disk in agent.disks:
                db.session.delete(old_disk)
            for disk_dict in disks:
                disk = AgentDisk(agent=agent,
                                 mountpoint=disk_dict["mountpoint"],
                                 size=disk_dict["size"],
                                 free=disk_dict["free"])
                db.session.add(disk)

        # NOTE(review): both ``_AgentState`` and ``AgentState`` are used in
        # this method -- confirm both names are imported at module level.
        if state is not None and agent.state != _AgentState.DISABLED:
            agent.state = state

        # TODO Only do that if this is really the agent speaking to us.
        failed_tasks = []
        if (current_assignments is not None and
                agent.state != AgentState.OFFLINE):
            # NOTE(review): the return value is discarded so
            # ``failed_tasks`` always stays empty; confirm whether this
            # should read ``failed_tasks = fail_missing_assignments(...)``.
            fail_missing_assignments(agent, current_assignments)

        # Computed unconditionally so the final return cannot raise a
        # NameError when no field changed.
        agent_data = agent.to_dict(unpack_relationships=["tags"])

        if updated or failed_tasks:
            agent.last_heard_from = datetime.utcnow()
            db.session.add(agent)
            try:
                db.session.commit()
            except Exception as e:
                return jsonify(error="Unhandled error: %s" % e), \
                    INTERNAL_SERVER_ERROR
            else:
                agent_data = agent.to_dict(unpack_relationships=["tags"])
                logger.info("Updated agent %r: %r", agent.id, agent_data)

        for task in failed_tasks:
            task.job.update_state()
        db.session.commit()
        assign_tasks.delay()

        return jsonify(agent_data), OK