def fail(self, message="Failed", critical=False):
    if critical:
        retries = MAX_RETRIES
    else:
        retries = self.retries + 1
    self.db.query(
        """
        UPDATE jobs SET
            id_service=NULL,
            retries=%s,
            priority=%s,
            status=3,
            progress=0,
            message=%s
        WHERE id=%s
        """,
        [retries, max(0, self.priority - 1), message, self.id],
    )
    self.db.commit()
    self.status = JobState.FAILED
    logging.error(f"{self}: {message}")
    messaging.send(
        "job_progress",
        id=self.id,
        id_asset=self.id_asset,
        id_action=self.id_action,
        status=JobState.FAILED,
        progress=0,
        message=message,
    )
def restart(self, message="Restarted"):
    logging.warning(f"{self} restarted")
    self.db.query(
        """
        UPDATE jobs SET
            id_service=NULL,
            start_time=NULL,
            end_time=NULL,
            status=5,
            retries=0,
            progress=0,
            message=%s
        WHERE id=%s
        """,
        [message, self.id],
    )
    self.db.commit()
    self.status = JobState.RESTART
    messaging.send(
        "job_progress",
        id=self.id,
        id_asset=self.id_asset,
        id_action=self.id_action,
        stime=None,
        etime=None,
        status=5,
        progress=0,
        message=message,
    )
def process(self):
    while True:
        try:
            if not self.queue:
                time.sleep(0.01)
                if time.time() - self.last_message > 3:
                    logging.debug("Heartbeat")
                    messaging.send("heartbeat")
                    self.last_message = time.time()
                continue

            message = self.queue.pop(0)
            self.last_message = time.time()

            if message.method != "log":
                self.relay_message(message)
            else:
                if self.log_dir:
                    log = format_log_message(message)
                    if not log:
                        continue
                    log_path = os.path.join(
                        self.log_dir, time.strftime("%Y-%m-%d.txt")
                    )
                    with open(log_path, "a") as f:
                        f.write(log)
                if self.loki:
                    self.loki(message)
        except Exception:
            log_traceback("Unhandled exception during message processing")
def done(self, message="Completed"):
    now = time.time()
    self.db.query(
        """
        UPDATE jobs SET
            status=2,
            progress=100,
            end_time=%s,
            message=%s
        WHERE id=%s
        """,
        [now, message, self.id],
    )
    self.db.commit()
    self.status = JobState.COMPLETED
    logging.goodnews(f"{self}: {message}")
    messaging.send(
        "job_progress",
        id=self.id,
        id_asset=self.asset.id,
        id_action=self.action.id,
        status=JobState.COMPLETED,
        etime=now,
        progress=100,
        message=message,
    )
def abort(self, message="Aborted"):
    now = time.time()
    logging.warning(f"{self} aborted")
    self.db.query(
        """
        UPDATE jobs SET
            end_time=%s,
            status=4,
            message=%s
        WHERE id=%s
        """,
        [now, message, self.id],
    )
    self.db.commit()
    self.status = JobState.ABORTED
    messaging.send(
        "job_progress",
        id=self.id,
        id_asset=self.id_asset,
        id_action=self.id_action,
        etime=now,
        status=JobState.ABORTED,
        progress=0,
        message=message,
    )
def take(self, id_service):
    now = time.time()
    self.db.query(
        """
        UPDATE jobs SET
            id_service=%s,
            start_time=%s,
            end_time=NULL,
            status=1,
            progress=0
        WHERE id=%s AND id_service IS NULL
        """,
        [id_service, now, self.id],
    )
    self.db.commit()
    self.db.query(
        "SELECT id FROM jobs WHERE id=%s AND id_service=%s",
        [self.id, id_service],
    )
    if self.db.fetchall():
        messaging.send(
            "job_progress",
            id=self.id,
            id_asset=self.id_asset,
            id_action=self.id_action,
            stime=now,
            status=1,
            progress=0,
            message="Starting...",
        )
        return True
    return False
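
# Usage sketch for the claim-then-verify pattern above (hypothetical service
# ID; not part of the original code). The UPDATE only matches while
# id_service IS NULL, so when several services race for the same job,
# exactly one UPDATE wins, and the follow-up SELECT tells each caller
# whether it owns the job.
def try_claim_example(job, id_service=3):
    if job.take(id_service):
        logging.info(f"Service {id_service} now owns {job}")
        return True
    logging.debug(f"{job} was claimed by another service first")
    return False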
def main(self):
    messaging.send("heartbeat")
    if time.time() - self.last_update > 5:
        try:
            update_host_info()
        except Exception:
            log_traceback("Unable to update host info")
        self.last_update = time.time()
def on_progress(self):
    if not self.controller:
        # Guard against the race where on_progress fires after it is
        # created but before the controller is attached to the service
        return
    if time.time() - self.last_info > 0.3:
        messaging.send("playout_status", **self.playout_status)
        self.last_info = time.time()
    for plugin in self.plugins:
        plugin.main()
def save(self, **kwargs):
    super().save(**kwargs)
    is_new = self.is_new
    if is_new:
        self._insert(**kwargs)
    else:
        self._update(**kwargs)
        self.invalidate()
    if self.text_changed or is_new:
        self.update_ft_index(is_new)
    if kwargs.get("commit", True):
        self.db.commit()
    self.cache()
    self.text_changed = self.meta_changed = False
    self.is_new = False
    if kwargs.get("notify", True):
        messaging.send(
            "objects_changed",
            objects=[self.id],
            object_type=self.object_type,
        )
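
# Hedged batching sketch (not part of the original code): save() commits and
# notifies by default, so when touching many objects at once it can be
# cheaper to defer both and do them once at the end. Assumes all objects
# share the same `db` connection.
def batch_save_example(objects, db, object_type="asset"):
    for obj in objects:
        obj.save(commit=False, notify=False)
    db.commit()
    messaging.send(
        "objects_changed",
        objects=[obj.id for obj in objects],
        object_type=object_type,
    )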
def bin_refresh(bins, **kwargs):
    bins = [b for b in bins if b]
    if not bins:
        return True
    db = kwargs.get("db", DB())
    sender = kwargs.get("sender", False)
    for id_bin in bins:
        b = Bin(id_bin, db=db)
        b.save(notify=False)
    bq = ", ".join([str(b) for b in bins])
    changed_events = []
    db.query(
        f"""
        SELECT e.meta FROM events AS e, channels AS c
        WHERE
            c.channel_type = 0
            AND c.id = e.id_channel
            AND e.id_magic IN ({bq})
        """
    )
    for (meta,) in db.fetchall():
        event = Event(meta=meta, db=db)
        if event.id not in changed_events:
            changed_events.append(event.id)
    logging.debug(
        f"Bins changed: {bins}.",
        f"Initiator: {kwargs.get('initiator', logging.user)}",
    )
    messaging.send(
        "objects_changed",
        sender=sender,
        objects=bins,
        object_type="bin",
        initiator=kwargs.get("initiator", None),
    )
    if changed_events:
        logging.debug(
            f"Events changed: {changed_events}.",
            f"Initiator: {kwargs.get('initiator', logging.user)}",
        )
        messaging.send(
            "objects_changed",
            sender=sender,
            objects=changed_events,
            object_type="event",
            initiator=kwargs.get("initiator", None),
        )
    return True
def set_progress(self, progress, message="In progress"):
    db = DB()
    progress = round(progress, 2)
    db.query(
        """
        UPDATE jobs SET
            status=1,
            progress=%s,
            message=%s
        WHERE id=%s
        """,
        [progress, message, self.id],
    )
    db.commit()
    messaging.send(
        "job_progress",
        id=self.id,
        id_asset=self.id_asset,
        id_action=self.id_action,
        status=JobState.IN_PROGRESS,
        progress=progress,
        message=message,
    )
def api_jobs(**kwargs):
    formatted = kwargs.get("formatted", False)  # load titles etc.
    user = kwargs.get("user", anonymous)
    # query = kwargs.get("query", "")
    id_asset = kwargs.get("id_asset", False)
    view = kwargs.get("view", "active")
    db = kwargs.get("db", DB())
    now = time.time()

    if not user:
        return NebulaResponse(401, "You are not logged-in")
    id_user = user.id or None

    for k in ["restart", "abort"]:
        if k in kwargs and not user.has_right("job_control", anyval=True):
            return NebulaResponse(
                403, "You are not authorized to control this job"
            )

    if "restart" in kwargs:
        jobs = [int(i) for i in kwargs["restart"]]
        db.query(
            """
            UPDATE jobs SET
                id_user=%s,
                status=5,
                retries=0,
                creation_time=%s,
                start_time=NULL,
                end_time=NULL,
                id_service=NULL,
                message='Restart requested'
            WHERE id IN %s
            RETURNING id
            """,
            [id_user, now, tuple(jobs)],
        )
        result = [r[0] for r in db.fetchall()]
        db.commit()
        logging.info(f"Restarted jobs {result}")
        for job_id in result:
            messaging.send(
                "job_progress",
                id=job_id,
                status=5,
                progress=0,
                ctime=now,
                stime=None,
                etime=None,
                message="Restart requested",
            )
        return NebulaResponse(200, "Job restarted", data=result)

    if "abort" in kwargs:
        jobs = [int(i) for i in kwargs["abort"]]
        db.query(
            """
            UPDATE jobs SET
                status=4,
                end_time=%s,
                message='Aborted'
            WHERE id IN %s
            RETURNING id
            """,
            [now, tuple(jobs)],
        )
        result = [r[0] for r in db.fetchall()]
        logging.info(f"Aborted jobs {result}")
        db.commit()
        for job_id in result:
            messaging.send(
                "job_progress",
                id=job_id,
                status=4,
                progress=0,
                etime=now,
                message="Aborted",  # TODO: smarter message
            )
        return NebulaResponse(200, "Job aborted", data=result)

    # TODO: fulltext
    try:
        id_asset = int(id_asset)
    except ValueError:
        id_asset = False

    cond = ""
    if id_asset:
        cond = f"AND j.id_asset = {id_asset}"
    elif kwargs.get("fulltext"):
        fulltext = kwargs["fulltext"]
        if ":" in fulltext:
            key, value = fulltext.split(":")
            key = key.strip()
            value = value.strip().lower().replace("'", "")
            cond += f" AND a.meta->>'{key}' ILIKE '{value}'"
        else:
            ft = slugify(fulltext, make_set=True)
            for word in ft:
                cond += (
                    " AND a.id IN "
                    f"(SELECT id FROM ft WHERE object_type=0 AND value LIKE '{word}%')"
                )
    elif view == "active":
        # Pending, in progress, restart
        cond = f"AND (j.status IN (0, 1, 5) OR j.end_time > {time.time() - 30})"
    elif view == "finished":
        # Completed, aborted, skipped
        cond = "AND j.status IN (2, 4, 6)"
    elif view == "failed":
        cond = "AND j.status IN (3)"

    data = []
    db.query(
        f"""
        SELECT
            j.id,
            j.id_asset,
            j.id_action,
            j.id_service,
            j.id_user,
            j.priority,
            j.retries,
            j.status,
            j.progress,
            j.message,
            j.creation_time,
            j.start_time,
            j.end_time,
            a.meta
        FROM jobs AS j, assets AS a
        WHERE a.id = j.id_asset {cond}
        ORDER BY
            end_time DESC NULLS FIRST,
            start_time DESC NULLS LAST,
            creation_time DESC
        LIMIT 100
        """
    )
    for (
        id,
        id_asset,
        id_action,
        id_service,
        id_user,
        priority,
        retries,
        status,
        progress,
        message,
        ctime,
        stime,
        etime,
        meta,
    ) in db.fetchall():
        row = {
            "id": id,
            "id_asset": id_asset,
            "id_action": id_action,
            "id_service": id_service,
            "id_user": id_user,
            "priority": priority,
            "retries": retries,
            "status": status,
            "progress": progress,
            "message": message,
            "ctime": format_time(ctime, never_placeholder="") if formatted else ctime,
            "stime": format_time(stime, never_placeholder="") if formatted else stime,
            "etime": format_time(etime, never_placeholder="") if formatted else etime,
        }
        asset = Asset(meta=meta)
        row["asset_title"] = asset["title"]
        row["action_title"] = config["actions"][id_action]["title"]
        if id_service:
            service = config["services"].get(
                id_service, {"title": "Unknown", "host": "Unknown"}
            )
            row["service_title"] = f"{service['title']}@{service['host']}"
        else:
            row["service_title"] = ""
        data.append(row)
    return NebulaResponse(200, data=data)
def send_to(
    id_asset,
    id_action,
    settings=None,
    id_user=None,
    priority=3,
    restart_existing=True,
    restart_running=False,
    db=False,
):
    db = db or DB()
    if not id_asset:
        return NebulaResponse(401, message="You must specify existing object")
    if settings is None:
        settings = {}
    db.query(
        """
        SELECT id FROM jobs
        WHERE id_asset=%s AND id_action=%s AND settings=%s
        """,
        [id_asset, id_action, json.dumps(settings)],
    )
    res = db.fetchall()
    if res:
        if restart_existing:
            conds = "0,5"
            if not restart_running:
                conds += ",1"
            db.query(
                f"""
                UPDATE jobs SET
                    id_user=%s,
                    id_service=NULL,
                    message='Restart requested',
                    status=5,
                    retries=0,
                    creation_time=%s,
                    start_time=NULL,
                    end_time=NULL
                WHERE id=%s AND status NOT IN ({conds})
                RETURNING id
                """,
                [id_user, time.time(), res[0][0]],
            )
            db.commit()
            if db.fetchall():
                messaging.send(
                    "job_progress",
                    id=res[0][0],
                    id_asset=id_asset,
                    id_action=id_action,
                    progress=0,
                )
                return NebulaResponse(201, message="Job restarted")
            return NebulaResponse(200, message="Job exists. Not restarting")
        else:
            return NebulaResponse(200, message="Job exists. Not restarting")

    #
    # Create a new job
    #

    db.query(
        """
        INSERT INTO jobs (
            id_asset,
            id_action,
            id_user,
            settings,
            priority,
            message,
            creation_time
        ) VALUES (
            %s, %s, %s, %s, %s, 'Pending', %s
        )
        RETURNING id
        """,
        [id_asset, id_action, id_user, json.dumps(settings), priority, time.time()],
    )
    try:
        id_job = db.fetchall()[0][0]
        db.commit()
    except Exception:
        log_traceback()
        return NebulaResponse(500, "Unable to create job")
    messaging.send(
        "job_progress",
        id=id_job,
        id_asset=id_asset,
        id_action=id_action,
        progress=0,
        message="Job created",
    )
    return NebulaResponse(201, message="Job created")
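
# Caller-side sketch for send_to() (hypothetical asset/action IDs and
# settings; not part of the original code). A 200 response means an
# equivalent job already exists; 201 means a job was created or restarted.
def send_to_example(db):
    response = send_to(
        id_asset=1000,
        id_action=2,
        settings={"profile": "proxy"},  # hypothetical action settings
        id_user=1,
        priority=4,
        restart_existing=True,
        db=db,
    )
    logging.info(response.message)
    return response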
def get_job(id_service, action_ids, db=False):
    assert isinstance(action_ids, list), "action_ids must be a list of integers"
    if not action_ids:
        return False
    db = db or DB()
    now = time.time()

    running_jobs_count = {}
    db.query(
        """
        SELECT id_action, COUNT(id)
        FROM jobs
        WHERE status=1
        GROUP BY id_action
        """
    )
    for id_action, cnt in db.fetchall():
        running_jobs_count[id_action] = cnt

    db.query(
        """
        SELECT
            id, id_action, id_asset, id_user,
            settings, priority, retries, status
        FROM jobs
        WHERE
            status IN (0, 3, 5)
            AND id_action IN %s
            AND id_service IS NULL
            AND retries < %s
        ORDER BY priority DESC, creation_time DESC
        """,
        [tuple(action_ids), MAX_RETRIES],
    )
    for (
        id_job,
        id_action,
        id_asset,
        id_user,
        settings,
        priority,
        retries,
        status,
    ) in db.fetchall():
        if id_action not in actions:
            logging.warning(f"Unable to get job. No such action ID {id_action}")
            continue
        action = actions[id_action]

        asset = Asset(id_asset, db=db)
        job = Job(id_job, db=db)
        job._asset = asset
        job._settings = settings
        job.priority = priority
        job.retries = retries
        job.id_user = id_user

        max_running_jobs = action.settings.attrib.get("max_jobs", 0)
        try:
            max_running_jobs = int(max_running_jobs)
        except ValueError:
            max_running_jobs = 0
        if max_running_jobs:
            running_jobs = running_jobs_count.get(id_action, 0)
            if running_jobs >= max_running_jobs:
                # Maximum allowed number of jobs already running. Skip.
                continue

        for pre in action.settings.findall("pre"):
            if pre.text:
                try:
                    exec(pre.text)
                except Exception:
                    log_traceback()
                    continue

        if status != 5 and action.should_skip(asset):
            logging.info(f"Skipping {job}")
            db.query(
                """
                UPDATE jobs SET
                    status=6,
                    message='Skipped',
                    start_time=%s,
                    end_time=%s
                WHERE id=%s
                """,
                [now, now, id_job],
            )
            db.commit()
            continue

        if action.should_start(asset):
            if job.take(id_service):
                return job
            logging.warning(f"Unable to take {job}")
            continue
        else:
            db.query("UPDATE jobs SET message='Waiting' WHERE id=%s", [id_job])
            messaging.send(
                "job_progress",
                id=id_job,
                id_asset=id_asset,
                id_action=id_action,
                status=status,
                progress=0,
                message="Waiting",
            )
            db.commit()
    return False
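
# Worker-loop sketch (hypothetical: run_action() stands in for whatever
# actually executes the job; not part of the original code). Shows how
# get_job(), done() and fail() above fit together on the service side.
def worker_loop_example(id_service, action_ids, run_action):
    while True:
        job = get_job(id_service, action_ids)
        if not job:
            time.sleep(2)  # nothing to do; back off before polling again
            continue
        try:
            run_action(job)  # expected to call job.set_progress() as it goes
            job.done("Completed")
        except Exception:
            log_traceback()
            job.fail("Job handler crashed")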
def api_set(**kwargs):
    object_type = kwargs.get("object_type", "asset")
    objects = kwargs.get("objects", [])
    data = kwargs.get("data", {})
    user = kwargs.get("user", anonymous)
    db = kwargs.get("db", DB())
    initiator = kwargs.get("initiator", None)

    if not user:
        return NebulaResponse(401)
    if not (data and objects):
        return NebulaResponse(200, "No object created or modified")

    object_type_class = {
        "asset": Asset,
        "item": Item,
        "bin": Bin,
        "event": Event,
        "user": User,
    }.get(object_type, None)
    if object_type_class is None:
        return NebulaResponse(400, f"Unsupported object type {object_type}")

    changed_objects = []
    affected_bins = []

    if "_password" in data:
        hpass = get_hash(data["_password"])
        del data["_password"]
        data["password"] = hpass

    for id_object in objects:
        obj = object_type_class(id_object, db=db)

        if object_type == "asset":
            id_folder = data.get("id_folder", False) or obj["id_folder"]
            if not user.has_right("asset_edit", id_folder):
                folder_title = config["folders"][id_folder]["title"]
                return NebulaResponse(
                    403, f"{user} is not allowed to edit {folder_title} folder"
                )
        elif object_type == "user":
            if obj.id:
                if not user.has_right("user_edit"):
                    return NebulaResponse(
                        403, f"{user} is not allowed to edit users data"
                    )
            else:
                if not user.has_right("user_create"):
                    return NebulaResponse(
                        403, f"{user} is not allowed to add new users"
                    )

        changed = False
        for key in data:
            value = data[key]
            old_value = obj[key]
            obj[key] = value
            if obj[key] != old_value:
                changed = True

        validator = get_validator(object_type, db=db)
        if changed and validator:
            logging.debug("Executing validation script")
            tt = repr(obj)
            try:
                obj = validator.validate(obj)
            except Exception:
                return NebulaResponse(
                    500, log_traceback("Unable to validate object changes.")
                )
            if not isinstance(obj, BaseObject):
                # TODO: use 409-conflict?
                return NebulaResponse(400, f"Unable to save {tt}:\n\n{obj}")

        if changed:
            obj.save(notify=False)
            changed_objects.append(obj.id)
            if object_type == "item" and obj["id_bin"] not in affected_bins:
                affected_bins.append(obj["id_bin"])

    if changed_objects:
        messaging.send(
            "objects_changed",
            objects=changed_objects,
            object_type=object_type,
            user=str(user),
            initiator=initiator,
        )
    if affected_bins:
        bin_refresh(affected_bins, db=db, initiator=initiator)
    return NebulaResponse(200, data=changed_objects)
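
# Example invocation sketch (hypothetical asset IDs and metadata; not part
# of the original code). Changed objects are saved with notify=False and a
# single objects_changed message is sent at the end, so clients refresh
# once per batch.
def api_set_example(db, user):
    return api_set(
        object_type="asset",
        objects=[1000, 1001],
        data={"title": "New title"},
        user=user,
        db=db,
    )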
def main(self):
    db = DB()
    db.query(
        """
        SELECT id, title, autostart, state, last_seen
        FROM services
        WHERE host=%s
        """,
        [config["host"]],
    )

    #
    # Start / stop services
    #

    for id, title, autostart, state, last_seen in db.fetchall():
        messaging.send(
            "service_state",
            id=id,
            state=state,
            autostart=autostart,
            last_seen=last_seen,
            last_seen_before=max(0, int(time.time() - last_seen)),
        )
        if state == ServiceState.STARTING:
            # Start the service
            if id not in self.services:
                self.start_service(id, title, db=db)
        elif state == ServiceState.KILL:
            # Kill the service
            if id in self.services:
                self.kill_service(self.services[id][0].pid)

    #
    # Real service state
    #

    for id_service in list(self.services.keys()):
        proc, title = self.services[id_service]
        if proc.poll() is None:
            continue
        del self.services[id_service]
        logging.warning(f"Service ID {id_service} ({title}) terminated")
        db.query("UPDATE services SET state=0 WHERE id = %s", [id_service])
        db.commit()

    #
    # Autostart
    #

    db.query(
        """
        SELECT id, title, state, autostart
        FROM services
        WHERE host=%s AND state=0 AND autostart=true
        """,
        [config["host"]],
    )
    for id, title, state, autostart in db.fetchall():
        if id not in self.services:
            logging.debug(f"AutoStarting service ID {id} ({title})")
            self.start_service(id, title)
def api_schedule(**kwargs):
    id_channel = kwargs.get("id_channel", 0)
    start_time = kwargs.get("start_time", 0)
    end_time = kwargs.get("end_time", 0)
    events = kwargs.get("events", [])  # Events to add/update
    delete = kwargs.get("delete", [])  # Event IDs to delete
    db = kwargs.get("db", DB())
    user = kwargs.get("user", anonymous)
    initiator = kwargs.get("initiator", None)

    try:
        id_channel = int(id_channel)
    except ValueError:
        return NebulaResponse(400, "id_channel must be an integer")
    try:
        start_time = int(start_time)
    except ValueError:
        return NebulaResponse(400, "start_time must be an integer")
    try:
        end_time = int(end_time)
    except ValueError:
        return NebulaResponse(400, "end_time must be an integer")

    if not id_channel or id_channel not in config["playout_channels"]:
        return NebulaResponse(400, f"Unknown playout channel ID {id_channel}")

    changed_event_ids = []

    #
    # Delete events
    #

    for id_event in delete:
        if not user.has_right("scheduler_edit", id_channel):
            return NebulaResponse(403, "You are not allowed to edit this channel")
        event = Event(id_event, db=db)
        if not event:
            logging.warning(f"Unable to delete non existent event ID {id_event}")
            continue
        try:
            event.bin.delete()
        except psycopg2.IntegrityError:
            return NebulaResponse(423, f"Unable to delete {event}. Already aired.")
        else:
            event.delete()
            changed_event_ids.append(event.id)

    #
    # Create / update events
    #

    for event_data in events:
        if not user.has_right("scheduler_edit", id_channel):
            return NebulaResponse(403, "You are not allowed to edit this channel")
        id_event = event_data.get("id", False)

        db.query(
            "SELECT meta FROM events WHERE id_channel=%s AND start=%s",
            [id_channel, event_data["start"]],
        )
        try:
            event_at_pos_meta = db.fetchall()[0][0]
            event_at_pos = Event(meta=event_at_pos_meta, db=db)
        except IndexError:
            event_at_pos = False

        if id_event:
            logging.debug(f"Updating event ID {id_event}")
            event = Event(id_event, db=db)
            if not event:
                logging.warning(f"No such event ID {id_event}")
                continue
            pbin = event.bin
        elif event_at_pos:
            event = event_at_pos
            pbin = event.bin
        else:
            logging.debug("Creating new event")
            event = Event(db=db)
            pbin = Bin(db=db)
            pbin.save()
            logging.debug("Saved", pbin)
            event["id_magic"] = pbin.id
            event["id_channel"] = id_channel

        id_asset = event_data.get("id_asset", False)
        if id_asset and id_asset != event["id_asset"]:
            asset = Asset(id_asset, db=db)
            if asset:
                logging.info(f"Replacing event primary asset with {asset}")
                pbin.delete_children()
                pbin.items = []

                item = Item(db=db)
                item["id_asset"] = asset.id
                item["position"] = 0
                item["id_bin"] = pbin.id
                item._asset = asset
                item.save()
                pbin.append(item)
                pbin.save()

                event["id_asset"] = asset.id
                for key in meta_types:
                    if meta_types[key]["ns"] != "m":
                        continue
                    if key in asset.meta:
                        event[key] = asset[key]

        for key in event_data:
            if key == "id_magic" and not event_data[key]:
                continue
            if key == "_items":
                for item_data in event_data["_items"]:
                    if not pbin.items:
                        start_pos = 0
                    else:
                        start_pos = pbin.items[-1]["position"]
                    try:
                        pos = int(item_data["position"])
                    except KeyError:
                        pos = 0
                    item = Item(meta=item_data, db=db)
                    item["position"] = start_pos + pos
                    item["id_bin"] = pbin.id
                    item.save()
                continue
            event[key] = event_data[key]

        event.save(notify=False)
        changed_event_ids.append(event.id)

    if changed_event_ids:
        messaging.send(
            "objects_changed",
            objects=changed_event_ids,
            object_type="event",
            initiator=initiator,
        )

    #
    # Return existing events
    #

    # TODO: ACL scheduler view
    result = []
    if start_time and end_time:
        logging.debug(
            f"Requested events of channel {id_channel} "
            f"from {format_time(start_time)} to {format_time(end_time)}"
        )
        db.query(
            """
            SELECT e.meta, o.meta
            FROM events AS e, bins AS o
            WHERE
                e.id_channel=%s
                AND e.start > %s
                AND e.start < %s
                AND e.id_magic = o.id
            ORDER BY start ASC
            """,
            [id_channel, start_time, end_time],
        )
        res = db.fetchall()
        db.query(
            """
            SELECT e.meta, o.meta
            FROM events AS e, bins AS o
            WHERE
                e.id_channel=%s
                AND start <= %s
                AND e.id_magic = o.id
            ORDER BY start DESC
            LIMIT 1
            """,
            [id_channel, start_time],
        )
        res = db.fetchall() + res
        for event_meta, alt_meta in res:
            ebin = Bin(meta=alt_meta, db=db)
            if "duration" in alt_meta:
                event_meta["duration"] = ebin.duration
            result.append(event_meta)
    return NebulaResponse(200, data=result)
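
# Request sketch (hypothetical channel ID, timestamps and asset ID; not part
# of the original code): create or update one event and get back the
# resulting schedule for the requested window.
def api_schedule_example(db, user):
    return api_schedule(
        id_channel=1,
        start_time=1600000000,
        end_time=1600086400,
        events=[
            {
                "start": 1600005000,
                "id_asset": 1000,  # becomes the event's primary asset
                "title": "Morning show",
            }
        ],
        user=user,
        db=db,
    )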