def on_main(self):
    db = DB()
    db.query(
        "SELECT id, meta FROM assets WHERE status=%s",
        [ObjectStatus.ONLINE],
    )
    for _, meta in db.fetchall():
        asset = Asset(meta=meta, db=db)
        self.proc(asset)
def get_scheduled_assets(id_channel, **kwargs):
    db = kwargs.get("db", DB())
    db.query(
        """
        SELECT a.meta, dist FROM (
            SELECT
                i.id_asset,
                MIN(ABS(e.start - extract(epoch from now()))) AS dist
            FROM events AS e, items AS i
            WHERE
                e.start > extract(epoch from now()) - 86400*7
                AND e.id_channel = %s
                AND i.id_bin = e.id_magic
                AND i.id_asset > 0
            GROUP BY i.id_asset
        ) i
        LEFT JOIN assets a ON a.id = i.id_asset
        ORDER BY dist ASC
        """,
        [id_channel],
    )
    for meta, dist in db.fetchall():
        yield Asset(meta=meta, db=db), dist < 86400
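# A hedged usage sketch (channel ID 1 is illustrative): the generator yields
# (Asset, bool) pairs, the flag marking assets scheduled within the next
# 24 hours.
for asset, is_near in get_scheduled_assets(1):
    if is_near:
        logging.info(f"{asset} is scheduled within 24 hours")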
def api_actions(**kwargs):
    objects = kwargs.get("objects") or kwargs.get("ids", [])
    db = kwargs.get("db", DB())
    user = kwargs.get("user", anonymous)

    if not user:
        return NebulaResponse(401, "You are not allowed to execute any actions")

    if not objects:
        return NebulaResponse(400, "No asset selected")

    result = []
    db.query("SELECT id, title, settings FROM actions ORDER BY id ASC")
    for id, title, settings in db.fetchall():
        allow = False  # noqa
        try:
            cond = xml(settings).find("allow_if").text
        except Exception:
            log_traceback()
            continue

        for id_asset in objects:
            asset = Asset(id_asset, db=db)  # noqa
            if not eval(cond):
                break
        else:
            if user.has_right("job_control", id):
                result.append((id, title))
    return NebulaResponse(200, data=result)
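# A hedged example call (`current_user` and asset ID 42 are invented): the
# response data holds (id, title) pairs of actions whose <allow_if>
# condition evaluates truthily for every selected asset.
response = api_actions(user=current_user, ids=[42])
if response.is_success:
    for id_action, title in response.data:
        print(id_action, title)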
def on_init(self):
    if not config["playout_channels"]:
        logging.error("No playout channel configured")
        self.shutdown(no_restart=True)
        return

    try:
        self.id_channel = int(self.settings.find("id_channel").text)
        self.channel_config = config["playout_channels"][self.id_channel]
    except Exception:
        logging.error("Invalid channel specified")
        self.shutdown(no_restart=True)
        return

    self.fps = float(self.channel_config.get("fps", 25.0))

    self.current_asset = Asset()
    self.current_event = Event()

    self.last_run = False
    self.last_info = 0
    self.current_live = False
    self.cued_live = False
    self.auto_event = 0

    self.status_key = f"playout_status/{self.id_channel}"

    self.plugins = PlayoutPlugins(self)
    self.controller = create_controller(self)
    if not self.controller:
        logging.error("Invalid controller specified")
        self.shutdown(no_restart=True)
        return

    port = int(self.channel_config.get("controller_port", 42100))
    logging.info(f"Using port {port} for the HTTP interface.")

    self.server = HTTPServer(("", port), PlayoutRequestHandler)
    self.server.service = self
    self.server.methods = {
        "take": self.take,
        "cue": self.cue,
        "cue_forward": self.cue_forward,
        "cue_backward": self.cue_backward,
        "freeze": self.freeze,
        "set": self.set,
        "retake": self.retake,
        "abort": self.abort,
        "stat": self.stat,
        "plugin_list": self.plugin_list,
        "plugin_exec": self.plugin_exec,
        "recover": self.channel_recover,
    }
    self.server_thread = threading.Thread(
        target=self.server.serve_forever, args=(), daemon=True
    )
    self.server_thread.start()
    self.plugins.load()
    self.on_progress()
def _proc(self, id_asset, db):
    asset = Asset(id_asset, db=db)
    for id_action in self.conditions:
        if "broker/started/{}".format(id_action) in asset.meta:
            continue
        cond_title, cond, priority = self.conditions[id_action]
        if eval(cond):
            logging.info("{} matches action condition {}".format(asset, cond_title))
            res, msg = send_to(
                asset.id,
                id_action,
                settings={},
                id_user=0,
                priority=priority,
                restart_existing=False,
                db=db,
            )
            if success(res):
                logging.info(msg)
            else:
                logging.error(msg)
            asset["broker/started/{}".format(id_action)] = 1
            asset.save()
def meta_exists(key, value, db=False):
    if not db:
        db = DB()
    db.query(
        "SELECT id, meta FROM assets WHERE meta->>%s = %s",
        [str(key), str(value)],
    )
    for _, meta in db.fetchall():
        return Asset(meta=meta, db=db)
    return False
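# Usage sketch (the metadata key and value are invented examples): returns
# the first Asset whose JSON metadata matches, or False when none does.
asset = meta_exists("id/vendor", "ABC123")
if asset:
    print(asset.id)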
def on_main(self):
    if not self.import_dir:
        return
    if not os.path.isdir(self.import_dir):
        logging.error("Import directory does not exist. Shutting down.")
        self.import_dir = False
        self.shutdown(no_restart=True)
        return

    db = DB()
    for import_file in get_files(self.import_dir, exts=self.exts):
        idec = import_file.base_name
        try:
            with import_file.open("rb") as f:
                f.seek(0, 2)
                fsize = f.tell()
        except IOError:
            logging.debug(f"Import file {import_file.base_name} is busy.")
            continue

        # Wait until the file size is stable between two passes
        if not (
            import_file.path in self.filesizes
            and self.filesizes[import_file.path] == fsize
        ):
            self.filesizes[import_file.path] = fsize
            logging.debug(f"New file '{import_file.base_name}' detected")
            continue

        db.query(
            "SELECT meta FROM assets WHERE meta->>%s = %s",
            [self.identifier, idec],
        )
        for (meta,) in db.fetchall():
            asset = Asset(meta=meta, db=db)
            if not (asset["id_storage"] and asset["path"]):
                mk_error(import_file, "This file has no target path.")
                continue
            if self.versioning and os.path.exists(asset.file_path):
                version_backup(asset)
            do_import(self, import_file, asset)
            break
        else:
            mk_error(import_file, "This file is not expected.")

    # Remove stale error files whose source file is gone
    for fname in os.listdir(self.import_dir):
        if not fname.endswith(".error.txt"):
            continue
        idec = fname.replace(".error.txt", "")
        if idec not in [
            os.path.splitext(f)[0] for f in os.listdir(self.import_dir)
        ]:
            os.remove(os.path.join(self.import_dir, fname))
def j(*args):
    db = DB()
    db.query(
        """
        SELECT
            j.id, j.id_action, j.settings, j.priority, j.retries, j.status,
            j.progress, j.message, j.creation_time, j.start_time, j.end_time,
            a.meta
        FROM jobs AS j, assets AS a
        WHERE a.id = j.id_asset AND j.status IN (0, 1, 5)
        ORDER BY id DESC LIMIT 50
        """
    )
    for (
        id,
        id_action,
        settings,
        priority,
        retries,
        status,
        progress,
        message,
        creation_time,
        start_time,
        end_time,
        meta,
    ) in db.fetchall():
        asset = Asset(meta=meta)
        line = "{:<30}".format(str(asset))
        line += "{} {:.02f}%\n".format(status, progress)
        try:
            sys.stdout.write(line)
            sys.stdout.flush()
        except IOError:
            pass
def build(self, *args, **kwargs):
    mode = "active"
    if len(args) > 1:
        if args[1] in ["finished", "failed"]:
            mode = args[1]

    id_asset = kwargs.get("id_asset", 0)
    id_action = kwargs.get("id_action", 0)
    try:
        id_asset = int(id_asset)
    except ValueError:
        id_asset = 0
    try:
        id_action = int(id_action)
    except ValueError:
        id_action = 0
    query = kwargs.get("q", "")

    if cherrypy.request.method == "POST":
        if id_asset and id_action:
            # TODO: how to select restart_existing/running?
            response = api_send(
                ids=[id_asset], id_action=id_action, user=self["user"]
            )
            if response.is_error:
                self.context.message(response.message, level="error")
            else:
                self.context.message(response.message)
            # Do not use the filter: show all active jobs to see the queue
            id_asset = id_action = 0

    if id_asset:
        db = DB()
        asset = Asset(id_asset, db=db)
        actions = api_actions(user=self["user"], db=db, ids=[id_asset])
    else:
        actions = NebulaResponse(404)
        asset = False

    self["name"] = "jobs"
    self["js"] = ["/static/js/jobs.js"]
    self["title"] = mode.capitalize() + " jobs"
    self["mode"] = mode
    self["id_asset"] = id_asset
    self["asset"] = asset
    self["actions"] = actions.data if actions.is_success else []
    self["id_action"] = id_action
    self["query"] = query
def asset_by_path(id_storage, path, db=False):
    id_storage = str(id_storage)
    path = path.replace("\\", "/")
    if not db:
        db = DB()
    db.query(
        """
        SELECT id, meta FROM assets
        WHERE media_type = %s
            AND meta->>'id_storage' = %s
            AND meta->>'path' = %s
        """,
        [MediaType.FILE, id_storage, path],
    )
    for id, meta in db.fetchall():
        return Asset(meta=meta, db=db)
    return False
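# Usage sketch (storage ID and path are illustrative): look up the asset
# backing a file on storage 1; returns False when the path is unknown.
asset = asset_by_path(1, "media/example.mov")
if asset:
    print(asset["title"])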
def load(self):
    self.db.query(
        """
        SELECT
            id_action, id_asset, id_service, id_user, settings,
            priority, retries, status, progress, message
        FROM jobs WHERE id=%s
        """,
        [self.id],
    )
    for (
        id_action,
        id_asset,
        id_service,
        id_user,
        settings,
        priority,
        retries,
        status,
        progress,
        message,
    ) in self.db.fetchall():
        self.id_service = id_service
        self.id_user = id_user
        self.priority = priority
        self.retries = retries
        self.status = status
        self.progress = progress
        self.message = message
        self._settings = settings
        self._asset = Asset(id_asset, db=self.db)
        self._action = actions[id_action]
        return
    logging.error(f"No such {self}")
def on_change(self):
    if not self.controller.current_item:
        return
    item = self.controller.current_item
    db = DB()

    self.current_asset = item.asset or Asset()
    self.current_event = item.event or Event()

    logging.info(f"Advanced to {item}")

    if self.last_run:
        db.query(
            "UPDATE asrun SET stop = %s WHERE id = %s",
            [int(time.time()), self.last_run],
        )
        db.commit()

    if self.current_item:
        db.query(
            """
            INSERT INTO asrun (id_channel, id_item, start)
            VALUES (%s, %s, %s)
            """,
            [self.id_channel, item.id, time.time()],
        )
        self.last_run = db.lastid()
        db.commit()
    else:
        self.last_run = False

    for plugin in self.plugins:
        try:
            plugin.on_change()
        except Exception:
            log_traceback("Plugin on-change failed")
def _proc(self, id_asset, db):
    asset = Asset(id_asset, db=db)
    for analyzer in self.analyzers:
        qinfo = asset["qc/analyses"] or {}
        if isinstance(qinfo, str):
            qinfo = json.loads(qinfo)

        if analyzer.proc_name in qinfo and (
            qinfo[analyzer.proc_name] == -1
            or qinfo[analyzer.proc_name] >= analyzer.version
        ):
            continue

        if eval(analyzer.condition):
            logging.info("Analyzing {} using '{}'".format(asset, analyzer.proc_name))
            a = analyzer(asset)

            # Reload the asset (it may have been changed by someone
            # during the analysis)
            del asset
            asset = Asset(id_asset, db=db)

            result = -1 if not a.status else analyzer.version
            qinfo = asset["qc/analyses"] or {}
            if isinstance(qinfo, str):
                qinfo = json.loads(qinfo)
            qinfo[analyzer.proc_name] = result
            asset["qc/analyses"] = qinfo

            # Save the analysis result
            for key in a.result:
                value = a.result[key]
                if value:
                    logging.debug("Set {} {} to {}".format(asset, key, value))
                    asset[key] = value
            asset.save()
        self.heartbeat()
def api_jobs(**kwargs):
    formatted = kwargs.get("formatted", False)  # load titles etc.
    user = kwargs.get("user", anonymous)
    # query = kwargs.get("query", "")
    id_asset = kwargs.get("id_asset", False)
    view = kwargs.get("view", "active")
    db = kwargs.get("db", DB())
    now = time.time()
    id_user = user.id or None

    if not user:
        return NebulaResponse(401, "You are not logged in")

    for k in ["restart", "abort"]:
        if k in kwargs and not user.has_right("job_control", anyval=True):
            return NebulaResponse(403, "You are not authorized to control this job")

    if "restart" in kwargs:
        jobs = [int(i) for i in kwargs["restart"]]
        db.query(
            """
            UPDATE jobs SET
                id_user=%s,
                status=5,
                retries=0,
                creation_time=%s,
                start_time=NULL,
                end_time=NULL,
                id_service=NULL,
                message='Restart requested'
            WHERE id IN %s
            RETURNING id
            """,
            [id_user, now, tuple(jobs)],
        )
        result = [r[0] for r in db.fetchall()]
        db.commit()
        logging.info("Restarted jobs {}".format(result))
        for job_id in result:
            messaging.send(
                "job_progress",
                id=job_id,
                status=5,
                progress=0,
                ctime=now,
                stime=None,
                etime=None,
                message="Restart requested",
            )
        return NebulaResponse(200, "Job restarted", data=result)

    if "abort" in kwargs:
        jobs = [int(i) for i in kwargs["abort"]]
        db.query(
            """
            UPDATE jobs SET
                status=4,
                end_time=%s,
                message='Aborted'
            WHERE id IN %s
            RETURNING id
            """,
            [now, tuple(jobs)],
        )
        result = [r[0] for r in db.fetchall()]
        logging.info("Aborted jobs {}".format(result))
        db.commit()
        for job_id in result:
            messaging.send(
                "job_progress",
                id=job_id,
                status=4,
                progress=0,
                etime=now,
                message="Aborted",  # TODO: smarter message
            )
        return NebulaResponse(200, "Job aborted", data=result)

    # TODO: fulltext
    try:
        id_asset = int(id_asset)
    except ValueError:
        id_asset = False

    cond = ""
    if id_asset:
        cond = "AND j.id_asset = {}".format(id_asset)
    elif kwargs.get("fulltext"):
        fulltext = kwargs["fulltext"]
        if ":" in fulltext:
            key, value = fulltext.split(":")
            key = key.strip()
            value = value.strip().lower().replace("'", "")
            cond += " AND a.meta->>'{}' ILIKE '{}'".format(key, value)
        else:
            ft = slugify(fulltext, make_set=True)
            for word in ft:
                cond += (
                    "AND a.id IN (SELECT id FROM ft "
                    "WHERE object_type=0 AND value LIKE '{}%')".format(word)
                )
    elif view == "active":
        # Pending, in progress, restart
        cond = "AND (j.status IN (0, 1, 5) OR j.end_time > {})".format(
            time.time() - 30
        )
    elif view == "finished":
        # Completed, aborted, skipped
        cond = "AND j.status IN (2, 4, 6)"
    elif view == "failed":
        cond = "AND j.status IN (3)"

    data = []
    db.query(
        """
        SELECT
            j.id, j.id_asset, j.id_action, j.id_service, j.id_user,
            j.priority, j.retries, j.status, j.progress, j.message,
            j.creation_time, j.start_time, j.end_time, a.meta
        FROM jobs AS j, assets AS a
        WHERE a.id = j.id_asset {}
        ORDER BY
            end_time DESC NULLS FIRST,
            start_time DESC NULLS LAST,
            creation_time DESC
        LIMIT 100
        """.format(cond)
    )
    for (
        id,
        id_asset,
        id_action,
        id_service,
        id_user,
        priority,
        retries,
        status,
        progress,
        message,
        ctime,
        stime,
        etime,
        meta,
    ) in db.fetchall():
        row = {
            "id": id,
            "id_asset": id_asset,
            "id_action": id_action,
            "id_service": id_service,
            "id_user": id_user,
            "priority": priority,
            "retries": retries,
            "status": status,
            "progress": progress,
            "message": message,
            "ctime": format_time(ctime, never_placeholder="") if formatted else ctime,
            "stime": format_time(stime, never_placeholder="") if formatted else stime,
            "etime": format_time(etime, never_placeholder="") if formatted else etime,
        }
        asset = Asset(meta=meta)
        row["asset_title"] = asset["title"]
        row["action_title"] = config["actions"][id_action]["title"]
        if id_service:
            service = config["services"].get(
                id_service, {"title": "Unknown", "host": "Unknown"}
            )
            row["service_title"] = f"{service['title']}@{service['host']}"
        else:
            row["service_title"] = ""
        data.append(row)
    return NebulaResponse(200, data=data)
def finalize(self):
    new = None
    # Reload the asset (it may have changed during encoding)
    asset = Asset(self.asset.id)

    if self.task.find("target").text == "new":
        id_storage = self.id_storage
        r = asset_by_path(id_storage, self.target_rel_path)
        if r:
            new = r  # asset_by_path returns an Asset
            logging.info("Updating asset {!r}".format(new))
            keys = new.meta.keys()
            for key in keys:
                if key in meta_types and meta_types[key].namespace in ["qc", "fmt"]:
                    new[key] = ""
        else:
            logging.info("Creating new asset for {!r} conversion.".format(asset))
            new = Asset()
            new["media_type"] = FILE
            new["content_type"] = VIDEO
            new["version_of"] = asset.id
            new["id_storage"] = id_storage
            new["path"] = self.target_rel_path
            new["origin"] = "Video conversion"
            new["id_folder"] = asset["id_folder"]
            for key in asset.meta:
                if key in meta_types and meta_types[key].namespace in ["AIEB", "m"]:
                    new[key] = asset[key]
        new["status"] = CREATING

    for intra in self.task.findall("intra"):
        exec(intra.text)

    try:
        os.rename(self.temp_file_path, self.target_file_path)
    except OSError:
        return "Unable to move output file to target destination"

    if new is not None:
        new.save()

    for post in self.task.findall("post"):
        exec(post.text)

    if new is not None:
        new.save()
    asset.save()
def get_job(id_service, action_ids, db=False):
    assert type(action_ids) == list, "action_ids must be a list of integers"
    if not action_ids:
        return False
    db = db or DB()
    now = time.time()

    running_jobs_count = {}
    db.query(
        """
        SELECT id_action, COUNT(id)
        FROM jobs
        WHERE status=1
        GROUP BY id_action
        """
    )
    for id_action, cnt in db.fetchall():
        running_jobs_count[id_action] = cnt

    q = """
        SELECT
            id, id_action, id_asset, id_user,
            settings, priority, retries, status
        FROM jobs
        WHERE
            status IN (0, 3, 5)
            AND id_action IN %s
            AND id_service IS NULL
            AND retries < %s
        ORDER BY priority DESC, creation_time DESC
        """
    db.query(q, [tuple(action_ids), MAX_RETRIES])
    for (
        id_job,
        id_action,
        id_asset,
        id_user,
        settings,
        priority,
        retries,
        status,
    ) in db.fetchall():
        asset = Asset(id_asset, db=db)
        action = actions[id_action]
        job = Job(id_job, db=db)
        job._asset = asset
        job._settings = settings
        job.priority = priority
        job.retries = retries
        job.id_user = id_user

        max_running_jobs = action.settings.attrib.get("max_jobs", 0)
        try:
            max_running_jobs = int(max_running_jobs)
        except ValueError:
            max_running_jobs = 0
        if max_running_jobs:
            running_jobs = running_jobs_count.get(id_action, 0)
            if running_jobs >= max_running_jobs:
                continue  # Maximum allowed jobs already running. Skip.

        for pre in action.settings.findall("pre"):
            if pre.text:
                try:
                    exec(pre.text)
                except Exception:
                    log_traceback()
                    continue

        if not action:
            logging.warning(f"Unable to get job. No such action ID {id_action}")
            continue

        if status != 5 and action.should_skip(asset):
            logging.info(f"Skipping {job}")
            db.query(
                """
                UPDATE jobs SET
                    status=6,
                    message='Skipped',
                    start_time=%s,
                    end_time=%s
                WHERE id=%s
                """,
                [now, now, id_job],
            )
            db.commit()
            continue

        if action.should_start(asset):
            if job.take(id_service):
                return job
            else:
                logging.warning(f"Unable to take {job}")
                continue
        else:
            db.query("UPDATE jobs SET message='Waiting' WHERE id=%s", [id_job])
            messaging.send(
                "job_progress",
                id=id_job,
                id_asset=id_asset,
                id_action=id_action,
                status=status,
                progress=0,
                message="Waiting",
            )
        db.commit()
    return False
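# A minimal polling sketch (service ID and action IDs are examples): a worker
# asks for the next runnable job matching the actions it implements;
# get_job returns False when nothing is ready.
job = get_job(id_service=3, action_ids=[1, 2])
if job:
    logging.info(f"Got {job}")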
def build(self, *args, **kwargs): self["name"] = "detail" self["title"] = "Asset detail" self["js"] = ["/static/js/vendor/resumable.js", "/static/js/detail.js"] try: id_asset = int(args[-1].split("-")[0]) except (IndexError, ValueError): id_asset = 0 db = DB() if not id_asset: if kwargs.get("new_asset", False): asset = Asset(db=db) asset["id_folder"] = min(config["folders"].keys()) self["new_asset"] = True else: self["asset"] = False raise cherrypy.HTTPError(status=404, message="Asset not found") else: asset = Asset(id_asset, db=db) logging.debug(asset) if not asset.id: raise cherrypy.HTTPError(status=404, message="Asset not found") id_folder = int(kwargs.get("folder_change", asset["id_folder"])) if id_folder != asset["id_folder"]: asset["id_folder"] = id_folder if cherrypy.request.method == "POST": error_message = validate_data(self.context, asset, kwargs) if error_message: self.context.message(error_message, level="error") else: response = api_set( user=self["user"], objects=[asset.id], data={k: asset[k] for k in kwargs}, db=db, ) if response.is_success: self.context.message("Asset saved") else: self.context.message(response.message, level="error") asset = Asset(id_asset, db=db) # reload after update try: fconfig = config["folders"][id_folder] except Exception: self.context.message("Unknown folder ID", level="error") fconfig = config["folders"][min(config["folders"].keys())] # Get available actions actions = api_actions(user=self["user"], db=db, ids=[id_asset]) self["asset"] = asset self["title"] = asset["title"] if asset.id else "New asset" self["id_folder"] = id_folder self["main_keys"] = fconfig["meta_set"] self["extended_keys"] = sorted( [ k for k in asset.meta if k in asset.meta_types and asset.meta_types[k]["ns"] not in ["f", "q"] and k not in [mlist[0] for mlist in fconfig["meta_set"]] ], key=lambda k: asset.meta_types[k]["ns"], ) self["technical_keys"] = sorted( [k for k in asset.meta if asset.meta_types[k]["ns"] in ["f", "q"]]) self["actions"] = actions.data if actions.is_success else []
def build(self, *args, **kwargs):
    # Query params
    query = kwargs.get("q", "")
    order_key = kwargs.get("o", "id")
    order_trend = kwargs.get("ot", "desc")
    if order_trend != "asc":
        order_trend = "desc"

    try:
        id_view = int(kwargs["v"])
        view = config["views"][id_view]
    except (KeyError, ValueError):
        id_view = min(config["views"])
        view = config["views"][id_view]

    try:
        current_page = int(kwargs["p"])
    except (KeyError, ValueError, TypeError):
        current_page = 1

    # Build view
    assets = api_get(
        user=self["user"],
        id_view=id_view,
        fulltext=query or False,
        count=False,
        order="{} {}".format(order_key, order_trend),
        limit=RECORDS_PER_PAGE + 1,
        offset=(current_page - 1) * RECORDS_PER_PAGE,
    )

    if len(assets["data"]) > RECORDS_PER_PAGE:
        page_count = current_page + 1
    elif len(assets["data"]) == 0:
        page_count = max(1, current_page - 1)
    else:
        page_count = current_page
    if current_page > page_count:
        current_page = page_count

    def get_params(**override):
        data = copy.copy(kwargs)
        for key in override:
            if not override[key] and key in data:
                del data[key]
            else:
                data[key] = override[key]
        return "&".join(["{}={}".format(k, data[k]) for k in data])

    self["show_jobs"] = config.get("hub_browser_jobs_column", True)
    self["name"] = "assets"
    self["title"] = config["views"][id_view]["title"]
    self["js"] = ["/static/js/assets.js"]
    self["id_view"] = id_view
    self["query"] = query
    self["current_page"] = current_page
    self["page_count"] = page_count
    self["columns"] = view["columns"]
    self["assets"] = [Asset(meta=meta) for meta in assets["data"]]
    self["order_key"] = order_key
    self["order_trend"] = order_trend
    self["get_params"] = get_params
    self["view_list"] = sorted(
        list(config["views"].keys()),
        key=lambda x: config["views"][x]["position"],
    )
def _proc(self, id_asset, db):
    asset = Asset(id_asset, db=db)
    fname = asset.file_path
    if asset["id_storage"] not in self.mounted_storages:
        return

    if not os.path.exists(fname):
        if asset["status"] in [ONLINE, RESET, CREATING]:
            logging.warning("Turning offline {} (file does not exist)".format(asset))
            asset["status"] = OFFLINE
            asset.save()
        return

    try:
        fmtime = int(os.path.getmtime(fname))
        fsize = int(os.path.getsize(fname))
    except OSError:
        log_traceback("Unable to get file attrs {}".format(asset))
        return

    if fsize == 0:
        if asset["status"] != OFFLINE:
            logging.warning("Turning offline {} (empty file)".format(asset))
            asset["status"] = OFFLINE
            asset.save()
        return

    if fmtime != asset["file/mtime"] or asset["status"] == RESET:
        try:
            f = open(fname, "rb")
        except IOError:
            logging.debug("{} creation in progress.".format(asset))
            return
        else:
            f.seek(0, 2)
            fsize = f.tell()
            f.close()

        if asset["status"] == RESET:
            asset.load_sidecar_metadata()

        # The file size must have changed for metadata to update
        # automatically. It sucks, but an mtime-only condition
        # doesn't always work.
        if fsize == asset["file/size"] and asset["status"] != RESET:
            logging.debug("{} file mtime has been changed. Updating.".format(asset))
            asset["file/mtime"] = fmtime
            asset.save(set_mtime=False, notify=False)
        else:
            logging.info("Updating {}".format(asset))
            keys = list(asset.meta.keys())
            for key in keys:
                if meta_types[key].namespace in ("fmt", "qc"):
                    del asset.meta[key]
            asset["file/size"] = fsize
            asset["file/mtime"] = fmtime

            #########################################
            ## PROBE
            for probe in probes:
                if probe.accepts(asset):
                    logging.debug("Probing {} using {}".format(asset, probe))
                    asset = probe.work(asset)
            ## PROBE
            #########################################

            if asset["status"] == RESET:
                asset["status"] = ONLINE
                logging.info("{} reset completed".format(asset))
            else:
                asset["status"] = CREATING
            asset.save()

    if asset["status"] == CREATING and asset["mtime"] + 15 > time.time():
        logging.debug("Waiting for {} completion assurance.".format(asset))
        asset.save(set_mtime=False, notify=False)
    elif asset["status"] in (CREATING, OFFLINE):
        logging.goodnews("Turning online {}".format(asset))
        asset["status"] = ONLINE
        asset.save()
        db = DB()
        db.query(
            """
            UPDATE nx_jobs SET
                progress=-1,
                id_service=0,
                ctime=%s,
                stime=0,
                etime=0,
                id_user=0,
                message='Restarting after source update'
            WHERE id_object=%s AND id_action > 0 AND progress IN (-2, -3)
            """,
            [time.time(), id_asset],
        )
        db.commit()
def on_main(self):
    job = get_job(self.id_service, list(self.allowed_actions.keys()))
    if not job:
        return
    id_asset = job.id_object
    asset = Asset(id_asset)

    try:
        vars = json.loads(job.settings)
    except Exception:
        vars = {}

    action_config = self.allowed_actions[job.id_action]
    tasks = action_config.findall("task")
    job_start_time = last_info_time = time.time()

    for id_task, task in enumerate(tasks):
        task_start_time = time.time()

        try:
            using = task.attrib["using"]
        except KeyError:
            continue
        if using not in encoders:
            continue

        logging.debug("Configuring task {} of {}".format(id_task + 1, len(tasks)))
        encoder = encoders[using](asset, task, vars)
        err = encoder.configure()
        if err:
            job.fail(err)
            return

        logging.info("Starting task {} of {}".format(id_task + 1, len(tasks)))
        encoder.run()
        old_progress = 0
        while encoder.is_working():
            now = time.time()
            progress, msg = encoder.get_progress()
            if progress < 0:
                break
            if progress != old_progress:
                job.set_progress(progress * 100, msg)
                old_progress = progress
            if now - last_info_time > FORCE_INFO_EVERY:
                logging.debug(
                    "{}: {}, {:.2f}% completed".format(asset, msg, progress * 100)
                )
                last_info_time = now
            time.sleep(0.0001)

        progress, msg = encoder.get_progress()
        if progress == FAILED:
            job.fail(msg)
            return

        logging.debug("Finalizing task {} of {}".format(id_task + 1, len(tasks)))
        err = encoder.finalize()
        if err:
            job.fail(err)
            return
        vars = encoder.vars

    job.done()
def api_order(**kwargs):
    """Changes the order of items in a bin/rundown,
    creates new items from assets.
    """
    id_channel = kwargs.get("id_channel", 0)
    id_bin = kwargs.get("id_bin", False)
    order = kwargs.get("order", [])
    db = kwargs.get("db", DB())
    user = kwargs.get("user", anonymous)
    initiator = kwargs.get("initiator", None)

    if not user:
        return NebulaResponse(401)

    if id_channel not in config["playout_channels"]:
        return NebulaResponse(400, f"No such channel ID {id_channel}")

    playout_config = config["playout_channels"][id_channel]
    append_cond = playout_config.get("rundown_accepts", "True")

    if id_channel and not user.has_right("rundown_edit", id_channel):
        return NebulaResponse(403, "You are not allowed to edit this rundown")

    if not (id_bin and order):
        return NebulaResponse(
            400, f'Bad "order" request<br>id_bin: {id_bin}<br>order: {order}'
        )

    logging.info(f"{user} executes bin_order method")

    affected_bins = [id_bin]
    pos = 1
    rlen = float(len(order))
    for i, obj in enumerate(order):
        object_type = obj["object_type"]
        id_object = obj["id_object"]
        meta = obj["meta"]

        if object_type == "item":
            if not id_object:
                item = Item(db=db)
                item["id_asset"] = obj.get("id_asset", 0)
                item.meta.update(meta)
            else:
                item = Item(id_object, db=db)
                if not item["id_bin"]:
                    logging.error(
                        f"Attempted asset data insertion ({object_type} "
                        f"ID {id_object} {meta}) to item. This should never happen"
                    )
                    continue
            if not item:
                logging.debug(f"Skipping {item}")
                continue
            if item["id_bin"] not in affected_bins:
                if item["id_bin"]:
                    affected_bins.append(item["id_bin"])

        elif object_type == "asset":
            asset = Asset(id_object, db=db)
            if not asset:
                logging.error(
                    f"Unable to append {object_type} ID {id_object}. "
                    "Asset does not exist"
                )
                continue
            try:
                can_append = eval(append_cond)
            except Exception:
                log_traceback(
                    f"Unable to evaluate rundown accept condition: {append_cond}"
                )
                continue
            if not can_append:
                logging.error(f"Unable to append {asset}. Does not match conditions.")
                continue
            item = Item(db=db)
            for key in meta:
                if key in ["id", "id_bin", "id_asset"]:
                    continue
                item[key] = meta[key]
            item["id_asset"] = asset.id
            item.meta.update(meta)

        else:
            logging.error(
                f"Unable to append {object_type} ID {id_object} {meta}. "
                "Unexpected object"
            )
            continue

        if not item or item["position"] != pos or item["id_bin"] != id_bin:
            item["position"] = pos
            item["id_bin"] = id_bin
            # bin_refresh called later should be enough to trigger rundown reload
            item.save(notify=False)
        pos += 1

    # Update bin duration
    bin_refresh(affected_bins, db=db, initiator=initiator)
    return NebulaResponse(200)
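# A hedged example payload (IDs are invented): reorder an existing item and
# append an asset into bin 12 of channel 1.
api_order(
    user=current_user,
    id_channel=1,
    id_bin=12,
    order=[
        {"object_type": "item", "id_object": 201, "meta": {}},
        {"object_type": "asset", "id_object": 42, "meta": {}},
    ],
)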
def on_main(self):
    db = DB()
    self.existing = []

    start_time = time.time()
    db.query("SELECT meta FROM assets WHERE media_type=1 AND status=1")
    for (meta,) in db.fetchall():
        asset = Asset(meta=meta, db=db)
        file_path = asset.file_path
        self.existing.append(file_path)
    duration = time.time() - start_time
    if duration > 5 or config.get("debug_mode", False):
        logging.debug(f"Online assets loaded in {s2time(duration)}")

    start_time = time.time()
    for wf_settings in self.settings.findall("folder"):
        id_storage = int(wf_settings.attrib["id_storage"])
        rel_wf_path = wf_settings.attrib["path"]
        quarantine_time = int(wf_settings.attrib.get("quarantine_time", "10"))
        id_folder = int(wf_settings.attrib.get("id_folder", 12))

        storage_path = storages[id_storage].local_path
        watchfolder_path = os.path.join(storage_path, rel_wf_path)

        if not os.path.exists(watchfolder_path):
            logging.warning("Skipping non-existing watchfolder", watchfolder_path)
            continue

        i = 0
        for file_object in get_files(
            watchfolder_path,
            recursive=wf_settings.attrib.get("recursive", False),
            hidden=wf_settings.attrib.get("hidden", False),
            case_sensitive_exts=wf_settings.attrib.get("case_sensitive_exts", False),
        ):
            i += 1
            if i % 100 == 0 and config.get("debug_mode", False):
                logging.debug("{} files scanned".format(i))

            if not file_object.size:
                continue

            full_path = file_object.path
            if full_path in self.existing:
                continue

            now = time.time()
            asset_path = full_path.replace(storage_path, "", 1).lstrip("/")
            ext = os.path.splitext(asset_path)[1].lstrip(".").lower()
            if ext not in FileTypes.exts():
                continue

            asset = asset_by_path(id_storage, asset_path, db=db)
            if asset:
                self.existing.append(full_path)
                continue

            base_name = get_base_name(asset_path)

            if quarantine_time and now - file_object.mtime < quarantine_time:
                logging.debug(f"{base_name} is too young. Skipping")
                continue

            asset = Asset(db=db)
            asset["content_type"] = FileTypes.by_ext(ext)
            asset["media_type"] = MediaType.FILE
            asset["id_storage"] = id_storage
            asset["path"] = asset_path
            asset["ctime"] = now
            asset["mtime"] = now
            asset["status"] = ObjectStatus.CREATING
            asset["id_folder"] = id_folder
            asset["title"] = base_name

            asset.load_sidecar_metadata()

            failed = False
            for post_script in wf_settings.findall("post"):
                try:
                    exec(post_script.text)
                except Exception:
                    log_traceback(f"Error executing post-script on {asset}")
                    failed = True

            if not failed:
                asset.save(set_mtime=False)

    duration = time.time() - start_time
    if duration > 60 or config.get("debug_mode", False):
        logging.debug(f"Watchfolders scanned in {s2time(duration)}")
def get_rundown(id_channel, start_time=False, end_time=False, db=False):
    """Get a rundown."""
    db = db or DB()
    channel_config = config["playout_channels"][id_channel]

    if not start_time:
        # Default to today
        sh, sm = channel_config.get("day_start", [6, 0])
        rundown_date = time.strftime("%Y-%m-%d", time.localtime(time.time()))
        start_time = datestr2ts(rundown_date, hh=sh, mm=sm)
    end_time = end_time or start_time + (3600 * 24)

    item_runs = get_item_runs(id_channel, start_time, end_time, db=db)

    if channel_config.get("send_action", False):
        db.query(
            """
            SELECT id_asset FROM jobs
            WHERE id_action=%s AND status IN (0, 5)
            """,
            [channel_config["send_action"]],
        )
        pending_assets = [r[0] for r in db.fetchall()]
    else:
        pending_assets = []

    db.query(
        """
        SELECT e.id, e.meta, i.meta, a.meta
        FROM events AS e
        LEFT JOIN items AS i ON e.id_magic = i.id_bin
        LEFT JOIN assets AS a ON i.id_asset = a.id
        WHERE e.id_channel = %s AND e.start >= %s AND e.start < %s
        ORDER BY e.start ASC, i.position ASC, i.id ASC
        """,
        (id_channel, start_time, end_time),
    )

    current_event_id = None
    event = None
    ts_broadcast = ts_scheduled = 0
    pskey = "playout_status/{}".format(id_channel)

    # A sentinel row is appended so the last event is flushed too
    for id_event, emeta, imeta, ameta in db.fetchall() + [(-1, None, None, None)]:
        if id_event != current_event_id:
            if event:
                yield event
                if not event.items:
                    ts_broadcast = 0
            if id_event == -1:
                break

            event = Event(meta=emeta)
            event.items = []
            current_event_id = id_event

            rundown_event_asset = event.meta.get("id_asset", False)
            if event["run_mode"]:
                ts_broadcast = 0
            event.meta["rundown_scheduled"] = ts_scheduled = event["start"]
            event.meta["rundown_broadcast"] = ts_broadcast = (
                ts_broadcast or ts_scheduled
            )

        if imeta:
            item = Item(meta=imeta, db=db)
            if ameta:
                asset = Asset(meta=ameta, db=db)
                item._asset = asset
            else:
                asset = False

            as_start, as_stop = item_runs.get(item.id, (0, 0))
            airstatus = 0
            if as_start:
                ts_broadcast = as_start
                if as_stop:
                    airstatus = ObjectStatus.AIRED
                else:
                    airstatus = ObjectStatus.ONAIR

            item.meta["asset_mtime"] = asset["mtime"] if asset else 0
            item.meta["rundown_scheduled"] = ts_scheduled
            item.meta["rundown_broadcast"] = ts_broadcast
            item.meta["rundown_difference"] = ts_broadcast - ts_scheduled
            if rundown_event_asset:
                item.meta["rundown_event_asset"] = rundown_event_asset

            istatus = 0
            if not asset:
                istatus = ObjectStatus.ONLINE
            elif airstatus:
                istatus = airstatus
            elif asset["status"] == ObjectStatus.OFFLINE:
                istatus = ObjectStatus.OFFLINE
            elif pskey not in asset.meta:
                istatus = ObjectStatus.REMOTE
            elif asset[pskey]["status"] == ObjectStatus.OFFLINE:
                istatus = ObjectStatus.REMOTE
            elif asset[pskey]["status"] == ObjectStatus.ONLINE:
                istatus = ObjectStatus.ONLINE
            elif asset[pskey]["status"] == ObjectStatus.CORRUPTED:
                istatus = ObjectStatus.CORRUPTED
            else:
                istatus = ObjectStatus.UNKNOWN
            item.meta["status"] = istatus

            if asset and asset.id in pending_assets:
                item.meta["transfer_progress"] = -1

            if item["run_mode"] != RunMode.RUN_SKIP:
                ts_scheduled += item.duration
                ts_broadcast += item.duration

            event.items.append(item)
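# Consumption sketch (channel ID 1 is illustrative): get_rundown is a
# generator yielding Event objects with their .items list populated.
for event in get_rundown(1):
    print(event["title"])
    for item in event.items:
        print("   ", item["title"], item.meta["status"])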
def __init__(self, event, **kwargs):
    self.event = event
    self.asset = Asset(event["id_magic"])
    self.ingest_mode = "BACKUP"
def api_schedule(**kwargs):
    id_channel = kwargs.get("id_channel", 0)
    start_time = kwargs.get("start_time", 0)
    end_time = kwargs.get("end_time", 0)
    events = kwargs.get("events", [])  # Events to add/update
    delete = kwargs.get("delete", [])  # Event IDs to delete
    db = kwargs.get("db", DB())
    user = kwargs.get("user", anonymous)
    initiator = kwargs.get("initiator", None)

    try:
        id_channel = int(id_channel)
    except ValueError:
        return NebulaResponse(400, "id_channel must be an integer")
    try:
        start_time = int(start_time)
    except ValueError:
        return NebulaResponse(400, "start_time must be an integer")
    try:
        end_time = int(end_time)
    except ValueError:
        return NebulaResponse(400, "end_time must be an integer")

    if not id_channel or id_channel not in config["playout_channels"]:
        return NebulaResponse(400, f"Unknown playout channel ID {id_channel}")

    changed_event_ids = []

    #
    # Delete events
    #

    for id_event in delete:
        if not user.has_right("scheduler_edit", id_channel):
            return NebulaResponse(403, "You are not allowed to edit this channel")
        event = Event(id_event, db=db)
        if not event:
            logging.warning(f"Unable to delete non-existent event ID {id_event}")
            continue
        try:
            event.bin.delete()
        except psycopg2.IntegrityError:
            return NebulaResponse(423, f"Unable to delete {event}. Already aired.")
        else:
            event.delete()
        changed_event_ids.append(event.id)

    #
    # Create / update events
    #

    for event_data in events:
        if not user.has_right("scheduler_edit", id_channel):
            return NebulaResponse(403, "You are not allowed to edit this channel")
        id_event = event_data.get("id", False)

        db.query(
            "SELECT meta FROM events WHERE id_channel=%s AND start=%s",
            [id_channel, event_data["start"]],
        )
        try:
            event_at_pos_meta = db.fetchall()[0][0]
            event_at_pos = Event(meta=event_at_pos_meta, db=db)
        except IndexError:
            event_at_pos = False

        if id_event:
            logging.debug(f"Updating event ID {id_event}")
            event = Event(id_event, db=db)
            if not event:
                logging.warning(f"No such event ID {id_event}")
                continue
            pbin = event.bin
        elif event_at_pos:
            event = event_at_pos
            pbin = event.bin
        else:
            logging.debug("Creating new event")
            event = Event(db=db)
            pbin = Bin(db=db)
            pbin.save()
            logging.debug("Saved", pbin)
            event["id_magic"] = pbin.id
            event["id_channel"] = id_channel

        id_asset = event_data.get("id_asset", False)
        if id_asset and id_asset != event["id_asset"]:
            asset = Asset(id_asset, db=db)
            if asset:
                logging.info(f"Replacing event primary asset with {asset}")
                pbin.delete_children()
                pbin.items = []

                item = Item(db=db)
                item["id_asset"] = asset.id
                item["position"] = 0
                item["id_bin"] = pbin.id
                item._asset = asset
                item.save()

                pbin.append(item)
                pbin.save()

                event["id_asset"] = asset.id
                for key in meta_types:
                    if meta_types[key]["ns"] != "m":
                        continue
                    if key in asset.meta:
                        event[key] = asset[key]

        for key in event_data:
            if key == "id_magic" and not event_data[key]:
                continue
            if key == "_items":
                for item_data in event_data["_items"]:
                    if not pbin.items:
                        start_pos = 0
                    else:
                        start_pos = pbin.items[-1]["position"]
                    try:
                        pos = int(item_data["position"])
                    except KeyError:
                        pos = 0
                    item = Item(meta=item_data, db=db)
                    item["position"] = start_pos + pos
                    item["id_bin"] = pbin.id
                    item.save()
                continue
            event[key] = event_data[key]

        changed_event_ids.append(event.id)
        event.save(notify=False)

    if changed_event_ids:
        messaging.send(
            "objects_changed",
            objects=changed_event_ids,
            object_type="event",
            initiator=initiator,
        )

    #
    # Return existing events
    #

    # TODO: ACL scheduler view
    result = []
    if start_time and end_time:
        logging.debug(
            f"Requested events of channel {id_channel} "
            f"from {format_time(start_time)} to {format_time(end_time)}"
        )
        db.query(
            """
            SELECT e.meta, o.meta FROM events AS e, bins AS o
            WHERE
                e.id_channel=%s
                AND e.start > %s
                AND e.start < %s
                AND e.id_magic = o.id
            ORDER BY start ASC
            """,
            [id_channel, start_time, end_time],
        )
        res = db.fetchall()
        db.query(
            """
            SELECT e.meta, o.meta FROM events AS e, bins AS o
            WHERE
                e.id_channel=%s
                AND start <= %s
                AND e.id_magic = o.id
            ORDER BY start DESC
            LIMIT 1
            """,
            [id_channel, start_time],
        )
        res = db.fetchall() + res
        for event_meta, alt_meta in res:
            ebin = Bin(meta=alt_meta, db=db)
            if "duration" in alt_meta.keys():
                event_meta["duration"] = ebin.duration
            result.append(event_meta)

    return NebulaResponse(200, data=result)
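# A hedged example call (IDs and timestamps are invented): create one event
# on channel 1 and receive the day's events back in the response data.
api_schedule(
    user=current_user,
    id_channel=1,
    start_time=1600000000,
    end_time=1600086400,
    events=[{"start": 1600005000, "title": "Morning block", "id_asset": 42}],
)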