def load_filesystem(handler=False):
    """Map local (Windows drive) storages into the global config.

    Scans every available drive letter for a ".nebula_root" marker file.
    Each line of that file has the form "site_name:id_storage"; storages
    belonging to this site are switched to the "local" protocol with the
    drive root as their path.

    Args:
        handler: optional callable invoked with each drive letter
            (presumably for progress feedback — confirm with callers).
    """
    if PLATFORM != "windows":
        return
    for letter in get_available_drives():
        if handler:
            handler(letter)
        base_path = f"{letter}:\\"
        if not os.path.exists(base_path):
            continue
        storage_ident = os.path.join(base_path, ".nebula_root")
        if not os.path.exists(storage_ident):
            continue
        # Fix: read the marker file via a context manager so the file
        # handle is always closed (the original left it open).
        with open(storage_ident) as f:
            ident_lines = f.read().split("\n")
        for line in ident_lines:
            try:
                site, id_storage = line.split(":")
                id_storage = int(id_storage)
            except Exception:
                # Blank or malformed lines are expected; skip quietly.
                continue
            if site != config["site_name"]:
                continue
            if id_storage in config["storages"]:
                config["storages"][id_storage]["protocol"] = "local"
                config["storages"][id_storage]["path"] = base_path
                logging.debug(
                    f"Mapped storage {id_storage} to {base_path}")
def process(self):
    """Main message loop.

    Pops messages from self.queue and relays them; "log" messages are
    instead written to a daily log file (and optionally forwarded to
    Loki). When idle, emits a heartbeat after 3 seconds of silence.
    Runs forever; any exception is logged and the loop continues.
    """
    while True:
        try:
            if not self.queue:
                # Idle: avoid busy-waiting, and heartbeat at most once
                # per 3 seconds of inactivity.
                time.sleep(0.01)
                if time.time() - self.last_message > 3:
                    logging.debug("Heartbeat")
                    messaging.send("heartbeat")
                    self.last_message = time.time()
                continue
            message = self.queue.pop(0)
            self.last_message = time.time()
            if message.method != "log":
                self.relay_message(message)
            else:
                # Log messages are persisted to a per-day text file.
                if self.log_dir:
                    log = format_log_message(message)
                    if not log:
                        # NOTE(review): an unformattable log message also
                        # skips the Loki forward below — confirm intended.
                        continue
                    log_path = os.path.join(self.log_dir, time.strftime("%Y-%m-%d.txt"))
                    with open(log_path, "a") as f:
                        f.write(log)
                if self.loki:
                    self.loki(message)
        except Exception:
            # Keep the loop alive no matter what a single message does.
            log_traceback("Unhandled exception during message processing")
def send_message(self, method, **data):
    """Publish one message to the site's RabbitMQ queue.

    The body is a JSON array: [timestamp, site_name, host, method, data].
    On connection-level failures the connection/channel are reset so the
    next call reconnects.
    """
    if not (self.connection and self.channel):
        if not self.connect():
            # Back off briefly so callers in a loop do not hammer the broker.
            time.sleep(1)
            return
    message = json.dumps([
        time.time(),
        config["site_name"],
        config["host"],
        method,
        data
    ])
    try:
        self.channel.basic_publish(
            exchange='',
            routing_key=config["site_name"],
            body=message
        )
    except pika.exceptions.ChannelWrongStateError:
        logging.warning("RabbitMQ: nobody's listening", handlers=[])
        return
    except pika.exceptions.StreamLostError:
        logging.error("RabbitMQ connection lost", handlers=[])
        self.connection = self.channel = None
    except Exception:
        # Fix: was a bare "except:", which would also swallow
        # SystemExit/KeyboardInterrupt. Narrowed to Exception.
        log_traceback("RabbitMQ error", handlers=[])
        logging.debug("Unable to send message", message, handlers=[])
        self.connection = self.channel = None
def load_from_script(self, fname):
    """Load a worker plugin class from a Python script in the plugin dir.

    Args:
        fname: script file name; ".py" is appended when missing.

    Returns:
        True when the plugin was loaded and initialised, False when the
        script or its Plugin class is missing. Exits the process when
        the plugin path is unavailable.
    """
    if not fname.lower().endswith(".py"):
        fname += ".py"
    workerdir = get_plugin_path("worker")
    if not workerdir:
        # Fix: typo in the error message ("unmouted" -> "unmounted").
        logging.error("Plugin path is not set. Storage unmounted?")
        time.sleep(5)
        sys.exit(0)
    script_path = os.path.join(workerdir, fname)
    mod_name, file_ext = os.path.splitext(fname)
    if not os.path.exists(script_path):
        logging.error(f"Plugin {fname} not found")
        return False
    # NOTE(review): the "imp" module is deprecated and removed in
    # Python 3.12 — migrate to importlib.util when feasible.
    py_mod = imp.load_source(mod_name, script_path)
    if "Plugin" not in dir(py_mod):
        logging.error(f"No plugin class found in {fname}")
        return False
    logging.debug(f"Loading plugin {mod_name}")
    self.plugin = py_mod.Plugin(self)
    self.plugin.on_init()
    return True
def on_assets_update(self, *assets):
    """Propagate changed assets to every open view."""
    logging.debug(f"[MAIN WINDOW] Updating {len(assets)} assets in views")
    # The rundown module is optional — include it only when present.
    views = [self.browser, self.detail]
    if self.rundown:
        views.append(self.rundown)
    for view in views:
        view.refresh_assets(*assets)
def refresh_events(self, events):
    """Reload the calendar when any changed event is currently displayed.

    Args:
        events: iterable of changed event ids.
    """
    for id_event in events:
        if id_event in self.calendar.event_ids:
            # Fix: the two implicitly concatenated string literals were
            # missing a separating space ("changed.Reloading calendar").
            logging.debug(
                f"[SCHEDULER] Event id {id_event} has been changed. "
                "Reloading calendar")
            self.load()
            break
def parse_callback(self, event):
    """VLC callback invoked once media parsing has completed."""
    self.parsed = True
    # TODO: Provide a way to block on this and check
    # if media.get_parsed_status() == MediaParsedStatus.done
    logging.debug("parsed media", self.fname, event.u.new_status)
    # Detach the handler so the media object is not kept alive
    # by the event manager (prevents a memory leak).
    manager = self.media.event_manager()
    manager.event_detach(vlc.EventType.MediaParsedChanged)
def save(self, **kwargs):
    """Recompute this bin's duration from its items before saving."""
    total = sum(item.duration for item in self.items)
    # Write (and log) the new value only when it actually changed.
    if total != self.duration:
        logging.debug(f"New duration of {self} is {s2tc(total)}")
        self["duration"] = total
    super(Bin, self).save(**kwargs)
def on_main(self):
    """One watchfolder pass: import files that have stopped growing.

    A file is only imported after two consecutive passes report the same
    size (a changed size means it is still being copied). Matching is
    done against assets whose metadata field ``self.identifier`` equals
    the file's base name. Stale ``.error.txt`` sidecars are removed at
    the end.
    """
    if not self.import_dir:
        return
    if not os.path.isdir(self.import_dir):
        logging.error("Import directory does not exist. Shutting down.")
        self.import_path = False
        self.shutdown(no_restart=True)
        return
    db = DB()
    for import_file in get_files(self.import_dir, exts=self.exts):
        idec = import_file.base_name
        try:
            # Opening + seeking to the end doubles as a "file is not
            # locked by the copying process" check.
            with import_file.open("rb") as f:
                f.seek(0, 2)
                fsize = f.tell()
        except IOError:
            logging.debug(f"Import file {import_file.base_name} is busy.")
            continue
        if not (import_file.path in self.filesizes and self.filesizes[import_file.path] == fsize):
            # Size changed since the last pass (or file is new):
            # remember it and wait for the next pass.
            self.filesizes[import_file.path] = fsize
            logging.debug(f"New file '{import_file.base_name}' detected")
            continue
        db.query(
            """
            SELECT meta FROM assets
            WHERE meta->>%s = %s
            """,
            [self.identifier, idec],
        )
        for (meta, ) in db.fetchall():
            asset = Asset(meta=meta, db=db)
            if not (asset["id_storage"] and asset["path"]):
                mk_error(import_file, "This file has no target path.")
                continue
            if self.versioning and os.path.exists(asset.file_path):
                version_backup(asset)
            do_import(self, import_file, asset)
            break
        else:
            # for/else: no matching asset accepted the file.
            mk_error(import_file, "This file is not expected.")
    # Drop error sidecars whose source file has disappeared.
    for fname in os.listdir(self.import_dir):
        if not fname.endswith(".error.txt"):
            continue
        idec = fname.replace(".error.txt", "")
        if idec not in [
            os.path.splitext(f)[0] for f in os.listdir(self.import_dir)
        ]:
            os.remove(os.path.join(self.import_dir, fname))
def __getitem__(self, key):
    """Return a cached asset by id, requesting it on a cache miss."""
    key = int(key)
    if key not in self.data:
        logging.debug("Direct loading asset id", key)
        # Queue an asynchronous fetch and hand back an empty
        # placeholder in the meantime.
        self.request([[key, 0]])
        return Asset()
    cached = self.data[key]
    # Touch the access timestamp (used for cache trimming elsewhere).
    cached["_last_access"] = time.time()
    return cached
def save(self, **kwargs):
    """Stamp timestamps, apply defaults for required keys and validate.

    Keyword args:
        silent: suppress the debug log line.
        set_mtime: update the modification time (default True).
    """
    silent = kwargs.get("silent", False)
    if not silent:
        logging.debug(f"Saving {self}")
    # Preserve an existing creation time; initialise it on first save.
    self["ctime"] = self["ctime"] or time.time()
    if kwargs.get("set_mtime", True):
        self["mtime"] = time.time()
    for key in self.required:
        # Fill in a default when the required key is absent but known.
        if (key not in self.meta) and (key in self.defaults):
            self[key] = self.defaults[key]
        assert key in self.meta, f"Unable to save {self}. {key} is required"
def log_clean_up(log_dir, ttl=30):
    """Delete .txt log files older than *ttl* days from *log_dir*."""
    ttl_sec = ttl * 3600 * 24
    for log_file in get_files(log_dir, exts=["txt"]):
        # Keep anything younger than the retention window.
        if log_file.mtime >= time.time() - ttl_sec:
            continue
        try:
            os.remove(log_file.path)
        except Exception:
            log_traceback(f"Unable to remove old log file {log_file.base_name}")
        else:
            logging.debug(f"Removed old log file {log_file.base_name}")
def __init__(self, parent):
    """Build the week calendar UI: clock bar, 7 day columns with
    headers, a scrollable body and a zoom slider restored from the
    persisted application state.
    """
    super(SchedulerCalendar, self).__init__(parent)
    self.week_start_time = self.week_end_time = 0
    self.events = []
    self.focus_data = []
    # Drag & drop state.
    self.dragging = False
    self.drag_offset = 0
    self.drag_source = False
    self.append_condition = False
    self.selected_event = False
    header_layout = QHBoxLayout()
    # Left spacer aligns the headers with the day columns (clock bar width).
    header_layout.addSpacing(CLOCKBAR_WIDTH + 15)
    cols_layout = QHBoxLayout()
    self.clock_bar = SchedulerClockBar(self)
    cols_layout.addWidget(self.clock_bar, 0)
    self.headers = []
    self.days = []
    # One header + one column per weekday.
    for i in range(7):
        self.headers.append(SchedulerDayHeaderWidget(self, i))
        self.days.append(SchedulerDayWidget(self))
        header_layout.addWidget(self.headers[-1])
        cols_layout.addWidget(self.days[-1], 1)
    # Right spacer compensates for the scroll bar width.
    header_layout.addSpacing(20)
    self.scroll_widget = QWidget()
    self.scroll_widget.setLayout(cols_layout)
    self.scroll_widget.setSizePolicy(QSizePolicy.Preferred, QSizePolicy.Preferred)
    self.scroll_area = QScrollArea(self)
    self.scroll_area.setFrameStyle(QFrame.NoFrame)
    self.scroll_area.setWidgetResizable(True)
    self.scroll_area.setWidget(self.scroll_widget)
    self.scroll_area.setContentsMargins(0, 0, 0, 0)
    self.scroll_area.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOn)
    # Restore the last zoom level from the saved application state.
    zoomlevel = self.parent().app_state.get("scheduler_zoom", 0)
    self.zoom = QSlider(Qt.Horizontal)
    self.zoom.setMinimum(0)
    self.zoom.setMaximum(10000)
    self.zoom.valueChanged.connect(self.on_zoom)
    # NOTE(review): multi-argument debug call relies on the project's
    # custom logging wrapper, not the stdlib logging module.
    logging.debug("Setting scheduler zoom level to", zoomlevel)
    self.zoom.setValue(zoomlevel)
    layout = QVBoxLayout()
    layout.addLayout(header_layout)
    layout.addWidget(self.scroll_area, 1)
    layout.addWidget(self.zoom, 0)
    self.setLayout(layout)
    self.setMinimumHeight(450)
def seismic_handler(self, message):
    """Handle realtime messages for the rundown module.

    Reacts to playout status (current/cued item changes), event/asset
    change notifications, and transfer job progress. Only active while
    the rundown is the current module.
    """
    if self.main_window.current_module != self.main_window.rundown:
        return
    if message.method == "playout_status":
        if message.data["id_channel"] != self.id_channel:
            return
        if message.data["current_item"] != self.current_item:
            self.current_item = message.data["current_item"]
            self.view.model().refresh_items([self.current_item])
        if message.data["cued_item"] != self.cued_item:
            model = self.view.model()
            self.cued_item = message.data["cued_item"]
            for obj in model.object_data:
                if obj.object_type == "item" and obj.id == self.cued_item:
                    if self.mcr and self.mcr.isVisible():
                        self.load()
                    else:
                        # NOTE(review): refreshes current_item here,
                        # not the newly cued item — looks suspicious;
                        # confirm intended behavior.
                        self.view.model().refresh_items([self.current_item])
                    break
        if self.mcr:
            self.mcr.seismic_handler(message)
    elif message.method == "objects_changed":
        if message.data["object_type"] == "event":
            # Any displayed event changed -> reload the whole rundown.
            for id_event in message.data["objects"]:
                if id_event in self.view.model().event_ids:
                    logging.debug(
                        "Event id {} has been changed. Reloading rundown.".format(
                            id_event
                        )
                    )
                    self.load()
                    break
        elif message.data["object_type"] == "asset":
            self.refresh_assets(*message.data["objects"])
    elif message.method == "job_progress":
        # Show transfer progress for the configured "send" action only.
        if self.playout_config.get("send_action", 0) == message.data["id_action"]:
            model = self.view.model()
            for row, obj in enumerate(model.object_data):
                if obj["id_asset"] == message.data["id_asset"]:
                    model.object_data[row]["transfer_progress"] = message.data[
                        "progress"
                    ]
                    model.dataChanged.emit(
                        model.index(row, 0),
                        model.index(row, len(model.header_data) - 1),
                    )
def start(self, **kwargs):
    """Spawn the process described by self.args.

    Keyword args:
        stdin/stdout/stderr: stream overrides (default: pipes for output).
        check_output: when True (default), immediately consume output.
        handler: forwarded to check_output.
    """
    command_line = " ".join(self.args)
    logging.debug("Executing: " + command_line)
    self.proc = subprocess.Popen(
        self.args,
        stdin=kwargs.get("stdin", None),
        stdout=kwargs.get("stdout", subprocess.PIPE),
        stderr=kwargs.get("stderr", subprocess.PIPE),
        close_fds=True,
    )
    # Unless disabled, block and process the child's output right away.
    if kwargs.get("check_output", True):
        self.check_output(handler=kwargs.get("handler", False))
def on_init(self):
    """Register this conversion service's actions from the database."""
    self.service_type = "conv"
    self.actions = []
    db = DB()
    db.query("""
        SELECT id, title, service_type, settings
        FROM actions ORDER BY id
        """)
    for id_action, title, service_type, settings in db.fetchall():
        # Only actions matching this service type are registered.
        if service_type != self.service_type:
            continue
        logging.debug(f"Registering action {title}")
        self.actions.append(Action(id_action, title, xml(settings)))
    self.reset_jobs()
def update_header(self):
    """Refresh window title and channel display with the rundown date.

    The date is colored red when in the past and green when in the
    future.
    """
    channel_title = self.playout_config["title"]
    rundown_date = datetime.date.fromtimestamp(self.start_time)
    if rundown_date < datetime.date.today():
        color_attr = " color='red'"
    elif rundown_date > datetime.date.today():
        color_attr = " color='green'"
    else:
        color_attr = ""
    t = rundown_date.strftime("%A %Y-%m-%d")
    self.parent().setWindowTitle(f"Rundown {t}")
    self.channel_display.setText(f"<font{color_attr}>{t}</font> - {channel_title}")
    logging.debug(f"[RUNDOWN] Header update ({channel_title})")
def save(self):
    """Trim the cache to CACHE_LIMIT entries and dump it to disk as JSON.

    Entries with the oldest "_last_access" timestamps are evicted first.
    """
    if len(self.data) > CACHE_LIMIT:
        # Least recently accessed assets go first in the eviction order.
        ordered = sorted(
            self.data.keys(),
            key=lambda k: self.data[k].meta.get("_last_access", 0),
        )
        for key in ordered[:-CACHE_LIMIT]:
            del self.data[key]
    logging.info("Saving {} assets to local cache".format(len(self.data)))
    start_time = time.time()
    payload = [asset.meta for asset in self.data.values()]
    with open(self.cache_path, "w") as f:
        json.dump(payload, f)
    logging.debug("Cache updated in {:.03f}s".format(time.time() - start_time))
def on_seismic_timer(self):
    """Drain the listener queue and dispatch every pending message.

    Also warns (and resets the silence timer) when no message has been
    received for more than 5 seconds.
    """
    now = time.time()
    if now - self.listener.last_msg > 5:
        logging.debug(
            "[MAIN WINDOW] No seismic message received. Something may be wrong"
        )
        self.listener.last_msg = time.time()
    # Dispatch everything currently waiting; stop when the queue is empty.
    while True:
        try:
            message = self.listener.queue.get_nowait()
        except queue.Empty:
            break
        self.seismic_handler(message)
def focus(self, asset, silent=False, force=False):
    """Show *asset* in the detail view.

    Args:
        asset: Asset to display (anything else is silently ignored).
        silent: when True, skip the unsaved-changes check.
        force: forwarded to the tab loader to force a reload.
    """
    if not isinstance(asset, Asset):
        return
    logging.debug(f"[DETAIL] Focusing {asset}")
    # Re-entrancy guard: if a load is already in progress, remember only
    # the newest request and replay it when the current load finishes.
    if self._is_loading:
        self._load_queue = [asset]
        return
    else:
        self._load_queue = False
        self._is_loading = True
    if not silent:
        self.check_changed()

    #
    # Show data
    #

    self.folder_select.setEnabled(True)
    self.asset = Asset(meta=asset.meta)  # asset deep copy
    self.parent().setWindowTitle(f"Detail of {self.asset}")
    self.detail_tabs.load(self.asset, force=force)
    self.folder_select.set_value(self.asset["id_folder"])
    self.duration.fps = self.asset.fps
    self.duration.set_value(self.asset.duration)
    self.duration.show()
    # Duration is editable only for offline or not-yet-saved assets.
    if (self.asset["status"] == ObjectStatus.OFFLINE) or (not self.asset.id):
        self.duration.setEnabled(True)
    else:
        self.duration.setEnabled(False)
    # Editing allowed for new assets or users with asset_edit rights.
    enabled = (not asset.id) or has_right("asset_edit", self.asset["id_folder"])
    self.folder_select.setEnabled(enabled)
    self.action_approve.setEnabled(enabled)
    self.action_qc_reset.setEnabled(enabled)
    self.action_reject.setEnabled(enabled)
    self.action_apply.setEnabled(enabled)
    self.action_revert.setEnabled(enabled)
    self.set_title("DETAIL : " + self.asset.__repr__())
    self._is_loading = False
    # NOTE(review): _load_queue is a list ([asset]) but is passed to
    # focus() directly, where the isinstance(asset, Asset) guard will
    # reject it — the queued request appears to be dropped. Looks like
    # it should be self.focus(self._load_queue[0]); confirm.
    if self._load_queue:
        self.focus(self._load_queue)
def load(self):
    """Load the asset cache from disk into self.data.

    A missing cache file is fine (fresh start); a corrupted file is
    logged and ignored.
    """
    if not os.path.exists(self.cache_path):
        return
    start_time = time.time()
    try:
        # Fix: open the cache file in a context manager so the handle
        # is always closed (the original json.load(open(...)) leaked it).
        with open(self.cache_path) as f:
            data = json.load(f)
    except Exception:
        log_traceback("Corrupted cache file '{}'".format(self.cache_path))
        return
    for meta in data:
        self.data[int(meta["id"])] = Asset(meta=meta)
    logging.debug("Loaded {} assets from cache in {:.03f}s".format(
        len(self.data), time.time() - start_time))
def check(self, session_id, extend=False):
    """Validate a session and return its user data, or False.

    Expired sessions are removed. Sessions past half of their maximum
    age (or when *extend* is requested) get a refreshed creation time —
    a sliding-expiration scheme.
    """
    data = self.load(session_id)
    if not data:
        return False
    age = time.time() - data.get("ctime", 0)
    if age > self.max_age:
        logging.debug(f"Session {session_id} has expired. Removing.")
        self.delete(session_id)
        return False
    should_extend = extend or (age > self.max_age / 2)
    if should_extend:
        data["ctime"] = time.time()
        self.save(session_id, data)
    return data["user_data"]
def query(self, query, **kwargs):
    """Send AMCP command

    Writes the command to the CasparCG telnet connection and parses the
    status-code-prefixed reply into a CasparResponse. 200/201 replies
    carry a payload on the following line; codes above 3xx are errors.
    NOTE(review): this function appears verbatim twice in this file —
    consider deduplicating into a shared client module.
    """
    if not self.connection:
        if not self.connect():
            return CasparResponse(500, "Unable to connect CasparCG server")
    query = query.strip()
    if kwargs.get("verbose", True):
        # INFO polls are too chatty to log.
        if not query.startswith("INFO"):
            logging.debug("Executing AMCP: {}".format(query))
    query += "\r\n"
    if PYTHON_VERSION >= 3:
        query = bytes(query.encode("utf-8"))
        delim = bytes("\r\n".encode("utf-8"))
    else:
        delim = "\r\n"
    try:
        self.connection.write(query)
        result = self.connection.read_until(delim).strip()
    except Exception:
        log_traceback()
        return CasparResponse(500, "Query failed")
    if PYTHON_VERSION >= 3:
        result = result.decode("UTF-8")
    if not result:
        return CasparResponse(500, "No result")
    try:
        if result[0:3] == "202":
            # Command accepted, no data follows.
            return CasparResponse(202, "No result")
        elif result[0:3] in ["201", "200"]:
            # Success with payload: read the data line that follows.
            stat = int(result[0:3])
            result = decode_if_py3(
                self.connection.read_until(delim)).strip()
            return CasparResponse(stat, result)
        elif int(result[0:1]) > 3:
            # 4xx/5xx: error status with the message inline.
            stat = int(result[0:3])
            return CasparResponse(stat, result)
    except Exception:
        log_traceback()
        return CasparResponse(500, "Malformed result: {}".format(result))
    return CasparResponse(500, "Unexpected result: {}".format(result))
def render_error(self, response_code, message, traceback=""):
    """Render the error page for a failed request.

    401/403 are logged briefly as access denials; other codes get a
    full error log line (plus the traceback at debug level).
    """
    context = self.context()
    view = CherryAdminView("error", context)
    view["title"] = "Error"
    view.build(response_code=response_code, message=message, traceback=traceback)
    if response_code in (401, 403):
        # NOTE(review): multi-argument call relies on the project's
        # custom logging wrapper, not stdlib logging.
        logging.error("Access denied:", cherrypy.request.path_info)
        return self.render(view)
    logging.error("Error {} ({}) processing {} request \"{}\"".format(
        response_code, message, cherrypy.request.method, cherrypy.request.path_info))
    if traceback:
        logging.debug(traceback)
    return self.render(view)
def plugin_exec(self, **kwargs):
    """Execute an action of a playout plugin.

    Keyword args:
        id_plugin: index of the plugin to run (required).
        action_name: name of the action to invoke.
        data: JSON-encoded dict of action arguments.

    Returns a NebulaResponse describing the outcome.
    """
    action = kwargs.get("action_name", False)
    data = json.loads(kwargs.get("data", "{}"))
    id_plugin = int(kwargs["id_plugin"])
    logging.debug("Executing playout plugin:", action, id_plugin, data)
    if not action:
        return NebulaResponse(400, "No plugin action requested")
    try:
        plugin = self.plugins[id_plugin]
    except (KeyError, IndexError):
        log_traceback()
        return NebulaResponse(400, "No such action")
    # The plugin signals success/failure via its boolean return value.
    if not plugin.on_command(action, **data):
        return NebulaResponse(500, "Playout plugin failed")
    return NebulaResponse(200)
def on_response(self, response):
    """Update the local asset cache from a server response.

    Returns True on success, False when the response is an error.
    The optional handler is notified with the updated asset ids.
    """
    if response.is_error:
        logging.error(response.message)
        return False
    updated_ids = []
    for meta in response.data:
        try:
            id_asset = int(meta["id"])
        except KeyError:
            # Entries without an id cannot be cached; skip them.
            continue
        self.data[id_asset] = Asset(meta=meta)
        updated_ids.append(id_asset)
    logging.debug("Updated {} assets in cache".format(len(updated_ids)))
    if self.handler:
        self.handler(*updated_ids)
    return True
def query(self, query, **kwargs):
    """Send AMCP command

    Writes the command to the CasparCG telnet connection and parses the
    status-code-prefixed reply into a CasparResponse. 200/201 replies
    carry a payload on the following line; codes above 3xx are errors.
    NOTE(review): this is a verbatim duplicate of another query() in
    this file — consider deduplicating into a shared client module.
    """
    if not self.connection:
        if not self.connect():
            return CasparResponse(500, "Unable to connect CasparCG server")
    query = query.strip()
    if kwargs.get("verbose", True):
        # INFO polls are too chatty to log.
        if not query.startswith("INFO"):
            logging.debug("Executing AMCP: {}".format(query))
    query += "\r\n"
    if PYTHON_VERSION >= 3:
        query = bytes(query.encode("utf-8"))
        delim = bytes("\r\n".encode("utf-8"))
    else:
        delim = "\r\n"
    try:
        self.connection.write(query)
        result = self.connection.read_until(delim).strip()
    except Exception:
        log_traceback()
        return CasparResponse(500, "Query failed")
    if PYTHON_VERSION >= 3:
        result = result.decode("UTF-8")
    if not result:
        return CasparResponse(500, "No result")
    try:
        if result[0:3] == "202":
            # Command accepted, no data follows.
            return CasparResponse(202, "No result")
        elif result[0:3] in ["201", "200"]:
            # Success with payload: read the data line that follows.
            stat = int(result[0:3])
            result = decode_if_py3(self.connection.read_until(delim)).strip()
            return CasparResponse(stat, result)
        elif int(result[0:1]) > 3:
            # 4xx/5xx: error status with the message inline.
            stat = int(result[0:3])
            return CasparResponse(stat, result)
    except Exception:
        log_traceback()
        return CasparResponse(500, "Malformed result: {}".format(result))
    return CasparResponse(500, "Unexpected result: {}".format(result))
def seismic_handler(self, message):
    """Route a realtime message to the component that handles it.

    Asset changes refresh the asset cache, config changes reload
    settings, everything else is fanned out to subscribed modules.
    """
    is_asset_change = (
        message.method == "objects_changed"
        and message.data["object_type"] == "asset"
    )
    if is_asset_change:
        objects = message.data["objects"]
        logging.debug(
            f"[MAIN WINDOW] {len(objects)} asset(s) have been changed")
        asset_cache.request([[aid, message.timestamp + 1] for aid in objects])
        return
    if message.method == "config_changed":
        self.load_settings()
        return
    # Fan out to every subscriber interested in this method.
    for module, methods in self.subscribers:
        if message.method in methods:
            module.seismic_handler(message)
def run(self, method, callback, **kwargs):
    """Send an API request to the hub.

    When callback == -1 the call is synchronous: it blocks (while
    pumping the Qt event loop) until the reply arrives and returns the
    handler's result. Otherwise *callback* is invoked asynchronously
    with the response.
    """
    logging.debug("Executing {}{} query".format(
        "" if callback == -1 else "async ", method))
    kwargs["session_id"] = config["session_id"]
    kwargs["initiator"] = CLIENT_ID
    # Auth/ping endpoints live at the site root and are form-encoded;
    # everything else is JSON via /api.
    if method in ["ping", "login", "logout"]:
        method = "/" + method
        mime = QVariant("application/x-www-form-urlencoded")
        post_data = QUrlQuery()
        for key in kwargs:
            post_data.addQueryItem(key, kwargs[key])
        data = post_data.toString(QUrl.FullyEncoded).encode("ascii")
    else:
        method = "/api/" + method
        mime = QVariant("application/json")
        data = json.dumps(kwargs).encode("ascii")
    request = QNetworkRequest(QUrl(config["hub"] + method))
    request.setHeader(QNetworkRequest.ContentTypeHeader, mime)
    request.setHeader(
        QNetworkRequest.UserAgentHeader,
        QVariant(f"nebula-firefly/{FIREFLY_VERSION}"),
    )
    try:
        query = self.manager.post(request, data)
        if callback != -1:
            # Async: dispatch the response to the callback when done.
            query.finished.connect(
                functools.partial(self.handler, query, callback))
        self.queries.append(query)
    except Exception:
        log_traceback()
        if callback:
            r = NebulaResponse(400, "Unable to send request")
            if callback == -1:
                return r
            else:
                callback(r)
        return
    if callback == -1:
        # Sync: spin the event loop so the UI stays responsive.
        while not query.isFinished():
            time.sleep(0.0001)
            QApplication.processEvents()
        return self.handler(query, -1)
def run(self):
    """Listener thread body.

    Keeps a websocket connection to the hub open, reconnecting whenever
    run_forever() returns, until should_run is cleared.
    """
    addr = config["hub"].replace("http", "ws", 1) + "/ws/" + config["site_name"]
    while self.should_run:
        logging.debug(f"[LISTENER] Connecting to {addr}", handlers=False)
        self.halted = False
        self.ws = websocket.WebSocketApp(
            addr,
            on_message=self.on_message,
            on_error=self.on_error,
            on_close=self.on_close,
        )
        # Blocks until the connection drops; the loop then reconnects
        # unless should_run has been cleared.
        self.ws.run_forever()
        self.active = False
    # NOTE(review): nesting reconstructed — the final log + halted flag
    # are presumed to run after the loop exits; confirm against VCS.
    logging.debug("[LISTENER] halted", handlers=False)
    self.halted = True
def load(self, id):
    """Load object metadata by id, trying the cache before the database.

    Returns:
        True on success, False when the object does not exist.
    """
    key = str(self.object_type_id) + "-" + str(id)
    try:
        cache_data = cache.load(key)
        if cache_data is not None:
            self.meta = json.loads(cache_data)
            return True
    except Exception:
        pass  # best-effort cache; fall back to the database
    logging.debug(f"Loading {self.__class__.__name__} ID:{id} from DB")
    # Fix: pass the id as a bound query parameter instead of
    # interpolating it into the SQL string (matches the %s placeholder
    # style used elsewhere in this file and avoids SQL injection).
    self.db.query(
        f"SELECT meta FROM {self.table_name} WHERE id = %s", [id]
    )
    try:
        self.meta = self.db.fetchall()[0][0]
    except IndexError:
        # Fix: added the missing space between the two concatenated
        # message fragments ("...load FooID:1" -> "...load Foo ID:1").
        logging.error(f"Unable to load {self.__class__.__name__} "
                      f"ID:{id}. Object does not exist")
        return False
    self.cache()
    # Fix: the DB-load success path previously fell through and
    # returned None, unlike the True/False returns above.
    return True
def main(self, debug=False, counter=0):
    """Solve the placeholder: replace it with solver-generated items.

    In debug mode, stops after solving without touching the bin.
    Chains to the next solver via solve_next, accumulating the number
    of created items in *counter*. Returns a NebulaResponse.
    """
    logging.info("Solving {}".format(self.placeholder))
    message = "Solver returned no items. Keeping placeholder."
    try:
        for new_item in self.solve():
            self.new_items.append(new_item)
            if debug:
                logging.debug("Appending {}".format(new_item.asset))
    except Exception:
        message = log_traceback("Error occured during solving")
        return NebulaResponse(501, message)
    if debug:
        # Dry run: report success without modifying the rundown.
        return NebulaResponse(202)
    if not self.new_items:
        return NebulaResponse(204, message)
    i = 0
    # NOTE(review): nesting below reconstructed from a collapsed source —
    # the placeholder is deleted, the new items are inserted in its
    # place, and every item's position is renumbered; confirm the exact
    # indentation against version control.
    for item in self.bin.items:
        i += 1
        if item.id == self.placeholder.id:
            item.delete()
            for new_item in self.new_items:
                i += 1
                new_item["id_bin"] = self.bin.id
                new_item["position"] = i
                new_item.save(notify=False)
        if item["position"] != i:
            item["position"] = i
            item.save(notify=False)
    if self.bin.id not in self.affected_bins:
        self.affected_bins.append(self.bin.id)
    if self.solve_next:
        # Chain to the next placeholder, carrying the item count forward.
        self.init_solver(self.solve_next)
        return self.main(debug=debug, counter=len(self.new_items) + counter)
    bin_refresh(self.affected_bins, db=self.db)
    return NebulaResponse(
        200, "Created {} new items".format(len(self.new_items) + counter)
    )