def send_message(self, method, **data):
    """Publish one message to the site's RabbitMQ queue.

    The payload is a JSON array: [timestamp, site_name, host, method, data].
    Best effort: failures are logged (never raised) and the connection is
    reset so the next call triggers a reconnect.
    """
    if not (self.connection and self.channel):
        # Not connected (or connection was reset) - try to (re)connect.
        if not self.connect():
            # Reconnect failed: back off briefly and drop this message.
            time.sleep(1)
            return
    message = json.dumps([
        time.time(),
        config["site_name"],
        config["host"],
        method,
        data
    ])
    try:
        self.channel.basic_publish(
            exchange='',
            routing_key=config["site_name"],
            body=message
        )
    except pika.exceptions.ChannelWrongStateError:
        logging.warning("RabbitMQ: nobody's listening", handlers=[])
        return
    except pika.exceptions.StreamLostError:
        logging.error("RabbitMQ connection lost", handlers=[])
        self.connection = self.channel = None
    except Exception:
        # Was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt - narrow to Exception, log the traceback.
        log_traceback("RabbitMQ error", handlers=[])
        logging.debug("Unable to send message", message, handlers=[])
        self.connection = self.channel = None
def passwd(*args):
    """Interactively (re)set a user's password and admin flag.

    Prompts on stdin for login, password and admin status, then updates the
    matching user record. Exits the process on interrupt; aborts with a
    critical error when the login does not exist.
    """
    print()
    try:
        # NOTE: the original line here was garbled (redaction artifact);
        # reconstructed from the prompt texts. The password prompt
        # deliberately echoes (no getpass), as its text states.
        login = input("Login: ").strip()
        password = input("Password (will be echoed): ").strip()
        is_admin = input("Admin (yes/no): ").strip()
    except KeyboardInterrupt:
        print()
        logging.warning("Interrupted by user")
        sys.exit(0)
    db = DB()
    db.query("SELECT id FROM users WHERE login=%s", [login])
    res = db.fetchall()
    if not res:
        critical_error("Unable to set password: no such user")
    u = User(res[0][0], db=db)
    if login:
        u["login"] = u["full_name"] = login
    u["is_admin"] = 1 if is_admin == "yes" else 0
    u.set_password(password)
    u.save()
    print()
    logging.goodnews("Password changed")
def do_find(self, search_string, start_row=-1):
    # Search the browser view for the next row whose "title" or "id/main"
    # column contains search_string (case-insensitive substring match),
    # starting below the current selection, or at start_row when given.
    self.last_search = search_string
    search_string = search_string.lower()
    if start_row == -1:
        # No explicit start row: resume just below the last selected row
        # (or at row 0 when nothing is selected).
        for idx in self.view.selectionModel().selectedIndexes():
            if idx.row() > start_row:
                start_row = idx.row()
        start_row += 1
    for i, row in enumerate(self.view.model().object_data[start_row:]):
        for key in ["title", "id/main"]:
            if str(row[key]).lower().find(search_string) > -1:
                # Match: select the whole row and scroll it into view.
                selection = QItemSelection()
                i1 = self.view.model().index(i + start_row, 0, QModelIndex())
                i2 = self.view.model().index(
                    i + start_row,
                    len(self.view.model().header_data) - 1,
                    QModelIndex(),
                )
                self.view.scrollTo(i1, QAbstractItemView.PositionAtTop)
                selection.select(i1, i2)
                self.view.selectionModel().select(
                    selection, QItemSelectionModel.ClearAndSelect
                )
                break
        else:
            # No column matched this row; keep scanning.
            continue
        # Inner break fired -> a match was selected; stop searching.
        break
    else:
        # Exhausted all rows without a match.
        logging.warning("Not found: {}".format(self.last_search))
        self.view.clearSelection()
def format_list(meta_type, value, **kwargs):
    """Render a classification-scheme list value for display.

    value may be a single string or a list of strings; anything else is
    logged and treated as empty. kwargs:
        language -- display language (defaults to site language, then "en")
        result   -- output mode: "brief", "full", "description" or "alias"
                    (default "alias")
    Returns a dict ("brief"), a list of item dicts ("full"), or a string.
    """
    # Normalize the value to a list of strings.
    if isinstance(value, str):
        value = [value]
    elif not isinstance(value, list):
        logging.warning("Unknown value {} for key {}".format(value, meta_type))
        value = []
    value = [str(v) for v in value]
    lang = kwargs.get("language", config.get("language", "en"))
    mode = kwargs.get("result", "alias")
    cs = meta_type.cs
    if mode == "brief":
        return {
            "value": value,
            "alias": ", ".join(cs.aliases(lang)),
        }
    elif mode == "full":
        items = []
        adkey = []
        for v in value:
            # Values used here that exist only in the extended CS data
            # are appended so they still show up in the listing.
            if (v not in cs.data) and (v in cs.csdata):
                adkey.append(v)
        for csval in cs.data + adkey:
            role = cs.role(csval)
            if role == "hidden":
                continue
            items.append({
                "value": csval,
                "alias": cs.alias(csval, lang),
                "description": cs.description(csval, lang),
                "selected": csval in value,
                "role": role,
                "indent": 0,
            })
        if meta_type.get("mode") == "tree":
            # Sort dotted hierarchical keys segment by segment, zero-padded
            # so "1.10" sorts after "1.2".
            def sort_mode(item):
                return "".join(n.zfill(3) for n in item["value"].split("."))
            items.sort(key=sort_mode)
            tree_indent(items)
        else:
            if meta_type.get("order") == "alias":
                def sort_mode(item):
                    return unaccent(str(item["alias"]))
            else:
                def sort_mode(item):
                    return unaccent(str(item["value"]))
            items.sort(key=sort_mode)
        return items
    elif mode == "description":
        if value:
            return cs.description(value[0], lang)
        return ""
    else:  # alias
        return ", ".join(cs.aliases(lang))
def on_main(self):
    """Periodic maintenance: cap the message backlog and purge old logs."""
    if len(self.queue) > 50:
        # Consumers are clearly not keeping up; drop the whole backlog.
        logging.warning(
            f"Truncating message queue ({len(self.queue)} messages)",
            handlers=[],
        )
        self.queue = []
    if self.log_dir and self.log_ttl:
        log_clean_up(self.log_dir, self.log_ttl)
def handle_data(self, data):
    """Decode an incoming datagram and enqueue it when it belongs to this site."""
    try:
        message = Message(json.loads(data.decode()))
    except Exception:
        # Not valid JSON / not a valid message; dump the raw bytes to aid debugging.
        logging.warning("Malformed message detected", handlers=False)
        print("\n")
        print(data)
        print("\n")
        return
    # Ignore traffic addressed to other sites on the same transport.
    if message.site_name == config["site_name"]:
        self.queue.append(message)
def parse(self, layer_index):
    # Parse a CasparCG-style INFO XML document (self.data) and return a dict
    # describing playback state of the given stage layer, or None when the
    # document/layer is unavailable. Returned dict starts from self.defaults.
    try:
        layers = self.data.find("stage").find("layers")
    except Exception:
        # self.data is missing or not the expected XML shape.
        return None
    if layers is None:
        return None
    video_layer = None
    for layer in layers.findall("layer"):
        try:
            index = int(layer.find("index").text)
        except Exception:
            logging.warning("Unable to get layer index")
            return None
        if index == layer_index:
            video_layer = layer
            break
    else:
        logging.warning("Layer index {} not found".format(layer_index))
        return None
    data = copy.deepcopy(self.defaults)
    # Foreground producer: what is currently on air in this layer.
    try:
        fg_prod = video_layer.find("foreground").find("producer")
        if fg_prod.find("type").text == "image-producer":
            data["pos"] = 0
            data["current"] = basefname(fg_prod.find("location").text)
        elif fg_prod.find("type").text == "empty-producer":
            data["current"] = False  # No video is playing right now
        else:
            # Assume a file producer: position/duration are frame counts.
            data["pos"] = int(fg_prod.find("file-frame-number").text)
            data["dur"] = int(fg_prod.find("file-nb-frames").text)
            data["current"] = basefname(fg_prod.find("filename").text)
    except Exception:
        # Keep defaults when the foreground subtree is missing or malformed.
        pass
    # Background producer: what is cued to play next.
    try:
        bg_prod = (
            video_layer.find("background")
            .find("producer")
            .find("destination")
            .find("producer")
        )
        if bg_prod.find("type").text == "image-producer":
            data["cued"] = basefname(bg_prod.find("location").text)
        elif bg_prod.find("type").text == "empty-producer":
            data["cued"] = False  # No video is cued
        else:
            data["cued"] = basefname(bg_prod.find("filename").text)
    except Exception:
        data["cued"] = False
    return data
def relay_message(self, message):
    """Forward one message to every configured relay endpoint (best effort)."""
    # one message per line
    payload = (message.json.replace("\n", "") + "\n").encode("ascii")
    for relay in self.relays:
        try:
            result = self.session.post(relay, payload, timeout=0.3)
        except Exception:
            logging.error(f"Exception: Unable to relay message to {relay}",
                          handlers=[])
            continue
        if result.status_code < 400:
            continue
        err = f"Error {result.status_code}"
        logging.warning(f"{err}: Unable to relay message to {relay}",
                        handlers=[])
def handshake(self):
    """Perform the server side of the WebSocket opening handshake.

    Reads the client's HTTP upgrade request, extracts the
    Sec-WebSocket-Key header and replies with the computed accept
    response. Marks the client invalid / not-keep-alive on failure.
    """
    message = self.request.recv(1024).decode().strip()
    # Raw strings: "\s" inside a non-raw literal is an invalid escape
    # (DeprecationWarning today, SyntaxError in future Python versions).
    upgrade = re.search(r'\nupgrade[\s]*:[\s]*websocket', message.lower())
    if not upgrade:
        self.keep_alive = False
        return
    key = re.search(r'\n[sS]ec-[wW]eb[sS]ocket-[kK]ey[\s]*:[\s]*(.*)\r\n', message)
    if key:
        key = key.group(1)
    else:
        logging.warning("Client tried to connect but was missing a key")
        self.keep_alive = False
        return
    response = self.make_handshake_response(key)
    self.handshake_done = self.request.send(response.encode())
    self.valid_client = True
    # Register ourselves with the owning server instance.
    self.server._new_client_(self)
def send_text(self, message):
    """Frame and send a text message to the WebSocket client.

    Accepts str or UTF-8 bytes; anything else is rejected with a warning.
    Returns the socket send() result, or False when the message is invalid.
    Raises when the payload exceeds the 64-bit length limit.
    """
    # Validate message
    if isinstance(message, bytes):
        message = try_decode_UTF8(
            message)  # this is slower but assures we have UTF-8
        if not message:
            logging.warning(
                "Can't send message, message is not valid UTF-8")
            return False
    elif not isinstance(message, str):
        # NOTE: the original also tested `isinstance(message, unicode)`,
        # a Python 2 remnant that raised NameError on Python 3 whenever a
        # non-str/bytes value reached this branch.
        logging.warning(
            'Can\'t send message, message has to be a string or bytes. Given type is %s'
            % type(message))
        return False
    header = bytearray()
    payload = encode_to_UTF8(message)
    payload_length = len(payload)
    # Normal payload
    if payload_length <= 125:
        header.append(FIN | OPCODE_TEXT)
        header.append(payload_length)
    # Extended payload (16-bit length)
    elif 126 <= payload_length <= 65535:
        header.append(FIN | OPCODE_TEXT)
        header.append(PAYLOAD_LEN_EXT16)
        header.extend(struct.pack(">H", payload_length))
    # Huge extended payload (64-bit length)
    elif payload_length < 18446744073709551616:
        header.append(FIN | OPCODE_TEXT)
        header.append(PAYLOAD_LEN_EXT64)
        header.extend(struct.pack(">Q", payload_length))
    else:
        raise Exception(
            "Message is too big. Consider breaking it into chunks.")
    return self.request.send(header + payload)
def __init__(self, parent):
    """Build the tabbed browser widget and restore tabs from app state.

    Tabs whose view no longer exists in the configuration are skipped;
    when no tab can be restored, a single default tab is opened.
    """
    super(BrowserModule, self).__init__(parent)
    self.tabs = QTabWidget(self)
    self.tabs.setTabsClosable(True)
    self.tabs.tabCloseRequested.connect(self.close_tab)
    self.tabs.currentChanged.connect(self.on_tab_switch)
    self.layout = QVBoxLayout(self)
    self.layout.setSpacing(0)
    self.layout.setContentsMargins(0, 0, 0, 0)
    self.layout.addWidget(self.tabs)
    self.setLayout(self.layout)
    tabscfg = self.app_state.get("browser_tabs", [])
    created_tabs = 0
    current_index = 0
    for tabcfg in tabscfg:
        try:
            if tabcfg["id_view"] not in config["views"]:
                # The saved view was removed from the site config.
                continue
            if tabcfg.get("active"):
                current_index = self.tabs.count()
            # pop() with a default replaces the try/del/except KeyError
            # dance, and always removes "title" from the kwargs - the old
            # code left a falsy title in tabcfg, which then crashed
            # new_tab() with a duplicate "title" keyword argument.
            tabcfg.pop("active", None)
            title = tabcfg.pop("title", False) or False
            self.new_tab(title, **tabcfg)
            created_tabs += 1
        except Exception:
            log_traceback()
            logging.warning("Unable to restore tab")
    if not created_tabs:
        self.new_tab()
    self.tabs.setCurrentIndex(current_index)
def on_exit(self):
    """Persist state, stop the listener thread and terminate the application."""
    asset_cache.save()
    listener = self.main_window.listener
    if not listener:
        return
    if config.get("session_id"):
        # Remember the session so the next start can re-authenticate.
        with open(self.auth_key_path, "w") as f:
            f.write(config["session_id"])
    if not listener.halted:
        listener.halt()
    # Give the listener roughly a second to shut down cleanly.
    attempts = 0
    while not listener.halted:
        time.sleep(0.1)
        if attempts > 10:
            logging.warning(
                "Unable to shutdown listener. Forcing quit", handlers=False
            )
            break
        attempts += 1
    sys.exit(0)
def ping(self, **kwargs):
    """Validate the client's session and rotate its session ID.

    Returns 401 when no/unknown session ID is supplied; on success the old
    session is deleted, a fresh one is issued and sent back as a cookie.
    """
    request = parse_request(**kwargs)
    session_id = request.get("session_id")
    # Guard: a session ID must be present...
    if not session_id:
        msg = "Not logged in - no session ID provided"
        logging.warning("PING:", msg)
        return json_response(401, msg)
    # ...and must map to a known user.
    user_data = self.sessions.check(session_id)
    if not user_data:
        msg = f"Not logged in - session {session_id} not found"
        logging.warning("PING:", msg)
        return json_response(401, msg)
    # Session rotation: invalidate the old ID, issue a fresh one bound to
    # the current client info, and store it in the response cookie.
    client_info = get_client_info()
    self.sessions.delete(session_id)
    session_id = self.sessions.create(user_data, **client_info)
    save_session_cookie(self, session_id)
    uname = user_data.get("login", "anonymous")
    logging.debug(f"PING: Logged in user {uname}")
    return json_response(200, data=user_data, session_id=session_id)
def on_main(self):
    """Scan configured watchfolders and register new media files as assets.

    First loads the paths of all known online file assets, then walks every
    <folder> element of self.settings, creating an Asset (status CREATING)
    for each new file that passed its quarantine period.
    """
    db = DB()
    self.existing = []
    start_time = time.time()
    db.query("SELECT meta FROM assets WHERE media_type=1 AND status=1")
    for (meta, ) in db.fetchall():
        asset = Asset(meta=meta, db=db)
        file_path = asset.file_path
        self.existing.append(file_path)
    duration = time.time() - start_time
    if duration > 5 or config.get("debug_mode", False):
        logging.debug(f"Online assets loaded in {s2time(duration)}")
    # Set mirror of self.existing for O(1) membership tests - the original
    # `full_path in self.existing` list scan was O(n) per scanned file.
    existing_paths = set(self.existing)

    start_time = time.time()
    for wf_settings in self.settings.findall("folder"):
        id_storage = int(wf_settings.attrib["id_storage"])
        rel_wf_path = wf_settings.attrib["path"]
        quarantine_time = int(
            wf_settings.attrib.get("quarantine_time", "10"))
        id_folder = int(wf_settings.attrib.get("id_folder", 12))

        storage_path = storages[id_storage].local_path
        watchfolder_path = os.path.join(storage_path, rel_wf_path)

        if not os.path.exists(watchfolder_path):
            logging.warning("Skipping non-existing watchfolder",
                            watchfolder_path)
            continue

        i = 0
        for file_object in get_files(
            watchfolder_path,
            recursive=wf_settings.attrib.get("recursive", False),
            hidden=wf_settings.attrib.get("hidden", False),
            # was wf_settings.get(...) - same attribute lookup on an XML
            # element, made consistent with the other reads above
            case_sensitive_exts=wf_settings.attrib.get(
                "case_sensitive_exts", False),
        ):
            i += 1
            if i % 100 == 0 and config.get("debug_mode", False):
                logging.debug("{} files scanned".format(i))

            if not file_object.size:
                continue

            full_path = file_object.path
            if full_path in existing_paths:
                continue

            now = time.time()
            asset_path = full_path.replace(storage_path, "", 1).lstrip("/")
            ext = os.path.splitext(asset_path)[1].lstrip(".").lower()
            if ext not in FileTypes.exts():
                continue

            asset = asset_by_path(id_storage, asset_path, db=db)
            if asset:
                self.existing.append(full_path)
                existing_paths.add(full_path)
                continue

            base_name = get_base_name(asset_path)

            # Quarantine: skip files modified too recently (still copying).
            if quarantine_time and now - file_object.mtime < quarantine_time:
                # NOTE: this literal was garbled (broken across lines) in
                # the original source; rejoined to a single message.
                logging.debug(f"{base_name} is too young. Skipping")
                continue

            asset = Asset(db=db)
            asset["content_type"] = FileTypes.by_ext(ext)
            asset["media_type"] = MediaType.FILE
            asset["id_storage"] = id_storage
            asset["path"] = asset_path
            asset["ctime"] = now
            asset["mtime"] = now
            asset["status"] = ObjectStatus.CREATING
            asset["id_folder"] = id_folder
            asset["title"] = base_name

            asset.load_sidecar_metadata()

            failed = False
            for post_script in wf_settings.findall("post"):
                try:
                    # SECURITY: executes arbitrary Python from the service
                    # settings file. Acceptable only because settings are
                    # admin-controlled; never feed untrusted XML here.
                    exec(post_script.text)
                except Exception:
                    log_traceback(
                        f"Error executing post-script on {asset}")
                    failed = True
            if not failed:
                asset.save(set_mtime=False)

    duration = time.time() - start_time
    if duration > 60 or config.get("debug_mode", False):
        logging.debug(f"Watchfolders scanned in {s2time(duration)}")
def on_close(self, *args):
    """Mark the connection inactive; warn when the drop was not requested."""
    self.active = False
    if not self.should_run:
        # Deliberate shutdown - nothing to report.
        return
    logging.warning("[LISTENER] connection interrupted", handlers=False)
def main(self):
    """Ensure each configured storage is mounted and carries this site's ident.

    Mounted storages get the "<site_name>:<id>" line appended to their
    .nebula_root file (once). Unmounted storages are mounted with an
    exponential back-off tracked per storage in the module-level
    storage_status dict as [ok_flag, retry_interval, last_check_time].
    """
    storages_conf = config.get("storages", "all")
    db = DB()
    db.query("SELECT id, settings FROM storages")
    for id_storage, storage_settings in db.fetchall():
        # Honor an optional whitelist of storage IDs.
        if type(storages_conf) == list and id_storage not in storages_conf:
            continue
        storage = Storage(id_storage, **storage_settings)
        if storage:
            # Storage is available: make sure its root is tagged as ours.
            storage_string = f"{config['site_name']}:{storage.id}"
            storage_ident_path = os.path.join(storage.local_path, ".nebula_root")
            has_ident = False
            if os.path.exists(storage_ident_path):
                # was: open(...).readlines() with no close - leaked the handle
                with open(storage_ident_path) as f:
                    has_ident = storage_string in [
                        line.strip() for line in f.readlines()
                    ]
            if not has_ident:
                try:
                    with open(storage_ident_path, "a") as f:
                        f.write(storage_string + "\n")
                except Exception:
                    if self.first_run:
                        logging.warning(f"{storage} is mounted, but read only")
                else:
                    if self.first_run:
                        logging.info(f"{storage} is mounted and root is writable")
            continue

        # Storage is not mounted. Respect the back-off window before retrying.
        was_ok, interval, last_check = storage_status.get(
            id_storage, [True, 2, 0])
        if not was_ok and time.time() - last_check < interval:
            continue
        if was_ok:
            # Only announce on the first failure to avoid log spam.
            logging.info(f"{storage} is not mounted. Mounting...")

        if not os.path.exists(storage.local_path):
            try:
                os.mkdir(storage.local_path)
            except Exception:
                if was_ok:
                    logging.error(f"Unable to create mountpoint for {storage}")
                storage_status[id_storage] = [False, 240, time.time()]
                continue

        self.mount(storage)
        if ismount(storage.local_path):
            logging.goodnews(f"{storage} mounted successfully")
            if id_storage not in storage_status:
                storage_status[id_storage] = [True, 2, 0]
            storage_status[id_storage][0] = True
            storage_status[id_storage][1] = 2
        else:
            if was_ok:
                logging.error(f"{storage} mounting failed")
            # Seed the entry if this is the storage's very first failure -
            # the original indexed storage_status[id_storage] directly and
            # raised KeyError when the first mount attempt ever failed.
            status = storage_status.setdefault(id_storage, [True, 2, 0])
            status[0] = False
            # Exponential back-off, capped at 240 seconds.
            status[1] = min(240, status[1] * 2)
            status[2] = time.time()