def ChangelogMenuTriggered(self, hash=""):
    """Open the default web browser at the commit URL for *hash*."""
    log.info("ChangelogMenuTriggered")
    try:
        webbrowser.open(self.commit_url % hash)
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt propagate
        log.warning('Failed to launch web browser to %s' % self.commit_url)
def open(self, settings=None, show_errors=True):
    """Open a database connection with the given (or default) settings.

    Returns True on success. On failure logs the driver/database error,
    optionally shows a critical message box, and returns False.
    """
    # Drop any existing connection before reconnecting
    if self.is_open():
        self.close()
    if settings is None:
        settings = get_connection_settings()

    log.info('Подключаемся к базе данных')
    self.connection = QtSql.QSqlDatabase.addDatabase(settings["dbms"])
    self.connection.setHostName(settings["host"])
    self.connection.setPort(settings["port"])
    self.connection.setDatabaseName(settings["db"])
    self.connection.setUserName(settings["user"])
    self.connection.setPassword(settings["password"])

    if self.connection.open():
        return True

    # Connection failed: collect both error texts for logging/display
    error = (
        f'Database: {self.connection.lastError().databaseText()}\n'
        f'Driver: {self.connection.lastError().driverText()}')
    log.warning(error)
    if show_errors:
        app.restoreOverrideCursor()
        QMessageBox.critical(
            app.main_window, "Ошибка!", error, QMessageBox.Close)
    return False
def read_from_file(self, file_path, path_mode="ignore"):
    """ Load JSON settings from a file.

    Detects and repairs OpenShot 2.5.0 escape-sequence corruption before
    parsing. path_mode="absolute" converts stored paths to absolute ones.
    Raises RuntimeError for an empty file; any other failure is re-raised
    as Exception with a descriptive message.
    """
    try:
        with open(file_path, 'r', encoding='utf-8') as f:
            contents = f.read()
        if not contents:
            raise RuntimeError("Couldn't load {} file, no data.".format(
                self.data_type))

        # Scan for and correct possible OpenShot 2.5.0 corruption
        if self.damage_re.search(contents) and self.version_re.search(contents):
            # File contains corruptions, backup and repair
            self.make_repair_backup(file_path, contents)

            # Repair lost slashes, then fix all corrupted escapes
            contents = self.slash_repair_re.sub(r'\1/\2', contents)
            contents, subs_count = self.damage_re.subn(r'\\u\1', contents)

            if subs_count < 1:
                # Nothing to do!
                log.info("No recovery substitutions on {}".format(file_path))
            else:
                # De- and re-serialize the data to complete the repairs
                contents = json.dumps(
                    json.loads(contents), ensure_ascii=False, indent=1)

                # Save the repaired data back to the original file
                with open(file_path, "w", encoding="utf-8") as fout:
                    fout.write(contents)

                msg_log = "Repaired {} corruptions in file {}"
                msg_local = self._("Repaired {num} corruptions in file {path}")
                log.info(msg_log.format(subs_count, file_path))
                if hasattr(self.app, "window") and hasattr(
                        self.app.window, "statusBar"):
                    self.app.window.statusBar.showMessage(
                        msg_local.format(num=subs_count, path=file_path), 5000)

        # Process JSON data
        if path_mode == "absolute":
            # Convert any paths to absolute
            contents = self.convert_paths_to_absolute(file_path, contents)
        return json.loads(contents)
    except RuntimeError as ex:
        # Re-raise our own "empty file" error unchanged
        log.error(str(ex))
        raise
    except Exception as ex:
        msg = ("Couldn't load {} file: {}".format(self.data_type, ex))
        log.error(msg)
        raise Exception(msg) from ex
    # NOTE: removed an unreachable trailing `msg = ()` / log.warning / raise —
    # every path above either returns or raises, so it could never execute
    # (and would have raised with an empty tuple as the message if it had).
def add_watcher(self, watcher):
    """ Add a new watcher (which will invoke the updateStatusChanged()
    method each time a 'redo' or 'undo' action is available). """
    # `not in` instead of `not x in` (PEP 8); message matches the sibling
    # duplicate-watcher guard so both variants log identically.
    if watcher not in self.statusWatchers:
        self.statusWatchers.append(watcher)
    else:
        log.warning("Cannot add existing watcher: {}".format(str(watcher)))
def json(self, is_array=False, only_value=False):
    """ Get the JSON string representing this UpdateAction """
    # Assemble the serializable payload
    if only_value:
        data_dict = copy.deepcopy(self.values)
    else:
        data_dict = {
            "type": self.type,
            "key": self.key,
            "value": copy.deepcopy(self.values),
            "partial": self.partial_update,
            "old_values": copy.deepcopy(self.old_values),
        }

    # Strip any nested 'history' keys so loading a project dict never
    # embeds "history" inside "history".
    try:
        for section_name in ("value", "old_values"):
            section = data_dict.get(section_name)
            if isinstance(section, dict):
                section.pop("history", None)
    except Exception as ex:
        log.warning('Failed to clear history attribute from undo/redo data. {}'.format(ex))

    # Root object: JSON array when requested, otherwise a plain object
    update_action_dict = [data_dict] if is_array else data_dict

    # Serialize as JSON
    return json.dumps(update_action_dict)
def add_file(self, filepath):
    """Add *filepath* to the project as an image File; return the File.

    Returns the existing File record if the path is already in the project
    (e.g. "openshot-qt-git/src/emojis/color/svg/1F595-1F3FE.svg").
    """
    app = get_app()
    _ = app._tr

    # Reuse an existing File record for this path, if present
    existing = File.get(path=filepath)
    if existing:
        return existing

    # libopenshot will try multiple readers to open the file
    clip = openshot.Clip(filepath)
    try:
        # Get the JSON for the clip's internal reader
        file_data = json.loads(clip.Reader().Json())

        # These assets are always treated as still images
        file_data["media_type"] = "image"

        # Save new file to the project data
        new_file = File()
        new_file.data = file_data
        new_file.save()
        return new_file
    except Exception as ex:
        # Log exception
        log.warning("Failed to import file: {}".format(str(ex)))
def parse_new_changelog(changelog_path):
    """Parse changelog data from specified new-format file.

    Tries UTF-8 then UTF-16 encodings; returns a list of dicts with keys
    hash/date/subject/author, or None on a missing file or parse error.
    """
    if not os.path.exists(changelog_path):
        return None
    changelog_list = None
    for encoding_name in ('utf_8', 'utf_16'):
        try:
            with codecs.open(changelog_path, 'r', encoding=encoding_name) as changelog_file:
                # Fields: "- <40-hex hash> <YYYY-MM-DD> <subject> [<author>]"
                matches = re.findall(
                    r"^-\s?([0-9a-f]{40})\s(\d{4,4}-\d{2,2}-\d{2,2})\s(.*)\s\[(.*)\]\s*$",
                    changelog_file.read(), re.MULTILINE)
            log.debug("Parsed {} changelog lines from {}".format(len(matches), changelog_path))
            changelog_list = [{
                "hash": entry[0],
                "date": entry[1],
                "subject": entry[2],
                "author": entry[3],
            } for entry in matches]
            # Success: stop here instead of re-reading with the next encoding
            # (the old code fell through and attempted utf_16 even after a
            # successful utf_8 parse) — matches parse_changelog()'s behavior.
            break
        except UnicodeError:
            log.debug('Failed to parse log file %s with encoding %s' % (changelog_path, encoding_name))
            continue
        except Exception:
            log.warning("Parse error reading {}".format(changelog_path), exc_info=1)
            return None
    return changelog_list
def process_urls(self, qurl_list):
    """Recursively process QUrls from a QDropEvent"""
    import_quietly = False
    media_paths = []

    for uri in qurl_list:
        filepath = uri.toLocalFile()
        if not os.path.exists(filepath):
            continue
        # A dropped project file short-circuits the media import entirely
        if filepath.endswith(".osp") and os.path.isfile(filepath):
            # Auto load project passed as argument
            get_app().window.OpenProjectSignal.emit(filepath)
            return True
        if os.path.isdir(filepath):
            # Bulk directory drops are imported without per-file prompts
            import_quietly = True
            log.info("Recursively importing {}".format(filepath))
            try:
                for root, _, names in os.walk(filepath):
                    media_paths.extend(os.path.join(root, name) for name in names)
            except OSError:
                log.warning("Directory recursion failed", exc_info=1)
        elif os.path.isfile(filepath):
            media_paths.append(filepath)

    if not media_paths:
        return

    # Import all new media files in sorted order
    media_paths.sort()
    log.debug("Importing file list: {}".format(media_paths))
    self.add_files(media_paths, quiet=import_quietly)
def startDrag(self, event):
    """ Override startDrag method to display custom icon """
    # Get first column indexes for all selected rows
    selected = self.selectionModel().selectedRows(0)

    # Fall back to the first selected row when nothing is "current"
    current = self.selectionModel().currentIndex()
    if not current.isValid() and selected:
        current = selected[0]
    if not current.isValid():
        # We can't find anything to drag
        log.warning("No draggable items found in model!")
        return False

    # Get icon from column 0 on same row as current item
    icon = current.sibling(current.row(), 0).data(Qt.DecorationRole)

    # Start drag operation
    drag = QDrag(self)
    drag.setMimeData(self.model().mimeData(selected))
    drag.setPixmap(icon.pixmap(QSize(self.drag_item_size, self.drag_item_size)))
    # QPoint requires int coordinates; `/ 2` produced a float, which raises
    # TypeError in PyQt5 — use integer division instead.
    drag.setHotSpot(QPoint(self.drag_item_size // 2, self.drag_item_size // 2))
    drag.exec_()
def send_metric(params):
    """Send anonymous metric over HTTP for tracking"""
    # Queue first; transmission only happens when the user has opted in
    metric_queue.append(params)

    # Check if the user wants to send metrics and errors
    if not s.get("send_metrics"):
        return

    for metric_params in metric_queue:
        url = "https://www.google-analytics.com/collect?%s" % urllib.parse.urlencode(metric_params)
        # Send metric HTTP data
        try:
            requests.get(url, headers={"user-agent": user_agent})
        except Exception:
            log.warning("Failed to track metric", exc_info=1)
        # Wait a moment, so we don't spam the requests
        time.sleep(0.25)

    # All metrics have been sent (or attempted to send) — clear the queue
    metric_queue.clear()
def add_watcher(self, watcher):
    """ Add a new watcher (which will invoke the updateStatusChanged()
    method each time a 'redo' or 'undo' action is available). """
    # Guard clause: refuse duplicates, logging which watcher was rejected
    if watcher in self.statusWatchers:
        log.warning("Cannot add existing watcher: {}".format(str(watcher)))
        return
    self.statusWatchers.append(watcher)
def load_json(self, value):
    """ Load this UpdateAction from a JSON string """
    update_action_dict = json.loads(value, strict=False)

    # Copy each serialized field onto this action
    self.type = update_action_dict.get("type")
    self.key = update_action_dict.get("key")
    self.values = update_action_dict.get("value")
    self.old_values = update_action_dict.get("old_values")
    self.partial_update = update_action_dict.get("partial")

    # Always remove 'history' key (if found). This prevents nested
    # "history" attributes when a project dict is loaded.
    try:
        for payload in (self.values, self.old_values):
            if isinstance(payload, dict):
                payload.pop("history", None)
    except Exception as ex:
        log.warning(
            'Failed to clear history attribute from undo/redo data. {}'.
            format(ex))
def add_listener(self, listener, index=-1):
    """ Add a new listener (which will invoke the changed(action) method
    each time an UpdateAction is available). """
    # `not in` instead of `not x in` (PEP 8); message matches the sibling
    # duplicate-listener guard so both variants log identically.
    if listener not in self.updateListeners:
        if index <= -1:
            # Add listener to end of list
            self.updateListeners.append(listener)
        else:
            # Insert listener at index
            self.updateListeners.insert(index, listener)
    else:
        log.warning("Cannot add existing listener: {}".format(str(listener)))
def onError(self, error):
    """Log a player error and, outside unit-test mode, warn the user."""
    log.warning('Player error: %s', error)

    # Get translation object
    _ = get_app()._tr

    # Only JUCE audio errors bubble up here now
    if get_app().window.mode == "unittest":
        return
    QMessageBox.warning(
        self.parent,
        _("Audio Error"),
        _("Please fix the following error and restart OpenShot\n%s") % error)
def testHardwareDecode(self, decoder, decoder_card="0"):
    """Test specific settings for hardware decode, so the UI can remove unsupported options.

    Returns True when the decoder/card pair successfully decodes a known
    sample frame; successful results are cached in self.hardware_tests_cards.
    """
    is_supported = False
    example_media = os.path.join(info.RESOURCES_PATH, "hardware-example.mp4")

    # Persist decoder card results
    if decoder_card not in self.hardware_tests_cards:
        # Init new decoder card list
        self.hardware_tests_cards[decoder_card] = []
    if int(decoder) in self.hardware_tests_cards.get(decoder_card):
        # Test already run and succeeded
        return True

    # Keep track of previous settings so we can restore them afterwards
    current_decoder = openshot.Settings.Instance().HARDWARE_DECODER
    current_decoder_card = openshot.Settings.Instance().HW_DE_DEVICE_SET

    try:
        # Temp override hardware settings (to test them)
        openshot.Settings.Instance().HARDWARE_DECODER = int(decoder)
        openshot.Settings.Instance().HW_DE_DEVICE_SET = int(decoder_card)

        # Find reader
        clip = openshot.Clip(example_media)
        reader = clip.Reader()

        # Open reader
        reader.Open()

        # Test decoded pixel values for a valid decode (based on hardware-example.mp4)
        if reader.GetFrame(0).CheckPixel(0, 0, 2, 133, 255, 255, 5):
            is_supported = True
            self.hardware_tests_cards[decoder_card].append(int(decoder))
        else:
            log.warning(
                "CheckPixel failed testing hardware decoding in preferences (i.e. wrong color found): %s-%s"
                % (decoder, decoder_card))
        reader.Close()
        clip.Close()
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt propagate
        log.warning(
            "Exception trying to test hardware decoding in preferences (this is expected): %s-%s"
            % (decoder, decoder_card))

    # Resume current settings (runs on both success and failure paths)
    openshot.Settings.Instance().HARDWARE_DECODER = current_decoder
    openshot.Settings.Instance().HW_DE_DEVICE_SET = current_decoder_card
    return is_supported
def add_listener(self, listener, index=-1):
    """ Add a new listener (which will invoke the changed(action) method
    each time an UpdateAction is available). """
    # Guard clause: refuse duplicates, logging which listener was rejected
    if listener in self.updateListeners:
        log.warning("Cannot add existing listener: {}".format(str(listener)))
        return
    if index <= -1:
        # Negative index: append to the end of the list
        self.updateListeners.append(listener)
    else:
        # Otherwise insert at the requested position
        self.updateListeners.insert(index, listener)
def read_from_file(self, file_path):
    """ Load JSON settings from a file.

    Returns the parsed data; raises Exception if the file is missing,
    unreadable, invalid JSON, or empty.
    """
    try:
        # Open with the path as str and an explicit UTF-8 content encoding.
        # The old code encoded the *path* to bytes (unnecessary, and broken
        # on Windows) and relied on the locale codec for the contents.
        with open(file_path, 'r', encoding='utf-8') as f:
            contents = f.read()
        if contents:
            return json.loads(contents)
    except Exception as ex:
        msg = ("Couldn't load {} file: {}".format(self.data_type, ex))
        log.error(msg)
        raise Exception(msg)
    # Reached only when the file was read but empty
    msg = ("Couldn't load {} file, no data.".format(self.data_type))
    log.warning(msg)
    raise Exception(msg)
def read_from_file(self, file_path, path_mode="ignore"):
    """ Load JSON settings from a file.

    path_mode="absolute" rewrites stored relative paths to absolute ones
    before parsing. Raises Exception on read/parse failure or empty file.
    """
    try:
        # Explicit UTF-8 instead of the locale-dependent default codec,
        # consistent with the other read_from_file implementation.
        with open(file_path, 'r', encoding='utf-8') as f:
            contents = f.read()
        if contents:
            if path_mode == "absolute":
                # Convert any paths to absolute
                contents = self.convert_paths_to_absolute(file_path, contents)
            return json.loads(contents, strict=False)
    except Exception as ex:
        msg = ("Couldn't load {} file: {}".format(self.data_type, ex))
        log.error(msg)
        raise Exception(msg)
    # Reached only when the file was read but empty
    msg = ("Couldn't load {} file, no data.".format(self.data_type))
    log.warning(msg)
    raise Exception(msg)
def delayed_fps_callback(self):
    """Callback for fps/profile changed event timer (to delay the timeline
    mapping so we don't spam libopenshot)"""
    # Stop timer
    self.delayed_fps_timer.stop()

    # Guard BEFORE dereferencing: the original computed
    # self.timeline.info.fps first, which raised if timeline was unset
    # even though the very next line checked `if self.timeline`.
    if not self.timeline:
        return

    # Calculate fps
    fps_double = self.timeline.info.fps.ToDouble()

    # Apply mapping if valid fps detected (anything larger than 300 fps is considered invalid)
    if fps_double <= 300.0:
        log.info("Valid framerate detected, sending to libopenshot: %s" % fps_double)
        self.timeline.ApplyMapperToClips()
    else:
        log.warning(
            "Invalid framerate detected, not sending it to libopenshot: %s" % fps_double)
def color_selected(self, newColor):
    """Callback when the user chooses a color in the dialog"""
    if not self._color_scratchpad:
        log.warning("ColorPicker callback called without parameter to set")
        return
    (widget, param) = self._color_scratchpad

    # Ignore cancelled/invalid selections
    if not newColor or not newColor.isValid():
        return

    widget.setStyleSheet("background-color: {}".format(newColor.name()))
    self.params[param["name"]] = [
        newColor.redF(), newColor.greenF(), newColor.blueF()
    ]
    # Default to "" so a param whose "name" is None can't make the `in`
    # test raise TypeError (str `in` None is an error).
    if "diffuse_color" in param.get("name", ""):
        self.params[param["name"]].append(newColor.alphaF())

    log.info('Animation param %s set to %s', param["name"], newColor.name())
def run(self):
    """Thread body: configure libopenshot's ZmqLogger, then relay every
    debug message it publishes (over a ZeroMQ SUB socket) into our log
    until self.running is cleared."""
    # Running
    self.running = True

    # Get settings
    s = get_app().get_settings()

    # Get port from settings
    port = s.get("debug-port")
    debug_enabled = s.get("debug-mode")

    # Set port on ZmqLogger singleton (publisher side, inside libopenshot)
    openshot.ZmqLogger.Instance().Connection("tcp://*:%s" % port)

    # Set filepath for ZmqLogger also
    openshot.ZmqLogger.Instance().Path(
        os.path.join(info.USER_PATH, 'libopenshot.log'))

    # Enable / Disable logger
    openshot.ZmqLogger.Instance().Enable(debug_enabled)

    # Socket to talk to server: subscribe to ALL messages (empty topic filter)
    self.context = zmq.Context()
    self.socket = self.context.socket(zmq.SUB)
    self.socket.setsockopt_string(zmq.SUBSCRIBE, '')
    poller = zmq.Poller()
    poller.register(self.socket, zmq.POLLIN)

    log.info("Connecting to libopenshot with debug port: %s" % port)
    self.socket.connect("tcp://localhost:%s" % port)

    while self.running:
        msg = None

        # Receive all debug message sent from libopenshot (if any).
        # poll(1000) bounds the wait to 1s so self.running is re-checked
        # regularly and the thread can shut down promptly.
        try:
            socks = dict(poller.poll(1000))
            if socks and socks.get(self.socket) == zmq.POLLIN:
                msg = self.socket.recv(zmq.NOBLOCK)
            if msg:
                log.info(msg.strip().decode('UTF-8'))
        except Exception as ex:
            # Socket/decoding errors are logged but never kill the loop
            log.warning(ex)
def accept(self):
    """Validate the entered credentials; on success emit the registered
    user id and close the dialog, otherwise show an error and let the
    user retry."""
    login = self.edt_login.text()
    password = self.edt_password.text()
    app.settings.setValue("registration/login", login)

    # Lazily (re)open the database connection if needed
    if not app.db.is_open():
        app.db.open()
    if not app.db.is_open():
        return

    user_id = app.db.get_user_id(login, password)
    if user_id:
        app.user_is_registered.emit(user_id)
        QDialog.accept(self)
        return

    # Wrong credentials: log, reset the password field and warn the user
    log.warning('Введен неправильный логин или пароль')
    self.edt_password.clear()
    self.edt_password.setFocus()
    QMessageBox.critical(
        self,
        "Ошибка входа в систему",
        "Имя пользователя или пароль неверны.",
        QMessageBox.Close)
def parse_changelog(changelog_path):
    """Read changelog entries from provided file path."""
    if not os.path.exists(changelog_path):
        return None
    changelog_list = []
    # Attempt to open changelog with utf-8, and then utf-16 (for unix / windows support)
    for encoding_name in ('utf_8', 'utf_16'):
        try:
            with codecs.open(
                changelog_path, 'r', encoding=encoding_name
            ) as changelog_file:
                # Fixed-width columns: hash, date, author, then subject
                changelog_list.extend({
                    'hash': line[:9].strip(),
                    'date': line[9:20].strip(),
                    'author': line[20:45].strip(),
                    'subject': line[45:].strip(),
                } for line in changelog_file)
            break
        except Exception:
            log.warning('Failed to parse log file %s with encoding %s' % (changelog_path, encoding_name))
    return changelog_list
def run_js(self, code, callback=None, retries=0):
    """Run JS code async and optionally have a callback for response"""
    # Defer execution until the JS document.ready event has fired
    if not self.document_is_ready:
        if retries == 0:
            # Log the script contents, the first time
            log.debug(
                "run_js() called before document ready event. Script queued: %s",
                code)
        elif retries % 5 == 0:
            log.warning(
                "WebKit backend still not ready after %d retries.", retries)
        else:
            log.debug("Script queued, %d retries so far", retries)
        # Re-attempt shortly, bumping the retry counter
        QTimer.singleShot(200, partial(self.run_js, code, callback, retries + 1))
        return None

    # Execute JS code
    result = self.page().mainFrame().evaluateJavaScript(code)
    if callback:
        # Pass output to callback
        callback(result)
        return None
    return result
def GenerateThumbnail(file_path, thumb_path, thumbnail_frame, width, height, mask, overlay):
    """Create thumbnail image, and check for rotate metadata (if any)"""
    # Create a clip object and get the reader
    clip = openshot.Clip(file_path)
    reader = clip.Reader()

    # Open reader
    reader.Open()

    # Get the 'rotate' metadata (if any)
    rotate = 0.0
    rotate_data = None
    try:
        if reader.info.metadata.count("rotate"):
            rotate_data = reader.info.metadata.find("rotate").value()[1]
            rotate = float(rotate_data)
    except ValueError as ex:
        log.warning("Could not parse rotation value {}: {}".format(
            rotate_data, ex))
    except Exception:
        log.warning(
            "Error reading rotation metadata from {}".format(file_path),
            exc_info=1)

    # Create thumbnail folder (if needed). makedirs handles nested paths and
    # exist_ok avoids the check-then-create race of the old exists()+mkdir().
    parent_path = os.path.dirname(thumb_path)
    if parent_path:
        os.makedirs(parent_path, exist_ok=True)

    # Save thumbnail image and close readers
    reader.GetFrame(thumbnail_frame).Thumbnail(
        thumb_path, width, height, mask, overlay, "#000", False, "png", 85,
        rotate)
    reader.Close()
    clip.Close()
def create_clip(context, track):
    """Create a new clip based on this context dict

    *context* carries the source path, "AX" video/audio sub-contexts with
    timecodes, and optional volume/opacity keyframe lists; *track* supplies
    the target layer. Saves a File record (if new) and a Clip record.
    """
    app = get_app()
    _ = app._tr

    # Get FPS info
    fps_num = app.project.get("fps").get("num", 24)
    fps_den = app.project.get("fps").get("den", 1)
    fps_float = float(fps_num / fps_den)

    # Get clip path (and prompt user if path not found)
    clip_path, is_modified, is_skipped = find_missing_file(
        context.get("clip_path", ""))
    if is_skipped:
        # User chose to skip this missing file; create nothing
        return

    # Get video context
    video_ctx = context.get("AX", {}).get("V", {})
    audio_ctx = context.get("AX", {}).get("A", {})

    # Check for this path in our existing project data
    file = File.get(path=clip_path)

    # Load filepath in libopenshot clip object (which will try multiple readers to open it)
    clip_obj = openshot.Clip(clip_path)

    if not file:
        # Get the JSON for the clip's internal reader
        try:
            reader = clip_obj.Reader()
            file_data = json.loads(reader.Json())

            # Determine media type
            if file_data["has_video"] and not is_image(file_data):
                file_data["media_type"] = "video"
            elif file_data["has_video"] and is_image(file_data):
                file_data["media_type"] = "image"
            elif file_data["has_audio"] and not file_data["has_video"]:
                file_data["media_type"] = "audio"

            # Save new file to the project data
            file = File()
            file.data = file_data

            # Save file
            file.save()
        except:
            log.warning('Error building File object for %s' % clip_path,
                        exc_info=1)

    # NOTE(review): if the reader failed above, `file` is still None here and
    # the attribute access below will raise AttributeError — confirm whether
    # that path can occur in practice.
    if (file.data["media_type"] == "video" or
            file.data["media_type"] == "image"):
        # Determine thumb path
        thumb_path = os.path.join(info.THUMBNAIL_PATH,
                                  "%s.png" % file.data["id"])
    else:
        # Audio file
        thumb_path = os.path.join(info.PATH, "images", "AudioThumbnail.png")

    # Create Clip object
    clip = Clip()
    clip.data = json.loads(clip_obj.Json())
    clip.data["file_id"] = file.id
    clip.data["title"] = context.get("clip_path", "")
    # Fall back to a very high layer number when the track has none
    clip.data["layer"] = track.data.get("number", 1000000)

    if video_ctx and not audio_ctx:
        # Only video
        clip.data["position"] = timecodeToSeconds(
            video_ctx.get("timeline_position", "00:00:00:00"), fps_num,
            fps_den)
        clip.data["start"] = timecodeToSeconds(
            video_ctx.get("clip_start_time", "00:00:00:00"), fps_num, fps_den)
        clip.data["end"] = timecodeToSeconds(
            video_ctx.get("clip_end_time", "00:00:00:00"), fps_num, fps_den)
        # Constant keyframe muting the audio channel
        clip.data["has_audio"] = {
            "Points": [{
                "co": {
                    "X": 1.0,
                    "Y": 0.0  # Disable audio
                },
                "interpolation": 2
            }]
        }
    elif audio_ctx and not video_ctx:
        # Only audio
        clip.data["position"] = timecodeToSeconds(
            audio_ctx.get("timeline_position", "00:00:00:00"), fps_num,
            fps_den)
        clip.data["start"] = timecodeToSeconds(
            audio_ctx.get("clip_start_time", "00:00:00:00"), fps_num, fps_den)
        clip.data["end"] = timecodeToSeconds(
            audio_ctx.get("clip_end_time", "00:00:00:00"), fps_num, fps_den)
        # Constant keyframe hiding the video channel
        clip.data["has_video"] = {
            "Points": [{
                "co": {
                    "X": 1.0,
                    "Y": 0.0  # Disable video
                },
                "interpolation": 2
            }]
        }
    else:
        # Both video and audio: take timing from the video context
        clip.data["position"] = timecodeToSeconds(
            video_ctx.get("timeline_position", "00:00:00:00"), fps_num,
            fps_den)
        clip.data["start"] = timecodeToSeconds(
            video_ctx.get("clip_start_time", "00:00:00:00"), fps_num, fps_den)
        clip.data["end"] = timecodeToSeconds(
            video_ctx.get("clip_end_time", "00:00:00:00"), fps_num, fps_den)

    # Add volume keyframes (X is a frame number: seconds * fps)
    if context.get("volume"):
        clip.data["volume"] = {"Points": []}
        for keyframe in context.get("volume", []):
            clip.data["volume"]["Points"].append({
                "co": {
                    "X": round(
                        timecodeToSeconds(keyframe.get("time", 0.0), fps_num,
                                          fps_den) * fps_float),
                    "Y": keyframe.get("value", 0.0)
                },
                "interpolation": 1  # linear
            })

    # Add alpha keyframes (X is a frame number: seconds * fps)
    if context.get("opacity"):
        clip.data["alpha"] = {"Points": []}
        for keyframe in context.get("opacity", []):
            clip.data["alpha"]["Points"].append({
                "co": {
                    "X": round(
                        timecodeToSeconds(keyframe.get("time", 0.0), fps_num,
                                          fps_den) * fps_float),
                    "Y": keyframe.get("value", 0.0)
                },
                "interpolation": 1  # linear
            })

    # Save clip
    clip.save()
# Manually and missing files (that were missed in the above step). These files are required # for certain distros (like Fedora, ShSuSE, Debian, etc...) #Also add Glib related files (required for some distros) for added_lib in [ARCHLIB + "libssl.so", ARCHLIB + "libcrypto.so", ARCHLIB + "libglib-2.0.so", ARCHLIB + "libgio-2.0.so", ARCHLIB + "libgmodule-2.0.so", ARCHLIB + "libthread-2.0.so", ]: if os.path.exists(added_lib): external_so_files.append((added_lib, os.path.basename(added_lib))) else: log.warning("{}: not found, skipping".format(added_lib)) elif sys.platform == "darwin": # Copy Mac specific files that cx_freeze misses # JPEG library for filename in find_files("/usr/local/Cellar/jpeg/8d/lib", ["libjpeg.8.dylib"]): external_so_files.append((filename, filename.replace("/usr/local/Cellar/jpeg/8d/lib/", ""))) #Add libresvg (if found) resvg_path = "/usr/local/lib/libresvg.dylib" if os.path.exists(resvg_path): external_so_files.append(resvg_path, resvg_path.replace("/usr/local/lib/", ""))) #copy Shvideo.py Python bindings src_files.append((os.path.join(PATH, "Shvideo.py"), "Shvideo.py")) src_files.append((os.path.join(PATH, "installer", "launch-mac.sh"), "launch-mac.sh"))
def get(self, key):
    """ Get copied value of a given key in data store.

    *key* is a list mixing string names and {"attr": value} match dicts
    (for selecting an element of a list). Returns a deep copy of the
    matched object, or None when the key is invalid or not found.
    """
    # Verify key is valid type
    if not isinstance(key, list):
        log.warning("get() key must be a list. key: {}".format(key))
        return None
    if not key:
        log.warning("Cannot get empty key.")
        return None

    # Get reference to internal data structure
    obj = self._data

    # Iterate through key list finding sub-objects either by name or by an
    # object match criteria such as {"id":"ADB34"}.
    for key_index in range(len(key)):
        key_part = key[key_index]

        # Key_part must be a string or dictionary
        if not isinstance(key_part, dict) and not isinstance(key_part, str):
            log.error("Unexpected key part type: {}".format(
                type(key_part).__name__))
            return None

        # Dict key_part against a list: each entry of key_part must equal
        # the corresponding property of an item; first match wins.
        if isinstance(key_part, dict) and isinstance(obj, list):
            found = False
            for item_index in range(len(obj)):
                item = obj[item_index]
                match = True
                for subkey in key_part.keys():
                    # Homogenize property names to lower case
                    subkey = subkey.lower()
                    # Missing key or differing value disqualifies the item
                    if not (subkey in item and item[subkey] == key_part[subkey]):
                        match = False
                        break
                if match:
                    found = True
                    obj = item
                    break
            # No match found, return None
            if not found:
                return None

        # String key_part: a plain dictionary lookup
        if isinstance(key_part, str):
            key_part = key_part.lower()

            # Check current obj type (should be dictionary).
            # log.warn is deprecated — use log.warning.
            if not isinstance(obj, dict):
                log.warning(
                    "Invalid project data structure. Trying to use a key on a "
                    "non-dictionary object. Key part: {} (\"{}\").\nKey: {}".format(
                        (key_index), key_part, key))
                return None

            # If next part of path isn't in current dictionary, return failure
            if key_part not in obj:
                log.warning(
                    "Key not found in project. Mismatch on key part {} (\"{}\").\nKey: {}".format(
                        (key_index), key_part, key))
                return None

            # Get the matching item
            obj = obj[key_part]

    # After processing each key, we've found object, return copy of it
    return copy.deepcopy(obj)
def _set(self, key, values=None, add=False, partial_update=False, remove=False):
    """ Store setting, but adding isn't allowed. All possible settings must be in default settings file.

    Walks *key* (a list of names and/or {"attr": value} match dicts) to the
    target object, then removes / appends / updates it. Returns a deep copy
    of the previous value (for history tracking), or None on bad keys.
    """
    log.info(
        "_set key: {} values: {} add: {} partial: {} remove: {}".format(
            key, values, add, partial_update, remove))
    parent, my_key = None, ""

    # Verify key is valid type
    if not isinstance(key, list):
        log.warning("_set() key must be a list. key: {}".format(key))
        return None
    if not key:
        log.warning("Cannot set empty key.")
        return None

    # Get reference to internal data structure
    obj = self._data

    # Iterate through key list finding sub-objects either by name or by an
    # object match criteria such as {"id":"ADB34"}.
    for key_index in range(len(key)):
        key_part = key[key_index]

        # Key_part must be a string or dictionary
        if not isinstance(key_part, dict) and not isinstance(key_part, str):
            log.error("Unexpected key part type: {}".format(
                type(key_part).__name__))
            return None

        # Dict key_part against a list: each entry of key_part must equal
        # the corresponding property of an item; first match wins.
        if isinstance(key_part, dict) and isinstance(obj, list):
            found = False
            for item_index in range(len(obj)):
                item = obj[item_index]
                match = True
                for subkey in key_part.keys():
                    # Homogenize property names to lower case
                    subkey = subkey.lower()
                    # Missing key or differing value disqualifies the item
                    if not (subkey in item and item[subkey] == key_part[subkey]):
                        match = False
                        break
                if match:
                    found = True
                    obj = item
                    my_key = item_index
                    break
            # No match found, return None
            if not found:
                return None

        # String key_part: a plain dictionary lookup
        if isinstance(key_part, str):
            key_part = key_part.lower()

            # Check current obj type (should be dictionary)
            if not isinstance(obj, dict):
                return None

            # If next part of path isn't in current dictionary, return failure.
            # log.warn is deprecated — use log.warning.
            if key_part not in obj:
                log.warning(
                    "Key not found in project. Mismatch on key part {} (\"{}\").\nKey: {}".format(
                        (key_index), key_part, key))
                return None

            # Get sub-object based on part key as new object, continue to next part
            obj = obj[key_part]
            my_key = key_part

        # Set parent to the last set obj (if not final iteration)
        if key_index < (len(key) - 1) or key_index == 0:
            parent = obj

    # After processing each key, we've found object and parent, return former value/s on update
    ret = copy.deepcopy(obj)

    # Apply the correct action to the found item
    if remove:
        del parent[my_key]
    else:
        # For adds to list perform an insert to index or the end if not specified
        if add and isinstance(parent, list):
            parent.append(values)
        elif isinstance(values, dict):
            # Update existing dictionary value
            obj.update(values)
        else:
            # Update root string
            self._data[my_key] = values

    # Return the previous value to the matching item (used for history tracking)
    return ret
def add_file(self, filepath):
    """Import *filepath* into the project as a File record.

    Detects the media type, expands image sequences into a single
    pattern-based "video" File, and shows an error dialog on failure.
    Returns True on success, False on failure.
    """
    filename = os.path.basename(filepath)

    # Add file into project
    app = get_app()
    _ = get_app()._tr

    # Check for this path in our existing project data
    file = File.get(path=filepath)

    # If this file is already found, exit
    if file:
        return

    # Load filepath in libopenshot clip object (which will try multiple readers to open it)
    clip = openshot.Clip(filepath)

    # Get the JSON for the clip's internal reader
    try:
        reader = clip.Reader()
        file_data = json.loads(reader.Json())

        # Determine media type
        if file_data["has_video"] and not is_image(file_data):
            file_data["media_type"] = "video"
        elif file_data["has_video"] and is_image(file_data):
            file_data["media_type"] = "image"
        elif file_data["has_audio"] and not file_data["has_video"]:
            file_data["media_type"] = "audio"

        # Save new file to the project data
        file = File()
        file.data = file_data

        # Is this file an image sequence / animation?
        image_seq_details = self.get_image_sequence_details(filepath)
        if image_seq_details:
            # Update file with correct path
            folder_path = image_seq_details["folder_path"]
            # NOTE(review): file_name is assigned but never used below
            file_name = image_seq_details["file_path"]
            base_name = image_seq_details["base_name"]
            fixlen = image_seq_details["fixlen"]
            digits = image_seq_details["digits"]
            extension = image_seq_details["extension"]

            # Frame-number placeholder: "%d" for variable width,
            # "%0Nd" for zero-padded fixed width
            if not fixlen:
                zero_pattern = "%d"
            else:
                zero_pattern = "%%0%sd" % digits

            # Generate the regex pattern for this image sequence
            pattern = "%s%s.%s" % (base_name, zero_pattern, extension)

            # Split folder name
            folderName = os.path.basename(folder_path)
            if not base_name:
                # Give alternate name (sequence has no base name to show)
                file.data["name"] = "%s (%s)" % (folderName, pattern)

            # Load image sequence (to determine duration and video_length)
            image_seq = openshot.Clip(os.path.join(folder_path, pattern))

            # Update file details: the sequence is treated as one video
            file.data["path"] = os.path.join(folder_path, pattern)
            file.data["media_type"] = "video"
            file.data["duration"] = image_seq.Reader().info.duration
            file.data["video_length"] = image_seq.Reader().info.video_length

        # Save file
        file.save()

        # Reset list of ignored paths
        self.ignore_image_sequence_paths = []

        return True
    except Exception as ex:
        # Log exception
        log.warning("Failed to import file: {}".format(str(ex)))

        # Show message to user
        msg = QMessageBox()
        msg.setText(_("{} is not a valid video, audio, or image file.".format(filename)))
        msg.exec_()
        return False
def _set(self, key, values=None, add=False, partial_update=False, remove=False): """ Store setting, but adding isn't allowed. All possible settings must be in default settings file. """ log.info( "_set key: {} values: {} add: {} partial: {} remove: {}".format( key, values, add, partial_update, remove)) parent, my_key = None, "" # Verify key is valid type if not isinstance(key, list): log.warning("_set() key must be a list. key: {}".format(key)) return None if not key: log.warning("Cannot set empty key.") return None # Get reference to internal data structure obj = self._data # Iterate through key list finding sub-objects either by name or by an object match criteria such as {"id":"ADB34"}. for key_index in range(len(key)): key_part = key[key_index] # Key_part must be a string or dictionary if not isinstance(key_part, dict) and not isinstance( key_part, str): log.error("Unexpected key part type: {}".format( type(key_part).__name__)) return None # If key_part is a dictionary and obj is a list or dict, each key is tested as a property of the items in the current object # in the project data structure, and the first match is returned. if isinstance(key_part, dict) and isinstance(obj, list): # Overall status of finding a matching sub-object found = False # Loop through each item in object to find match for item_index in range(len(obj)): item = obj[item_index] # True until something disqualifies this as a match match = True # Check each key in key_part dictionary and if not found to be equal as a property in item, move on to next item in list for subkey in key_part.keys(): # Get each key in dictionary (i.e. "id", "layer", etc...) subkey = subkey.lower() # If object is missing the key or the values differ, then it doesn't match. 
if not (subkey in item and item[subkey] == key_part[subkey]): match = False break # If matched, set key_part to index of list or dict and stop loop if match: found = True obj = item my_key = item_index break # No match found, return None if not found: return None # If key_part is a string, homogenize to lower case for comparisons if isinstance(key_part, str): key_part = key_part.lower() # Check current obj type (should be dictionary) if not isinstance(obj, dict): return None # If next part of path isn't in current dictionary, return failure if not key_part in obj: log.warn( "Key not found in project. Mismatch on key part {} (\"{}\").\nKey: {}" .format((key_index), key_part, key)) return None # Get sub-object based on part key as new object, continue to next part obj = obj[key_part] my_key = key_part # Set parent to the last set obj (if not final iteration) if key_index < (len(key) - 1) or key_index == 0: parent = obj # After processing each key, we've found object and parent, return former value/s on update ret = copy.deepcopy(obj) # Apply the correct action to the found item if remove: del parent[my_key] else: # Add or Full Update # For adds to list perform an insert to index or the end if not specified if add and isinstance(parent, list): # log.info("adding to list") parent.append(values) # Otherwise, set the given index elif isinstance(values, dict): # Update existing dictionary value obj.update(values) else: # Update root string self._data[my_key] = values # Return the previous value to the matching item (used for history tracking) return ret
def get(self, key):
    """Get a deep copy of the value stored at the path described by *key*.

    :param key: list of path parts; each part is either a string key
        (matched case-insensitively) or a match-criteria dict such as
        ``{"id": "ADB34"}`` that selects the first matching item of a list.
    :returns: deep copy of the matched value, or ``None`` when the key is
        invalid or no match is found.
    """
    # Verify key is a valid, non-empty list
    if not isinstance(key, list):
        log.warning("get() key must be a list. key: {}".format(key))
        return None
    if not key:
        log.warning("Cannot get empty key.")
        return None

    # Get reference to internal data structure
    obj = self._data

    # Iterate through key list finding sub-objects either by name or by an
    # object match criteria such as {"id":"ADB34"}.
    for key_index in range(len(key)):
        key_part = key[key_index]

        # Key_part must be a string or dictionary
        if not isinstance(key_part, dict) and not isinstance(key_part, str):
            log.error("Unexpected key part type: {}".format(
                type(key_part).__name__))
            return None

        # If key_part is a dictionary and obj is a list, each key is tested
        # as a property of the items in the current object, and the first
        # match is selected.
        if isinstance(key_part, dict) and isinstance(obj, list):
            # Overall status of finding a matching sub-object
            found = False
            for item_index in range(len(obj)):
                item = obj[item_index]
                # True until something disqualifies this as a match
                match = True
                # Every key in the criteria dict must equal the same-named
                # property of the item (keys compared lower-cased)
                for subkey in key_part.keys():
                    subkey = subkey.lower()
                    if not (subkey in item and item[subkey] == key_part[subkey]):
                        match = False
                        break
                # If matched, continue from this item and stop loop
                if match:
                    found = True
                    obj = item
                    break
            # No match found, return None
            if not found:
                return None

        # If key_part is a string, homogenize to lower case for comparisons
        if isinstance(key_part, str):
            key_part = key_part.lower()

            # Check current obj type (should be dictionary)
            # (log.warn is deprecated — use warning())
            if not isinstance(obj, dict):
                log.warning(
                    "Invalid project data structure. Trying to use a key on a non-dictionary object. Key part: {} (\"{}\").\nKey: {}"
                    .format((key_index), key_part, key))
                return None

            # If next part of path isn't in current dictionary, return failure
            if key_part not in obj:
                log.warning(
                    "Key not found in project. Mismatch on key part {} (\"{}\").\nKey: {}"
                    .format((key_index), key_part, key))
                return None

            # Get the matching item
            obj = obj[key_part]

    # After processing each key, we've found object, return copy of it
    return copy.deepcopy(obj)