def get_user(self):
    """Load the current user from the database and refresh it from the server.

    On success, registers the user with Sentry and returns the User model.
    On auth failure or missing user, falls through to the login dialog
    (``self.exec_()``) and returns whatever user it produced (or None).
    """
    try:
        with Session() as session:
            self.user = session.query(User).one()
        if check_internet():
            self.user = AuthClient().populate_user_data(self.user)
        else:
            # Fix: in the original flattened code this warning ran
            # unconditionally; it only makes sense when we are offline
            # and must fall back to cached user data.
            logger.warning('Could not update user information')
        with Session() as session:
            session.add(self.user)
            session.commit()
    except AuthError:
        # Stored token rejected: pre-fill the login form with the known login.
        self.usernameEdit.setText(self.user.login)
        self.passwordEdit.setFocus()
    except NoResultFound:
        # No user in the database yet: present an empty login form.
        self.usernameEdit.setFocus()
    else:
        # Add the user id of the logged in user to Sentry logs
        add_user_to_sentry_logs()
        return self.user

    # Reached only on AuthError/NoResultFound: show the login dialog.
    self.exec_()
    if self.user:
        # Add the user id of the logged in user to Sentry logs
        add_user_to_sentry_logs()
        return self.user
def set_containing_folder(self, save_setting=False):
    """Prompt for a new parent directory for the OSF folder and apply it.

    :param bool save_setting: when True, copy the synced files to the new
        location and persist the change to the database.
    """
    with Session() as session:
        user = session.query(User).one()
    logger.warning('Changing containing folder')
    res = QFileDialog.getExistingDirectory(
        caption='Choose where to place OSF folder')
    if not res:
        # Dialog dismissed without a selection: nothing to change.
        return
    containing_folder = os.path.abspath(res)
    folder = os.path.join(containing_folder, 'OSF')
    if save_setting:
        logger.debug('Copy files into new OSF folder.')
        # copy the synced folders from the old to new location
        # so OSF doesn't think they were deleted
        copy_tree(user.folder, folder)
        user.folder = folder
    self.containingFolderTextEdit.setText(
        self._translate("Preferences", self.containing_folder))
    self.open_window(tab=Preferences.GENERAL)  # todo: dynamically update ui????
    self.containing_folder_updated_signal.emit(folder)
    if save_setting:
        with Session() as session:
            session.add(user)
            session.commit()
        self.update_sync_nodes()
def collect_all_remote(self):
    """Audit the remote storage of every synced node.

    Submits one `_collect_node_remote` task per node/component to a small
    thread pool; results accumulate in the returned dict. Nodes that cannot
    be fetched are appended to ``self._unreachable`` and skipped.
    """
    ret = {}
    with ThreadPoolExecutor(max_workers=5) as tpe:
        # first get top level nodes selected in settings
        with Session() as session:
            nodes = session.query(Node).filter(Node.sync)
        for node in nodes:
            try:
                remote_node = OSFClient().get_node(node.id)
            except Exception:
                # If the node can't be reached, skip auditing of this project and go on to the next node
                # TODO: The client should be made smart enough to check return code before parsing and yield a custom exception
                # TODO: The user should be notified about projects that failed to sync, and given a way to deselect them
                self._unreachable.append(node.id)
                logger.exception(
                    'Could not fetch Remote node {!r}. Marking as unreachable.'.format(node))
                continue
            remote_files = remote_node.get_storage(id='osfstorage')
            rel_path = os.path.join(node.rel_path, settings.OSF_STORAGE_FOLDER)
            tpe.submit(self._collect_node_remote, remote_files, ret, rel_path, tpe)
            try:
                stack = remote_node.get_children(lazy=False)
            except Exception:
                # Same unreachable handling as above, for the child listing.
                self._unreachable.append(node.id)
                logger.exception(
                    'Could not fetch Remote node {!r}\'s children. Marking as unreachable.'.format(node))
                continue
            while len(stack):
                remote_child = stack.pop(0)
                child_files = remote_child.get_storage(id='osfstorage')
                # RemoteSyncWorker's _preprocess_node guarantees a db entry exists
                # for each Node in the remote project hierarchy. Use the db Node's
                # path representation to ensure consistent path naming conventions.
                with Session() as session:
                    child_path = session.query(Node).filter(
                        Node.id == remote_child.id).one().rel_path
                tpe.submit(
                    self._collect_node_remote, child_files, ret,
                    os.path.join(child_path, settings.OSF_STORAGE_FOLDER), tpe)
                try:
                    stack = stack + remote_child.get_children(lazy=False)
                except Exception as e:
                    logger.exception(e)
                    continue
        # NOTE(review): reaches into ThreadPoolExecutor internals to wait for
        # queued work; relies on a private attribute — confirm on upgrade.
        tpe._work_queue.join()
    return ret
def handle_move_src_update(local, remote, local_events, remote_events):
    """Resolve a move that was detected both locally and remotely.

    Creates the remote destination directory locally, drops the three
    already-handled move events, then resolves every remaining event that is
    a direct child of the moved source or destination path (recursing into
    sub-folders). Finally forgets the moved source's database record.
    """
    os.makedirs(remote.dest_path, exist_ok=True)
    local_events.pop(local.src_path)
    remote_events.pop(remote.src_path)
    remote_events.pop(remote.dest_path)
    for child in set(local_events.keys()) | set(remote_events.keys()):
        # Only events under the moved source or destination tree matter.
        if not child.startswith(remote.src_path) and not child.startswith(remote.dest_path):
            continue
        # Skip anything deeper than a direct child of both trees.
        if os.path.sep in child.replace(remote.src_path, '', 1).rstrip(os.path.sep) \
                and os.path.sep in child.replace(remote.dest_path, '', 1).rstrip(os.path.sep):
            continue
        llocal = local_events.pop(child, None)
        lremote = remote_events.pop(child, None)
        if child not in (getattr(llocal, 'src_path', None),
                         getattr(lremote, 'src_path', None)):
            continue
        if lremote and not llocal:
            lremote.operation().run()
        elif child.endswith(os.path.sep):
            # Folders recurse so their contents receive the same treatment.
            handle_move_src_update(llocal, lremote, local_events, remote_events)
    with Session() as session:
        session.delete(local.context.db)
        session.commit()
    return True
def populate_item_tree(self, nodes):
    """Fill the project tree widget with *nodes*, checking those currently synced.

    A node counts as explicitly selected when it is tracked in the database
    and its parent is not also tracked (i.e. it is a top-level selection).
    """
    _translate = QCoreApplication.translate
    self.selected_nodes = []
    with Session() as session:
        all_selected_nodes = [n.id for n in session.query(Node)]
        for n in session.query(Node):
            if n.parent_id not in all_selected_nodes and n.id not in self.selected_nodes:
                self.selected_nodes.append(n.id)
    for node in sorted(nodes, key=lambda n: n.title):
        tree_item = QTreeWidgetItem(self.treeWidget)
        checked = Qt.Checked if node.id in self.selected_nodes else Qt.Unchecked
        tree_item.setCheckState(self.PROJECT_SYNC_COLUMN, checked)
        tree_item.setText(
            self.PROJECT_NAME_COLUMN,
            _translate('Preferences', '{} - {}'.format(node.title, node.id)))
        self.tree_items.append((tree_item, node))
    self.treeWidget.resizeColumnToContents(self.PROJECT_SYNC_COLUMN)
    self.treeWidget.resizeColumnToContents(self.PROJECT_NAME_COLUMN)
    self.treeWidget.unsetCursor()
    self.treeWidget.setStyleSheet("background-color: white")
    self.changeFolderButton_2.setEnabled(True)
    self.pushButton.setEnabled(True)
    self.pushButton_2.setEnabled(True)
def update_sync_nodes(self):
    """Persist the tree widget's check states as the set of synced nodes.

    Checked nodes are created (or re-marked sync=True) in the database;
    unchecked nodes are deleted. Triggers an immediate background sync.
    """
    self.selected_nodes = []
    with Session() as session:
        user = session.query(User).one()
        for tree_item, node in self.tree_items:
            checked = tree_item.checkState(self.PROJECT_SYNC_COLUMN) == Qt.Checked
            try:
                db_node = session.query(Node).filter(Node.id == node.id).one()
            except NoResultFound:
                db_node = None
            if checked:
                self.selected_nodes.append(node.id)
                if db_node:
                    db_node.sync = True
                else:
                    session.add(Node(id=node.id, title=node.title,
                                     user=user, sync=True))
            elif db_node:
                # Unchecked and tracked: forget the node entirely.
                session.delete(db_node)
        session.commit()
    BackgroundHandler().sync_now()
    self.close()
def fork_file(local, remote, local_events, remote_events):
    """Handle a forked file: drop the pending remote event, forget the local
    database record, and kick off a full remote sync to re-fetch the file.
    """
    del remote_events[remote.dest_path]
    with Session() as session:
        # Fix: Session has no .remove(); deleting the record is what's
        # intended (matches handle_move_src_update's cleanup).
        session.delete(local.context.db)
        session.commit()
    # Imported lazily to avoid a circular import at module load time.
    from osfoffline.sync.remote import RemoteSyncWorker
    RemoteSyncWorker().sync_now()
def get_current_user():
    """Fetch the database object representing the currently active user.

    :return: A user object (raises exception if none found)
    :rtype: models.User
    :raises SQLAlchemyError: if zero or multiple user rows exist
    """
    with Session() as session:
        user = session.query(models.User).one()
    return user
def _run(self):
    """Create the remote folder on disk, record it in the database, and
    notify the user."""
    with Session() as session:
        db_parent = session.query(models.File).filter(
            models.File.id == self.remote.parent.id).one()
        # TODO folder and file with same name
        os.mkdir(os.path.join(db_parent.path, self.remote.name))
    DatabaseCreateFolder(
        OperationContext(remote=self.remote, node=self.node)).run()
    Notification().info('Downloaded Folder: {}'.format(self.db.pretty_path))
def initialize(self):
    """Run the initial sync pass over every explicitly synced node."""
    logger.info('Beginning initial sync')
    with Session() as session:
        nodes = session.query(Node).filter(Node.sync).all()
        for node in nodes:
            self._preprocess_node(node)
        # session.commit()
    # TODO No need for this check
    self._check()
    logger.info('Initial sync finished')
def collect_all_db(self):
    """Return {rel_path: Audit} for every File record, excluding files that
    belong to nodes marked unreachable during the remote pass."""
    if self._unreachable:
        logger.warning(
            'Not collecting database structure for unreachable nodes {}'.format(
                self._unreachable))
    with Session() as session:
        query = session.query(File).filter(~File.node_id.in_(self._unreachable))
        return {
            entry.rel_path: Audit(entry.id, entry.sha256, entry)
            for entry in query
        }
def _preprocess_node(self, node, *, delete=True):
    """Make the local database and folder tree mirror the remote hierarchy.

    Deletes *node* if it has been removed remotely; creates db records for
    any missing child components; prunes orphaned children; ensures every
    node's storage folder exists on disk, clearing stale File records first
    when *delete* is True and the folder is missing.
    """
    with Session() as session:
        nodes = [node]
        try:
            remote_node = OSFClient().get_node(node.id)
        except ClientLoadError as err:
            if err.status in (http.client.NOT_FOUND, http.client.GONE):
                # cascade should automagically delete children
                session.delete(node)
                session.commit()
                logger.info(
                    "Remote Node<{}> appears to have been deleted; will stop tracking and delete from local database".format(
                        node.id))
                return
            else:
                # TODO: maybe handle other statuses here
                raise
        stack = remote_node.get_children(lazy=False)
        self._orphan_children(node, stack)
        while len(stack):
            child = stack.pop(0)
            # Ensure the database contains a Node record for each node in the project heirarchy.
            # This must guarentee the remote/database representations of the project heirarchy are
            # fully congruent.
            # TODO: If we want to support syncing only subsets of the project heirarchy then some
            # additional logic could be added here to skip over certain nodes.
            try:
                db_child = session.query(Node).filter(
                    Node.id == child.id
                ).one()
            except NoResultFound:
                # Setting sync=False notes that the node is implicity synced
                parent = session.query(Node).filter(
                    Node.id == child.parent.id
                ).one()
                db_child = Node(
                    id=child.id,
                    title=child.title,
                    user=node.user,
                    parent_id=parent.id
                )
                session.add(db_child)
                session.commit()
            nodes.append(db_child)
            children = child.get_children(lazy=False)
            self._orphan_children(db_child, children)
            stack = stack + children
        for node in nodes:
            local = Path(os.path.join(node.path, settings.OSF_STORAGE_FOLDER))
            if delete and not local.exists():
                logger.warning('Clearing files for node {}'.format(node))
                session.query(File).filter(File.node_id == node.id).delete()
            os.makedirs(str(local), exist_ok=True)
def _run(self):
    """Refresh the existing db record's metadata from the remote folder."""
    parent_id = self.remote.parent.id if self.remote.parent else None
    self.db.name = self.remote.name
    self.db.kind = self.remote.kind
    self.db.provider = self.remote.provider
    self.db.user = get_current_user()
    self.db.parent_id = parent_id
    self.db.node_id = self.node.id
    with Session() as session:
        session.add(self.db)
        session.commit()
def _run(self):
    """Insert a new db record for the remote folder."""
    parent_id = self.remote.parent.id if self.remote.parent else None
    record = models.File(
        id=self.remote.id,
        name=self.remote.name,
        kind=self.remote.kind,
        provider=self.remote.provider,
        user=get_current_user(),
        parent_id=parent_id,
        node_id=self.node.id,
    )
    with Session() as session:
        session.add(record)
        session.commit()
def db(self):
    """Lazily resolve and cache the database File record for this context.

    Resolution prefers the local path (via ``utils.local_to_db``), falling
    back to a lookup by the remote file's id.
    """
    if self._db:
        return self._db
    if self._local:
        self._db = utils.local_to_db(
            self._local, self.node,
            is_folder=self._is_folder,
            check_is_folder=self._check_is_folder)
    elif self._remote:
        with Session() as session:
            self._db = session.query(models.File).filter(
                models.File.id == self._remote.id).one()
    return self._db
def open_folder(self):
    """Open the user's OSF folder in the platform's file browser."""
    with Session() as session:
        user = session.query(User).one()
    logger.debug("containing folder is :{}".format(user.folder))
    if not validate_containing_folder(user.folder):
        return
    if sys.platform == 'win32':
        os.startfile(user.folder)
    elif sys.platform == 'darwin':
        subprocess.Popen(['open', user.folder])
    else:
        try:
            subprocess.Popen(['xdg-open', user.folder])
        except OSError:
            # Best-effort: no opener available on this platform.
            pass
def local_to_db(local, node, *, is_folder=False, check_is_folder=True):
    """Map a local filesystem path to its database File record.

    Walks the node's file tree from its root, matching each path component
    by name. Returns None when the resolved record's path does not match
    *local* (or when the folder/file kind check fails).
    """
    with Session() as session:
        db = session.query(models.File).filter(
            models.File.parent == None,  # noqa
            models.File.node == node).one()
        parts = str(local).replace(node.path, '').split(os.path.sep)
        for part in parts:
            for child in db.children:
                if child.name == part:
                    db = child
        path_mismatch = db.path.rstrip(os.path.sep) != str(local).rstrip(os.path.sep)
        kind_mismatch = check_is_folder and db.is_folder != (local.is_dir() or is_folder)
        if path_mismatch or kind_mismatch:
            return None
        return db
def _reset_database(self):
    """Drop the database and seed it with a fake user plus the root project node."""
    drop_db()
    user = models.User(
        id='fake_user_id',
        full_name='fake full name',
        login='******',
        oauth_token='fake_personal_access_token',
        folder=str(self.root_dir),
    )
    node = models.Node(
        id=self.PROJECT_STRUCTURE[0]['id'],
        title=self.PROJECT_STRUCTURE[0]['name'],
        sync=True,
        user_id=user.id,
    )
    with Session() as session:
        session.add_all([user, node])
        session.commit()
def quit(self):
    """Flush pending user data and exit the application cleanly."""
    BackgroundHandler().stop()
    with Session() as session:
        try:
            user = session.query(User).one()
        except NoResultFound:
            # No user yet (e.g. quitting before first login); nothing to save.
            pass
        else:
            logger.debug('Saving user data')
            session.add(user)
            session.commit()
        # Fix: removed redundant session.close(); the `with Session()`
        # context manager already closes the session on exit.
    logger.info('Quitting application')
    QApplication.instance().quit()
def _run(self):
    """Refresh the db record's metadata, size, and content hashes from the
    remote file."""
    parent_id = self.remote.parent.id if self.remote.parent else None
    hashes = self.remote.extra['hashes']
    self.db.name = self.remote.name
    self.db.kind = self.remote.kind
    self.db.provider = self.remote.provider
    self.db.user = get_current_user()
    self.db.parent_id = parent_id
    self.db.node_id = self.node.id
    self.db.size = self.remote.size
    self.db.md5 = hashes['md5']
    self.db.sha256 = hashes['sha256']
    with Session() as session:
        session.add(self.db)
        session.commit()
def ensure_folder(self, user):
    """Ensure *user* has a valid OSF folder, prompting until one is chosen,
    then create it on disk and persist it."""
    containing_folder = os.path.dirname(user.folder or '')
    while not validate_containing_folder(containing_folder):
        logger.warning('Invalid containing folder: "{}"'.format(containing_folder))
        res = QFileDialog.getExistingDirectory(
            caption='Choose where to place OSF folder')
        # Do not accept an empty string (dialog box dismissed without selection)
        # FIXME: This fixes overt errors, but user gets folder picker endlessly
        # until they select a folder
        if res:
            containing_folder = os.path.abspath(res)
    with Session() as session:
        user.folder = os.path.join(containing_folder, 'OSF')
        os.makedirs(user.folder, exist_ok=True)
        session.add(user)
        session.commit()
def logout(self):
    """Stop background work, wipe local state, and restart the application."""
    BackgroundHandler().stop()
    OSFClient().stop()
    # Will probably wipe out everything :shrug:
    drop_db()
    # Clear any user-specific context data that would be sent to Sentry
    remove_user_from_sentry_logs()
    # if the preferences window is active, close it.
    if self._context_menu.preferences.isVisible():
        self._context_menu.preferences.close()
    with Session() as session:
        session.close()
    logger.info('Restart the application.')
    self.start()
def _orphan_children(self, node, remote_children):
    """It's a hard world out there...

    Delete the database record for any descendant not mirrored remotely.
    Via cascade this will also remove any descedant Nodes and Files. The
    effect of this action is that any files associated with a child Node
    locally for which the remote Node has been deleted are explicitly
    removed from OSFO's auditing and will be ignored.
    """
    children_ids = {c.id for c in remote_children}
    for record in node.children:
        if record.id in children_ids:
            # Still present remotely: recurse to check its own children.
            remote_child = OSFClient().get_node(record.id)
            self._orphan_children(record, remote_child.get_children(lazy=False))
        else:
            with Session() as session:
                session.delete(record)
                session.commit()
            logger.info("Deleted remotely deleted database Node<{}>".format(record.id))
def _run(self):
    """Insert a new db record for the remote file, including size and hashes."""
    parent_id = self.remote.parent.id if self.remote.parent else None
    hashes = self.remote.extra['hashes']
    record = models.File(
        id=self.remote.id,
        name=self.remote.name,
        kind=self.remote.kind,
        provider=self.remote.provider,
        user=get_current_user(),
        parent_id=parent_id,
        node_id=self.node.id,
        size=self.remote.size,
        md5=hashes['md5'],
        sha256=hashes['sha256'],
    )
    with Session() as session:
        session.add(record)
        session.commit()
def run(self):
    """Main loop of the remote sync worker.

    Sleeps up to REMOTE_CHECK_INTERVAL (or until sync_now is signalled),
    then runs a full remote sync pass: preprocess all synced nodes, drain
    the operation queue, run the audit check, and pause briefly so watchdog
    does not report the worker's own filesystem changes.
    """
    while not self.__stop.is_set():
        # Note: CHECK_INTERVAL must be < 24 hours
        logger.info('Sleeping for {} seconds'.format(settings.REMOTE_CHECK_INTERVAL))
        if self._sync_now_event.wait(timeout=settings.REMOTE_CHECK_INTERVAL):
            if self.__stop.is_set():
                break
            logger.info('Sleep interrupted, syncing now')
            self._sync_now_event.clear()
        logger.info('Beginning remote sync')
        LocalSyncWorker().ignore.set()
        # Ensure selected node directories exist and db entries created
        with Session() as session:
            nodes = session.query(Node).filter(Node.sync).all()
        for node in nodes:
            try:
                self._preprocess_node(node, delete=False)
            except OSError:
                # TODO: If the node folder cannot be created, what further actions must be taken before attempting to sync?
                # TODO: Should the error be user-facing?
                logger.exception('Error creating node directory for sync')
        # Session().commit()
        OperationWorker().join_queue()
        try:
            self._check()
        except Exception:
            # Fix: was a bare `except:`, which also swallowed SystemExit and
            # KeyboardInterrupt and could prevent the worker from stopping.
            # TODO: Add user-facing notification?
            msg = 'Error encountered in remote sync operation; will try again later'
            Notification().error(msg)
            logger.exception(msg)
        # We need to ignore modifications to the local filesystem made by the RemoteSyncWorker.
        # Since there can be a delay between when an operation is popped off the OperationWorker's
        # queue and when the event is actually captured by watchdog, this sleep tries to ensure the
        # watchdog observer does not capture any events triggered by the application itself.
        time.sleep(10)
        LocalSyncWorker().ignore.clear()
        logger.info('Finished remote sync')
    logger.info('Stopped RemoteSyncWorker')
def _run(self):
    """Delete the file/folder remotely and always drop its database record."""
    resp = OSFClient().request('DELETE', self.remote.raw['links']['delete'])
    with Session() as session:
        db_model = session.query(models.File).filter(
            models.File.id == self.remote.id).one()
        if resp.status_code == http.client.FORBIDDEN:
            permission_error_notification(
                db_model.kind.lower(), self.remote.name, self.node.title)
        else:
            assert resp.status_code == http.client.NO_CONTENT, resp
            Notification().info('Deleted {}: {} in {}'.format(
                db_model.kind.capitalize(), db_model.pretty_path, self.node.title))
    # Always delete the database record. There are two cases:
    # 1. User can write, and the remote file is deleted
    # 2. User can not write, but has deleted a local file. Forgetting the database record means that file
    #    will get re-synced later
    DatabaseDelete(OperationContext(db=db_model)).run()
def _run(self):
    """Move/rename the remote file to mirror a local move.

    Falls back to a remote create when the source is unknown (e.g. an
    ignored filename was renamed to a tracked one). On permission errors
    the local database record is dropped so the file re-syncs later.
    """
    if self.db is None and self.remote is None:
        # If a file is an ignored name and get renamed to a not ignored name
        # it will trigger a move but not exist anywhere else
        logger.debug(
            'Source file does not exist; will run create operation instead'
        )
        return RemoteCreateFile(self._dest_context).run()
    dest_parent = OperationContext(local=self._dest_context.local.parent)
    payload = {
        'action': 'move',
        'path': dest_parent.db.osf_path if dest_parent.db.parent else '/',
        'rename': self._dest_context.local.name,
        'resource': self._dest_context.node.id,
    }
    resp = OSFClient().request('POST', self.remote.raw['links']['move'], json=payload)
    data = resp.json()
    if resp.status_code == http.client.FORBIDDEN:
        permission_error_notification(
            'folder' if self._dest_context.local.is_dir else 'file',
            self._dest_context.local.name, self._dest_context.node.title)
        # Delete the database record.
        DatabaseDelete(OperationContext(db=self.db)).run()
    else:
        assert resp.status_code in (http.client.CREATED, http.client.OK), resp
        remote = osf_client.File(None, data['data'])
        # WB id are <provider>/<id>
        remote.id = remote.id.replace(remote.provider + '/', '')
        with Session() as session:
            remote.parent = session.query(models.File).filter(
                models.File.id == dest_parent.db.id).one()
        self.DB_CLASS(
            OperationContext(remote=remote, db=self.db,
                             node=remote.parent.node)).run()
def extract_node(path):
    """Given a file path extract the node id and return the loaded Database object

    Visual, how this method works:
    '/root/OSF/Node - 1244/Components/Node - 1482/OSF Storage/OSF Storage/OSF Storage/file.txt'
    '/OSF/Node - 1244/Components/Node - 1482/OSF Storage/OSF Storage/OSF Storage/file.txt'
    ['/OSF/Node - 1244/Components/Node - 1482/', '', '', '/file.txt']
    '/OSF/Node - 1244/Components/Node - 1482/'
    ['Node - 1244', 'Components', 'Node - 1482']
    'Node - 1482'
    1482
    """
    # Decomposed step by step (see the visual above).
    relative = path.replace(get_current_user().folder, '')
    before_storage = relative.split(settings.OSF_STORAGE_FOLDER)[0]
    last_component = before_storage.strip(os.path.sep).split(os.path.sep)[-1]
    node_id = last_component.split(' - ')[-1]
    try:
        with Session() as session:
            return session.query(
                models.Node).filter(models.Node.id == node_id).one()
    except NoResultFound:
        raise NodeNotFound(path)
def closeEvent(self, event):
    """Confirm discarding unsaved sync-selection changes, clear the user's
    first-boot flag, and apply the start-at-boot preference before closing."""
    checked_ids = {
        node.id
        for tree_item, node in self.tree_items
        if tree_item.checkState(self.PROJECT_SYNC_COLUMN) == Qt.Checked
    }
    if set(self.selected_nodes) != checked_ids:
        if self.selected_nodes and not self.tree_items:
            # Tree never populated: the user could not have changed anything.
            return
        reply = QMessageBox()
        reply.setText('Unsaved changes')
        reply.setIcon(QMessageBox.Warning)
        reply.setInformativeText(language.UNSAVED_CHANGES)
        default = reply.addButton('Exit without saving', QMessageBox.YesRole)
        reply.addButton('Review changes', QMessageBox.NoRole)
        reply.setDefaultButton(default)
        if reply.exec_() != 0:
            # User chose to review: keep the window open.
            return event.ignore()
        self.reset_tree_widget()
    with Session() as session:
        user = session.query(User).one_or_none()
        if user:
            user.first_boot = False
            session.add(user)
            session.commit()
    if ON_WINDOWS:
        if self.startAtBoot.isChecked():
            self.winRegistryRunKey.setValue('OSF Sync', sys.argv[0])
        else:
            self.winRegistryRunKey.remove('OSF Sync')
    elif ON_MAC:
        if self.startAtBoot.isChecked():
            with open(MAC_PLIST_FILE_PATH, 'w+') as file:
                file.write(MAC_PLIST_FILE_CONTENTS)
        elif os.path.exists(MAC_PLIST_FILE_PATH):
            os.remove(MAC_PLIST_FILE_PATH)
    event.accept()
def _run(self):
    """Download the remote file's contents to a temp file, move it into
    place atomically, update the db record, and notify the user."""
    with Session() as session:
        db_file = session.query(models.File).filter(
            models.File.id == self.remote.id).one()
        tmp_path = os.path.join(db_file.parent.path,
                                '.~tmp.{}'.format(db_file.name))
        resp = OSFClient().request('GET', self.remote.raw['links']['download'],
                                   stream=True)
        # Stream to a hidden temp file so a partial download never clobbers
        # the real file; rename only after the transfer completes.
        with open(tmp_path, 'wb') as fobj:
            for chunk in resp.iter_content(chunk_size=1024 * 64):
                if chunk:
                    fobj.write(chunk)
        shutil.move(tmp_path, db_file.path)
        DatabaseUpdateFile(
            OperationContext(db=db_file, remote=self.remote,
                             node=db_file.node)).run()
        # Fix: this operation downloads from the remote 'download' link; the
        # notification previously (incorrectly) said 'Uploaded File'.
        Notification().info('Downloaded File {} to {}'.format(
            db_file.pretty_path, self.node.title))