def processor_callback(change):
    """Apply a single queued change and report progress.

    Returns True when the change was processed, False on a recoverable
    failure. Interruptions and SDK-level errors are re-raised so the
    caller can abort the whole processing run.
    """
    try:
        # Abort right away if the job was paused or interrupted.
        if self.interrupt or not self.job_status_running:
            raise InterruptException()
        self.update_current_tasks()
        self.update_global_progress()
        # Pick the processor implementation matching the watcher mode.
        processor_class = StorageChangeProcessor if self.storage_watcher else ChangeProcessor
        change_processor = processor_class(change, self.current_store,
                                           self.job_config, self.system,
                                           self.sdk, self.db_handler,
                                           self.event_logger)
        change_processor.process_change()
        self.update_min_seqs_from_store(success=True)
        # Publish queue progress, then bump the shared one-cell counter.
        self.global_progress['queue_done'] = float(counter[0])
        counter[0] += 1
        self.update_current_tasks()
        self.update_global_progress()
        time.sleep(0.1)
        # Re-check after the pause: the job may have been stopped meanwhile.
        if self.interrupt or not self.job_status_running:
            raise InterruptException()
    except ProcessException as process_error:
        logging.error(process_error.message)
        return False
    except InterruptException as interruption:
        raise interruption
    except PydioSdkDefaultException as sdk_error:
        raise sdk_error
    except Exception as unexpected:
        logging.exception(unexpected.message)
        return False
    return True
def run(self):
    """Main synchronization loop of the job thread.

    Runs an initial snapshot-based indexing pass (when a watcher is
    configured), then loops until ``self.interrupt`` is set: each cycle
    gathers local and/or remote changes into an SQLite change store,
    reduces them (dedup, unnecessary-change and conflict detection),
    processes the remaining operations one by one, and reports progress
    through the event logger.
    """
    logger = EventLogger(self.configs_path)
    very_first = False
    if self.watcher:
        if self.watcher_first_run:
            # Relay watcher indexing progress back to the UI.
            def status_callback(status):
                logger.log_state(status, 'sync')
            self.init_global_progress()
            try:
                self.global_progress['status_indexing'] = 1
                logger.log_state(_('Checking changes since last launch...'), "sync")
                very_first = True
                self.watcher.check_from_snapshot(state_callback=status_callback)
            except DBCorruptedException as e:
                # Local snapshot DB is unusable: stop the job and wipe its data.
                self.stop()
                JobsLoader.Instance().clear_job_data(self.job_config.id)
                logging.error(e)
                return
            except Exception as e:
                # Indexing failed for another reason: pause instead of crashing.
                self.interrupt = True
                logger.log_state(_('Oops, error while indexing the local folder. Pausing the task.'), 'error')
                logging.error(e)
            self.watcher_first_run = False
        self.watcher.start()
    while not self.interrupt:
        try:
            # logging.info('Starting cycle with cycles local %i and remote %is' % (self.local_seq, self.remote_seq))
            self.processing_signals = {}
            self.init_global_progress()
            if very_first:
                self.global_progress['status_indexing'] = 1
            # Throttle cycles: wait online_timer (resp. offline_timer) between runs.
            interval = int(time.time() - self.last_run)
            if (self.online_status and interval < self.online_timer) or (not self.online_status and interval < self.offline_timer):
                time.sleep(self.event_timer)
                continue
            if not self.job_status_running:
                logging.debug("self.online_timer: %s" % self.online_timer)
                logger.log_state(_('Status: Paused'), "sync")
                self.sleep_offline()
                continue
            if self.job_config.frequency == 'time':
                # Time-scheduled job: only run inside the one-minute window
                # beginning at the configured start time.
                start_time = datetime.time(int(self.job_config.start_time['h']), int(self.job_config.start_time['m']))
                end_time = datetime.time(int(self.job_config.start_time['h']), int(self.job_config.start_time['m']), 59)
                now = datetime.datetime.now().time()
                if not start_time < now < end_time:
                    logger.log_state(_('Status: scheduled for %s') % str(start_time), "sync")
                    self.sleep_offline()
                    continue
                else:
                    logging.info("Now triggering synchro as expected at time " + str(start_time))
            if not self.system.check_basepath():
                # Local folder vanished (e.g. unmounted volume): retry later.
                log = _('Cannot find local folder! Did you disconnect a volume? Waiting %s seconds before retry') % self.offline_timer
                logging.error(log)
                logger.log_state(_('Cannot find local folder, did you disconnect a volume?'), "error")
                self.sleep_offline()
                continue
            if self.watcher:
                # Re-scan directories flagged during the previous cycle.
                for snap_path in self.marked_for_snapshot_pathes:
                    logging.info('LOCAL SNAPSHOT : loading snapshot for directory %s' % snap_path)
                    if self.interrupt or not self.job_status_running:
                        raise InterruptException()
                    self.watcher.check_from_snapshot(snap_path)
            # Load local and/or remote changes, depending on the direction
            from pydio.job.change_stores import SqliteChangeStore
            self.current_store = SqliteChangeStore(self.configs_path + '/changes.sqlite', self.job_config.filters['includes'], self.job_config.filters['excludes'])
            self.current_store.open()
            try:
                if self.job_config.direction != 'up':
                    logging.info('Loading remote changes with sequence %s' % str(self.remote_seq))
                    if self.remote_seq == 0:
                        logger.log_state(_('Gathering data from remote workspace, this can take a while...'), 'sync')
                        very_first = True
                    self.remote_target_seq = self.load_remote_changes_in_store(self.remote_seq, self.current_store)
                    self.current_store.sync()
                else:
                    # Upload-only job: no remote changes needed, just check connectivity.
                    self.remote_target_seq = 1
                    self.ping_remote()
            except RequestException as ce:
                # Distinguish "no internet at all" from "server unreachable".
                if not connection_helper.is_connected_to_internet():
                    error = _('No Internet connection detected! Waiting for %s seconds to retry') % self.offline_timer
                else:
                    error = _('Connection to server failed, server is probably down. Waiting %s seconds to retry') % self.offline_timer
                self.marked_for_snapshot_pathes = []
                logging.error(error)
                logger.log_state(error, "wait")
                self.sleep_offline()
                continue
            except Exception as e:
                error = 'Error while connecting to remote server (%s), waiting for %i seconds before retempting ' % (e.message, self.offline_timer)
                logging.error(error)
                logger.log_state(_('Error while connecting to remote server (%s)') % e.message, "error")
                self.marked_for_snapshot_pathes = []
                self.sleep_offline()
                continue
            self.online_status = True
            if not self.job_config.server_configs:
                self.job_config.server_configs = self.sdk.load_server_configs()
            self.sdk.set_server_configs(self.job_config.server_configs)
            if self.job_config.direction != 'down':
                logging.info('Loading local changes with sequence ' + str(self.local_seq))
                self.local_target_seq = self.db_handler.get_local_changes_as_stream(self.local_seq, self.current_store.flatten_and_store)
                self.current_store.sync()
            else:
                self.local_target_seq = 1
            if not connection_helper.internet_ok:
                # Refresh the cached connectivity flag.
                connection_helper.is_connected_to_internet()
            changes_length = len(self.current_store)
            if not changes_length:
                logging.info('No changes detected')
                self.update_min_seqs_from_store()
                self.exit_loop_clean(logger)
                very_first = False
                continue
            self.global_progress['status_indexing'] = 1
            # Reduction phase: shrink the change set before processing it.
            logging.info('Reducing changes')
            logger.log_state(_('Merging changes between remote and local, please wait...'), 'sync')
            logging.debug('Delete Copies')
            self.current_store.delete_copies()
            self.update_min_seqs_from_store()
            logging.debug('Dedup changes')
            self.current_store.dedup_changes()
            self.update_min_seqs_from_store()
            if not self.storage_watcher or very_first:
                logging.debug('Detect unnecessary changes')
                self.current_store.detect_unnecessary_changes(local_sdk=self.system, remote_sdk=self.sdk)
                self.update_min_seqs_from_store()
            logging.debug('Clearing op and pruning folders moves')
            self.current_store.clear_operations_buffer()
            self.current_store.prune_folders_moves()
            self.update_min_seqs_from_store()
            logging.debug('Store conflicts')
            store_conflicts = self.current_store.clean_and_detect_conflicts(self.db_handler)
            if store_conflicts:
                # User intervention required: abort this cycle.
                logging.info('Conflicts detected, cannot continue!')
                logger.log_state(_('Conflicts detected, cannot continue!'), 'error')
                self.current_store.close()
                self.sleep_offline()
                continue
            # Reduction may have emptied the store entirely.
            changes_length = len(self.current_store)
            if not changes_length:
                logging.info('No changes detected')
                self.exit_loop_clean(logger)
                very_first = False
                continue
            self.global_progress['status_indexing'] = 0
            import change_processor
            self.global_progress['queue_length'] = changes_length
            logging.info('Processing %i changes' % changes_length)
            logger.log_state(_('Processing %i changes') % changes_length, "start")
            # Mutable one-cell list so the nested callback can update the count.
            counter = [1]
            def processor_callback(change):
                # Process one change; returns True on success, False on a
                # recoverable failure (interrupt/SDK exceptions propagate).
                try:
                    if self.interrupt or not self.job_status_running:
                        raise InterruptException()
                    self.update_current_tasks()
                    self.update_global_progress()
                    Processor = StorageChangeProcessor if self.storage_watcher else ChangeProcessor
                    proc = Processor(change, self.current_store, self.job_config, self.system, self.sdk, self.db_handler, self.event_logger)
                    proc.process_change()
                    self.update_min_seqs_from_store(success=True)
                    self.global_progress['queue_done'] = float(counter[0])
                    counter[0] += 1
                    self.update_current_tasks()
                    self.update_global_progress()
                    time.sleep(0.1)
                    # Re-check after the pause: job may have been stopped meanwhile.
                    if self.interrupt or not self.job_status_running:
                        raise InterruptException()
                except ProcessException as pe:
                    logging.error(pe.message)
                    return False
                except InterruptException as i:
                    raise i
                except PydioSdkDefaultException as p:
                    raise p
                except Exception as ex:
                    logging.exception(ex.message)
                    return False
                return True
            try:
                if sys.platform.startswith('win'):
                    # NOTE(review): on Windows, presumably keeps modified parent
                    # folders for the next snapshot pass — confirm intent.
                    self.marked_for_snapshot_pathes = list(set(self.current_store.find_modified_parents()) - set(self.marked_for_snapshot_pathes))
                self.current_store.process_changes_with_callback(processor_callback)
            except InterruptException as iexc:
                # Graceful stop requested while processing: fall through to cleanup.
                pass
            logger.log_state(_('%i files modified') % self.global_progress['queue_done'], 'success')
            if self.global_progress['queue_done']:
                logger.log_notif(_('%i files modified') % self.global_progress['queue_done'], 'success')
        except PydioSdkDefaultException as re:
            logging.error(re.message)
            logger.log_state(re.message, 'error')
        except SSLError as rt:
            logging.error(rt.message)
            logger.log_state(_('An SSL error happened, please check the logs'), 'error')
        except ProxyError as rt:
            logging.error(rt.message)
            logger.log_state(_('A proxy error happened, please check the logs'), 'error')
        except TooManyRedirects as rt:
            logging.error(rt.message)
            logger.log_state(_('Connection error: too many redirects'), 'error')
        except ChunkedEncodingError as rt:
            logging.error(rt.message)
            logger.log_state(_('Chunked encoding error, please check the logs'), 'error')
        except ContentDecodingError as rt:
            logging.error(rt.message)
            logger.log_state(_('Content Decoding error, please check the logs'), 'error')
        except InvalidSchema as rt:
            logging.error(rt.message)
            logger.log_state(_('Http connection error: invalid schema.'), 'error')
        except InvalidURL as rt:
            logging.error(rt.message)
            logger.log_state(_('Http connection error: invalid URL.'), 'error')
        except Timeout as to:
            logging.error(to)
            logger.log_state(_('Connection timeout, will retry later.'), 'error')
        except RequestException as ree:
            logging.error(ree.message)
            logger.log_state(_('Cannot resolve domain!'), 'error')
        except Exception as e:
            # Quota / permission errors are reported elsewhere; only log the rest.
            if not (e.message.lower().count('[quota limit reached]') or e.message.lower().count('[file permissions]')):
                logging.exception('Unexpected Error: %s' % e.message)
                logger.log_state(_('Unexpected Error: %s') % e.message, 'error')
        logging.debug('Finished this cycle, waiting for %i seconds' % self.online_timer)
        self.exit_loop_clean(logger)
        very_first = False
def run(self):
    """Main synchronization loop of the job thread (earlier variant).

    NOTE(review): this appears to be an older duplicate of the ``run``
    method defined above in this file (uses ``self.data_base`` instead of
    ``self.configs_path`` and a single ``ConnectionError`` handler) —
    confirm which definition is actually in effect.
    """
    logger = EventLogger(self.data_base)
    very_first = False
    if self.watcher:
        if self.watcher_first_run:
            # Initial full indexing pass from the last stored snapshot.
            logger.log_state(_('Checking changes since last launch...'), "sync")
            very_first = True
            self.watcher.check_from_snapshot()
            self.watcher_first_run = False
        self.watcher.start()
    while not self.interrupt:
        try:
            # logging.info('Starting cycle with cycles local %i and remote %is' % (self.local_seq, self.remote_seq))
            self.processing_signals = {}
            self.init_global_progress()
            # Throttle cycles: wait online_timer (resp. offline_timer) between runs.
            interval = int(time.time() - self.last_run)
            if (self.online_status and interval < self.online_timer) or (not self.online_status and interval < self.offline_timer):
                time.sleep(self.event_timer)
                continue
            if not self.job_status_running:
                logging.debug("self.online_timer: %s" % self.online_timer)
                logger.log_state(_('Status: Paused'), "sync")
                self.sleep_offline()
                continue
            if self.job_config.frequency == 'time':
                # Time-scheduled job: only run inside the one-minute window
                # beginning at the configured start time.
                start_time = datetime.time(int(self.job_config.start_time['h']), int(self.job_config.start_time['m']))
                end_time = datetime.time(int(self.job_config.start_time['h']), int(self.job_config.start_time['m']), 59)
                now = datetime.datetime.now().time()
                if not start_time < now < end_time:
                    logger.log_state(_('Status: scheduled for %s') % str(start_time), "sync")
                    self.sleep_offline()
                    continue
                else:
                    logging.info("Now triggering synchro as expected at time " + str(start_time))
            if not self.system.check_basepath():
                # Local folder vanished (e.g. unmounted volume): retry later.
                log = _('Cannot find local folder! Did you disconnect a volume? Waiting %s seconds before retry') % self.offline_timer
                logging.error(log)
                logger.log_state(_('Cannot find local folder, did you disconnect a volume?'), "error")
                self.sleep_offline()
                continue
            # Re-scan directories flagged during the previous cycle.
            for snap_path in self.marked_for_snapshot_pathes:
                logging.info('LOCAL SNAPSHOT : loading snapshot for directory %s' % snap_path)
                if self.interrupt or not self.job_status_running:
                    raise InterruptException()
                self.watcher.check_from_snapshot(snap_path)
            # Load local and/or remote changes, depending on the direction
            from pydio.job.change_stores import SqliteChangeStore
            self.current_store = SqliteChangeStore(self.data_base + '/changes.sqlite', self.job_config.filters['includes'], self.job_config.filters['excludes'])
            self.current_store.open()
            try:
                if self.job_config.direction != 'up':
                    logging.info('Loading remote changes with sequence %s' % str(self.remote_seq))
                    if self.remote_seq == 0:
                        logger.log_state(_('Gathering data from remote workspace, this can take a while...'), 'sync')
                        very_first = True
                    self.remote_target_seq = self.load_remote_changes_in_store(self.remote_seq, self.current_store)
                    self.current_store.sync()
                else:
                    # Upload-only job: no remote changes needed, just check connectivity.
                    self.remote_target_seq = 1
                    self.ping_remote()
            except ConnectionError as ce:
                error = _('No connection detected, waiting %s seconds to retry') % self.offline_timer
                self.marked_for_snapshot_pathes = []
                logging.error(error)
                logger.log_state(error, "wait")
                self.sleep_offline()
                continue
            except Exception as e:
                error = 'Error while connecting to remote server (%s), waiting for %i seconds before retempting ' % (e.message, self.offline_timer)
                logging.error(error)
                logger.log_state(_('Error while connecting to remote server (%s)') % e.message, "error")
                self.marked_for_snapshot_pathes = []
                self.sleep_offline()
                continue
            self.online_status = True
            if not self.job_config.server_configs:
                self.job_config.server_configs = self.sdk.load_server_configs()
            self.sdk.set_server_configs(self.job_config.server_configs)
            if self.job_config.direction != 'down':
                logging.info('Loading local changes with sequence ' + str(self.local_seq))
                self.local_target_seq = self.db_handler.get_local_changes_as_stream(self.local_seq, self.current_store.flatten_and_store)
                self.current_store.sync()
            else:
                self.local_target_seq = 1
            # Reduction phase: shrink the change set before processing it.
            logging.info('Reducing changes')
            self.current_store.delete_copies()
            self.update_min_seqs_from_store()
            self.current_store.dedup_changes()
            self.update_min_seqs_from_store()
            self.current_store.detect_unnecessary_changes(local_sdk=self.system, remote_sdk=self.sdk)
            self.update_min_seqs_from_store()
            #self.current_store.filter_out_echoes_events()
            #self.update_min_seqs_from_store()
            self.current_store.clear_operations_buffer()
            self.current_store.prune_folders_moves()
            self.update_min_seqs_from_store()
            store_conflicts = self.current_store.clean_and_detect_conflicts(self.db_handler)
            if store_conflicts:
                # User intervention required: abort this cycle.
                logging.info('Conflicts detected, cannot continue!')
                logger.log_state(_('Conflicts detected, cannot continue!'), 'error')
                self.current_store.close()
                self.sleep_offline()
                continue
            changes_length = len(self.current_store)
            if changes_length:
                import change_processor
                self.global_progress['queue_length'] = changes_length
                logging.info('Processing %i changes' % changes_length)
                logger.log_state(_('Processing %i changes') % changes_length, "start")
                # Mutable one-cell list so the nested callback can update the count.
                counter = [1]
                def processor_callback(change):
                    # Process one change; returns True on success, False on a
                    # recoverable failure (interrupt/SDK exceptions propagate).
                    try:
                        if self.interrupt or not self.job_status_running:
                            raise InterruptException()
                        self.update_current_tasks()
                        self.update_global_progress()
                        proc = ChangeProcessor(change, self.current_store, self.job_config, self.system, self.sdk, self.db_handler, self.event_logger)
                        proc.process_change()
                        self.update_min_seqs_from_store(success=True)
                        self.global_progress['queue_done'] = float(counter[0])
                        counter[0] += 1
                        self.update_current_tasks()
                        self.update_global_progress()
                        time.sleep(0.1)
                        # Re-check after the pause: job may have been stopped meanwhile.
                        if self.interrupt or not self.job_status_running:
                            raise InterruptException()
                    except ProcessException as pe:
                        logging.error(pe.message)
                        return False
                    except InterruptException as i:
                        raise i
                    except PydioSdkDefaultException as p:
                        raise p
                    except Exception as ex:
                        logging.exception(ex.message)
                        return False
                    return True
                try:
                    if sys.platform.startswith('win'):
                        # NOTE(review): on Windows, presumably keeps modified parent
                        # folders for the next snapshot pass — confirm intent.
                        self.marked_for_snapshot_pathes = list(set(self.current_store.find_modified_parents()) - set(self.marked_for_snapshot_pathes))
                    self.current_store.process_changes_with_callback(processor_callback)
                except InterruptException as iexc:
                    # Graceful stop requested while processing: fall through to cleanup.
                    pass
                logger.log_state(_('%i files modified') % self.global_progress['queue_done'], "success")
            else:
                logging.info('No changes detected')
                self.marked_for_snapshot_pathes = []
                if very_first:
                    logger.log_state(_('Synchronized'), 'success')
        except PydioSdkDefaultException as re:
            logging.warning(re.message)
            logger.log_state(re.message, 'error')
        except RequestException as ree:
            logging.warning(ree.message)
            logger.log_state(ree.message, 'request error')
        except Exception as e:
            # Quota / permission errors are reported elsewhere; only log the rest.
            if not (e.message.lower().count('[quota limit reached]') or e.message.lower().count('[file permissions]')):
                logging.exception('Unexpected Error: %s' % e.message)
                logger.log_state(_('Unexpected Error: %s') % e.message, 'error')
        logging.debug('Finished this cycle, waiting for %i seconds' % self.online_timer)
        self.current_store.close()
        self.init_global_progress()
        if self.job_config.frequency == 'manual':
            # Manual jobs run a single cycle, then pause themselves.
            self.job_status_running = False
            self.sleep_offline()
        else:
            self.sleep_online()