def check_variables(self):
    if not all(Manager.is_digit(i)
               for i in [self.ratio, self.capacity, self.indices]):
        raise ManagerError(
            "split_ratio, capacity, indices must all be digits")
    if not all(isinstance(point, np.ndarray) for point in self.points):
        raise ManagerError(
            "every point in node must be in the form of a NumPy array")
    if self.cell_count() > 0:
        # identify whether or not the passed data forms a vector space
        vec_dim = [vec.ndim for vec in self.points]
        if vec_dim.count(vec_dim[0]) != len(vec_dim):
            raise ManagerError("every basis of vector space must be equal")
    if not isinstance(self.parent, (InternalNode, RootNode)):
        raise TreeError(
            "parent node must be either an InternalNode or RootNode instance")
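# Illustration (hedged): a minimal, self-contained demonstration of the
# dimensionality check above; points of mixed ndim fail the consistency test.
import numpy as np

points = [np.array([1, 2]), np.array([[1, 2], [3, 4]])]
vec_dim = [vec.ndim for vec in points]             # [1, 2]
print(vec_dim.count(vec_dim[0]) != len(vec_dim))   # True -> error raised above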
def push(self):
    """
    Execute update by running commands from the execution queue
    prepared by the `_prepare_push` method. Commands are only
    executed if the ratio of changes to existing remote entities
    does not exceed the `threshold` attribute.
    :raises ManagerError: in case of exceeded threshold/API error
    """
    self.load_ipa_entities()
    self._prepare_push()
    if not self.commands:
        self.lg.info('FreeIPA consistent with local config, nothing to do')
        return
    if not self.force:  # dry run
        self.lg.info('Would execute commands:')
        for command in sorted(self.commands):
            self.lg.info('- %s', command)
    self._check_threshold()
    if self.force:
        # command sorting is really important here for a correct update!
        for command in sorted(self.commands):
            try:
                command.execute(api)
            except CommandError as e:
                err = 'Error executing %s: %s' % (command.description, e)
                self.lg.error(err)
                # collected here to count the number of errors
                self.errs.append(err)
        if self.errs:
            raise ManagerError(
                'There were %d errors executing update' % len(self.errs))
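# Usage sketch (hedged): how a dry run differs from a forced push with the
# method above. `FreeIPAManager` and its constructor arguments are
# illustrative assumptions, not the project's actual entry point.
manager = FreeIPAManager(settings='settings.yaml')
manager.force = False
manager.push()  # logs 'Would execute commands:' and runs the threshold check
manager.force = True
manager.push()  # executes the sorted command queue against the API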
def _register_alerting(self):
    """
    For each alerting plugin listed in settings:
    1. Instantiate the plugin based on config.
    2. Add the plugin as a root logging handler.
    3. Add the plugin to the `alerting_plugins` list attribute,
       so that it dispatches the results at the end of the run.
    """
    self.alerting_plugins = []
    plugins_config = self.settings.get('alerting')
    if not plugins_config:
        self.lg.info('No alerting plugins configured in settings')
        return
    self.lg.debug('Registering %d alerting plugins', len(plugins_config))
    root_logger = logging.getLogger()
    for name, config in plugins_config.iteritems():
        try:
            module_path = 'ipamanager.alerting.%s' % config['module']
            module = importlib.import_module(module_path)
            plugin_config = config.get('config', {})
            plugin = getattr(module, config['class'])(plugin_config)
            root_logger.addHandler(plugin)
            self.alerting_plugins.append(plugin)
            self.lg.debug('Registered plugin %s', plugin)
        except (AttributeError, ImportError, ManagerError) as e:
            raise ManagerError(
                'Could not register alerting plugin %s: %s' % (name, e))
    self.lg.debug('Registered %d alerting plugins',
                  len(self.alerting_plugins))
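# Minimal plugin sketch, assuming alerting plugins are logging handlers (as
# implied by `root_logger.addHandler(plugin)` above). The class name, the
# `records` attribute and the config keys are hypothetical; only the
# single-argument constructor follows the registration code.
import logging

class ExampleAlertingPlugin(logging.Handler):
    def __init__(self, config):
        super(ExampleAlertingPlugin, self).__init__()
        self.url = config.get('url')  # hypothetical config key
        self.records = []

    def emit(self, record):
        # collect log records during the run for dispatch at the end
        self.records.append(self.format(record))

# Matching settings entry (structure inferred from the loop above):
#   alerting:
#     monitoring:
#       module: example        # imported as ipamanager.alerting.example
#       class: ExampleAlertingPlugin
#       config: {url: 'https://alerts.example.com'}  # hypothetical values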
def save_forest(forest_array, file_path=None):
    # TODO: Implement more efficient method for saving large forests
    if not all(isinstance(tree, PartitionTree) for tree in forest_array):
        raise ManagerError(
            "forest_array must be an array full of PartitionTree objects")
    if file_path is None:
        current_path = os.path.dirname(os.path.abspath(__file__))
        directory_path = os.path.join(current_path, "index_data")
        if not os.path.isdir(directory_path):
            os.mkdir(directory_path)  # directory must be created first
        file_path = os.path.join(directory_path, "{0}.p".format(time.time()))
    elif os.path.splitext(file_path)[1] != ".p":
        raise ManagerError('file extension must be ".p"')
    with open(file_path, "wb") as fl:  # pickle needs a binary-mode file
        pickle.dump(forest_array, fl)
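# A matching loader sketch, assuming forests are stored as a single pickle
# in a ".p" file as written by save_forest above; the name `load_forest` is
# an assumption, not part of the original code.
import pickle

def load_forest(file_path):
    with open(file_path, "rb") as fl:  # pickle needs a binary-mode file
        return pickle.load(fl)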
def delete_file(self):
    if not self.path:
        raise ManagerError(
            '%s has no file path, cannot delete.' % repr(self))
    try:
        os.unlink(self.path)
        self.lg.debug('%s config file deleted', repr(self))
    except OSError as e:
        raise ConfigError(
            'Cannot delete %s at %s: %s' % (repr(self), self.path, e))
def check_variables(self):
    if not all(self.is_digit(i) for i in
               [self.tree_count, self.split_ratio, self.capacity,
                self.indices]):
        raise ManagerError(
            "tree_count, split_ratio, capacity, indices must all be digits")
    if not isinstance(self.vector_space, np.ndarray):
        raise ManagerError(
            "vector space must be in the form of a NumPy array")
    if self.indices > self.vector_space.ndim:
        raise ManagerError(
            "number of indices should not be more than the dimension "
            "of the vector space")
    if not 0 < self.split_ratio <= 0.5:  # 1 / 2 truncates to 0 in Python 2
        raise ManagerError(
            "split_ratio must be greater than 0 and at most 1/2")
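# Quick illustration of the split_ratio bound enforced above (runnable,
# pure Python; the sample values are arbitrary):
for ratio in (0.0, 0.3, 0.5, 0.7):
    print(ratio, 0 < ratio <= 0.5)  # only 0.3 and 0.5 satisfy the bound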
def _load_settings(self):
    """
    Load the settings file. The file contains integrity check settings,
    ignored entities configuration and other useful settings.
    """
    self.lg.debug('Loading settings file from %s', self.args.settings)
    try:
        self.settings = utils.load_settings(self.args.settings)
    except Exception as e:
        raise ManagerError('Error loading settings: %s' % e)
    self.lg.debug('Settings parsed: %s', self.settings)
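# Usage sketch (hedged), assuming `utils.load_settings` parses a config file
# into a dict, consistent with how `self.settings` is queried elsewhere
# (e.g. `self.settings.get('alerting')`); the 'ignore' key is hypothetical.
#   settings = utils.load_settings('settings.yaml')
#   settings.get('alerting', {})  # plugin configs used by _register_alerting
#   settings.get('ignore', {})    # ignored-entities configuration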
def _check_threshold(self):
    try:
        abs_ratio = float(len(self.commands)) / self.ipa_entity_count
    except ZeroDivisionError:
        abs_ratio = 1
    # cap change ratio at 100 % to avoid threshold issues
    ratio = min(abs_ratio * 100, 100)
    self.lg.debug('%d commands, %d remote entities (%.2f %%)',
                  len(self.commands), self.ipa_entity_count, ratio)
    if ratio > self.threshold:
        raise ManagerError(
            'Threshold exceeded (%.2f %% > %.f %%), aborting'
            % (ratio, self.threshold))
    self.lg.debug('Threshold check passed')
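# Worked example of the ratio computation above: 5 queued commands against
# 200 remote entities yield 2.5 %, which passes a 10 % threshold.
commands, entities, threshold = 5, 200, 10
ratio = min(float(commands) / entities * 100, 100)  # 2.5
print(ratio > threshold)  # False -> update proceeds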
def load_ipa_entities(self):
    """
    Load entities defined on the FreeIPA via API.
    Entity data is saved in the `self.ipa_entities` nested dictionary,
    with top-level keys being entity types (e.g., 'hostgroup')
    and bottom-level keys being entity names (e.g., 'group-one').
    :raises ManagerError: if there is an error communicating with the API
    :returns: None (entities saved in the `self.ipa_entities` dict)
    """
    self.lg.info('Loading entities from FreeIPA API')
    for entity_class in ENTITY_CLASSES:
        entity_type = entity_class.entity_name
        self.ipa_entities[entity_type] = dict()
        command = '%s_find' % entity_type
        self.lg.debug('Running API command %s', command)
        try:
            parsed = api.Command[command](all=True, sizelimit=0)
        except KeyError:
            raise ManagerError('Undefined API command %s' % command)
        except Exception as e:
            raise ManagerError('Error loading %s entities from API: %s'
                               % (entity_type, e))
        for data in parsed['result']:
            name = data[entity_class.entity_id_type][0]
            if check_ignored(entity_class, name, self.ignored):
                self.lg.debug(
                    'Not parsing ignored %s %s', entity_type, name)
                continue
            self.ipa_entities[entity_type][name] = entity_class(name, data)
        self.lg.info('Parsed %d %ss',
                     len(self.ipa_entities[entity_type]), entity_type)
        self.lg.debug('%ss parsed: %s', entity_type,
                      sorted(self.ipa_entities[entity_type].keys()))
    self.ipa_entity_count = sum(
        len(i) for i in self.ipa_entities.itervalues())
    self.lg.info(
        'Parsed %d entities from FreeIPA API', self.ipa_entity_count)
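# Resulting structure, per the docstring above: entity type -> entity name
# -> entity object (the repr of the objects is illustrative only):
#   self.ipa_entities = {
#       'hostgroup': {'group-one': <FreeIPAHostGroup 'group-one'>, ...},
#       'user': {...},
#   }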
def write_to_file(self):
    if not self.path:
        raise ManagerError(
            '%s has no file path, nowhere to write.' % repr(self))
    if self.metaparams:
        self.data_repo.update({'metaparams': self.metaparams})
    # don't write default attributes into the file
    for key in self.default_attributes:
        self.data_repo.pop(key, None)
    try:
        with open(self.path, 'w') as target:
            data = {self.name: self.data_repo or None}
            yaml.dump(data, stream=target, Dumper=EntityDumper,
                      default_flow_style=False, explicit_start=True)
        self.lg.debug('%s written to file', repr(self))
    except (IOError, OSError, yaml.YAMLError) as e:
        raise ConfigError(
            'Cannot write %s to %s: %s' % (repr(self), self.path, e))
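# Illustrative output for an entity named 'group-one' with data_repo
# {'description': 'Test group'} and metaparams {'owner': 'admin'} (values
# hypothetical; the layout follows the yaml.dump options above):
#   ---
#   group-one:
#     description: Test group
#     metaparams:
#       owner: admin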