def process_batches(self):
    '''Pack results from Druid into our format.'''
    LOG.info('Processing batches...')
    geo_results = self.results['byGeo']
    batch_warnings = []
    for batch in self.batches:
        for geo_key, geo_result in list(batch.result.items()):
            if geo_result.has_data():
                if geo_key not in geo_results:
                    geo_results[geo_key] = GeoResult(geo_result.metadata)
                geo_results[geo_key].data.update(geo_result.data)
                batch_warnings.extend(geo_results[geo_key].warnings)

    if batch_warnings:
        LOG.warn('!! You overwrote existing data. Run in dev for detailed debug.')
        if not IS_PRODUCTION:
            # Only show these errors in dev. In production, these outputs
            # can overwhelm slow disks.
            print('\n'.join(batch_warnings))
    return True
def check_pod(self, pod, app_controller):
    if not pod['is_ready'] or not pod['is_running']:
        return
    if app_controller.memory_controller is None:
        return

    memory_percent = round(
        float(pod['mem_usage'] - pod['mem_cache'])
        / float(pod['max_mem_limit']) * 100, 1)
    if memory_percent >= app_controller.memory_controller.warn_percent:
        msg = '{}:{}:cache={},usage={},percent={}%'.format(
            app_controller.name, pod['name'], pod['mem_cache'],
            pod['mem_usage'], memory_percent)
        alert_record = AlertRecord(key=pod['name'],
                                   alert_type=AlertType.MEM,
                                   alert_level=AlertLevel.WARN,
                                   timestamp=int(time.time()),
                                   msg=msg,
                                   hostname=pod['host_IP'],
                                   mail_list=app_controller.mail_list,
                                   phone_list=app_controller.phone_list)
        if Alert.alert(alert_record):
            LOG.info('sent mail: {}'.format(msg))
        LOG.warn(msg)
    else:
        Alert.remove_alert_history(pod['name'])
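# A minimal, hypothetical sketch of the pod payload and controller attributes that
# check_pod() above expects. The key and attribute names come straight from the
# function body; the values and the SimpleNamespace stand-ins are illustrative only.
from types import SimpleNamespace

sample_pod = {
    'name': 'web-7f9c',                 # used as the alert key
    'host_IP': '10.0.0.12',             # reported as the alert hostname
    'is_ready': True,
    'is_running': True,
    'mem_usage': 900 * 1024 ** 2,       # bytes currently in use
    'mem_cache': 100 * 1024 ** 2,       # page-cache bytes excluded from the ratio
    'max_mem_limit': 1024 * 1024 ** 2,  # container memory limit in bytes
}

sample_controller = SimpleNamespace(
    name='web',
    memory_controller=SimpleNamespace(warn_percent=75),
    mail_list=['oncall@example.com'],
    phone_list=[],
)

# (900 - 100) / 1024 * 100 ~= 78.1% >= 75%, so a MEM/WARN alert would be raised.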
def delete_image(self, image_name):
    """Invoke the xCAT REST API to delete an image."""
    url = self._xcat_url.rmimage('/' + image_name)
    try:
        zvmutils.xcat_request("DELETE", url)
    except zvmutils.ZVMException:
        LOG.warn("Failed to delete image file %s from xCAT" % image_name)

    url = self._xcat_url.rmobject('/' + image_name)
    try:
        zvmutils.xcat_request("DELETE", url)
    except zvmutils.ZVMException:
        LOG.warn("Failed to delete image definition %s from xCAT" % image_name)

    LOG.info('Image %s successfully deleted' % image_name)
def __prepare_config(self, generator_config):
    ifs = ",".join([repr(pci) for pci in generator_config.pcis])

    result = """# Config generated by NFVbench
- port_limit : 2
  version : 2
  zmq_pub_port : {zmq_pub_port}
  zmq_rpc_port : {zmq_rpc_port}
  prefix : {prefix}
  limit_memory : {limit_memory}
  interfaces : [{ifs}]""".format(
        zmq_pub_port=generator_config.zmq_pub_port,
        zmq_rpc_port=generator_config.zmq_rpc_port,
        prefix=generator_config.name,
        limit_memory=generator_config.limit_memory,
        ifs=ifs)

    if hasattr(generator_config, 'platform'):
        if generator_config.platform.master_thread_id \
                and generator_config.platform.latency_thread_id:
            platform = """
  platform :
    master_thread_id : {master_thread_id}
    latency_thread_id : {latency_thread_id}
    dual_if:""".format(
                master_thread_id=generator_config.platform.master_thread_id,
                latency_thread_id=generator_config.platform.latency_thread_id)
            result += platform

            for core in generator_config.platform.dual_if:
                threads = ""
                try:
                    threads = ",".join([repr(thread) for thread in core.threads])
                except TypeError:
                    LOG.warn("No threads defined for socket %s", core.socket)
                core_result = """
      - socket : {socket}
        threads : [{threads}]""".format(socket=core.socket, threads=threads)
                result += core_result
        else:
            LOG.info("Generator profile 'platform' sub-properties are set but "
                     "not filled in config file. TRex will use default values.")
    return result
def check(self, directory, files):  # {{{
    super(CheckerPath, self).check(directory, files)

    # Get the correct path pattern from the configuration file
    pattern_str = get_param('patterns', 'path')
    pat = re.compile(pattern_str)

    LOG.debug("matching %s with %s", directory, pattern_str)
    mat = pat.match(directory)
    if mat:
        result = True
    else:
        LOG.warn("Path '%s' does not match pattern '%s'", directory, pattern_str)
        result = False
    return result
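# Hypothetical sketch of the configuration this checker reads via
# get_param('patterns', 'path'). The section and option names come from the call
# above; the file format and the example regex are assumptions.
#
#   [patterns]
#   path = ^/data/(incoming|archive)/\d{4}/\d{2}$
#
# With that pattern, '/data/incoming/2020/07' would pass the check and
# '/tmp/scratch' would fail with the "does not match pattern" warning.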
def get_datasource_version(self, datasource_name):
    # TODO(stephen): Druid has a bug where the "full" version of the
    # datasource endpoint doesn't work. When it gets fixed, see if it
    # includes version. If so, include it above as part of the core
    # DruidDatasource class.
    url = '%s/%s/%s/segments?full' % (
        self.druid_configuration.segment_metadata_endpoint(),
        self.datasource_list_path,
        datasource_name,
    )
    r = requests.get(url)
    if not r.ok:
        raise MissingDatasourceException(
            'Cannot retrieve version for datasource. Datasource does not '
            'exist: %s' % datasource_name)

    versions = set(segment['version'] for segment in r.json())
    if len(versions) > 1:
        LOG.warn(
            'Somehow datasource has multiple live versions! Taking the most '
            'recent. Datasource: %s\nVersions: %s' % (datasource_name, versions))
    return sorted(versions, reverse=True)[0]
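# Note on the sort above: a Druid segment's 'version' is the ISO-8601 timestamp of
# the ingestion task that created it, so a plain lexicographic sort is also
# chronological and sorted(versions, reverse=True)[0] picks the newest version.
# A small illustration (the version strings are made up):
#
#   versions = {'2019-03-01T00:00:00.000Z', '2019-04-15T08:30:00.000Z'}
#   sorted(versions, reverse=True)[0]  # -> '2019-04-15T08:30:00.000Z'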
def __prepare_config(self, generator_config):
    ifs = ",".join([repr(pci) for pci in generator_config.pcis])

    result = """# Config generated by NFVbench
- port_limit : 2
  version : 2
  zmq_pub_port : {zmq_pub_port}
  zmq_rpc_port : {zmq_rpc_port}
  prefix : {prefix}
  limit_memory : {limit_memory}
  interfaces : [{ifs}]""".format(zmq_pub_port=generator_config.zmq_pub_port,
                                 zmq_rpc_port=generator_config.zmq_rpc_port,
                                 prefix=generator_config.name,
                                 limit_memory=generator_config.limit_memory,
                                 ifs=ifs)

    if generator_config.platform.master_thread_id and \
            generator_config.platform.latency_thread_id:
        try:
            platform = """
  platform :
    master_thread_id : {master_thread_id}
    latency_thread_id : {latency_thread_id}
    dual_if:""".format(master_thread_id=generator_config.platform.master_thread_id,
                       latency_thread_id=generator_config.platform.latency_thread_id)
            result += platform

            for core in generator_config.platform.dual_if:
                threads = ""
                try:
                    threads = ",".join([repr(thread) for thread in core.threads])
                except TypeError:
                    LOG.warn("No threads defined for socket %s", core.socket)
                core_result = """
      - socket : {socket}
        threads : [{threads}]""".format(socket=core.socket, threads=threads)
                result += core_result
        except (KeyError, AttributeError):
            pass
    return result
def login_from_request(request_object=None):
    request_object = request_object or request

    access_key = request_object.cookies.get('accessKey')
    if access_key:
        LOG.info('Received access key in cookie: %s', access_key)
        if KeyManager.is_valid_key(access_key):
            LOG.info('Accessing %s with renderbot access key', request_object.path)
            bot = User.query.filter_by(
                username=global_config.RENDERBOT_EMAIL).first()
            return bot
        else:
            LOG.warn('Received invalid access key')

    try:
        username = request_object.headers.get('X-Username')
        password = request_object.headers.get('X-Password')
        if not (username and password):
            LOG.debug('Username or Password not provided via Login Headers.')
            return None

        LOG.debug('Attempting to authenticate user: \'%s\'.', username)
        user = try_authenticate_user(username, password)
        if user:
            LOG.debug('User: \'%s\' authenticated successfully.',
                      get_user_string(user))
            return user
        else:
            LOG.warn('User: \'%s\' failed to authenticate.', username)
            return None
    except BadRequest:
        return None
    return None
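# Hypothetical sketch of the two credential paths login_from_request() checks: an
# 'accessKey' cookie for the renderbot user, or X-Username/X-Password headers for a
# normal login. The cookie and header names come from the function above; the host
# and credential values are made up.
#
#   curl -H 'X-Username: alice' -H 'X-Password: s3cret' https://example.com/api/query
#   curl --cookie 'accessKey=abc123' https://example.com/api/query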
stdout("should see debug message test 1\n") log.debug("log test 1") if test_fileobj('{0:s}'.format(pgm)) != 1: stdout('log file \"{0:s}\" should not have been created\n'.format(pgm), pgm) errors += 1 else: stdout('Should see "Parameter not a file or file object."\n') tests += 1 print stdout("testing setLogLevel('warn'), should see only WARN: log test 2\n") setLogLevel("warn") log.debug("log test 2") log.warn("log test 2") print stdout("shutting down logger, reinitializing with log level 0\n") stdout("NullHander should prevent logger calls from generating warnings/errors.\n") logging.shutdown() initLogger(logLevel=0, logConsole=True, logDisk=False) log.debug("this is an error.") stdout('you should not see "this is an error." above\n') stdout('after installing null handler, setting log level to debug\n') setLogLevel('debug') log.debug("this is not an error.") stdout('testing logging AppException instance with appended strings\n')
def parse_arguments(self):
    self.request_is_demo = self.request_data.get('demo')

    # Parse overall modifiers.
    self.use_randomized_data = USE_RANDOMIZED_DATA or self.request_is_demo

    # Location filters are default OR.
    # TODO(stephen, ian): When needed, allow more complex filtering
    filters = self.request_data.get('filters', [])
    for f in filters:
        if not len(list(f.keys())):
            # Skip empty filters.
            continue

        # HACK(stephen): Handle both hierarchical dimension filters (which
        # should be OR'd together) and non-hierarchical dimensions (which
        # should all be AND'd together) with the location filters
        first_key = list(f.keys())[0]
        if len(f) == 1 and first_key not in self.all_geo_dimensions:
            self.non_hierarchical_filter &= (
                Dimension(first_key) == list(f.values())[0]
            )
            continue

        location_filter = {}
        # Validate that the dimensions being filtered on actually exist
        for key, value in list(f.items()):
            # NOTE(stephen): This should never happen
            if key not in self.all_geo_dimensions:
                LOG.warn(
                    'A location filter contains non-location columns to '
                    'filter by. Filter: %s',
                    f,
                )
            location_filter[key] = value
        if location_filter:
            self.location_filters.append(location_filter)

    geo_granularity = self.request_data.get('granularity')
    if geo_granularity != NATION_GEO_FIELD:
        latlng_fields = current_app.zen_config.aggregation.GEO_TO_LATLNG_FIELD.get(
            geo_granularity
        )
        if latlng_fields:
            self.latitude_field = latlng_fields[0]
            self.longitude_field = latlng_fields[1]
    self.geo_field = geo_granularity

    # Capture requested fields
    request_fields = self.request_data.get('fields', [])

    # Parse denominator
    denom = self.request_data.get('denominator')
    if denom:
        if denom in current_app.zen_config.indicators.VALID_FIELDS:
            self.denom = denom
            request_fields.append(denom)
        else:
            error_msg = 'Invalid denominator specified: %s' % denom
            self.response = Error(error_msg)
            return False

    # Deduplicate field list while maintaining the user's selected order
    # since the frontend has implicit requirements around field ordering
    for field in request_fields:
        self.data_fields.add(field)
        # TODO(stephen): Is this even necessary? Can the frontend send
        # duplicate fields? Also, would love an ordered set here instead
        # of searching the list.
        if field not in self.ordered_fields:
            self.ordered_fields.append(field)

    bad_fields = self.data_fields - current_app.zen_config.indicators.VALID_FIELDS
    if bad_fields:
        error_msg = 'Invalid fields specified: %s' % ', '.join(bad_fields)
        self.response = Error(error_msg)
        return False

    self.selected_granularities = self.request_data.get(
        'granularities', DEFAULT_GRANULARITIES
    )

    self.calculation = current_app.zen_config.aggregation_rules.get_calculation_for_fields(
        self.data_fields
    )
    self.calculation.set_strict_null_fields(self.data_fields)

    # Get dates
    # TODO(stephen, ian): Validate these
    self.start_date = datetime.strptime(
        self.request_data.get('start_date'), STANDARD_DATA_DATE_FORMAT
    ).date()
    self.end_date = datetime.strptime(
        self.request_data.get('end_date'), STANDARD_DATA_DATE_FORMAT
    ).date()

    self.time_bucket = self.request_data.get('time_bucket', DEFAULT_TIME_BUCKET)
    return True
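# A hypothetical request_data payload accepted by parse_arguments() above. The key
# names are exactly the ones the method reads; the field and dimension values, and
# the assumption that STANDARD_DATA_DATE_FORMAT is '%Y-%m-%d', are illustrative only.
sample_request_data = {
    'demo': False,
    'filters': [{'RegionName': 'Oromia'}],    # hierarchical (geo) filter, OR'd
    'granularity': 'RegionName',              # geo granularity used for lat/lng lookup
    'fields': ['malaria_cases', 'population'],
    'denominator': 'population',              # must be in VALID_FIELDS
    'granularities': ['month'],               # optional; defaults to DEFAULT_GRANULARITIES
    'start_date': '2019-01-01',
    'end_date': '2019-12-31',
    'time_bucket': 'month',                   # optional; defaults to DEFAULT_TIME_BUCKET
}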