def version(request):
    """Report the AWX/Tower version strings, plus whether the view handling
    this request is flagged as deprecated.

    The 'tower_version' / 'short_tower_version' keys mirror 'version' for
    backward compatibility with older consumers.
    """
    awx_version = get_awx_version()
    parser_context = getattr(request, 'parser_context', {})
    current_view = parser_context.get('view')
    return {
        'version': awx_version,
        'tower_version': awx_version,
        'short_tower_version': awx_version.split('-')[0],
        'deprecated': getattr(current_view, 'deprecated', False),
    }
def get(self, request, format=None):
    """Return some basic information about this instance

    Everything returned here should be considered public / insecure, as
    this requires no auth and is intended for use by the installer process.
    """
    response = {
        'ha': is_ha_environment(),
        'version': get_awx_version(),
        'active_node': settings.CLUSTER_HOST_ID,
        'install_uuid': settings.INSTALL_UUID,
    }
    response['instances'] = [
        dict(node=instance.hostname,
             uuid=instance.uuid,
             heartbeat=instance.modified,
             capacity=instance.capacity,
             version=instance.version)
        for instance in Instance.objects.all()
    ]
    # BUG FIX: the original called sorted(...) and discarded its return
    # value, so the instance list was never actually sorted.  Sort the
    # list in place by hostname instead.
    response['instances'].sort(key=operator.itemgetter('node'))
    response['instance_groups'] = [
        dict(name=instance_group.name,
             capacity=instance_group.capacity,
             instances=[x.hostname for x in instance_group.instances.all()])
        for instance_group in InstanceGroup.objects.prefetch_related('instances')
    ]
    return Response(response)
def config(since, **kwargs):
    """Collect platform and install configuration for the analytics payload.

    ``since`` and ``**kwargs`` are accepted for collector-interface
    compatibility and are not used here.
    """
    license_info = get_license()
    # Classify how this instance was installed from container env markers.
    install_type = 'traditional'
    if os.environ.get('container') == 'oci':
        install_type = 'openshift'
    elif 'KUBERNETES_SERVICE_PORT' in os.environ:
        install_type = 'k8s'
    # BUG FIX: platform.dist() was deprecated in Python 3.5 and removed in
    # 3.8, so calling it unconditionally raises AttributeError on modern
    # interpreters.  Prefer it when present (identical legacy behavior),
    # otherwise fall back to the `distro` package (already used by the
    # newer collector in this file), degrading to an empty tuple.
    try:
        dist = platform.dist()
    except AttributeError:
        try:
            import distro
            dist = distro.linux_distribution()
        except ImportError:
            dist = ('', '', '')
    return {
        'platform': {
            'system': platform.system(),
            'dist': dist,
            'release': platform.release(),
            'type': install_type,
        },
        'install_uuid': settings.INSTALL_UUID,
        'instance_uuid': settings.SYSTEM_UUID,
        'tower_url_base': settings.TOWER_URL_BASE,
        'tower_version': get_awx_version(),
        'ansible_version': get_ansible_version(),
        'license_type': license_info.get('license_type', 'UNLICENSED'),
        'free_instances': license_info.get('free_instances', 0),
        'total_licensed_instances': license_info.get('instance_count', 0),
        'license_expiry': license_info.get('time_remaining', 0),
        'pendo_tracking': settings.PENDO_TRACKING_STATE,
        'authentication_backends': settings.AUTHENTICATION_BACKENDS,
        'logging_aggregators': settings.LOG_AGGREGATOR_LOGGERS,
        'external_logger_enabled': settings.LOG_AGGREGATOR_ENABLED,
        'external_logger_type': getattr(settings, 'LOG_AGGREGATOR_TYPE', None),
    }
def send_messages(self, messages):
    """Deliver each notification message to its webhook URL via HTTP
    POST or PUT; returns the number of messages processed.

    Raises ValueError for an unsupported HTTP method, and re-raises
    webhook HTTP errors unless ``fail_silently`` is set.
    """
    if 'User-Agent' not in self.headers:
        self.headers['User-Agent'] = "Tower {}".format(get_awx_version())
    method_name = self.http_method.lower()
    if method_name not in ('put', 'post'):
        raise ValueError("HTTP method must be either 'POST' or 'PUT'.")
    chosen_method = getattr(requests, method_name, None)
    sent_messages = 0
    for message in messages:
        # Only attach basic auth when credentials were configured.
        credentials = None
        if self.username or self.password:
            credentials = (self.username, self.password)
        url = "{}".format(message.recipients()[0])
        resp = chosen_method(url,
                             auth=credentials,
                             json=message.body,
                             headers=self.headers,
                             verify=(not self.disable_ssl_verification))
        if resp.status_code >= 400:
            error_text = smart_text(
                _("Error sending notification webhook: {}").format(resp.status_code))
            logger.error(error_text)
            if not self.fail_silently:
                raise Exception(error_text)
        sent_messages += 1
    return sent_messages
def finalize_response(self, request, response, *args, **kwargs):
    """
    Log error (4xx/5xx) responses and attach diagnostic headers:
    product version/name, cluster node, elapsed time, and optional
    SQL debug statistics.
    """
    if response.status_code >= 400:
        failure_msg = "status %s received by user %s attempting to access %s from %s" % (
            response.status_code, request.user, request.path,
            request.META.get('REMOTE_ADDR', None))
        # An error captured during initial request processing replaces
        # whatever response the view produced.
        if hasattr(self, '__init_request_error__'):
            response = self.handle_exception(self.__init_request_error__)
        if response.status_code == 401:
            response.data['detail'] += ' To establish a login session, visit /api/login/.'
            logger.info(failure_msg)
        else:
            logger.warning(failure_msg)
    response = super(APIView, self).finalize_response(request, response, *args, **kwargs)
    response['X-API-Product-Version'] = get_awx_version()
    response['X-API-Product-Name'] = 'AWX' if isinstance(get_licenser(), StubLicense) else 'Red Hat Ansible Tower'
    response['X-API-Node'] = settings.CLUSTER_HOST_ID
    started = getattr(self, 'time_started', None)
    if started:
        response['X-API-Time'] = '%0.3fs' % (time.time() - self.time_started)
    if getattr(settings, 'SQL_DEBUG', False):
        already_counted = getattr(self, 'queries_before', 0)
        query_times = [float(q['time']) for q in connection.queries[already_counted:]]
        response['X-API-Query-Count'] = len(query_times)
        response['X-API-Query-Time'] = '%0.3fs' % sum(query_times)
    if getattr(self, 'deprecated', False):
        response['Warning'] = '299 awx "This resource has been deprecated and will be removed in a future release."'  # noqa
    return response
def finalize_response(self, request, response, *args, **kwargs):
    """
    Log warning for 400 requests. Add header with elapsed time.
    """
    #
    # If the URL was rewritten, and we get a 404, we should entirely
    # replace the view in the request context with an ApiErrorView()
    # Without this change, there will be subtle differences in the BrowseableAPIRenderer
    #
    # These differences could provide contextual clues which would allow
    # anonymous users to determine if usernames were valid or not
    # (e.g., if an anonymous user visited `/api/v2/users/valid/`, and got a 404,
    # but also saw that the page heading said "User Detail", they might notice
    # that's a difference in behavior from a request to `/api/v2/users/not-valid/`, which
    # would show a page header of "Not Found").  Changing the view here
    # guarantees that the rendered response will look exactly like the response
    # when you visit a URL that has no matching URL paths in `awx.api.urls`.
    #
    if response.status_code == 404 and 'awx.named_url_rewritten' in request.environ:
        # Drop the Allow header so the generic error page doesn't leak
        # which methods the real (hidden) view would have supported.
        self.headers.pop('Allow', None)
        response = super(APIView, self).finalize_response(request, response, *args, **kwargs)
        view = ApiErrorView()
        setattr(view, 'request', request)
        response.renderer_context['view'] = view
        return response
    if response.status_code >= 400:
        status_msg = "status %s received by user %s attempting to access %s from %s" % (
            response.status_code,
            request.user,
            request.path,
            request.META.get('REMOTE_ADDR', None),
        )
        # A failure recorded during initial request processing replaces
        # whatever response the view produced.
        if hasattr(self, '__init_request_error__'):
            response = self.handle_exception(self.__init_request_error__)
        if response.status_code == 401:
            # Hint unauthenticated API users toward the session login endpoint.
            response.data['detail'] += _(' To establish a login session, visit') + ' /api/login/.'
            logger.info(status_msg)
        else:
            logger.warning(status_msg)
    response = super(APIView, self).finalize_response(request, response, *args, **kwargs)
    time_started = getattr(self, 'time_started', None)
    # Diagnostic headers: product identity, serving node, timing, SQL stats.
    response['X-API-Product-Version'] = get_awx_version()
    response['X-API-Product-Name'] = server_product_name()
    response['X-API-Node'] = settings.CLUSTER_HOST_ID
    if time_started:
        time_elapsed = time.time() - self.time_started
        response['X-API-Time'] = '%0.3fs' % time_elapsed
    if getattr(settings, 'SQL_DEBUG', False):
        # Only count the queries issued while handling this request.
        queries_before = getattr(self, 'queries_before', 0)
        q_times = [float(q['time']) for q in connection.queries[queries_before:]]
        response['X-API-Query-Count'] = len(q_times)
        response['X-API-Query-Time'] = '%0.3fs' % sum(q_times)
    if getattr(self, 'deprecated', False):
        response['Warning'] = '299 awx "This resource has been deprecated and will be removed in a future release."'  # noqa
    return response
def get(self, request, format=None):
    '''Return various sitewide configuration settings'''
    # Only admins/auditors may see the license key itself.
    if request.user.is_superuser or request.user.is_system_auditor:
        license_data = get_license(show_key=True)
    else:
        license_data = get_license(show_key=False)
    # An invalid license is reported as an empty dict.
    if not license_data.get('valid_key', False):
        license_data = {}
    if license_data and 'features' in license_data and 'activity_streams' in license_data['features']:
        # FIXME: Make the final setting value dependent on the feature?
        license_data['features']['activity_streams'] &= settings.ACTIVITY_STREAM_ENABLED

    # Normalize the pendo state to a known value, defaulting to 'off'.
    pendo_state = settings.PENDO_TRACKING_STATE if settings.PENDO_TRACKING_STATE in (
        'off', 'anonymous', 'detailed') else 'off'

    data = dict(
        time_zone=settings.TIME_ZONE,
        license_info=license_data,
        version=get_awx_version(),
        ansible_version=get_ansible_version(),
        # Open-source installs have no EULA to present.
        eula=render_to_string("eula.md") if license_data.get('license_type', 'UNLICENSED') != 'open' else '',
        analytics_status=pendo_state,
        analytics_collectors=all_collectors(),
        become_methods=PRIVILEGE_ESCALATION_METHODS,
    )

    # If LDAP is enabled, user_ldap_fields will return a list of field
    # names that are managed by LDAP and should be read-only for users with
    # a non-empty ldap_dn attribute.
    if getattr(settings, 'AUTH_LDAP_SERVER_URI', None):
        user_ldap_fields = ['username', 'password']
        user_ldap_fields.extend(getattr(settings, 'AUTH_LDAP_USER_ATTR_MAP', {}).keys())
        user_ldap_fields.extend(getattr(settings, 'AUTH_LDAP_USER_FLAGS_BY_GROUP', {}).keys())
        data['user_ldap_fields'] = user_ldap_fields

    # Project path / virtualenv details are only shown to users with
    # sufficiently privileged roles.
    if request.user.is_superuser \
            or request.user.is_system_auditor \
            or Organization.accessible_objects(request.user, 'admin_role').exists() \
            or Organization.accessible_objects(request.user, 'auditor_role').exists() \
            or Organization.accessible_objects(request.user, 'project_admin_role').exists():
        data.update(
            dict(project_base_dir=settings.PROJECTS_ROOT,
                 project_local_paths=Project.get_local_path_choices(),
                 custom_virtualenvs=get_custom_venv_choices()))
    elif JobTemplate.accessible_objects(request.user, 'admin_role').exists():
        data['custom_virtualenvs'] = get_custom_venv_choices()

    return Response(data)
def config(since):
    """Collect basic system/license configuration for the analytics payload.

    ``since`` is accepted for collector-interface compatibility and unused.
    """
    license_info = get_license(show_key=False)
    return {
        'system_uuid': settings.SYSTEM_UUID,
        'tower_url_base': settings.TOWER_URL_BASE,
        'tower_version': get_awx_version(),
        'ansible_version': get_ansible_version(),
        'license_type': license_info.get('license_type', 'UNLICENSED'),
        # BUG FIX: the lookup key was 'free instances' (with a space), which
        # never matches the license payload's 'free_instances' key used by
        # the other collectors in this file, so this always reported 0.
        'free_instances': license_info.get('free_instances', 0),
        'license_expiry': license_info.get('time_remaining', 0),
        'pendo_tracking': settings.PENDO_TRACKING_STATE,
        'authentication_backends': settings.AUTHENTICATION_BACKENDS,
        'logging_aggregators': settings.LOG_AGGREGATOR_LOGGERS,
        'external_logger_enabled': settings.LOG_AGGREGATOR_ENABLED,
        'external_logger_type': getattr(settings, 'LOG_AGGREGATOR_TYPE', None),
    }
def config(since, **kwargs):
    """Assemble the 'config' analytics payload: platform facts, install
    identity, subscription/license details, and logging configuration.

    ``since`` and ``**kwargs`` are accepted for collector-interface
    compatibility and are not used.
    """
    license_info = get_license()

    # Classify the installation from container environment markers.
    if os.environ.get('container') == 'oci':
        install_type = 'openshift'
    elif 'KUBERNETES_SERVICE_PORT' in os.environ:
        install_type = 'k8s'
    else:
        install_type = 'traditional'

    payload = {
        'platform': {
            'system': platform.system(),
            'dist': distro.linux_distribution(),
            'release': platform.release(),
            'type': install_type,
        },
        'install_uuid': settings.INSTALL_UUID,
        'instance_uuid': settings.SYSTEM_UUID,
        'tower_url_base': settings.TOWER_URL_BASE,
        'tower_version': get_awx_version(),
        'license_type': license_info.get('license_type', 'UNLICENSED'),
    }
    # Subscription metadata passed through verbatim (absent keys -> None).
    for field in ('license_date', 'subscription_name', 'sku', 'support_level',
                  'product_name', 'valid_key', 'satellite', 'pool_id',
                  'current_instances', 'automated_instances', 'automated_since',
                  'trial', 'grace_period_remaining', 'compliant',
                  'date_warning', 'date_expired'):
        payload[field] = license_info.get(field)
    payload.update({
        'free_instances': license_info.get('free_instances', 0),
        'total_licensed_instances': license_info.get('instance_count', 0),
        'license_expiry': license_info.get('time_remaining', 0),
        'pendo_tracking': settings.PENDO_TRACKING_STATE,
        'authentication_backends': settings.AUTHENTICATION_BACKENDS,
        'logging_aggregators': settings.LOG_AGGREGATOR_LOGGERS,
        'external_logger_enabled': settings.LOG_AGGREGATOR_ENABLED,
        'external_logger_type': getattr(settings, 'LOG_AGGREGATOR_TYPE', None),
    })
    return payload
def send_messages(self, messages):
    """POST each notification message body to its webhook URL; returns
    the number of messages processed.

    Webhook HTTP errors are logged and re-raised unless ``fail_silently``
    is set.
    """
    if 'User-Agent' not in self.headers:
        self.headers['User-Agent'] = "Tower {}".format(get_awx_version())
    delivered = 0
    for message in messages:
        target_url = "{}".format(message.recipients()[0])
        resp = requests.post(target_url,
                             json=message.body,
                             headers=self.headers,
                             verify=(not self.disable_ssl_verification))
        if resp.status_code >= 400:
            error_text = smart_text(
                _("Error sending notification webhook: {}").format(resp.text))
            logger.error(error_text)
            if not self.fail_silently:
                raise Exception(error_text)
        delivered += 1
    return delivered
def metrics():
    """Refresh the module-level Prometheus collectors from current system
    state and return the text-format exposition payload from
    ``generate_latest()``.
    """
    license_info = get_license(show_key=False)
    SYSTEM_INFO.info({
        'system_uuid': settings.SYSTEM_UUID,
        'insights_analytics': str(settings.INSIGHTS_DATA_ENABLED),
        'tower_url_base': settings.TOWER_URL_BASE,
        'tower_version': get_awx_version(),
        'ansible_version': get_ansible_version(),
        'license_type': license_info.get('license_type', 'UNLICENSED'),
        # BUG FIX: the lookup key was 'free instances' (with a space), which
        # never matches the license payload's 'free_instances' key used by
        # the other collectors in this file, so this always reported 0.
        'free_instances': str(license_info.get('free_instances', 0)),
        'license_expiry': str(license_info.get('time_remaining', 0)),
        'pendo_tracking': settings.PENDO_TRACKING_STATE,
        'external_logger_enabled': str(settings.LOG_AGGREGATOR_ENABLED),
        'external_logger_type': getattr(settings, 'LOG_AGGREGATOR_TYPE', 'None')
    })
    # Simple object totals.
    current_counts = counts(None)
    ORG_COUNT.set(current_counts['organization'])
    USER_COUNT.set(current_counts['user'])
    TEAM_COUNT.set(current_counts['team'])
    INV_COUNT.set(current_counts['inventory'])
    PROJ_COUNT.set(current_counts['project'])
    JT_COUNT.set(current_counts['job_template'])
    WFJT_COUNT.set(current_counts['workflow_job_template'])
    HOST_COUNT.labels(type='all').set(current_counts['host'])
    HOST_COUNT.labels(type='active').set(current_counts['active_host_count'])
    SCHEDULE_COUNT.set(current_counts['schedule'])
    INV_SCRIPT_COUNT.set(current_counts['custom_inventory_script'])
    CUSTOM_VENVS.set(current_counts['custom_virtualenvs'])
    USER_SESSIONS.labels(type='all').set(current_counts['active_sessions'])
    USER_SESSIONS.labels(type='user').set(current_counts['active_user_sessions'])
    USER_SESSIONS.labels(type='anonymous').set(current_counts['active_anonymous_sessions'])
    RUNNING_JOBS.set(current_counts['running_jobs'])

    # Per-instance resource gauges, keyed by instance uuid.
    instance_data = instance_info(None)
    for uuid in instance_data:
        INSTANCE_CAPACITY.labels(type=uuid).set(instance_data[uuid]['capacity'])
        INSTANCE_CPU.labels(type=uuid).set(instance_data[uuid]['cpu'])
        INSTANCE_MEMORY.labels(type=uuid).set(instance_data[uuid]['memory'])
        INSTANCE_INFO.labels(type=uuid).info({
            'enabled': str(instance_data[uuid]['enabled']),
            # BUG FIX: getattr() on a dict never finds its keys, so this
            # always reported 'None'; use dict.get() instead.  str() keeps
            # the value a string as Info.info() requires.
            'last_isolated_check': str(instance_data[uuid].get('last_isolated_check', 'None')),
            'managed_by_policy': str(instance_data[uuid]['managed_by_policy']),
            'version': instance_data[uuid]['version']
        })

    # Per-node job launch-type and status breakdowns.
    instance_data = job_instance_counts(None)
    for node in instance_data:
        # skipping internal execution node (for system jobs)
        # TODO: determine if we should exclude execution_node from instance count
        if node == '':
            continue
        types = instance_data[node].get('launch_type', {})
        for launch_type, value in types.items():
            INSTANCE_LAUNCH_TYPE.labels(node=node, launch_type=launch_type).set(value)
        statuses = instance_data[node].get('status', {})
        for status, value in statuses.items():
            INSTANCE_STATUS.labels(node=node, status=status).set(value)
    return generate_latest()
def version(request):
    """Expose the running AWX/Tower version under several legacy key names.

    ``request`` is accepted for interface compatibility and unused.
    """
    full_version = get_awx_version()
    short_version = full_version.split('-')[0]
    return dict(
        version=full_version,
        tower_version=full_version,
        short_tower_version=short_version,
    )
def metrics():
    """Build a fresh Prometheus registry, populate it from current system
    state, and return the text-format exposition payload.

    A new CollectorRegistry per call avoids stale samples surviving
    between scrapes.
    """
    REGISTRY = CollectorRegistry()

    # --- collector declarations -------------------------------------------
    SYSTEM_INFO = Info('awx_system', 'AWX System Information', registry=REGISTRY)
    ORG_COUNT = Gauge('awx_organizations_total', 'Number of organizations', registry=REGISTRY)
    USER_COUNT = Gauge('awx_users_total', 'Number of users', registry=REGISTRY)
    TEAM_COUNT = Gauge('awx_teams_total', 'Number of teams', registry=REGISTRY)
    INV_COUNT = Gauge('awx_inventories_total', 'Number of inventories', registry=REGISTRY)
    PROJ_COUNT = Gauge('awx_projects_total', 'Number of projects', registry=REGISTRY)
    JT_COUNT = Gauge('awx_job_templates_total', 'Number of job templates', registry=REGISTRY)
    WFJT_COUNT = Gauge('awx_workflow_job_templates_total', 'Number of workflow job templates', registry=REGISTRY)
    HOST_COUNT = Gauge('awx_hosts_total', 'Number of hosts', ['type'], registry=REGISTRY)
    SCHEDULE_COUNT = Gauge('awx_schedules_total', 'Number of schedules', registry=REGISTRY)
    # NOTE(review): 'invetory' typo is preserved — changing a help string
    # would alter the exposition output consumers may match on.
    INV_SCRIPT_COUNT = Gauge('awx_inventory_scripts_total', 'Number of invetory scripts', registry=REGISTRY)
    USER_SESSIONS = Gauge('awx_sessions_total', 'Number of sessions', ['type'], registry=REGISTRY)
    CUSTOM_VENVS = Gauge('awx_custom_virtualenvs_total', 'Number of virtualenvs', registry=REGISTRY)
    RUNNING_JOBS = Gauge('awx_running_jobs_total', 'Number of running jobs on the Tower system', registry=REGISTRY)
    PENDING_JOBS = Gauge('awx_pending_jobs_total', 'Number of pending jobs on the Tower system', registry=REGISTRY)
    STATUS = Gauge('awx_status_total', 'Status of Job launched', ['status'], registry=REGISTRY)
    INSTANCE_CAPACITY = Gauge('awx_instance_capacity', 'Capacity of each node in a Tower system',
                              ['hostname', 'instance_uuid'], registry=REGISTRY)
    INSTANCE_CPU = Gauge('awx_instance_cpu', 'CPU cores on each node in a Tower system',
                         ['hostname', 'instance_uuid'], registry=REGISTRY)
    INSTANCE_MEMORY = Gauge('awx_instance_memory', 'RAM (Kb) on each node in a Tower system',
                            ['hostname', 'instance_uuid'], registry=REGISTRY)
    INSTANCE_INFO = Info('awx_instance', 'Info about each node in a Tower system',
                         ['hostname', 'instance_uuid'], registry=REGISTRY)
    INSTANCE_LAUNCH_TYPE = Gauge('awx_instance_launch_type_total', 'Type of Job launched',
                                 ['node', 'launch_type'], registry=REGISTRY)
    INSTANCE_STATUS = Gauge('awx_instance_status_total', 'Status of Job launched',
                            ['node', 'status'], registry=REGISTRY)
    INSTANCE_CONSUMED_CAPACITY = Gauge('awx_instance_consumed_capacity',
                                       'Consumed capacity of each node in a Tower system',
                                       ['hostname', 'instance_uuid'], registry=REGISTRY)
    INSTANCE_REMAINING_CAPACITY = Gauge('awx_instance_remaining_capacity',
                                        'Remaining capacity of each node in a Tower system',
                                        ['hostname', 'instance_uuid'], registry=REGISTRY)
    LICENSE_INSTANCE_TOTAL = Gauge('awx_license_instance_total',
                                   'Total number of managed hosts provided by your license', registry=REGISTRY)
    LICENSE_INSTANCE_FREE = Gauge('awx_license_instance_free',
                                  'Number of remaining managed hosts provided by your license', registry=REGISTRY)

    # --- system / license info --------------------------------------------
    license_info = get_license()
    SYSTEM_INFO.info(
        {
            'install_uuid': settings.INSTALL_UUID,
            'insights_analytics': str(settings.INSIGHTS_TRACKING_STATE),
            'tower_url_base': settings.TOWER_URL_BASE,
            'tower_version': get_awx_version(),
            'license_type': license_info.get('license_type', 'UNLICENSED'),
            'license_expiry': str(license_info.get('time_remaining', 0)),
            'pendo_tracking': settings.PENDO_TRACKING_STATE,
            'external_logger_enabled': str(settings.LOG_AGGREGATOR_ENABLED),
            'external_logger_type': getattr(settings, 'LOG_AGGREGATOR_TYPE', 'None'),
        }
    )
    # Gauge.set() expects a number; pass the raw counts rather than
    # stringifying them first (the original str() wrapping only worked
    # because prometheus_client coerces via float()).
    LICENSE_INSTANCE_TOTAL.set(license_info.get('instance_count', 0))
    LICENSE_INSTANCE_FREE.set(license_info.get('free_instances', 0))

    # --- object totals -----------------------------------------------------
    current_counts = counts(None)
    ORG_COUNT.set(current_counts['organization'])
    USER_COUNT.set(current_counts['user'])
    TEAM_COUNT.set(current_counts['team'])
    INV_COUNT.set(current_counts['inventory'])
    PROJ_COUNT.set(current_counts['project'])
    JT_COUNT.set(current_counts['job_template'])
    WFJT_COUNT.set(current_counts['workflow_job_template'])
    HOST_COUNT.labels(type='all').set(current_counts['host'])
    HOST_COUNT.labels(type='active').set(current_counts['active_host_count'])
    SCHEDULE_COUNT.set(current_counts['schedule'])
    INV_SCRIPT_COUNT.set(current_counts['custom_inventory_script'])
    CUSTOM_VENVS.set(current_counts['custom_virtualenvs'])
    USER_SESSIONS.labels(type='all').set(current_counts['active_sessions'])
    USER_SESSIONS.labels(type='user').set(current_counts['active_user_sessions'])
    USER_SESSIONS.labels(type='anonymous').set(current_counts['active_anonymous_sessions'])

    # --- job status breakdown ----------------------------------------------
    all_job_data = job_counts(None)
    statuses = all_job_data.get('status', {})
    for status, value in statuses.items():
        STATUS.labels(status=status).set(value)
    RUNNING_JOBS.set(current_counts['running_jobs'])
    PENDING_JOBS.set(current_counts['pending_jobs'])

    # --- per-instance resource gauges --------------------------------------
    instance_data = instance_info(None, include_hostnames=True)
    for uuid, info in instance_data.items():
        hostname = info['hostname']
        INSTANCE_CAPACITY.labels(hostname=hostname, instance_uuid=uuid).set(info['capacity'])
        INSTANCE_CPU.labels(hostname=hostname, instance_uuid=uuid).set(info['cpu'])
        INSTANCE_MEMORY.labels(hostname=hostname, instance_uuid=uuid).set(info['memory'])
        INSTANCE_CONSUMED_CAPACITY.labels(hostname=hostname, instance_uuid=uuid).set(info['consumed_capacity'])
        INSTANCE_REMAINING_CAPACITY.labels(hostname=hostname, instance_uuid=uuid).set(info['remaining_capacity'])
        INSTANCE_INFO.labels(hostname=hostname, instance_uuid=uuid).info(
            {
                'enabled': str(info['enabled']),
                # BUG FIX: getattr() on a dict never finds its keys, so this
                # always reported 'None'; use dict.get() instead.  str()
                # keeps the value a string as Info.info() requires.
                'last_isolated_check': str(info.get('last_isolated_check', 'None')),
                'managed_by_policy': str(info['managed_by_policy']),
                'version': info['version'],
            }
        )

    # --- per-node job launch-type / status breakdowns -----------------------
    instance_data = job_instance_counts(None)
    for node in instance_data:
        # skipping internal execution node (for system jobs)
        if node == '':
            continue
        types = instance_data[node].get('launch_type', {})
        for launch_type, value in types.items():
            INSTANCE_LAUNCH_TYPE.labels(node=node, launch_type=launch_type).set(value)
        statuses = instance_data[node].get('status', {})
        for status, value in statuses.items():
            INSTANCE_STATUS.labels(node=node, status=status).set(value)
    return generate_latest(registry=REGISTRY)