def update_config(m2ee):
    """Enable the Mendix Java Agent on the runtime when supported and enabled.

    Logs a warning (but continues) when the runtime version does not meet
    the agent's minimum requirement.
    """
    version = runtime.get_runtime_version()
    supported = meets_version_requirements(version)
    if not supported:
        logging.warning(
            "Not enabling Mendix Java Agent: runtime version must be 7.14 or up. "
            "Application metrics will not be shipped to third-party monitoring services."
        )
    if is_enabled(version):
        _enable_mx_java_agent(m2ee)
def _get_azure_storage_specific_config(vcap_services):
    """Build the Azure Blob Storage custom runtime settings from bound services.

    Returns the settings dict, or None when no matching service is bound or
    the runtime is too old to support Azure Storage.
    """
    service = None
    for name in vcap_services:
        is_azure_objectstore = (
            name == "objectstore"
            and os.getenv("MENDIX_BLOBSTORE_TYPE") == "azure"
        )
        # Last matching service wins, as in the original scan order.
        if name.startswith("azure-storage") or is_azure_objectstore:
            service = vcap_services[name][0]

    if not service:
        return None
    if runtime.get_runtime_version() < 6.7:
        logging.warning("Can not configure Azure Storage with Mendix < 6.7")
        return None

    creds = service["credentials"]
    core_value = STORAGE_CUSTOM_RUNTIME_SETTINGS_PREFIX + "azure"
    prefix = core_value + "."
    settings = {
        STORAGE_CORE_CUSTOM_RUNTIME_SETTINGS_KEY: core_value,
        prefix + "Container": os.getenv("AZURE_CONTAINER_NAME", "mendix"),
        prefix + "CreateContainerIfNotExists": False,
    }

    # Optional credential fields mapped onto their runtime setting names.
    # Later entries overwrite earlier ones (e.g. account_name beats
    # storage_account_name), matching the original assignment order.
    optional_fields = (
        ("primary_access_key", "AccountKey"),
        ("storage_account_name", "AccountName"),
        ("account_name", "AccountName"),
        ("sas_token", "SharedAccessSignature"),
        ("container_uri", "BlobEndpoint"),
        ("container_name", "Container"),
    )
    for cred_key, setting_name in optional_fields:
        if cred_key in creds:
            settings[prefix + setting_name] = creds[cred_key]
    return settings
def __init__(self, interval, m2ee):
    """Initialize the metrics thread state and select an emitter.

    Metrics go straight to the metrics server when the loggregator bypass
    is active; otherwise they are emitted to stdout.
    """
    super().__init__()
    self.interval = interval
    self.m2ee = m2ee
    self.db = None
    self.micrometer_metrics_enabled = micrometer_metrics_enabled(
        runtime.get_runtime_version()
    )
    use_metrics_server = bypass_loggregator()
    if use_metrics_server:
        logging.info("Metrics are logged direct to metrics server.")
        self.emitter = MetricsServerEmitter(metrics_url=get_metrics_url())
    else:
        logging.info("Metrics are logged to stdout.")
        self.emitter = LoggingEmitter()
def _render_template_to_file(config_file, **context):
    # Render the Jinja2 template "<config_file>.j2" with `context` and write
    # the result to `config_file` (both resolved to absolute paths).
    output_path = os.path.abspath(config_file)
    template_path = os.path.abspath("{}.j2".format(config_file))
    with open(template_path, "r") as file_:
        template = Template(
            file_.read(), trim_blocks=True, lstrip_blocks=True
        )
    rendered = template.render(**context)
    with open(output_path, "w") as file_:
        file_.write(rendered)


def update_config():
    """Write the nginx configuration, proxy_params and MxAdmin password file.

    Renders the nginx and proxy_params Jinja2 templates in place,
    substituting ports, paths, headers and the SameSite cookie workaround
    flag, then generates the password file for the MxAdmin endpoint.
    """
    samesite_cookie_workaround_enabled = (
        _is_samesite_cookie_workaround_enabled(runtime.get_runtime_version())
    )
    if samesite_cookie_workaround_enabled:
        logging.info("SameSite cookie workaround is enabled")

    # Populating nginx config template
    logging.debug("Writing nginx configuration file...")
    _render_template_to_file(
        CONFIG_FILE,
        samesite_cookie_workaround_enabled=samesite_cookie_workaround_enabled,
        locations=_get_locations(),
        default_headers=_get_http_headers(),
        nginx_port=str(util.get_nginx_port()),
        runtime_port=str(util.get_runtime_port()),
        admin_port=str(util.get_admin_port()),
        root=os.getcwd(),
        mxadmin_path=MXADMIN_PATH,
        client_cert_check_internal_path_prefix=CLIENT_CERT_CHECK_INTERNAL_PATH_PREFIX,
    )
    logging.debug("nginx configuration file written")

    # Populating proxy params template
    logging.debug("Writing proxy_params configuration file...")
    _render_template_to_file(
        PROXY_FILE,
        proxy_buffers=_get_proxy_buffers(),
        proxy_buffer_size=_get_proxy_buffer_size(),
    )
    logging.debug("proxy_params configuration file written")

    _generate_password_file({"MxAdmin": security.get_m2ee_password()})
def _inject_storage_stats(self, stats):
    """Add file-storage statistics to `stats` under the "storage" key.

    Always collects the number of files; additionally collects the total
    size of files on runtimes >= 7.4.0. Failures are logged as warnings
    and then re-raised so the caller can react.

    :param stats: mutable stats dict to extend in place.
    :return: the same `stats` dict, for chaining.
    """
    storage_stats = {}
    runtime_version = runtime.get_runtime_version()
    try:
        storage_stats["get_number_of_files"] = self._get_number_of_files()
    except Exception as e:
        # `logging.warn` is a deprecated alias; use `warning` with lazy args.
        logging.warning("Metrics: Failed to retrieve number of files, %s", e)
        raise
    if runtime_version >= MXVersion("7.4.0"):
        try:
            storage_stats["get_size_of_files"] = self._get_size_of_files()
        except Exception as e:
            logging.warning(
                "Metrics: Failed to retrieve size of files, %s", e
            )
            raise
    stats["storage"] = storage_stats
    return stats
def update_config(
    m2ee,
    model_version,
    runtime_version,
    extra_jmx_instance_config=None,
    jmx_config_files=None,
):
    """Configure Datadog integration for the runtime.

    Sets up runtime log shipping (Mendix >= 7.15), writes the JMX metrics
    configuration file and attaches the Datadog Java Trace Agent. No-op
    when Datadog is not enabled or not installed.

    :param extra_jmx_instance_config: extra config merged into the JMX
        instance configuration (optional).
    :param jmx_config_files: extra JMX config files for the Java agent
        (optional; a fresh list is used when omitted).
    """
    # Fix: the original signature used a mutable default (`[]`) which is
    # appended to below, leaking entries across successive calls.
    if jmx_config_files is None:
        jmx_config_files = []

    if not is_enabled() or not _is_installed():
        return

    # Set up runtime logging
    if runtime.get_runtime_version() >= 7.15:
        util.upsert_logging_config(
            m2ee,
            {
                "type": "tcpjsonlines",
                "name": "DatadogSubscriber",
                "autosubscribe": "INFO",
                "host": "localhost",
                # For MX8 integer is supported again, this change needs to be
                # made when MX8 is GA
                "port": str(LOGS_PORT),
            },
        )

    # Set up runtime JMX configuration
    with open(_get_jmx_conf_file(), "w") as fh:
        fh.write(
            yaml.safe_dump(
                _get_runtime_jmx_config(
                    extra_jmx_instance_config=extra_jmx_instance_config,
                )
            )
        )

    # Set up Datadog Java Trace Agent
    jmx_config_files.append(_get_jmx_conf_file())
    _set_up_dd_java_agent(
        m2ee,
        model_version,
        runtime_version,
        jmx_config_files=jmx_config_files,
    )
def configure_metrics_registry(m2ee):
    """Add custom environment variables to runtime.

    This ensures runtime micrometer sends metrics to telegraf. Returns an
    empty list when micrometer metrics are disabled for this runtime
    version, the free-apps registry for free apps, and the paid-apps
    registries (influx, optionally statsd) otherwise.
    """
    if not micrometer_metrics_enabled(runtime.get_runtime_version()):
        return []

    logging.info(
        "Configuring runtime to push metrics to influx via micrometer")

    if util.is_free_app():
        return FREEAPPS_METRICS_REGISTRY

    registries = [INFLUX_REGISTRY]
    statsd_consumer_present = (
        datadog.is_enabled()
        or get_appmetrics_target()
        or appdynamics.machine_agent_enabled()
    )
    if statsd_consumer_present:
        registries.append(STATSD_REGISTRY)
    return registries
def _get_credentials_from_tvm(tvm_endpoint, tvm_username, tvm_password):
    """Fetch static IAM credentials from the Token Vending Machine (TVM).

    Retries the HTTP call up to three additional times, five seconds
    apart, before giving up.

    :return: an (AccessKeyId, SecretAccessKey) tuple.
    :raises Exception: when the TVM keeps failing or the response is
        missing either credential field.
    """
    attempts_left = 3
    while True:
        response = requests.get(
            "https://%s/v1/getcredentials" % tvm_endpoint,
            headers={
                "User-Agent": "Mendix Buildpack {} (for Mendix {})".format(
                    util.get_buildpack_version(),
                    runtime.get_runtime_version(),
                )
            },
            auth=(tvm_username, tvm_password),
        )
        if response.ok:
            break
        if attempts_left == 0:
            logging.error("Failed to get IAM credential from TVM")
            raise Exception(
                "failed to get IAM credential from TVM for tvm_user %s"
                % tvm_username)
        attempts_left -= 1
        time.sleep(5)
        logging.error(
            "Failed to get IAM credential from TVM (HTTP {}), Retrying... {}"
            .format(response.status_code, attempts_left))
        logging.error("Number of retries left = {}".format(attempts_left))

    result = response.json()
    # Both fields must be present in the TVM response.
    for field in ("AccessKeyId", "SecretAccessKey"):
        if field not in result:
            raise Exception(
                "failed to get IAM credential from TVM for tvm_user %s"
                " (missing %s)" % (tvm_username, field))
    return result["AccessKeyId"], result["SecretAccessKey"]
def _get_swift_specific_config(vcap_services):
    """Build the Swift (Object-Storage) custom runtime settings.

    Returns the settings dict, or None when no Object-Storage service is
    bound or the runtime is too old to support it.
    """
    if "Object-Storage" not in vcap_services:
        return None
    if runtime.get_runtime_version() < 6.7:
        logging.warning("Can not configure Object Storage with Mendix < 6.7")
        return None

    creds = vcap_services["Object-Storage"][0]["credentials"]
    core_value = STORAGE_CUSTOM_RUNTIME_SETTINGS_PREFIX + "swift"
    prefix = core_value + "."

    settings = {
        STORAGE_CORE_CUSTOM_RUNTIME_SETTINGS_KEY: core_value,
        prefix + "Container": os.getenv("SWIFT_CONTAINER_NAME", "mendix"),
        prefix + "Container.AutoCreate": True,
    }
    # Copy each required credential field into its runtime setting.
    credential_fields = (
        ("domainId", "credentials.DomainId"),
        ("auth_url", "credentials.Authurl"),
        ("username", "credentials.Username"),
        ("password", "credentials.Password"),
        ("region", "credentials.Region"),
    )
    for cred_key, setting_name in credential_fields:
        settings[prefix + setting_name] = creds[cred_key]
    return settings
def _enable_mx_java_agent(m2ee):
    """Attach the Mendix Java Agent to the runtime's JVM options.

    Passes optional agent config and instrumentation config files as
    -javaagent arguments, then sets the com.mendix.metrics.Type custom
    runtime setting unless it was already set explicitly.
    """
    jar = os.path.join(
        _get_destination_dir(),
        os.path.basename(util.get_dependency(DEPENDENCY)["artifact"]),
    )

    logging.debug("Checking if Mendix Java Agent is enabled...")
    agent_option_prefix = "-javaagent:{}".format(jar)
    if any(
        option.startswith(agent_option_prefix)
        for option in util.get_javaopts(m2ee)
    ):
        logging.debug("Mendix Java Agent is already enabled")
        return

    logging.debug("Enabling Mendix Java Agent...")

    agent_args = []
    if "METRICS_AGENT_CONFIG" in os.environ:
        config_file = _to_file(
            "METRICS_AGENT_CONFIG",
            os.environ.get("METRICS_AGENT_CONFIG"),
        )
        agent_args.append(_to_arg("config", config_file))
    elif "MetricsAgentConfig" in util.get_custom_runtime_settings(m2ee):
        logging.warning(
            "Passing MetricsAgentConfig with custom runtime "
            "settings is deprecated. "
            "Please use the METRICS_AGENT_CONFIG environment variable.")
        config_file = _to_file(
            "METRICS_AGENT_CONFIG",
            util.get_custom_runtime_setting(m2ee, "MetricsAgentConfig"),
        )
        agent_args.append(_to_arg("config", config_file))

    # Default config for fallback
    instrumentation_config = os.path.join(
        _get_destination_dir(), "DefaultInstrumentationConfig.json"
    )
    if "METRICS_AGENT_INSTRUMENTATION_CONFIG" in os.environ:
        instrumentation_config = _to_file(
            "METRICS_AGENT_INSTRUMENTATION_CONFIG",
            os.environ.get("METRICS_AGENT_INSTRUMENTATION_CONFIG"),
        )
    agent_args.append(
        _to_arg("instrumentation_config", instrumentation_config)
    )

    agent_args = [arg for arg in agent_args if arg]
    args_suffix = "={}".format(",".join(agent_args)) if agent_args else ""
    util.upsert_javaopts(m2ee, agent_option_prefix + args_suffix)

    # If not explicitly set,
    # - default to StatsD (MxVersion < metrics.MXVERSION_MICROMETER)
    # - default to micrometer (MxVersion >= metrics.MXVERSION_MICROMETER)
    # NOTE: Runtime is moving away from statsd type metrics. If we have
    # customers preferring statsd format, they would need to configure a
    # StatsD registry for micrometer.
    # https://docs.mendix.com/refguide/metrics
    if metrics.micrometer_metrics_enabled(runtime.get_runtime_version()):
        metrics_type = "micrometer"
    else:
        metrics_type = "statsd"
    try:
        util.upsert_custom_runtime_setting(
            m2ee, "com.mendix.metrics.Type", metrics_type
        )
    except ValueError:
        # Setting already present: keep the user-provided value.
        logging.debug(
            "com.mendix.metrics.Type custom runtime setting exists, not setting"
        )
"Mendix Cloud Foundry Buildpack %s [%s] starting...", util.get_buildpack_version(), util.get_current_buildpack_commit(), ) try: if os.getenv("CF_INSTANCE_INDEX") is None: logging.warning( "CF_INSTANCE_INDEX environment variable not found, assuming cluster leader responsibility..." ) # Initialize the runtime m2ee = runtime.setup(util.get_vcap_data()) # Get versions and names runtime_version = runtime.get_runtime_version() model_version = runtime.get_model_version() application_name = util.get_vcap_data()["application_name"] # Update runtime configuration based on component configuration database.update_config(m2ee) storage.update_config(m2ee) java.update_config(m2ee, util.get_vcap_data(), runtime_version) newrelic.update_config(m2ee, application_name) appdynamics.update_config(m2ee) dynatrace.update_config(m2ee, application_name) mx_java_agent.update_config(m2ee) telegraf.update_config(m2ee, application_name) ( databroker_jmx_instance_cfg, databroker_jmx_config_files,
def _get_s3_specific_config(vcap_services):
    """Build the S3 (or S3-compatible) storage custom runtime settings.

    Resolves credentials from bound services ("amazon-s3"-prefixed,
    "objectstore", or "p-riakcs"), then lets S3_* environment variables
    override every field. Returns the resulting settings dict, or None
    when no bucket can be determined or no usable credential combination
    (static keys, or a TVM endpoint with username/password) is found.
    """
    version = runtime.get_runtime_version()
    access_key = secret = bucket = encryption_keys = key_suffix = None
    tvm_endpoint = tvm_username = tvm_password = endpoint = amazon_s3 = None
    v2_auth = ""
    blobstore_type = os.getenv("MENDIX_BLOBSTORE_TYPE")
    # Scan for an S3-like bound service; if several match, the last wins.
    for key in vcap_services:
        if key.startswith("amazon-s3") or (
            key == "objectstore"
            and (blobstore_type is None or blobstore_type == "s3")
        ):
            amazon_s3 = key
    if amazon_s3:
        _conf = vcap_services[amazon_s3][0]["credentials"]
        bucket = _conf["bucket"]  # see below at hacky for actual conf
        if "access_key_id" in _conf:
            access_key = _conf["access_key_id"]
        if "secret_access_key" in _conf:
            secret = _conf["secret_access_key"]
        if "tvm_endpoint" in _conf:
            tvm_endpoint = _conf["tvm_endpoint"]
        if "tvm_username" in _conf:
            tvm_username = _conf["tvm_username"]
        if "tvm_password" in _conf:
            tvm_password = _conf["tvm_password"]
        if "encryption_keys" in _conf:
            encryption_keys = _conf["encryption_keys"]
        if "key_suffix" in _conf:
            key_suffix = _conf["key_suffix"]
        if "host" in _conf:
            endpoint = _conf["host"]
        if "endpoint" in _conf:
            # "endpoint" takes precedence over "host" when both are present
            endpoint = _conf["endpoint"]
        # hacky way to switch from suffix to prefix configuration
        if "key_prefix" in _conf and "endpoint" in _conf:
            bucket = _conf["key_prefix"].replace("/", "")
            endpoint = _conf["endpoint"] + "/" + _conf["bucket"]
            key_suffix = None
    elif "p-riakcs" in vcap_services:
        _conf = vcap_services["p-riakcs"][0]["credentials"]
        access_key = _conf["access_key_id"]
        secret = _conf["secret_access_key"]
        # Extract host (group 4) and bucket (group 5) from the service URI,
        # dropping any embedded user:password part.
        pattern = r"https://(([^:]+):([^@]+)@)?([^/]+)/(.*)"
        match = re.search(pattern, _conf["uri"])
        endpoint = "https://" + match.group(4)
        bucket = match.group(5)
        v2_auth = "true"
    # Environment variables override anything found in the bound services.
    access_key = os.getenv("S3_ACCESS_KEY_ID", access_key)
    secret = os.getenv("S3_SECRET_ACCESS_KEY", secret)
    tvm_endpoint = os.getenv("S3_TVM_ENDPOINT", tvm_endpoint)
    tvm_username = os.getenv("S3_TVM_USERNAME", tvm_username)
    tvm_password = os.getenv("S3_TVM_PASSWORD", tvm_password)
    bucket = os.getenv("S3_BUCKET_NAME", bucket)
    if "S3_ENCRYPTION_KEYS" in os.environ:
        encryption_keys = json.loads(os.getenv("S3_ENCRYPTION_KEYS"))
    dont_perform_deletes = (
        os.getenv("S3_PERFORM_DELETES", "true").lower() == "false"
    )
    key_suffix = os.getenv("S3_KEY_SUFFIX", key_suffix)
    endpoint = os.getenv("S3_ENDPOINT", endpoint)
    v2_auth = os.getenv("S3_USE_V2_AUTH", v2_auth).lower() == "true"
    sse = os.getenv("S3_USE_SSE", "").lower() == "true"
    # Without a bucket there is nothing to configure.
    if not bucket:
        return None
    core_config_value = STORAGE_CUSTOM_RUNTIME_SETTINGS_PREFIX + "s3"
    config_prefix = core_config_value + "."
    if access_key and secret:
        # Static credentials take priority over TVM-based configuration.
        logging.info("S3 config detected, activating external file store")
        config = {
            STORAGE_CORE_CUSTOM_RUNTIME_SETTINGS_KEY: core_config_value,
            config_prefix + "AccessKeyId": access_key,
            config_prefix + "SecretAccessKey": secret,
            config_prefix + "BucketName": bucket,
        }
    elif (
        tvm_endpoint
        and tvm_username
        and tvm_password
        and _runtime_sts_support(version)
    ):
        # Runtime supports STS: let it refresh tokens from the TVM itself.
        logging.info("S3 TVM config detected")
        config = {
            STORAGE_CORE_CUSTOM_RUNTIME_SETTINGS_KEY: core_config_value,
            config_prefix
            + "tokenService.Url": "https://%s/v1/gettoken" % tvm_endpoint,
            config_prefix + "tokenService.Username": tvm_username,
            config_prefix + "tokenService.Password": tvm_password,
            config_prefix + "tokenService.RefreshPercentage": 80,
            config_prefix + "tokenService.RetryIntervalInSeconds": 10,
            config_prefix + "BucketName": bucket,
        }
    elif tvm_endpoint and tvm_username and tvm_password:
        # No STS support: fetch static IAM credentials from the TVM once.
        logging.info(
            "S3 TVM config detected, fetching IAM credentials from TVM..."
        )
        access_key, secret = _get_credentials_from_tvm(
            tvm_endpoint, tvm_username, tvm_password
        )
        config = {
            STORAGE_CORE_CUSTOM_RUNTIME_SETTINGS_KEY: core_config_value,
            config_prefix + "AccessKeyId": access_key,
            config_prefix + "SecretAccessKey": secret,
            config_prefix + "BucketName": bucket,
        }
    else:
        return None
    if dont_perform_deletes:
        logging.debug("disabling perform deletes for runtime")
        if version < 7.19:
            # Deprecated in 7.19
            config[config_prefix + "PerformDeleteFromStorage"] = False
        elif version >= 9.12 or (
            version.major == 9 and version.minor == 6 and version.patch >= 11
        ):
            # Newer runtimes (>= 9.12, or 9.6 with patch >= 11 — presumably
            # a backport, TODO confirm) take a string value here.
            config[
                STORAGE_CUSTOM_RUNTIME_SETTINGS_PREFIX
                + "PerformDeleteFromStorage"
            ] = "NoFiles"
        else:
            config[
                STORAGE_CUSTOM_RUNTIME_SETTINGS_PREFIX
                + "PerformDeleteFromStorage"
            ] = False
    if key_suffix:
        config[config_prefix + "ResourceNameSuffix"] = key_suffix
    if v2_auth:
        config[config_prefix + "UseV2Auth"] = v2_auth
    if endpoint:
        config[config_prefix + "EndPoint"] = endpoint
    if version >= 6 and encryption_keys:
        config[config_prefix + "EncryptionKeys"] = encryption_keys
    if version >= 6 and sse:
        config[config_prefix + "UseSSE"] = sse
    return config