def check_database_environment():
    """Verify a database configuration exists.

    Returns True when ``database.get_config()`` succeeds; otherwise logs
    an actionable error and returns False.
    """
    try:
        database.get_config()
    except RuntimeError as ex:
        logging.error(
            "You should provide a DATABASE_URL by adding a database service "
            "to this application, it can be either MySQL or Postgres "
            "If this is the first push of a new app, "
            "set up a database service "
            "and push again afterwards: %s",
            ex,
        )
        return False
    return True
def _get_db_config():
    """Return the database config when Postgres metrics should be collected.

    Only the primary instance with metrics (or a Datadog API key) enabled
    gets a config; anything else — including non-Postgres databases —
    yields None.
    """
    metrics_wanted = include_db_metrics() or datadog.get_api_key()
    if not (metrics_wanted and util.i_am_primary_instance()):
        return None
    db_config = database.get_config()
    if db_config and db_config["DatabaseType"] == "PostgreSQL":
        return db_config
    return None
def _get_db_conn(self):
    """Return a cached psycopg2 connection, reconnecting when needed.

    Raises:
        ValueError: when no database configuration is available.
        Exception: when the configured database is not PostgreSQL.
    """
    # Drop a broken/closed handle so a fresh connection is made below.
    if self.db and self.db.closed != 0:
        self.db.close()
        self.db = None
    if not self.db:
        # get_database config may return None or empty
        db_config = database.get_config()
        if not db_config or "DatabaseType" not in db_config:
            raise ValueError(
                "Database not set as VCAP or DATABASE_URL. Check "
                "documentation to see supported configuration options.")
        if db_config["DatabaseType"] != "PostgreSQL":
            raise Exception("Metrics only supports postgresql, not %s"
                            % db_config["DatabaseType"])
        # "host[:port]" — default to the standard Postgres port.
        host, _, port_text = db_config["DatabaseHost"].partition(":")
        port = int(port_text) if port_text else 5432
        self.db = psycopg2.connect(
            "options='-c statement_timeout=60s'",
            database=db_config["DatabaseName"],
            user=db_config["DatabaseUserName"],
            password=db_config["DatabasePassword"],
            host=host,
            port=port,
            connect_timeout=3,
        )
        # Metrics queries are read-only one-shots; autocommit avoids
        # leaving idle transactions open.
        self.db.set_isolation_level(
            psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
    return self.db
def _set_up_postgres():
    """Write the Datadog agent's postgres check configuration.

    Only runs on the primary instance and only for PostgreSQL; silently
    skips when the database configuration is incomplete.
    """
    # TODO: set up a way to disable this, on shared database (mxapps.io) we
    # don't want to allow this.
    if not util.i_am_primary_instance():
        return
    dbconfig = database.get_config()
    for k in (
        "DatabaseType",
        "DatabaseUserName",
        "DatabasePassword",
        "DatabaseHost",
        "DatabaseName",  # written into conf.yaml below, so validate it too
    ):
        if k not in dbconfig:
            logging.warning(
                "Skipping database configuration for Datadog because "
                "configuration is not found. See database_config.py "
                "for details")
            return
    if dbconfig["DatabaseType"] != "PostgreSQL":
        return
    # "host[:port]" — the original indexed [1] unconditionally and crashed
    # with IndexError when no port was present; default to 5432 instead.
    host_and_port = dbconfig["DatabaseHost"].split(":")
    host = host_and_port[0]
    port = int(host_and_port[1]) if len(host_and_port) > 1 else 5432
    os.makedirs(DD_AGENT_CHECKS_DIR + "/postgres.d", exist_ok=True)
    with open(DD_AGENT_CHECKS_DIR + "/postgres.d/conf.yaml", "w") as fh:
        config = {
            "init_config": {},
            "instances": [{
                "host": host,
                "port": port,
                "username": dbconfig["DatabaseUserName"],
                "password": dbconfig["DatabasePassword"],
                "dbname": dbconfig["DatabaseName"],
            }],
        }
        fh.write(yaml.safe_dump(config))
def _get_database_tcp_latency(self, timeout: float = 5):
    """Measure TCP connect latency to the database host.

    Returns the round-trip connect time in milliseconds as a string with
    two decimals, or None when the connection times out or fails.
    """
    db_config = database.get_config()
    host, port = self._get_db_host_and_port(db_config["DatabaseHost"])
    # Fresh TCP socket with a hard connect deadline.
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.settimeout(timeout)
    started = timer()
    try:
        sock.connect((host, int(port)))
        sock.shutdown(socket.SHUT_RD)
        sock.close()
    except (socket.timeout, OSError):
        # Unreachable or unresponsive database: no measurement.
        return None
    elapsed = timer() - started
    return "%.2f" % (1000 * elapsed)
def _set_up_environment():
    """Build the environment dict used to launch the Datadog agent."""
    # Trace variables need to be set in the global environment
    # since the Datadog Java Trace Agent does not live inside the Datadog Agent process
    if _is_dd_tracing_enabled():
        os.environ["DD_SERVICE_NAME"] = _get_service()
        os.environ["DD_JMXFETCH_ENABLED"] = "false"
        dbconfig = database.get_config()
        if dbconfig:
            os.environ["DD_SERVICE_MAPPING"] = "{}:{}.db".format(
                dbconfig["DatabaseType"].lower(), _get_service())
    env = dict(os.environ.copy())
    # Everything in datadog.yaml can be configured with environment variables
    # This is the "official way" of working with the DD buildpack, so let's do this to ensure forward compatibility
    env["DD_API_KEY"] = get_api_key()
    env["DD_HOSTNAME"] = util.get_hostname()
    # Explicitly turn off tracing to ensure backward compatibility
    if not _is_dd_tracing_enabled():
        env["DD_TRACE_ENABLED"] = "false"
    env["DD_LOGS_ENABLED"] = "true"
    env["DD_LOG_FILE"] = "/dev/null"
    tags = util.get_tags()
    if tags:
        env["DD_TAGS"] = ",".join(tags)
    env["DD_PROCESS_CONFIG_LOG_FILE"] = "/dev/null"
    env["DD_DOGSTATSD_PORT"] = str(_get_statsd_port())
    # Include for forward-compatibility with DD buildpack
    env["DD_ENABLE_CHECKS"] = "true"
    env["DATADOG_DIR"] = str(os.path.abspath(DD_AGENT_DIR))
    return env
def _get_database_table_size(self):
    """Return the total on-disk size of the configured database in bytes."""
    conn = self._get_db_conn()
    db_config = database.get_config()
    with conn.cursor() as cursor:
        # Bound parameter instead of %-interpolating the database name into
        # the SQL text (avoids quoting/injection issues with odd names).
        cursor.execute("SELECT pg_database_size(%s);",
                       (db_config["DatabaseName"],))
        rows = cursor.fetchall()
        return int_or_default(rows[0][0])
def test_database_url(self):
    """A jdbc:postgres DATABASE_URL must resolve to a PostgreSQL config."""
    self.clean_env()
    os.environ["DATABASE_URL"] = (
        "jdbc:postgres://*****:*****@host/database")
    config = get_config()
    assert config
    assert config["DatabaseType"] == "PostgreSQL"
def test_mx_runtime_db_config(self):
    """MXRUNTIME_* variables alone must not yield a database config."""
    # Test if MXRUNTIME variables are set up if no database configuration
    # is returned based on DATABASE_URL or VCAP_SERVICES
    self.clean_env()
    os.environ["MXRUNTIME_DatabaseType"] = "PostgreSQL"
    os.environ["MXRUNTIME_DatabaseJdbcUrl"] = "jdbc:postgresql://*****:*****@rdsbroker-testfree-nonprod-1-eu-west-1.asdbjasdg.eu-west-1.rds.amazonaws.com:5432/testdatabase"  # noqa E501
    assert not get_config()
def set_runtime_config(metadata, mxruntime_config, vcap_data, m2ee):
    """Merge application, cluster, database, storage, security and license
    settings into ``mxruntime_config`` (mutated in place).

    :param metadata: model metadata, used for constants and scheduled events
    :param mxruntime_config: runtime settings dict, updated in place
    :param vcap_data: Cloud Foundry VCAP application data (for the root URL)
    :param m2ee: m2ee handle, used for runtime version and storage config
    """
    scheduled_event_execution, my_scheduled_events = get_scheduled_events(
        metadata
    )
    app_config = {
        "ApplicationRootUrl": get_application_root_url(vcap_data),
        "MicroflowConstants": get_constants(metadata),
        "ScheduledEventExecution": scheduled_event_execution,
    }

    if my_scheduled_events is not None:
        app_config["MyScheduledEvents"] = my_scheduled_events

    if util.is_development_mode():
        logging.warning(
            "Runtime is being started in Development Mode. Set "
            'DEVELOPMENT_MODE to "false" (currently "true") to '
            "set it to production."
        )
        # "D" puts the runtime in Development DTAP mode.
        app_config["DTAPMode"] = "D"

    # Mendix >= 7: every non-primary instance joins the cluster as a slave.
    # Mendix >= 6 (non-clustered): optionally enable sticky sessions.
    if (
        m2ee.config.get_runtime_version() >= 7
        and not util.i_am_primary_instance()
    ):
        app_config["com.mendix.core.isClusterSlave"] = "true"
    elif (
        m2ee.config.get_runtime_version() >= 6
        and os.getenv("ENABLE_STICKY_SESSIONS", "false").lower() == "true"
    ):
        logging.info("Enabling sticky sessions")
        app_config["com.mendix.core.SessionIdCookieName"] = "JSESSIONID"

    util.mkdir_p(os.path.join(os.getcwd(), "model", "resources"))
    mxruntime_config.update(app_config)

    # db configuration might be None, database should then be set up with
    # MXRUNTIME_Database... custom runtime settings.
    runtime_db_config = database.get_config()
    if runtime_db_config:
        mxruntime_config.update(runtime_db_config)

    # Later updates override earlier keys; custom runtime settings win last.
    mxruntime_config.update(storage.get_config(m2ee))
    mxruntime_config.update(security.get_certificate_authorities())
    mxruntime_config.update(
        security.get_client_certificates(m2ee.config.get_runtime_version())
    )
    mxruntime_config.update(get_custom_settings(metadata, mxruntime_config))
    mxruntime_config.update(get_license_subscription())
    mxruntime_config.update(get_custom_runtime_settings())
def _set_up_dd_java_agent(m2ee, jmx_config_files):
    """Add the Datadog Java agent and its -D options to the runtime javaopts."""
    jar = os.path.join(SIDECAR_ROOT_DIR, JAVA_AGENT_ARTIFACT_NAME)
    javaopts = m2ee.config._conf["m2ee"]["javaopts"]
    # Already configured? Then don't add the flags twice.
    if any(opt.startswith("-javaagent:{}".format(jar)) for opt in javaopts):
        return
    # Inject the Datadog Java agent itself.
    javaopts.append("-javaagent:{}".format(jar))
    javaopts.append("-D{}={}".format("dd.tags", _get_datadog_tags()))
    javaopts.append("-D{}={}".format("dd.service", get_service()))
    # Explicitly set the tracing flag either way.
    javaopts.append("-D{}={}".format(
        "dd.trace.enabled", str(_is_tracing_enabled()).lower()))
    # Tracing-only options.
    if _is_tracing_enabled():
        javaopts.append("-D{}={}".format("dd.logs.injection", "true"))
    # Map the database service when running against Postgres.
    dbconfig = database.get_config()
    if dbconfig and "postgres" in dbconfig["DatabaseType"].lower():
        javaopts.append("-D{}={}".format(
            "dd.service.mapping",
            "{}:{}.db".format("postgresql", get_service()),
        ))
    # JMX metric fetching via the local statsd port.
    javaopts.append("-D{}={}".format("dd.jmxfetch.enabled", "true"))
    javaopts.append("-D{}={}".format(
        "dd.jmxfetch.statsd.port", get_statsd_port()))
    if jmx_config_files:
        # Point the agent at the generated JMX check configurations.
        javaopts.append("-D{}={}".format(
            "dd.jmxfetch.config", ",".join(jmx_config_files)))
def _get_database_mutations(self):
    """Return commit/rollback and tuple-mutation counters for the database.

    Reads pg_stat_database for the configured database name and returns a
    dict of integer counters.
    """
    conn = self._get_db_conn()
    db_config = database.get_config()
    with conn.cursor() as cursor:
        # Bound parameter instead of %-interpolating the database name
        # into the SQL text.
        cursor.execute("SELECT xact_commit, "
                       "       xact_rollback, "
                       "       tup_inserted, "
                       "       tup_updated, "
                       "       tup_deleted "
                       "FROM pg_stat_database "
                       "WHERE datname = %s;",
                       (db_config["DatabaseName"],))
        rows = cursor.fetchall()
        # The original had an unreachable "return None" after this return;
        # it has been removed.
        return {
            "xact_commit": int_or_default(rows[0][0]),
            "xact_rollback": int_or_default(rows[0][1]),
            "tup_inserted": int_or_default(rows[0][2]),
            "tup_updated": int_or_default(rows[0][3]),
            "tup_deleted": int_or_default(rows[0][4]),
        }
def _set_up_environment():
    """Build the environment for the license sidecar from runtime settings."""
    # Mirror MXRUNTIME_License.* settings into the MXUMS_* variables the
    # sidecar expects.
    license_env_map = {
        "MXRUNTIME_License.SubscriptionSecret": "MXUMS_SUBSCRIPTION_SECRET",
        "MXRUNTIME_License.LicenseServerURL": "MXUMS_LICENSESERVER_URL",
        "MXRUNTIME_License.EnvironmentName": "MXUMS_ENVIRONMENT_NAME",
    }
    for source_key, target_key in license_env_map.items():
        if source_key in os.environ:
            os.environ[target_key] = os.environ[source_key]
    dbconfig = database.get_config()
    if dbconfig:
        os.environ[
            "MXUMS_DB_CONNECTION_URL"] = "postgres://{}:{}@{}/{}".format(
                dbconfig["DatabaseUserName"],
                dbconfig["DatabasePassword"],
                dbconfig["DatabaseHost"],
                dbconfig["DatabaseName"],
            )
    project_id = _get_project_id(SIDECAR_DIR + "/" + SIDECAR_CONFIG_FILE)
    os.environ["MXUMS_PROJECT_ID"] = project_id
    return dict(os.environ.copy())
def test_vcap(self):
    """An RDS VCAP_SERVICES binding tagged postgresql yields a PostgreSQL config."""
    self.clean_env()
    os.environ["VCAP_SERVICES"] = """
{
    "rds-testfree": [
        {
            "binding_name": null,
            "credentials": {
                "db_name": "dbuajsdhkasdhaks",
                "host": "rdsbroker-testfree-nonprod-1-eu-west-1.asdbjasdg.eu-west-1.rds.amazonaws.com",
                "password": "******",
                "uri": "postgres://*****:*****@rdsbroker-testfree-nonprod-1-eu-west-1.asdbjasdg.eu-west-1.rds.amazonaws.com:5432/dbuajsdhkasdhaks",
                "username": "******"
            },
            "instance_name": "ops-432a659e.test.foo.io-database",
            "label": "rds-testfree",
            "name": "ops-432a659e.test.foo.io-database",
            "plan": "shared-psql-testfree",
            "provider": null,
            "syslog_drain_url": null,
            "tags": [
                "database",
                "RDS",
                "postgresql"
            ],
            "volume_mounts": []
        }
    ]
}
"""  # noqa
    config = get_config()
    assert config
    assert config["DatabaseType"] == "PostgreSQL"
def run():
    """Register database and file-store credentials with the Schnapps
    backup service bound to this app.

    Best-effort: logs and returns on any failure rather than raising.
    """
    vcap_services = util.get_vcap_services_data()
    # Locate the bound backup (schnapps) and object store (amazon-s3) services.
    schnapps = None
    amazon_s3 = None
    for key in vcap_services:
        if key.startswith("amazon-s3"):
            amazon_s3 = key
        if key.startswith("schnapps"):
            schnapps = key
    if not vcap_services or schnapps not in vcap_services:
        logging.debug("No backup service detected")
        return

    backup_service = {}
    if amazon_s3 in vcap_services:
        s3_credentials = vcap_services[amazon_s3][0]["credentials"]
        backup_service["filesCredentials"] = {
            "accessKey": s3_credentials["access_key_id"],
            "secretKey": s3_credentials["secret_access_key"],
            "bucketName": s3_credentials["bucket"],
        }
        if "key_suffix" in s3_credentials:  # Not all s3 plans have this field
            backup_service["filesCredentials"]["keySuffix"] = s3_credentials[
                "key_suffix"]

    try:
        db_config = database.get_config()
        if db_config["DatabaseType"] != "PostgreSQL":
            raise Exception("Schnapps only supports postgresql, not %s"
                            % db_config["DatabaseType"])
        host_and_port = db_config["DatabaseHost"].split(":")
        backup_service["databaseCredentials"] = {
            "host": host_and_port[0],
            "username": db_config["DatabaseUserName"],
            "password": db_config["DatabasePassword"],
            "dbname": db_config["DatabaseName"],
            "port": int(host_and_port[1]) if len(host_and_port) > 1 else 5432,
        }
    except Exception:
        # logging.exception records the traceback; no need to bind the error.
        logging.exception(
            "Schnapps will not be activated because error occurred with "
            "parsing the database credentials")
        return

    schnapps_url = vcap_services[schnapps][0]["credentials"]["url"]
    schnapps_api_key = vcap_services[schnapps][0]["credentials"]["apiKey"]
    try:
        result = requests.put(
            schnapps_url,
            headers={
                "Content-Type": "application/json",
                "apiKey": schnapps_api_key,
            },
            data=json.dumps(backup_service),
        )
    except requests.exceptions.SSLError as e:
        # Fix: the original message string contained a literal newline,
        # splitting the string across physical lines (a syntax error);
        # it is rejoined into one log message here.
        logging.warning("Failed to contact backup service. SSLError: %s",
                        str(e))
        return
    except Exception:
        logging.warning("Failed to contact backup service: ", exc_info=True)
        return

    if result.status_code == 200:
        logging.info("Successfully updated backup service")
    else:
        logging.warning("Failed to update backup service: " + result.text)
def update_config(m2ee, app_name):
    """Create the telegraf configuration and wire up metric outputs.

    Configures the statsd listener, optional PostgreSQL input, optional
    Datadog forwarding and the HTTP metric outputs, then enables the
    Datadog Java agent on the Mendix runtime.
    """
    if not is_enabled() or not _is_installed():
        return
    # Telegraf config, taking over defaults from telegraf.conf from the distro
    logging.debug("creating telegraf config")
    _create_config_file({
        "interval": "10s",
        "round_interval": True,
        "metric_batch_size": 1000,
        "metric_buffer_limit": 10000,
        "collection_jitter": "0s",
        "flush_interval": "10s",
        "flush_jitter": "5s",
        "precision": "",
        "debug": False,
        "logfile": "",
        "hostname": util.get_hostname(),
        "omit_hostname": False,
    })
    _write_config("[global_tags]", _get_tags())
    _write_config(
        "[[inputs.statsd]]",
        {
            "protocol": "udp",
            "max_tcp_connections": 250,
            "tcp_keep_alive": False,
            "service_address": ":8125",
            "delete_gauges": True,
            "delete_counters": True,
            "delete_sets": True,
            "delete_timings": True,
            "percentiles": [90],
            "metric_separator": ".",
            "parse_data_dog_tags": True,
            "allowed_pending_messages": 10000,
            "percentile_limit": 1000,
        },
    )
    # Configure postgreSQL input plugin
    if include_db_metrics():
        db_config = database.get_config()
        if db_config:
            _write_config(
                "[[inputs.postgresql]]",
                {
                    "address": "postgres://{}:{}@{}/{}".format(
                        db_config["DatabaseUserName"],
                        db_config["DatabasePassword"],
                        db_config["DatabaseHost"],
                        db_config["DatabaseName"],
                    )
                },
            )
    # Forward metrics also to DataDog when enabled
    if datadog.is_enabled():
        _write_config("[[outputs.datadog]]", {"apikey": datadog.get_api_key()})
    # Write http outputs (a single object or an array of objects).
    # Fix: use isinstance() rather than `type(...) is list` so list
    # subclasses are also handled.
    http_configs = json.loads(_get_appmetrics_target())
    if isinstance(http_configs, list):
        for http_config in http_configs:
            _write_http_output_config(http_config)
    else:
        _write_http_output_config(http_configs)
    # Enable the Datadog Java agent on the Mendix runtime
    datadog.enable_mx_java_agent(m2ee)
def test_no_setup(self):
    """Without any database configuration, get_config must raise RuntimeError."""
    self.clean_env()
    with self.assertRaises(RuntimeError):
        get_config()