def _get_db_conn(self):
    """Return a psycopg2 connection to the app database, reconnecting
    when the cached connection has been closed.

    Returns the cached ``self.db`` connection, or ``None`` if connecting
    failed (the failure is logged, not raised).
    """
    if self.db and self.db.closed != 0:
        # The cached handle was closed under us; drop it so we reconnect.
        self.db.close()
        self.db = None
    if not self.db:
        try:
            db_config = buildpackutil.get_database_config()
            if db_config['DatabaseType'] != 'PostgreSQL':
                raise Exception(
                    'Metrics only supports postgresql, not %s'
                    % db_config['DatabaseType']
                )
            # DatabaseHost may be "host" or "host:port"; default to 5432.
            host_and_port = db_config['DatabaseHost'].split(':')
            host = host_and_port[0]
            if len(host_and_port) > 1:
                port = int(host_and_port[1])
            else:
                port = 5432
            self.db = psycopg2.connect(
                "options='-c statement_timeout=60s'",
                database=db_config['DatabaseName'],
                user=db_config['DatabaseUserName'],
                password=db_config['DatabasePassword'],
                host=host,
                port=port,
                connect_timeout=3,
            )
            # Autocommit so metric SELECTs never hold a transaction open.
            self.db.set_isolation_level(
                psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT
            )
        except Exception as e:
            # str(e) instead of e.message: BaseException.message does not
            # exist on Python 3; logger.warning replaces deprecated warn().
            logger.warning('METRICS: ' + str(e))
    return self.db
def _set_up_postgres():
    """Write a Datadog postgres integration config for this app's database.

    No-op on non-primary instances, when database credentials are
    incomplete, or when the database is not PostgreSQL.
    """
    # TODO: set up a way to disable this, on shared database (mxapps.io) we
    # don't want to allow this.
    if not buildpackutil.i_am_primary_instance():
        return
    dbconfig = buildpackutil.get_database_config()
    for k in (
        "DatabaseType",
        "DatabaseUserName",
        "DatabasePassword",
        "DatabaseHost",
    ):
        if k not in dbconfig:
            return
    if dbconfig["DatabaseType"] != "PostgreSQL":
        return
    # DatabaseHost may be "host" or "host:port"; previously a port-less
    # host raised IndexError here — default to PostgreSQL's 5432 instead.
    host_and_port = dbconfig["DatabaseHost"].split(":")
    port = int(host_and_port[1]) if len(host_and_port) > 1 else 5432
    with open(".local/datadog/conf.d/postgres.yaml", "w") as fh:
        config = {
            "init_config": {},
            "instances": [
                {
                    "host": host_and_port[0],
                    "port": port,
                    "username": dbconfig["DatabaseUserName"],
                    "password": dbconfig["DatabasePassword"],
                    "dbname": dbconfig["DatabaseName"],
                }
            ],
        }
        fh.write(yaml.safe_dump(config))
def set_runtime_config(metadata, mxruntime_config, vcap_data, m2ee):
    """Populate mxruntime_config with app, database, filestore and
    certificate settings derived from the deployment metadata."""
    scheduled_event_execution, my_scheduled_events = get_scheduled_events(
        metadata)
    app_config = {
        "ApplicationRootUrl": "https://%s" % vcap_data["application_uris"][0],
        "MicroflowConstants": get_constants(metadata),
        "ScheduledEventExecution": scheduled_event_execution,
    }
    if my_scheduled_events is not None:
        app_config["MyScheduledEvents"] = my_scheduled_events
    if is_development_mode():
        logger.warning("Runtime is being started in Development Mode. Set "
                       'DEVELOPMENT_MODE to "false" (currently "true") to '
                       "set it to production.")
        app_config["DTAPMode"] = "D"
    # Mendix 7+: non-primary instances join as cluster slaves; otherwise
    # sticky sessions can be enabled on 5.15+ via the environment.
    if m2ee.config.get_runtime_version() >= 7 and not i_am_primary_instance():
        app_config["com.mendix.core.isClusterSlave"] = "true"
    elif (m2ee.config.get_runtime_version() >= 5.15 and
            os.getenv("ENABLE_STICKY_SESSIONS", "false").lower() == "true"):
        logger.info("Enabling sticky sessions")
        app_config["com.mendix.core.SessionIdCookieName"] = "JSESSIONID"
    buildpackutil.mkdir_p(os.path.join(os.getcwd(), "model", "resources"))
    # Later updates win: database config and the various get_* helpers may
    # override keys set in app_config.
    for extra in (
        app_config,
        buildpackutil.get_database_config(
            development_mode=is_development_mode()),
        get_filestore_config(m2ee),
        get_certificate_authorities(),
        get_client_certificates(),
        get_custom_settings(metadata, mxruntime_config),
        get_custom_runtime_settings(),
    ):
        mxruntime_config.update(extra)
def _get_database_mutations(self):
    """Read commit/rollback and tuple-mutation counters from
    pg_stat_database for this app's database.

    Returns a dict of five integer counters, or None on any failure
    (the failure is logged, not raised).
    """
    conn = self._get_db_conn()
    try:
        db_config = buildpackutil.get_database_config()
        with conn.cursor() as cursor:
            # Parameterized query: let the driver quote the database name
            # instead of interpolating it into the SQL string.
            cursor.execute(
                "SELECT xact_commit, "
                "       xact_rollback, "
                "       tup_inserted, "
                "       tup_updated, "
                "       tup_deleted "
                "FROM pg_stat_database "
                "WHERE datname = %s;",
                (db_config['DatabaseName'],),
            )
            rows = cursor.fetchall()
            return {
                'xact_commit': int(rows[0][0]),
                'xact_rollback': int(rows[0][1]),
                'tup_inserted': int(rows[0][2]),
                'tup_updated': int(rows[0][3]),
                'tup_deleted': int(rows[0][4]),
            }
    except Exception as e:
        # logger.warning: warn() is a deprecated alias since Python 3.3.
        logger.warning(
            'Metrics: Failed to get database mutation stats, ' + str(e))
    return None
def _get_db_conn(self):
    """Return an autocommit psycopg2 connection to the app database,
    creating a fresh one when none is cached or the cached one closed.

    Raises an Exception when the configured database is not PostgreSQL.
    """
    if self.db and self.db.closed != 0:
        # Stale handle: dispose of it so a new connection is made below.
        self.db.close()
        self.db = None
    if self.db:
        return self.db
    db_config = buildpackutil.get_database_config()
    if db_config["DatabaseType"] != "PostgreSQL":
        raise Exception("Metrics only supports postgresql, not %s"
                        % db_config["DatabaseType"])
    # DatabaseHost is "host" or "host:port"; fall back to 5432.
    parts = db_config["DatabaseHost"].split(":")
    target_host = parts[0]
    target_port = int(parts[1]) if len(parts) > 1 else 5432
    self.db = psycopg2.connect(
        "options='-c statement_timeout=60s'",
        database=db_config["DatabaseName"],
        user=db_config["DatabaseUserName"],
        password=db_config["DatabasePassword"],
        host=target_host,
        port=target_port,
        connect_timeout=3,
    )
    # Autocommit so metric queries never leave a transaction open.
    self.db.set_isolation_level(
        psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
    return self.db
def _set_up_postgres():
    """Write a Datadog postgres integration config for this app's database.

    No-op on non-primary instances, when database credentials are
    incomplete, or when the database is not PostgreSQL.
    """
    # TODO: set up a way to disable this, on shared database (mxapps.io) we
    # don't want to allow this.
    if not buildpackutil.i_am_primary_instance():
        return
    dbconfig = buildpackutil.get_database_config()
    for k in (
        'DatabaseType',
        'DatabaseUserName',
        'DatabasePassword',
        'DatabaseHost',
    ):
        if k not in dbconfig:
            return
    if dbconfig['DatabaseType'] != 'PostgreSQL':
        return
    # DatabaseHost may be "host" or "host:port"; previously a port-less
    # host raised IndexError here — default to PostgreSQL's 5432 instead.
    host_and_port = dbconfig['DatabaseHost'].split(':')
    port = int(host_and_port[1]) if len(host_and_port) > 1 else 5432
    with open('.local/datadog/conf.d/postgres.yaml', 'w') as fh:
        config = {
            'init_config': {},
            'instances': [{
                'host': host_and_port[0],
                'port': port,
                'username': dbconfig['DatabaseUserName'],
                'password': dbconfig['DatabasePassword'],
                'dbname': dbconfig['DatabaseName'],
            }],
        }
        fh.write(yaml.safe_dump(config))
def set_runtime_config(metadata, mxruntime_config, vcap_data, m2ee):
    """Populate mxruntime_config with app, database, filestore and
    certificate settings derived from the deployment metadata.

    Mutates mxruntime_config in place; later update() calls override
    earlier keys, so ordering below is significant.
    """
    scheduled_event_execution, my_scheduled_events = (
        get_scheduled_events(metadata))
    app_config = {
        'ApplicationRootUrl': 'https://%s' % vcap_data['application_uris'][0],
        'MicroflowConstants': get_constants(metadata),
        'ScheduledEventExecution': scheduled_event_execution,
    }
    if my_scheduled_events is not None:
        app_config['MyScheduledEvents'] = my_scheduled_events
    if is_development_mode():
        logger.warning('Runtime is being started in Development Mode. Set '
                       'DEVELOPMENT_MODE to "false" (currently "true") to '
                       'set it to production.')
        app_config['DTAPMode'] = 'D'
    # Mendix 7+: non-primary instances join the cluster as slaves; on
    # 5.15+ sticky sessions can instead be enabled via the environment.
    if (m2ee.config.get_runtime_version() >= 7 and
            not i_am_primary_instance()):
        app_config['com.mendix.core.isClusterSlave'] = 'true'
    elif (m2ee.config.get_runtime_version() >= 5.15 and
            os.getenv('ENABLE_STICKY_SESSIONS', 'false').lower() == 'true'):
        logger.info('Enabling sticky sessions')
        app_config['com.mendix.core.SessionIdCookieName'] = 'JSESSIONID'
    mxruntime_config.update(app_config)
    mxruntime_config.update(
        buildpackutil.get_database_config(
            development_mode=is_development_mode(),
        ))
    mxruntime_config.update(get_filestore_config(m2ee))
    mxruntime_config.update(get_certificate_authorities())
    mxruntime_config.update(get_client_certificates())
    mxruntime_config.update(get_custom_settings(metadata, mxruntime_config))
    mxruntime_config.update(get_custom_runtime_settings())
def _get_database_table_size(self):
    """Return the total on-disk size of the app database in bytes,
    normalized through int_or_default."""
    conn = self._get_db_conn()
    db_config = buildpackutil.get_database_config()
    with conn.cursor() as cursor:
        # Parameterized query: let the driver quote the database name
        # instead of interpolating it into the SQL string.
        cursor.execute(
            "SELECT pg_database_size(%s);",
            (db_config["DatabaseName"],),
        )
        rows = cursor.fetchall()
        return int_or_default(rows[0][0])
def service_backups():
    """Push this app's storage and database credentials to the schnapps
    backup service, best-effort: all failures are logged, never raised."""
    vcap_services = buildpackutil.get_vcap_services_data()
    if not vcap_services or 'schnapps' not in vcap_services:
        logger.debug("No backup service detected")
        return
    backup_service = {}
    if 'amazon-s3' in vcap_services:
        s3_credentials = vcap_services['amazon-s3'][0]['credentials']
        backup_service['filesCredentials'] = {
            'accessKey': s3_credentials['access_key_id'],
            'secretKey': s3_credentials['secret_access_key'],
            'bucketName': s3_credentials['bucket'],
        }
        if 'key_suffix' in s3_credentials:  # Not all s3 plans have this field
            backup_service['filesCredentials']['keySuffix'] = (
                s3_credentials['key_suffix'])
    try:
        db_config = buildpackutil.get_database_config()
        if db_config['DatabaseType'] != 'PostgreSQL':
            raise Exception(
                'Schnapps only supports postgresql, not %s'
                % db_config['DatabaseType']
            )
        host_and_port = db_config['DatabaseHost'].split(':')
        backup_service['databaseCredentials'] = {
            'host': host_and_port[0],
            'username': db_config['DatabaseUserName'],
            'password': db_config['DatabasePassword'],
            'dbname': db_config['DatabaseName'],
            'port': int(host_and_port[1]) if len(host_and_port) > 1 else 5432,
        }
    except Exception:
        logger.exception(
            'Schnapps will not be activated because error occurred with '
            'parsing the database credentials'
        )
        return
    schnapps_url = vcap_services['schnapps'][0]['credentials']['url']
    schnapps_api_key = vcap_services['schnapps'][0]['credentials']['apiKey']
    try:
        result = requests.put(
            schnapps_url,
            headers={
                'Content-Type': 'application/json',
                'apiKey': schnapps_api_key
            },
            data=json.dumps(backup_service),
        )
    except Exception as e:
        # str(e): concatenating str + Exception raises TypeError on
        # Python 3, masking the real network error.
        logger.warning('Failed to contact backup service: ' + str(e))
        return
    if result.status_code == 200:
        logger.info("Successfully updated backup service")
    else:
        logger.warning("Failed to update backup service: " + result.text)
def _get_database_table_size(self):
    """Return the total on-disk size of the app database in bytes, or
    None on any failure (logged, not raised)."""
    conn = self._get_db_conn()
    try:
        db_config = buildpackutil.get_database_config()
        with conn.cursor() as cursor:
            # Parameterized query: let the driver quote the database name.
            cursor.execute(
                "SELECT pg_database_size(%s);",
                (db_config['DatabaseName'],),
            )
            rows = cursor.fetchall()
            return int(rows[0][0])
    except Exception as e:
        # logger.warning: warn() is a deprecated alias since Python 3.3.
        logger.warning('Metrics: Failed to get database data size, ' + str(e))
        return None
def service_backups():
    """Push this app's storage and database credentials to the schnapps
    backup service, best-effort: failures are logged, never raised."""
    vcap_services = buildpackutil.get_vcap_services_data()
    if not vcap_services or 'schnapps' not in vcap_services:
        logger.info("No backup service detected")
        return
    backup_service = {}
    if 'amazon-s3' in vcap_services:
        s3_credentials = vcap_services['amazon-s3'][0]['credentials']
        backup_service['filesCredentials'] = {
            'accessKey': s3_credentials['access_key_id'],
            'secretKey': s3_credentials['secret_access_key'],
            'bucketName': s3_credentials['bucket'],
        }
        if 'key_suffix' in s3_credentials:  # Not all s3 plans have this field
            backup_service['filesCredentials']['keySuffix'] = (
                s3_credentials['key_suffix'])
    if 'PostgreSQL' in vcap_services:
        db_config = buildpackutil.get_database_config()
        host_and_port = db_config['DatabaseHost'].split(':')
        backup_service['databaseCredentials'] = {
            'host': host_and_port[0],
            'username': db_config['DatabaseUserName'],
            'password': db_config['DatabasePassword'],
            'dbname': db_config['DatabaseName'],
            'port': int(host_and_port[1]) if len(host_and_port) > 1 else 5432,
        }
    schnapps_url = vcap_services['schnapps'][0]['credentials']['url']
    schnapps_api_key = vcap_services['schnapps'][0]['credentials']['apiKey']
    try:
        result = requests.put(
            schnapps_url,
            headers={
                'Content-Type': 'application/json',
                'apiKey': schnapps_api_key
            },
            data=json.dumps(backup_service),
        )
    except Exception as e:
        # Best effort: a transient network error must not abort the
        # deployment, matching the other backup-service code paths.
        logger.warning('Failed to contact backup service: ' + str(e))
        return
    if result.status_code == 200:
        logger.info("Successfully updated backup service")
    else:
        logger.warning("Failed to update backup service: " + result.text)
def _get_database_mutations(self):
    """Read commit/rollback and tuple-mutation counters from
    pg_stat_database for this app's database.

    Returns a dict of five counters normalized via int_or_default.
    """
    conn = self._get_db_conn()
    db_config = buildpackutil.get_database_config()
    with conn.cursor() as cursor:
        # Parameterized query: let the driver quote the database name
        # instead of interpolating it into the SQL string.
        cursor.execute(
            "SELECT xact_commit, "
            "       xact_rollback, "
            "       tup_inserted, "
            "       tup_updated, "
            "       tup_deleted "
            "FROM pg_stat_database "
            "WHERE datname = %s;",
            (db_config["DatabaseName"],),
        )
        rows = cursor.fetchall()
    # The trailing "return None" in the original was unreachable and has
    # been removed.
    return {
        "xact_commit": int_or_default(rows[0][0]),
        "xact_rollback": int_or_default(rows[0][1]),
        "tup_inserted": int_or_default(rows[0][2]),
        "tup_updated": int_or_default(rows[0][3]),
        "tup_deleted": int_or_default(rows[0][4]),
    }
def set_runtime_config(metadata, mxruntime_config, vcap_data, m2ee):
    """Populate mxruntime_config with app, database, filestore, cluster
    and certificate settings derived from the deployment metadata.

    Mutates mxruntime_config in place; later update() calls override
    earlier keys, so ordering below is significant.
    """
    scheduled_event_execution, my_scheduled_events = (
        get_scheduled_events(metadata)
    )
    app_config = {
        'ApplicationRootUrl': 'https://%s' % vcap_data['application_uris'][0],
        'MicroflowConstants': get_constants(metadata),
        'ScheduledEventExecution': scheduled_event_execution,
    }
    if my_scheduled_events is not None:
        app_config['MyScheduledEvents'] = my_scheduled_events
    if is_development_mode():
        logger.warning(
            'Runtime is being started in Development Mode. Set '
            'DEVELOPMENT_MODE to "false" (currently "true") to '
            'set it to production.'
        )
        app_config['DTAPMode'] = 'D'
    # Sticky sessions only apply on 5.15+ outside cluster mode.
    if (m2ee.config.get_runtime_version() >= 5.15 and
            os.getenv('ENABLE_STICKY_SESSIONS', 'false').lower() == 'true'
            and not is_cluster_enabled()):
        logger.info('Enabling sticky sessions')
        app_config['com.mendix.core.SessionIdCookieName'] = 'JSESSIONID'
    mxruntime_config.update(app_config)
    mxruntime_config.update(buildpackutil.get_database_config(
        development_mode=is_development_mode(),
    ))
    mxruntime_config.update(get_filestore_config(m2ee))
    mxruntime_config.update(get_cluster_config())
    mxruntime_config.update(get_certificate_authorities())
    mxruntime_config.update(get_custom_settings(metadata, mxruntime_config))
    # MXRUNTIME_ environment variables map onto dotted runtime settings.
    # os.environ.items(): iteritems() is Python-2-only and raises
    # AttributeError on Python 3.
    for k, v in os.environ.items():
        if k.startswith('MXRUNTIME_'):
            mxruntime_config[
                k.replace('MXRUNTIME_', '', 1).replace('_', '.')
            ] = v
def service_backups():
    """Push this app's storage and database credentials to the schnapps
    backup service, best-effort: all failures are logged, never raised."""
    vcap_services = buildpackutil.get_vcap_services_data()
    # Check emptiness before iterating: get_vcap_services_data() may
    # return an empty/None value, which the original iterated over first.
    if not vcap_services:
        logger.debug("No backup service detected")
        return
    schnapps = None
    amazon_s3 = None
    for key in vcap_services:
        if key.startswith("amazon-s3"):
            amazon_s3 = key
        if key.startswith("schnapps"):
            schnapps = key
    if schnapps not in vcap_services:
        logger.debug("No backup service detected")
        return
    backup_service = {}
    if amazon_s3 in vcap_services:
        s3_credentials = vcap_services[amazon_s3][0]["credentials"]
        backup_service["filesCredentials"] = {
            "accessKey": s3_credentials["access_key_id"],
            "secretKey": s3_credentials["secret_access_key"],
            "bucketName": s3_credentials["bucket"],
        }
        if "key_suffix" in s3_credentials:  # Not all s3 plans have this field
            backup_service["filesCredentials"]["keySuffix"] = s3_credentials[
                "key_suffix"
            ]
    try:
        db_config = buildpackutil.get_database_config()
        if db_config["DatabaseType"] != "PostgreSQL":
            raise Exception(
                "Schnapps only supports postgresql, not %s"
                % db_config["DatabaseType"]
            )
        host_and_port = db_config["DatabaseHost"].split(":")
        backup_service["databaseCredentials"] = {
            "host": host_and_port[0],
            "username": db_config["DatabaseUserName"],
            "password": db_config["DatabasePassword"],
            "dbname": db_config["DatabaseName"],
            "port": int(host_and_port[1]) if len(host_and_port) > 1 else 5432,
        }
    except Exception:
        logger.exception(
            "Schnapps will not be activated because error occurred with "
            "parsing the database credentials"
        )
        return
    schnapps_url = vcap_services[schnapps][0]["credentials"]["url"]
    schnapps_api_key = vcap_services[schnapps][0]["credentials"]["apiKey"]
    try:
        result = requests.put(
            schnapps_url,
            headers={
                "Content-Type": "application/json",
                "apiKey": schnapps_api_key,
            },
            data=json.dumps(backup_service),
        )
    except requests.exceptions.SSLError as e:
        # Reconstructed from a garbled multi-line literal in the original
        # source: the message was split across a raw newline.
        logger.warning("Failed to contact backup service. SSLError: " + str(e))
        return
    except Exception:
        logger.warning("Failed to contact backup service: ", exc_info=True)
        return
    if result.status_code == 200:
        logger.info("Successfully updated backup service")
    else:
        logger.warning("Failed to update backup service: " + result.text)