def deploy_resources():
    """Provision the RDS PostgreSQL instance through the project's RDS wrapper."""
    client = RDSClient().get_client()
    RDS(client).create_postgresql_instance()
    print("Creating RDS PostgreSQL Instance...")
def modify_schema_owner_password(event, context):
    """
    We don't know the password for 'capture_owner' on the production db,
    but we have already changed the postgres password in the modifyDbCluster
    step.  So change the password for 'capture_owner' here.

    :param event: Lambda event payload (logged for traceability)
    :param context: Lambda context (unused)
    :return: None
    """
    _validate()
    logger.info(event)
    original = secrets_client.get_secret_value(SecretId=NWCAPTURE_REAL)
    secret_string = json.loads(original['SecretString'])
    db_host = secret_string['DATABASE_ADDRESS']
    db_name = secret_string['DATABASE_NAME']
    postgres_password = secret_string['POSTGRES_PASSWORD']
    schema_owner_password = secret_string['SCHEMA_OWNER_PASSWORD']
    # SECURITY FIX: the original logged both passwords in clear text; never
    # write credentials to CloudWatch logs.
    logger.info(f"db_host {db_host} db_name {db_name}")
    rds = RDS(db_host, 'postgres', db_name, postgres_password)
    logger.info("got rds ok")
    # Parameterized so the password value is never interpolated into the SQL text.
    sql = "alter user capture_owner with password %s"
    rds.alter_permissions(sql, (schema_owner_password, ))
    # Drain any stale messages before re-enabling the capture trigger.
    queue_info = sqs_client.get_queue_url(QueueName=CAPTURE_TRIGGER_QUEUE)
    sqs_client.purge_queue(QueueUrl=queue_info['QueueUrl'])
    queue_info = sqs_client.get_queue_url(QueueName=ERROR_QUEUE)
    sqs_client.purge_queue(QueueUrl=queue_info['QueueUrl'])
    enable_lambda_trigger(TRIGGER[os.environ['STAGE']])
def modify_schema_owner_password(event, context):
    """
    We don't know the password for 'capture_owner' on the production db,
    but we have already changed the postgres password in the modifyDbCluster
    step.  So change the password for 'capture_owner' here.

    :param event: Lambda event payload (logged for traceability)
    :param context: Lambda context (unused)
    :return: None
    """
    logger.info(event)
    original = secrets_client.get_secret_value(SecretId=NWCAPTURE_LOAD)
    secret_string = json.loads(original['SecretString'])
    db_host = secret_string['DATABASE_ADDRESS']
    db_name = secret_string['DATABASE_NAME']
    # NOTE(review): 'Password123' is a hard-coded credential for the load-test
    # environment only — it should come from Secrets Manager like the other
    # environments; flagged for follow-up.
    rds = RDS(db_host, 'postgres', db_name, 'Password123')
    # Parameterized (consistent with the other password-rotation lambdas)
    # instead of interpolating the password into the SQL text.
    sql = "alter user capture_owner with password %s"
    rds.alter_permissions(sql, ('Password123', ))
    # Drain any stale messages from the capture queues.
    sqs = boto3.client('sqs', os.getenv('AWS_DEPLOYMENT_REGION'))
    queue_info = sqs.get_queue_url(QueueName=CAPTURE_TRIGGER_QUEUE)
    sqs.purge_queue(QueueUrl=queue_info['QueueUrl'])
    queue_info = sqs.get_queue_url(QueueName=ERROR_QUEUE)
    sqs.purge_queue(QueueUrl=queue_info['QueueUrl'])
def pre_test(event, context):
    """
    This is a place holder that will inspect the beginning state of the load
    test db and save some data so that it can be compared with the db after
    the integration tests run.

    :param event: Lambda event payload (logged for traceability)
    :param context: Lambda context (unused)
    :return: None
    """
    logger.info(event)
    original = secrets_client.get_secret_value(SecretId=NWCAPTURE_LOAD)
    secret_string = json.loads(original['SecretString'])
    # SECURITY FIX: the original logged the entire secret payload and the
    # schema-owner password in clear text; log only non-sensitive values.
    logger.info(f"retrieving secrets from {NWCAPTURE_LOAD}")
    db_host = secret_string['DATABASE_ADDRESS']
    db_user = secret_string['SCHEMA_OWNER_USERNAME']
    db_name = secret_string['DATABASE_NAME']
    db_password = secret_string['SCHEMA_OWNER_PASSWORD']
    logger.info(f"db_host= {db_host}")
    rds = RDS(db_host, db_user, db_name, db_password)
    sql = "select count(1) from capture.json_data"
    result = rds.execute_sql(sql)
    logger.info(f"RESULT: {result}")
    # Snapshot the starting row count so the post-test report can diff it.
    content = {"StartTime": str(datetime.datetime.now()), "StartCount": result}
    logger.info(f"Writing this to S3 {json.dumps(content)}")
    s3.Object('iow-retriever-capture-load', 'TEST_RESULTS').put(Body=json.dumps(content))
def test_db_disconnect(self, mock_connection):
    """Connecting then disconnecting should hit the driver with the configured kwargs."""
    mock_connection.return_value.cursor.return_value = mock.Mock()
    rds = RDS(self.host, self.user, self.database, self.password)
    expected_kwargs = {
        'host': 'some-host',
        'database': 'some-database',
        'user': '******',
        'password': '******',
        'connect_timeout': 65,
    }
    mock_connection.assert_called_with(**expected_kwargs)
    rds.disconnect()
def run_integration_tests(event, context):
    """
    Integration tests will go here.  Right now the idea is that the pre-test
    will save a TEST_RESULT object up in the bucket and that the integration
    tests will write to that same object, so when everything finishes it will
    be like a report.  That's just a placeholder idea.

    :param event: Lambda event payload (logged for traceability)
    :param context: Lambda context (unused)
    :return: None
    """
    logger.info(event)
    original = secrets_client.get_secret_value(SecretId=NWCAPTURE_LOAD)
    secret_string = json.loads(original['SecretString'])
    db_host = secret_string['DATABASE_ADDRESS']
    db_user = secret_string['SCHEMA_OWNER_USERNAME']
    db_name = secret_string['DATABASE_NAME']
    db_password = secret_string['SCHEMA_OWNER_PASSWORD']
    # SECURITY FIX: the original logged db_password in clear text.
    logger.info(f"{db_host} {db_user} {db_name}")
    rds = RDS(db_host, db_user, db_name, db_password)
    sql = "select count(1) from capture.json_data"
    result = rds.execute_sql(sql)
    logger.info(f"RESULT: {result}")
    # Pull the report the pre-test stage wrote and append the end-state data.
    obj = s3.Object('iow-retriever-capture-load', 'TEST_RESULTS')
    logger.info(f"read content from S3: {obj}")
    content = json.loads(obj.get()['Body'].read().decode('utf-8'))
    logger.info(f"after json loads {content}")
    content["End Time"] = str(datetime.datetime.now())
    content["End Count"] = result
    start_date_time_obj = datetime.datetime.strptime(content["StartTime"], '%Y-%m-%d %H:%M:%S.%f')
    elapsed_time = datetime.datetime.now().timestamp() - start_date_time_obj.timestamp()
    # Fold in any alarm transitions that happened while the test ran.
    for alarm in ALARMS:
        response = _get_cloudwatch_alarm_history(start_date_time_obj, alarm)
        content = _update_results_for_alarm(response, content, alarm)
    content["ElapsedTimeInSeconds"] = elapsed_time
    logger.info(f"Writing this to S3 {json.dumps(content)}")
    s3.Object('iow-retriever-capture-load', 'TEST_RESULTS').put(Body=json.dumps(content))
def run_etl_query(rds=None):
    """
    If we look in the batch_job_executions table and something is in a state
    other than COMPLETED or FAILED, assume an ETL is in progress and don't
    shut the db down.  Also, if someone aborts an ETL job in Jenkins, we can
    get in a messed up state where a job execution has STARTED but never fails
    or completes, so check the start time and if it's more than 4 days ignore
    those and shut the db down anyway.

    :param rds: optional RDS wrapper (built from env vars when omitted)
    :return: True when it is safe to shut the observations db down
    :raises Exception: when the query returns an unexpected result
    """
    if rds is None:
        rds = RDS(os.getenv('DB_HOST'), os.getenv('DB_USER'),
                  os.getenv('DB_NAME'), os.getenv('DB_PASSWORD'))
    # NOTE(review): `etl_start` is a module-level value (the cutoff for stale
    # STARTED jobs) — confirm it is defined alongside this function.
    result = rds.execute_sql(OBSERVATIONS_ETL_IN_PROGRESS_SQL, (etl_start,))
    if result[0] > 0:
        # FIX: message read "Cannot shutdown down" in the original.
        logger.debug(f"Cannot shut down observations db because {result[0]} processes are running")
        return False
    elif result[0] == 0:
        logger.debug("Shutting down observations db because no processes are running")
        return True
    else:
        raise Exception(f"something wrong with db result {result}")
def test_db_connect(self, mock_connection):
    """Constructing RDS under the patched config should connect with the configured kwargs."""
    mock_connection.return_value.cursor.return_value = mock.Mock()
    with mock.patch.dict('src.config.CONFIG', self.config):
        RDS()
    expected_kwargs = {
        'host': 'some-host',
        'database': 'some-database',
        'user': '******',
        'password': '******',
        'connect_timeout': 65,
    }
    mock_connection.assert_called_with(**expected_kwargs)
def deploy_resources():
    """Provision the RDS PostgreSQL instance through the project's RDS wrapper."""
    rds_client = RDSClient().get_client()
    rds = RDS(rds_client)
    # FIX: the original called the misspelled `create_postgrestsq_intance()`;
    # the sibling deploy helpers call `create_postgresql_instance()`.
    rds.create_postgresql_instance()
    # Message aligned with the other deploy helper ("Postgress" typo fixed).
    print("Creating RDS PostgreSQL Instance...")
def deploy_resources():
    """Provision the RDS PostgreSQL instance through the project's RDS wrapper."""
    client = RDSClient().get_client()
    RDS(client).create_postgresql_instance()
def modify_observation_passwords(event, context):
    """
    Rotate the passwords of every observation-db role to the values stored in
    the OBSERVATION_REAL secret, connecting as 'postgres'.

    :param event: Lambda event payload (logged for traceability)
    :param context: Lambda context (unused)
    :return: True on completion
    """
    _validate()
    logger.info(event)
    original = secrets_client.get_secret_value(SecretId=OBSERVATION_REAL)
    secret_string = json.loads(original['SecretString'])
    db_host = secret_string['DATABASE_ADDRESS']
    db_name = secret_string['DATABASE_NAME']
    postgres_password = secret_string['POSTGRES_PASSWORD']
    rds = RDS(db_host, 'postgres', db_name, postgres_password)
    logger.info("got rds ok")
    # One (role, secret-key) row per user replaces six copy-pasted stanzas;
    # the generated SQL and log strings are identical to the originals.
    secret_key_by_user = [
        ('wqp_core', 'DB_OWNER_PASSWORD'),
        ('wqp_user', 'WQP_READ_ONLY_PASSWORD'),
        ('ars_owner', 'ARS_SCHEMA_OWNER_PASSWORD'),
        ('nwis_ws_star_owner', 'NWIS_SCHEMA_OWNER_PASSWORD'),
        ('epa_owner', 'EPA_SCHEMA_OWNER_PASSWORD'),
        ('wdfn_user', 'WDFN_DB_READ_ONLY_PASSWORD'),
    ]
    for user, secret_key in secret_key_by_user:
        # Role names come from the trusted list above; the password itself is
        # passed as a bind parameter, never interpolated.
        sql = f"alter user {user} with password %s"
        rds.alter_permissions(sql, (secret_string[secret_key], ))
        logger.info(f"changed {user} password")
    return True
def get_rds():
    """Build and return an RDS wrapper bound to a freshly created client."""
    return RDS(RDSClient().get_client())