def prepare_victoria_metrics(tmp_path):
    data, expected = load_case('raw/query_ts', 'query_router')

    # declare environment
    kafka_input_config = KafkaInputConfig(TOPIC)
    victoria_metrics_config = VictoriaMetricsConfig()
    postgres_config = PostgresConfig()
    qs = QueryServiceTs(db_config=victoria_metrics_config)
    sr = SchemaRegistry('http://edge_registry_not_used', kafka_input_config.brokers, postgres_config)

    # prepare environment
    vm_clear_data(victoria_metrics_config)
    vm_insert_data(victoria_metrics_config, data['database_setup'])
    qs.start()
    sr.start()

    schema_id = sr.create_schema('test', kafka_input_config.topic, f'http://localhost:{qs.input_port}', '{}', 1)

    with QueryRouter(f'http://localhost:{sr.input_port}') as qr:
        yield data, expected, qr, schema_id

    # cleanup environment
    qs.stop()
    sr.stop()
    vm_clear_data(victoria_metrics_config)
def prepare(tmp_path):
    data, expected = load_case('schema/query_ds', 'query_router')

    # declare environment
    kafka_input_config = KafkaInputConfig(TOPIC)
    postgres_config = PostgresConfig()
    qs = QueryService(db_config=postgres_config)
    sr = SchemaRegistry('http://edge_registry_not_used', kafka_input_config.brokers, postgres_config)

    # prepare environment
    sr.start()
    schema_id = sr.create_schema('test', kafka_input_config.topic, f'http://localhost:{qs.input_port}', '{}', 0)

    clear_data(postgres_config)
    for entry in data:
        entry['schema_id'] = schema_id
    insert_data(postgres_config, data)
    qs.start()

    with QueryRouter(f'http://localhost:{sr.input_port}') as qr:
        yield expected, qr, schema_id

    # cleanup environment
    clear_data(postgres_config)
    sr.stop()
    qs.stop()
def shrinking(request):
    with CdlEnv('.', postgres_config=PostgresConfig()) as env:
        db = connect_to_postgres(env.postgres_config)
        data, expected = load_case(request.param, 'db_shrinker_postgres')
        insert_test_data(db, data)

        yield db, env.postgres_config, expected

        db.close()
def shrinking(request):
    data, expected = load_case(request.param, 'db_shrinker_postgres')
    postgres_config = PostgresConfig()

    # prepare environment
    clear_data(postgres_config)
    insert_data(postgres_config, data)

    yield postgres_config, expected

    # cleanup environment
    clear_data(postgres_config)
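# The two `shrinking` fixtures above read their case name from `request.param`, which
# implies they are driven through pytest's indirect parametrization and registered as
# fixtures (e.g. with @pytest.fixture, not shown here). A minimal sketch of a consuming
# test module follows; the case names and the fetch_data helper are hypothetical and
# only illustrate the wiring, not this repository's actual tests.
import pytest


@pytest.mark.parametrize(
    'shrinking',
    ['simple/shrinking', 'partial/shrinking'],  # hypothetical case names
    indirect=True,
)
def test_shrinking_sketch(shrinking):
    postgres_config, expected = shrinking
    # a real test would compare the database state against `expected`, e.g. via a
    # fetch_data(postgres_config) helper (hypothetical)
    assert expected is not None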
def prepare(request):
    with CdlEnv('.', postgres_config=PostgresConfig(), kafka_input_config=KafkaInputConfig(TOPIC)) as env:
        data, expected = load_case(request.param, 'command_service')
        db = connect_to_postgres(env.postgres_config)
        producer = KafkaProducer(bootstrap_servers='localhost:9092')

        with CommandService(env.kafka_input_config, db_config=env.postgres_config) as _:
            yield db, producer, data, expected

        producer.close()
        db.close()
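# Rough sketch of how a test might consume the command_service `prepare` fixture above:
# each entry of the loaded case is assumed to already be a JSON-serializable message for
# the command service; the exact message format and the final database assertion are
# repository-specific, so treat this only as an outline of the flow.
import json
import time


def test_command_service_sketch(prepare):
    db, producer, data, expected = prepare
    for message in data:
        producer.send(TOPIC, json.dumps(message).encode('utf-8'))
    producer.flush()
    time.sleep(1)  # crude wait for the command service to persist the messages
    # a real test would read rows back via a cursor on `db` and compare with `expected`
    assert expected is not None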
def prepare_victoria_metrics(tmp_path):
    data, expected = load_case('single/query_ts', 'query_router')

    # declare environment
    kafka_input_config = KafkaInputConfig(TOPIC)
    victoria_metrics_config = VictoriaMetricsConfig()
    postgres_config = PostgresConfig()
    qs = QueryServiceTs(db_config=victoria_metrics_config)
    sr = SchemaRegistry('http://edge_registry_not_used', kafka_input_config.brokers, postgres_config)

    # prepare environment
    vm_clear_data(victoria_metrics_config)

    setup_data = data['database_setup']
    start = int(time.time())
    end = start + len(setup_data)
    for i in range(len(setup_data)):
        # VM requires nanoseconds when inserting data via Influx LineProtocol
        ts = (start + i) * 1_000_000_000
        setup_data[i] = setup_data[i].replace("$TIMESTAMP", str(ts))
        expected['data']['result'][0]['values'][i][0] = start + i

    vm_insert_data(victoria_metrics_config, setup_data)
    qs.start()
    sr.start()

    schema_id = sr.create_schema('test', kafka_input_config.topic, f'http://localhost:{qs.input_port}', '{}', 1)

    with QueryRouter(f'http://localhost:{sr.input_port}') as qr:
        yield data, expected, start, end, qr, schema_id

    # cleanup environment
    qs.stop()
    sr.stop()
    vm_clear_data(victoria_metrics_config)
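# The vm_insert_data / vm_clear_data helpers used above are repository-specific. The
# sketch below shows one way such helpers could talk to VictoriaMetrics over its HTTP
# API: line-protocol writes go to /write and series deletion goes to the admin
# delete_series endpoint. The `url` attribute on the config object is an assumption for
# illustration only.
import requests


def vm_insert_data_sketch(config, lines):
    # VictoriaMetrics accepts Influx line protocol (one sample per line) on /write
    requests.post(f'{config.url}/write', data='\n'.join(lines)).raise_for_status()


def vm_clear_data_sketch(config):
    # drop every series so each test starts from an empty database
    requests.post(
        f'{config.url}/api/v1/admin/tsdb/delete_series',
        params={'match[]': '{__name__=~".+"}'},
    ).raise_for_status()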
def prepare(request): data, expected = load_case(request.param, "command_service/victoria_command") topic = f'{TOPIC}.{request.param}' # declare environment kafka_config = KafkaInputConfig(topic) victoria_metrics_config = VictoriaMetricsConfig() # prepare environment create_kafka_topic(kafka_config, topic) clear_data(victoria_metrics_config) with CommandService(kafka_config, db_config=victoria_metrics_config) as _: yield data, expected, kafka_config, victoria_metrics_config # cleanup environment delete_kafka_topic(kafka_config, topic) clear_data(victoria_metrics_config)
def prepare(request): data, expected = load_case(request.param, "query_service_ts") # declare environment victoria_metrics_config = VictoriaMetricsConfig() # setup environment qs = QueryServiceTs(db_config=victoria_metrics_config) channel = grpc.insecure_channel(f"localhost:{qs.input_port}") stub = query_service_ts_pb2_grpc.QueryServiceTsStub(channel) clear_data(victoria_metrics_config) insert_data(victoria_metrics_config, data['database_setup']) qs.start() yield stub, expected, data['query_for'] # cleanup environment qs.stop() clear_data(victoria_metrics_config)
def prepare():
    data, expected = load_case('range/data', 'query_service_ts')

    # declare environment
    victoria_metrics_config = VictoriaMetricsConfig()

    # setup environment
    qs = QueryServiceTs(db_config=victoria_metrics_config)
    channel = grpc.insecure_channel(f"localhost:{qs.input_port}")
    stub = query_service_ts_pb2_grpc.QueryServiceTsStub(channel)

    setup_data = data['database_setup']
    start = int(time.time())
    end = start + len(setup_data)
    for i in range(len(setup_data)):
        # VM requires nanoseconds when inserting data via Influx LineProtocol
        ts = (start + i) * 1_000_000_000
        setup_data[i] = setup_data[i].replace("$TIMESTAMP", str(ts))
        expected['data']['result'][0]['values'][i][0] = start + i

    clear_data(victoria_metrics_config)
    insert_data(victoria_metrics_config, setup_data)
    qs.start()

    query = data["query_for"]
    query['start'] = str(start)
    query['end'] = str(end)

    yield stub, expected, query

    # cleanup environment
    qs.stop()
    clear_data(victoria_metrics_config)