def perf(
    *,
    events_file: Optional[str],
    repeat: int,
    profile_process: bool,
    profile_write: bool,
    dataset_name: str,
    log_level: Optional[str] = None,
) -> None:
    """Run the performance testing tool against a locally-configured dataset.

    Exits the process with status 1 when not running in local dataset mode,
    since the tool writes events directly into the dataset.
    """
    from snuba.perf import logger, run

    setup_logging(log_level)
    target_dataset = get_dataset(dataset_name)

    # Hard guard: only meaningful (and safe) against a local dataset.
    if not local_dataset_mode():
        logger.error("The perf tool is only intended for local dataset environment.")
        sys.exit(1)

    run(
        events_file,
        target_dataset,
        repeat=repeat,
        profile_process=profile_process,
        profile_write=profile_write,
    )
def perf(
    *,
    events_file: Optional[str],
    repeat: int,
    profile_process: bool,
    profile_write: bool,
    dataset_name: str,
    log_level: Optional[str] = None,
) -> None:
    """Run the performance testing tool against the named dataset.

    Exits the process with status 1 unless every storage backing the dataset
    lives on a single-node ClickHouse cluster.
    """
    from snuba.perf import logger, run

    setup_logging(log_level)
    target_dataset = get_dataset(dataset_name)

    # Equivalent to `not all(is_single_node)` — bail out if any storage's
    # cluster spans more than one node.
    if any(
        not storage.get_cluster().is_single_node()
        for storage in target_dataset.get_all_storages()
    ):
        logger.error("The perf tool is only intended for single node environment.")
        sys.exit(1)

    run(
        events_file,
        target_dataset,
        repeat=repeat,
        profile_process=profile_process,
        profile_write=profile_write,
    )
def test(self):
    """Verify perf.run inserts exactly one event from the fixture file."""
    count_query = "SELECT COUNT() FROM %s" % self.table

    # Table starts empty, and ends with the single fixture event.
    assert self.clickhouse.execute(count_query)[0][0] == 0
    perf.run('tests/perf-event.json', self.clickhouse, self.table)
    assert self.clickhouse.execute(count_query)[0][0] == 1
def test(self):
    """Verify perf.run inserts exactly one event into the events dataset."""
    events = get_dataset("events")
    local_table = events.get_table_writer().get_schema().get_local_table_name()
    count_query = "SELECT COUNT() FROM %s" % local_table

    # Table starts empty, and ends with the single fixture event.
    assert self.clickhouse.execute(count_query)[0][0] == 0
    perf.run("tests/perf-event.json", events)
    assert self.clickhouse.execute(count_query)[0][0] == 1
def test(self):
    """Verify perf.run inserts exactly one event into the events dataset."""
    events = get_dataset('events')
    write_schema = events.get_dataset_schemas().get_write_schema_enforce()
    count_query = "SELECT COUNT() FROM %s" % write_schema.get_local_table_name()

    # Table starts empty, and ends with the single fixture event.
    assert self.clickhouse.execute(count_query)[0][0] == 0
    perf.run('tests/perf-event.json', events)
    assert self.clickhouse.execute(count_query)[0][0] == 1
def test_perf() -> None:
    """Verify perf.run inserts exactly one event via the writable storage."""
    events_dataset = get_dataset("events")
    writable = events_dataset.get_writable_storage()
    assert writable is not None
    local_table = writable.get_table_writer().get_schema().get_local_table_name()
    connection = writable.get_cluster().get_query_connection(
        ClickhouseClientSettings.QUERY
    )

    def row_count() -> int:
        # Single scalar result: [(count,)] -> count
        return connection.execute("SELECT COUNT() FROM %s" % local_table)[0][0]

    # Table starts empty, and ends with the single fixture event.
    assert row_count() == 0
    perf.run("tests/perf-event.json", events_dataset)
    assert row_count() == 1
def perf(events_file, repeat, profile_process, profile_write, dataset, log_level):
    """Run the performance testing tool against a locally-configured dataset.

    Exits the process with status 1 when not running in local dataset mode,
    since the tool writes events directly into the dataset.
    """
    from snuba.perf import logger, run

    logging.basicConfig(
        level=getattr(logging, log_level.upper()),
        format='%(asctime)s %(message)s',
    )

    # Resolve the CLI dataset name to the actual dataset object.
    dataset = get_dataset(dataset)

    # Hard guard: only meaningful (and safe) against a local dataset.
    if not local_dataset_mode():
        logger.error("The perf tool is only intended for local dataset environment.")
        sys.exit(1)

    run(
        events_file,
        dataset,
        repeat=repeat,
        profile_process=profile_process,
        profile_write=profile_write,
    )
def perf(events_file, repeat, profile_process, profile_write, clickhouse_server,
         table_name, log_level):
    """Run the performance testing tool against a local ClickHouse table.

    Exits the process with status 1 unless CLICKHOUSE_TABLE is the local
    development table, since the tool writes events directly into it.
    """
    from snuba.clickhouse import ClickhousePool
    from snuba.perf import run, logger

    logging.basicConfig(
        level=getattr(logging, log_level.upper()),
        format='%(asctime)s %(message)s',
    )

    # Hard guard: only run against the local dev table.
    if settings.CLICKHOUSE_TABLE != 'dev':
        # Fixed copy-paste bug: this is the perf tool, not the migration tool.
        logger.error(
            "The perf tool is only intended for local development environment."
        )
        sys.exit(1)

    # Parse "host:port" once instead of splitting the string twice.
    host, port = clickhouse_server.split(':', 1)
    clickhouse = ClickhousePool(host, port=int(port))

    run(
        events_file,
        clickhouse,
        table_name,
        repeat=repeat,
        profile_process=profile_process,
        profile_write=profile_write,
    )