def create_index(index_name, crate_home, output_dir):
    """Spin up a single-node Crate layer, run the SQL statements stored
    for *index_name*, flush to disk, and archive the node's data
    directory into *output_dir*.

    The Crate node is always stopped, even when index creation fails.
    """
    layer = CrateLayer(
        'data',
        crate_home=crate_home,
        port=CRATE_HTTP_PORT,
        transport_port=CRATE_TRANSPORT_PORT,
        settings={'es.api.enabled': True},
    )
    layer.start()
    address = 'localhost:{}'.format(CRATE_HTTP_PORT)
    try:
        with connect(address) as conn:
            cursor = conn.cursor()
            # The statements are ';'-separated; splitting leaves an empty
            # trailing element after the final ';', so skip it.
            for statement in INDICES[index_name].split(';')[:-1]:
                LOGGER.info(statement)
                cursor.execute(statement)
            # Record the server version so the archive can be tied to it.
            cursor.execute("select version['number'] from sys.nodes")
            version = cursor.fetchone()[0]
            # Force a flush so the on-disk state is complete before archiving.
            response = http.request('POST', address + '/_flush')
            response.read()
            compress_index(index_name, version, layer.wdPath(), output_dir)
    finally:
        layer.stop()
def create_index(index_name, crate_home, output_dir):
    """Create the index artifacts for *index_name* on a fresh single-node
    Crate instance and compress the resulting data directory.

    :param index_name: key into ``INDICES`` selecting the ';'-separated SQL.
    :param crate_home: path of the Crate distribution to launch.
    :param output_dir: directory that receives the compressed index.
    """
    crate_layer = CrateLayer(
        'data',
        crate_home=crate_home,
        port=CRATE_HTTP_PORT,
        transport_port=CRATE_TRANSPORT_PORT,
        settings={
            'es.api.enabled': True,
            # The disk.watermark settings can be removed once
            # crate-python > 0.21.1 has been released
            "cluster.routing.allocation.disk.watermark.low": "100k",
            "cluster.routing.allocation.disk.watermark.high": "10k",
            "cluster.routing.allocation.disk.watermark.flood_stage": "1k",
        }
    )
    crate_layer.start()
    crate_http = 'localhost:{}'.format(CRATE_HTTP_PORT)
    try:
        with connect(crate_http) as conn:
            cur = conn.cursor()
            # Splitting on ';' leaves an empty trailing element after the
            # final statement, hence cmds[:-1].
            cmds = INDICES[index_name].split(';')
            for cmd in cmds[:-1]:
                LOGGER.info(cmd)
                cur.execute(cmd)
            # Record the server version so the archive can be named after it.
            cur.execute("select version['number'] from sys.nodes")
            version = cur.fetchone()[0]
            # Flush so the on-disk state is complete before compressing.
            r = http.request('POST', crate_http + '/_flush')
            r.read()
            compress_index(index_name, version, crate_layer.wdPath(), output_dir)
    finally:
        # Always shut the node down, even if index creation fails.
        crate_layer.stop()
def create_index(index_name, crate_home, output_dir):
    """Launch a disposable one-node Crate instance, execute the SQL kept
    for *index_name*, flush, and compress the node's data directory into
    *output_dir*.  The node is stopped unconditionally on exit."""
    settings = {
        'es.api.enabled': True,
        # The disk.watermark settings can be removed once
        # crate-python > 0.21.1 has been released
        "cluster.routing.allocation.disk.watermark.low": "100k",
        "cluster.routing.allocation.disk.watermark.high": "10k",
        "cluster.routing.allocation.disk.watermark.flood_stage": "1k",
    }
    node = CrateLayer(
        'data',
        crate_home=crate_home,
        port=CRATE_HTTP_PORT,
        transport_port=CRATE_TRANSPORT_PORT,
        settings=settings)
    node.start()
    address = 'localhost:{}'.format(CRATE_HTTP_PORT)
    try:
        with connect(address) as conn:
            cursor = conn.cursor()
            # Skip the empty trailing entry produced by the final ';'.
            for statement in INDICES[index_name].split(';')[:-1]:
                LOGGER.info(statement)
                cursor.execute(statement)
            cursor.execute("select version['number'] from sys.nodes")
            version = cursor.fetchone()[0]
            # Flush so every segment is on disk before archiving.
            response = http.request('POST', address + '/_flush')
            response.read()
            compress_index(index_name, version, node.wdPath(), output_dir)
    finally:
        node.stop()
def create_index(index_name, crate_home, output_dir):
    """Bring up a single Crate node, run the statements registered for
    *index_name*, flush, and compress the resulting data directory."""
    node = CrateLayer(
        'data',
        crate_home=crate_home,
        port=CRATE_HTTP_PORT,
        transport_port=CRATE_TRANSPORT_PORT,
        settings={'es.api.enabled': True},
    )
    node.start()
    address = 'localhost:{}'.format(CRATE_HTTP_PORT)
    try:
        with connect(address) as conn:
            cursor = conn.cursor()
            # Drop the empty trailing element produced by the final ';'.
            for statement in INDICES[index_name].split(';')[:-1]:
                LOGGER.info(statement)
                cursor.execute(statement)
            cursor.execute("select version['number'] from sys.nodes")
            version = cursor.fetchone()[0]
            # Flush so the on-disk files are complete before compressing.
            response = http.request('POST', address + '/_flush')
            response.read()
            compress_index(index_name, version, node.wdPath(), output_dir)
    finally:
        node.stop()
def setUpClass(cls):
    """Start ``cls.NUM_SERVERS`` Crate nodes and block until they have
    all joined one cluster.

    Side effects: appends each node's HTTP port to ``cls.HTTP_PORTS``
    and each started layer to ``cls.CRATES`` so teardown can reach them.
    """
    # auto-discovery with unicast on the same host only works if all
    # nodes are configured with the same port range
    transport_port_range = bind_range(range_size=cls.NUM_SERVERS)
    for i in range(cls.NUM_SERVERS):
        http_port = bind_port()
        layer = CrateLayer(
            cls.node_name(i),
            crate_path(),
            host='localhost',
            port=http_port,
            transport_port=transport_port_range,
            settings=CRATE_SETTINGS,
            env={'JAVA_HOME': os.environ.get('JAVA_HOME', '')},
            # FIX: in a classmethod `cls` IS the class, so the old
            # `cls.__class__.__name__` yielded the metaclass name
            # (e.g. 'type'), not the test class name.
            cluster_name=cls.__name__)
        layer.start()
        cls.HTTP_PORTS.append(http_port)
        cls.CRATES.append(layer)
    dsn = cls.random_dns()
    # Poll until every started node shows up in sys.nodes.  Sleep only
    # while still waiting — the old loop unconditionally slept another
    # 5 seconds after the cluster was already complete.
    while True:
        with connect(dsn) as conn:
            c = conn.cursor()
            c.execute("select * from sys.nodes")
            num_nodes = len(c.fetchall())
        if num_nodes >= len(cls.CRATES):
            break
        time.sleep(5)
def main():
    """Integration check for SRV-record-based node discovery.

    Boots a local DNS server that advertises one SRV record per Crate
    transport port, starts ``num_nodes`` Crate nodes configured to
    discover each other via that DNS server, and raises AssertionError
    if they do not all join the cluster.
    """
    num_nodes = 3
    node0_http_port = bind_port()
    dns_port = bind_port()
    transport_ports = []
    # NOTE(review): line breaks of this zone file were reconstructed;
    # confirm against the original layout.
    zone_file = '''
crate.internal. 600 IN SOA localhost localhost ( 2007120710 1d 2h 4w 1h )
crate.internal. 400 IN NS localhost
crate.internal. 600 IN A 127.0.0.1'''
    # One SRV record per node so the nodes can find each other's
    # transport ports through DNS.
    for i in range(num_nodes):
        port = bind_port()
        transport_ports.append(port)
        zone_file += '''
_test._srv.crate.internal. 600 IN SRV 1 10 {port} 127.0.0.1.'''.format(
            port=port)
    dns_server = DNSServer(ZoneResolver(zone_file), port=dns_port)
    dns_server.start_thread()
    crate_layers = []
    for i in range(num_nodes):
        crate_layer = CrateLayer(
            'node-' + str(i),
            cluster_name='crate-dns-discovery',
            crate_home=crate_path(),
            port=node0_http_port if i == 0 else bind_port(),
            transport_port=transport_ports[i],
            settings={
                'psql.port': bind_port(),
                "discovery.zen.hosts_provider": "srv",
                "discovery.srv.query": "_test._srv.crate.internal.",
                "discovery.srv.resolver": "127.0.0.1:" + str(dns_port)
            })
        crate_layers.append(crate_layer)
        crate_layer.start()
    try:
        # FIX: close the client connection — the old code leaked it.
        with connect('localhost:{}'.format(node0_http_port)) as conn:
            c = conn.cursor()
            c.execute('''select count() from sys.nodes''')
            result = c.fetchone()
        if result[0] != num_nodes:
            raise AssertionError(
                "Nodes could not join, expected number of nodes: " +
                str(num_nodes) + ", found: " + str(result[0]))
    finally:
        # Stop every node that was started, then the DNS server.
        for crate_layer in crate_layers:
            crate_layer.stop()
        dns_server.stop()
def create_index(cfg):
    """Start a single-node Crate instance, run ``CREATE_INDEX_SQL``, and
    compress the node's data directory into ``cfg.output_dir``.

    :param cfg: config object providing ``crate_home`` and ``output_dir``.
    """
    crate_layer = CrateLayer(
        'data',
        crate_home=cfg.crate_home,
        port=CRATE_HTTP_PORT,
        transport_port=CRATE_TRANSPORT_PORT
    )
    crate_layer.start()
    try:
        # FIX: use str.format like the sibling helpers — the old
        # 'localhost:' + CRATE_HTTP_PORT raises TypeError when the port
        # constant is an int, and is identical when it is a str.
        with connect('localhost:{}'.format(CRATE_HTTP_PORT)) as conn:
            cur = conn.cursor()
            # Splitting on ';' leaves an empty trailing element, so skip it.
            cmds = CREATE_INDEX_SQL.split(';')
            for cmd in cmds[:-1]:
                cur.execute(cmd)
            # Record the server version so the archive can be tied to it.
            cur.execute("select version['number'] from sys.nodes")
            version = cur.fetchone()[0]
            compress_index(version, crate_layer.wdPath(), cfg.output_dir)
    finally:
        # Always shut the node down, even on failure.
        crate_layer.stop()
def main():
    """Run every whitelisted sqllogic ``*.test`` file in parallel against
    one throwaway Crate node, then merge the per-file logs into a single
    file."""
    settings = {
        'psql.port': CRATE_PSQL_PORT,
        # The disk.watermark settings can be removed once
        # crate-python > 0.21.1 has been released
        "cluster.routing.allocation.disk.watermark.low": "100k",
        "cluster.routing.allocation.disk.watermark.high": "10k",
        "cluster.routing.allocation.disk.watermark.flood_stage": "1k",
    }
    node = CrateLayer(
        'crate-sqllogic',
        crate_home=crate_path(),
        port=CRATE_HTTP_PORT,
        transport_port=CRATE_TRANSPORT_PORT,
        settings=settings)
    node.start()
    logfiles = []
    try:
        with ProcessPoolExecutor() as pool:
            pending = []
            for i, candidate in enumerate(tests_path.glob('**/*.test')):
                filepath = tests_path / candidate
                relpath = str(filepath.relative_to(tests_path))
                # Only run files matched by the whitelist patterns.
                if not any(pattern.match(str(relpath)) for pattern in FILE_WHITELIST):
                    continue
                logfile = f'sqllogic-{os.path.basename(relpath)}-{i}.log'
                logfiles.append(logfile)
                pending.append(pool.submit(
                    run_file,
                    filename=str(filepath),
                    host='localhost',
                    port=str(CRATE_PSQL_PORT),
                    log_level=logging.WARNING,
                    log_file=logfile,
                    failfast=True,
                    schema=f'x{i}'))
            # Surface the first failure raised by any worker.
            for done in as_completed(pending):
                done.result()
    finally:
        node.stop()
        # Merge the per-file logs into one file (which is gitignored)
        # instead of leaving dozens of them around.
        merge_logfiles(logfiles)