def conn_pool_to_first_graph_service(pytestconfig):
    """Yield a connection pool to the first graphd service.

    Uses the ``--address`` option ("host:port") when supplied; otherwise
    falls back to localhost and the first locally started graphd port.
    The pool is closed during fixture teardown.
    """
    addr = pytestconfig.getoption("address")
    host_addr = addr.split(":") if addr else ["localhost", get_ports()[0]]
    assert len(host_addr) == 2
    pool = get_conn_pool(host_addr[0], host_addr[1])
    try:
        yield pool
    finally:
        # Close the pool even when the consuming test raises, so the
        # connection is not leaked across the test session.
        pool.close()
def start_nebula(nb, configs):
    """Start (or attach to) a nebula graph service and load the csv test data.

    When ``configs.address`` is set, attaches to that remote instance;
    otherwise installs and starts a local service via ``nb``.  Loaded
    space descriptors and service endpoints are persisted to temp files
    for later test stages.
    """
    remote = configs.address is not None and configs.address != ""
    if remote:
        print('test remote nebula graph, address is {}'.format(configs.address))
        pieces = configs.address.split(':')
        if len(pieces) != 2:
            raise Exception('Invalid address, address is {}'.format(configs.address))
        address, port = pieces
        ports = [int(port)]
    else:
        nb.install()
        address = "localhost"
        ports = nb.start(
            debug_log=opt_is(configs.debug, "true"),
            multi_graphd=configs.multi_graphd,
        )

    # Load csv data
    pool = get_conn_pool("localhost", ports[0])
    sess = pool.get_session(configs.user, configs.password)
    if not os.path.exists(TMP_DIR):
        os.mkdir(TMP_DIR)

    spaces = []
    for space in ("nba", "nba_int_vid", "student"):
        space_dir = os.path.join(CURR_PATH, "data", space)
        spaces.append(load_csv_data(sess, space_dir, space).__dict__)
    with open(SPACE_TMP_PATH, "w") as f:
        f.write(json.dumps(spaces))

    with open(NB_TMP_PATH, "w") as f:
        f.write(json.dumps({
            "ip": "localhost",
            "port": ports,
            "work_dir": nb.work_dir,
        }))
    print('Start nebula successfully')
def conn_pool_to_second_graph_service(pytestconfig):
    """Yield a connection pool to the second (multi-graphd) graph service.

    Always targets localhost and the second locally started graphd port;
    ``pytestconfig`` is kept in the signature as a fixture dependency even
    though the ``--address`` option is not consulted here.  The pool is
    closed during fixture teardown.
    """
    pool = get_conn_pool("localhost", get_ports()[1])
    try:
        yield pool
    finally:
        # Close the pool even when the consuming test raises.
        pool.close()
def given_nebulacluster_with_param(
    request,
    params,
    graphd_num,
    metad_num,
    storaged_num,
    class_fixture_variables,
    pytestconfig,
):
    """Install and start a fresh nebula cluster with per-module config params.

    ``params`` is a multi-line string of ``module:key=value`` entries where
    ``module`` is one of graphd/storaged/metad (case-insensitive).  The
    started cluster, its connection pool and an opened session are stored
    in ``class_fixture_variables`` for later steps.
    """
    graphd_param, metad_param, storaged_param = {}, {}, {}
    if params is not None:
        for param in params.splitlines():
            # Split only on the first ':' and '=' so that values which
            # themselves contain those characters are kept intact.
            module, config = param.strip().split(":", 1)
            module = module.lower()
            assert module in ["graphd", "storaged", "metad"]
            key, value = config.strip().split("=", 1)
            if module == "graphd":
                graphd_param[key] = value
            elif module == "storaged":
                storaged_param[key] = value
            else:
                metad_param[key] = value
    user = pytestconfig.getoption("user")
    password = pytestconfig.getoption("password")
    build_dir = pytestconfig.getoption("build_dir")
    src_dir = pytestconfig.getoption("src_dir")
    nebula_svc = NebulaService(
        build_dir,
        src_dir,
        int(metad_num),
        int(storaged_num),
        int(graphd_num),
    )
    for process in nebula_svc.graphd_processes:
        process.update_param(graphd_param)
    for process in nebula_svc.storaged_processes:
        process.update_param(storaged_param)
    for process in nebula_svc.metad_processes:
        process.update_param(metad_param)
    # Unique working directory per cluster: random prefix + timestamp.
    work_dir = os.path.join(
        build_dir,
        "C" + space_generator() +
        time.strftime('%Y-%m-%dT%H-%M-%S', time.localtime()),
    )
    nebula_svc.install(work_dir)
    nebula_svc.start()
    graph_ip = nebula_svc.graphd_processes[0].host
    graph_port = nebula_svc.graphd_processes[0].tcp_port
    # TODO add ssl pool if tests needed
    pool = get_conn_pool(graph_ip, graph_port, None)
    sess = pool.get_session(user, password)
    class_fixture_variables["current_session"] = sess
    class_fixture_variables["sessions"].append(sess)
    class_fixture_variables["cluster"] = nebula_svc
    class_fixture_variables["pool"] = pool
def when_login_graphd_fail(graph, user, password, class_fixture_variables, msg):
    """Attempt to log in to the given graphd and expect authentication to fail.

    Asserts that ``AuthFailedException`` is raised and that its message
    contains ``msg``.  A successful login is itself a test failure, since
    this step explicitly expects rejection.
    """
    index = parse_service_index(graph)
    assert index is not None, "Invalid graph name, name is {}".format(graph)
    nebula_svc = class_fixture_variables.get("cluster")
    assert nebula_svc is not None, "Cannot get the cluster"
    assert index < len(nebula_svc.graphd_processes)
    graphd_process = nebula_svc.graphd_processes[index]
    graph_ip, graph_port = graphd_process.host, graphd_process.tcp_port
    pool = get_conn_pool(graph_ip, graph_port, None)
    try:
        pool.get_session(user, password)
    except AuthFailedException as e:
        assert msg in e.message
    else:
        # Original code silently passed here; any other exception still
        # propagates, as before.
        assert False, "expected login to fail, but it succeeded"
def when_login_graphd(graph, user, password, class_fixture_variables, pytestconfig):
    """Log in to the named graphd of the running cluster.

    Stores the new session (and its pool) in ``class_fixture_variables``
    so later steps can use it.
    """
    index = parse_service_index(graph)
    assert index is not None, "Invalid graph name, name is {}".format(graph)
    cluster = class_fixture_variables.get("cluster")
    assert cluster is not None, "Cannot get the cluster"
    assert index < len(cluster.graphd_processes)
    proc = cluster.graphd_processes[index]
    pool = get_conn_pool(proc.host, proc.tcp_port, None)
    session = pool.get_session(user, password)
    # do not release original session, as we may have cases to test multiple sessions.
    # connection could be released after cluster stopped.
    class_fixture_variables["current_session"] = session
    class_fixture_variables["sessions"].append(session)
    class_fixture_variables["pool"] = pool
def start_standalone(nb, configs):
    """Start (or attach to) a standalone nebula instance and load csv data.

    Discovers every space under ``data/`` that ships a ``config.yaml``,
    loads it, and persists the space descriptors plus service endpoint /
    SSL settings to temp files for later test stages.
    """
    if configs.address is not None and configs.address != "":
        print('test remote nebula graph, address is {}'.format(configs.address))
        pieces = configs.address.split(':')
        if len(pieces) != 2:
            raise Exception('Invalid address, address is {}'.format(configs.address))
        address, port = pieces
        ports = [int(port)]
    else:
        print('Start standalone version')
        nb.install_standalone()
        address = "localhost"
        ports = nb.start_standalone()

    is_graph_ssl = (opt_is(configs.enable_ssl, "true")
                    or opt_is(configs.enable_graph_ssl, "true"))
    # NOTE(review): ca_signed here is derived from enable_ssl, while the
    # json below records configs.ca_signed — confirm this asymmetry is intended.
    ca_signed = opt_is(configs.enable_ssl, "true")
    # Load csv data
    pool = get_conn_pool(address, ports[0], get_ssl_config(is_graph_ssl, ca_signed))
    sess = pool.get_session(configs.user, configs.password)
    if not os.path.exists(TMP_DIR):
        os.mkdir(TMP_DIR)

    spaces = []
    data_root = os.path.join(CURR_PATH, "data")
    for space in os.listdir(data_root):
        space_dir = os.path.join(data_root, space)
        # only directories that ship a config.yaml are loadable spaces
        if not os.path.exists(os.path.join(space_dir, "config.yaml")):
            continue
        spaces.append(load_csv_data(sess, space_dir, space).__dict__)
    with open(SPACE_TMP_PATH, "w") as f:
        f.write(json.dumps(spaces))

    with open(NB_TMP_PATH, "w") as f:
        f.write(json.dumps({
            "ip": "localhost",
            "port": ports,
            "work_dir": nb.work_dir,
            "enable_ssl": configs.enable_ssl,
            "enable_graph_ssl": configs.enable_graph_ssl,
            "ca_signed": configs.ca_signed,
        }))
    print('Start standalone successfully')
def start_nebula(nb, configs):
    """Install and start a local nebula service, then load the csv test data.

    Persists the loaded space descriptors and the service endpoint to
    temp files for later test stages.
    """
    nb.install()
    port = nb.start()

    # Load csv data
    pool = get_conn_pool("localhost", port)
    sess = pool.get_session(configs.user, configs.password)
    if not os.path.exists(TMP_DIR):
        os.mkdir(TMP_DIR)

    spaces = []
    for space in ("nba", "nba_int_vid", "student"):
        space_dir = os.path.join(CURR_PATH, "data", space)
        spaces.append(load_csv_data(sess, space_dir, space).__dict__)
    with open(SPACE_TMP_PATH, "w") as f:
        f.write(json.dumps(spaces))

    with open(NB_TMP_PATH, "w") as f:
        f.write(json.dumps({
            "ip": "localhost",
            "port": port,
            "work_dir": nb.work_dir,
        }))