def use_cluster_with_graph(num_nodes):
    """
    Start a graph/spark test cluster one node at a time.

    This is a workaround to account for the fact that spark nodes will
    conflict over master assignment when started all at once: node 1 is
    started first and allowed to claim the spark master role, then the
    remaining nodes are started sequentially.

    :param num_nodes: total number of nodes in the cluster; also used as
        the replication factor for the ``dse_leases`` keyspace.
    """
    if USE_CASS_EXTERNAL:
        # An externally managed cluster is already running; nothing to do.
        return

    # Create the cluster but don't start it.
    use_singledc(start=False, workloads=['graph', 'spark'])

    # Start the first node and wait for its binary protocol port to open.
    get_node(1).start(wait_for_binary_proto=True)
    wait_for_node_socket(get_node(1), 120)

    # Wait for the spark master HTTP endpoint to start up.
    spark_master_http = ("localhost", 7080)
    common.check_socket_listening(spark_master_http, timeout=60)

    # Raise dse_leases replication so every node can participate once it
    # joins; use a throwaway connection and always shut it down.
    tmp_cluster = TestCluster()
    try:
        session = tmp_cluster.connect()
        statement = "ALTER KEYSPACE dse_leases WITH REPLICATION = {'class': 'NetworkTopologyStrategy', 'dc1': '%d'}" % (num_nodes)
        session.execute(statement)
    finally:
        tmp_cluster.shutdown()

    # Start the remaining nodes (node 1 is already up). The original code
    # iterated from 1 and skipped with `if i is not 1`, an identity
    # comparison against an int literal — replaced with an explicit range.
    for i in range(2, num_nodes + 1):
        node = get_node(i)
        node.start(wait_for_binary_proto=True)
        wait_for_node_socket(node, 120)

    # Wait for workers to show up as Alive on the master.
    wait_for_spark_workers(3, 120)
def wait_for_up(cluster, node):
    """
    Poll driver metadata until *node* is marked up, or raise after ~100s.

    :param cluster: driver ``Cluster`` whose metadata is consulted.
    :param node: node index; formatted into an address via ``IP_FORMAT``.
    :raises RuntimeError: if the host is still down after 100 one-second
        polling attempts.
    """
    max_attempts = 100
    addr = IP_FORMAT % node
    tries = 0
    while tries < max_attempts:
        host = cluster.metadata.get_host(addr)
        if host and host.is_up:
            # Metadata says up; also confirm the node's socket is reachable.
            wait_for_node_socket(get_node(node), 60)
            log.debug("Done waiting for node %s to be up", node)
            return
        # Lazy %-style args so the message is only built if debug logging
        # is enabled (previously an eager str.format call; this also
        # resolves the old todo about mixed interpolation styles).
        log.debug("Host %s is still marked down, waiting", addr)
        tries += 1
        time.sleep(1)
    raise RuntimeError("Host {0} is not up after {1} attempts".format(addr, tries))