def test_graph_profile(self):
    """
    Test verifying various aspects of graph config properties.

    @since 1.0.0
    @jira_ticket PYTHON-570
    @test_category dse graph
    """
    generate_classic(self.session)

    # Create various execution profiles:
    # - a non-default row factory
    exec_dif_factory = GraphExecutionProfile(
        row_factory=single_object_row_factory)
    exec_dif_factory.graph_options.graph_name = self.graph_name
    # - an LBP restricted to a reachable host
    exec_dif_lbp = GraphExecutionProfile(
        load_balancing_policy=WhiteListRoundRobinPolicy(['127.0.0.1']))
    exec_dif_lbp.graph_options.graph_name = self.graph_name
    # - an LBP restricted to an unreachable host (should fail)
    exec_bad_lbp = GraphExecutionProfile(
        load_balancing_policy=WhiteListRoundRobinPolicy(['127.0.0.2']))
    # BUG FIX: previously this line re-assigned exec_dif_lbp's graph name,
    # leaving exec_bad_lbp without a graph name.
    exec_bad_lbp.graph_options.graph_name = self.graph_name
    # - a very short request timeout (should time out on a slow query)
    exec_short_timeout = GraphExecutionProfile(
        request_timeout=1,
        load_balancing_policy=WhiteListRoundRobinPolicy(['127.0.0.1']))
    exec_short_timeout.graph_options.graph_name = self.graph_name

    # Add a single execution profile on cluster creation
    local_cluster = Cluster(
        protocol_version=PROTOCOL_VERSION,
        execution_profiles={"exec_dif_factory": exec_dif_factory})
    # BUG FIX: ensure the locally-created cluster is always shut down,
    # even when an assertion below fails.
    self.addCleanup(local_cluster.shutdown)
    local_session = local_cluster.connect()

    rs1 = self.session.execute_graph('g.V()')
    rs2 = local_session.execute_graph('g.V()',
                                      execution_profile='exec_dif_factory')

    # Verify default and non-default row factories behave differently
    self.assertFalse(isinstance(rs2[0], Vertex))
    self.assertTrue(isinstance(rs1[0], Vertex))

    # Add other profiles and validate that LBPs are honored
    local_cluster.add_execution_profile("exec_dif_ldp", exec_dif_lbp)
    local_session.execute_graph('g.V()', execution_profile="exec_dif_ldp")
    local_cluster.add_execution_profile("exec_bad_lbp", exec_bad_lbp)
    with self.assertRaises(NoHostAvailable):
        local_session.execute_graph('g.V()',
                                    execution_profile="exec_bad_lbp")

    # Try with a missing execution profile
    with self.assertRaises(ValueError):
        local_session.execute_graph('g.V()',
                                    execution_profile='bad_exec_profile')

    # Validate that the request timeout is honored
    local_cluster.add_execution_profile("exec_short_timeout",
                                        exec_short_timeout)
    with self.assertRaises(OperationTimedOut):
        local_session.execute_graph(
            'java.util.concurrent.TimeUnit.MILLISECONDS.sleep(2000L);',
            execution_profile='exec_short_timeout')
def load_schema(self, use_schema=False):
    """Load the schema file into the graph.

    :param use_schema: when True, create the graph if it does not exist
        and execute every schema statement found in ``self.schema_path``;
        when False, assume the schema already exists in the graph.
    """
    if use_schema:
        # Default execution profile bound to the target graph.
        ep = GraphExecutionProfile(graph_options=GraphOptions(
            graph_name=self.graph_name))
        cluster = Cluster(
            execution_profiles={EXEC_PROFILE_GRAPH_DEFAULT: ep})
        try:
            session = cluster.connect()
            # Create the graph if it does not already exist; the system
            # profile is required because the graph may not exist yet.
            session.execute_graph(
                "system.graph(name).ifNotExists().create()",
                {'name': self.graph_name},
                execution_profile=EXEC_PROFILE_GRAPH_SYSTEM_DEFAULT)
            # Replay the schema file, one Gremlin statement per line.
            with open(self.schema_path) as f:
                # Skip lines that begin with a slash, a space, or a
                # newline (comments / blank lines).
                pat = re.compile("[/ \n]")
                for line in f:
                    if not re.match(pat, line):
                        print("正在加载 {}".format(line.strip()))
                        session.execute_graph(line.strip())
        finally:
            # BUG FIX: the cluster was previously leaked; release its
            # connection pools and control connection.
            cluster.shutdown()
    else:
        print("schema未加载,请确保graph中存在schema")
def create_execution_profile(graph_name):
    """
    Build a GraphExecutionProfile configured for GraphTraversal execution.

    The returned profile must still be registered on the cluster via
    `cluster.add_execution_profile`.

    :param graph_name: The graph name
    """
    # Traversals require the DSE graph query language and a row factory
    # that deserializes traversal results into driver objects.
    options = GraphOptions(
        graph_name=graph_name,
        graph_language=DseGraph.DSE_GRAPH_QUERY_LANGUAGE,
    )
    return GraphExecutionProfile(
        row_factory=graph_traversal_dse_object_row_factory,
        graph_options=options,
    )
def create_graph(self):
    """Create the graph ``self.graph_name`` on the cluster if it does not
    already exist."""
    # Default execution profile bound to the target graph.
    ep = GraphExecutionProfile(graph_options=GraphOptions(
        graph_name=self.graph_name))
    cluster = Cluster(contact_points=[self.address],
                      execution_profiles={EXEC_PROFILE_GRAPH_DEFAULT: ep})
    try:
        session = cluster.connect()
        # The system profile is required because the target graph may not
        # exist yet.
        session.execute_graph(
            "system.graph(name).ifNotExists().create()",
            {'name': self.graph_name},
            execution_profile=EXEC_PROFILE_GRAPH_SYSTEM_DEFAULT)
    finally:
        # BUG FIX: the locally-created cluster was previously leaked;
        # shut it down so connection pools are released.
        cluster.shutdown()
def get_result():
    """Create a throwaway 'test' graph, define a minimal schema, add a
    vertex, read it back, drop the graph, and return the last fetched row
    as a string."""
    # create the default execution profile pointing at a specific graph
    graph_name = 'test'
    ep = GraphExecutionProfile(graph_options=GraphOptions(
        graph_name=graph_name))
    cluster = Cluster(execution_profiles={EXEC_PROFILE_GRAPH_DEFAULT: ep})
    try:
        session = cluster.connect()

        # use the system execution profile (or one with no
        # graph_options.graph_name set) when accessing the system API
        session.execute_graph(
            "system.graph(name).ifNotExists().create()",
            {'name': graph_name},
            execution_profile=EXEC_PROFILE_GRAPH_SYSTEM_DEFAULT)

        # ... set dev mode or configure graph schema ...

        # // Property Labels
        result = session.execute_graph(
            'schema.propertyKey("genreId").Text().create()')
        result = session.execute_graph(
            'schema.propertyKey("name").Text().create()')

        # // Vertex labels
        # schema.vertexLabel("movie").properties("movieId","title","year").create()
        # schema.vertexLabel("person").properties("personId","name").create()
        result = session.execute_graph(
            'schema.vertexLabel("genre").properties("genreId","name").create()')

        # // Edge labels
        # schema.edgeLabel("director").single().connection("movie","person").create()
        # schema.edgeLabel("belongsTo").single().connection("movie","genre").create()
        #
        # // Vertex indexes
        # schema.vertexLabel("movie").index("moviesById").materialized().by("movieId").add()
        # schema.vertexLabel("person").index("personsById").materialized().by("personId").add()
        # schema.vertexLabel("genre").index("genresByName").materialized().by("name").add()
        result = session.execute_graph(
            'graph.addVertex(label,"genre","genreId","g2","name","Adventure")')

        # Keep the last row returned by the addVertex query.
        data = ''
        for r in result:
            data = r

        # Drop the graph database
        session.execute_graph("system.graph(name).drop()",
                              {'name': graph_name},
                              execution_profile=EXEC_PROFILE_GRAPH_SYSTEM_DEFAULT)
    finally:
        # BUG FIX: the cluster was previously leaked, and an exception
        # before the drop left both the graph and the connections behind;
        # always release the cluster.
        cluster.shutdown()

    # Test
    return str(data)
def connect(self, nodes):
    """Connect to the cluster at *nodes*, bind the default graph execution
    profile to the 'powertrain_graph' graph, ensure the 'cassandra_summit'
    vertex exists, and log the cluster topology."""
    graph_name = "powertrain_graph"
    profile = GraphExecutionProfile(
        graph_options=GraphOptions(graph_name=graph_name),
        consistency_level=ConsistencyLevel.LOCAL_QUORUM)
    cluster = Cluster(
        nodes, execution_profiles={EXEC_PROFILE_GRAPH_DEFAULT: profile})
    metadata = cluster.metadata
    self.session = cluster.connect()

    # Get-or-create the 'cassandra_summit' vertex in a single traversal.
    query_parts = [
        'g.V().hasLabel("cassandra_summit").has("name", name)',
        '.tryNext().orElseGet {',
        'g.addV("cassandra_summit").property("name", name).next()}',
    ]
    # uses the default execution profile
    self.session.execute_graph("".join(query_parts),
                               {'name': "cassandra_summit"})

    log.info('Connected to cluster: ' + metadata.cluster_name)
    for host in metadata.all_hosts():
        log.info('Datacenter: %s; Host: %s; Rack: %s',
                 host.datacenter, host.address, host.rack)
# At the time of this blog post, dse_graph only supports gremlinpython version 3.2.x # This script was tested using gremlinpython version 3.2.6 from dse.cluster import Cluster, GraphExecutionProfile, EXEC_PROFILE_GRAPH_DEFAULT, EXEC_PROFILE_GRAPH_SYSTEM_DEFAULT from dse.graph import GraphOptions from dse_graph import DseGraph from gremlin_python.process.graph_traversal import __ from gremlin_python.structure.graph import Vertex graph_name = 'modern' ep_schema = GraphExecutionProfile(graph_options=GraphOptions(graph_name=graph_name)) ep = DseGraph.create_execution_profile(graph_name) cluster = Cluster(execution_profiles={'schema': ep_schema, EXEC_PROFILE_GRAPH_DEFAULT: ep}) session = cluster.connect() # Define schema session.execute_graph("system.graph(name).create()", { 'name': graph_name }, execution_profile = EXEC_PROFILE_GRAPH_SYSTEM_DEFAULT) session.execute_graph("schema.propertyKey('neighborhood').Bigint().create()", execution_profile = 'schema') session.execute_graph("schema.propertyKey('name').Text().create()", execution_profile = 'schema') session.execute_graph("schema.propertyKey('age').Bigint().create()", execution_profile = 'schema') session.execute_graph("schema.propertyKey('weight').Float().create()", execution_profile = 'schema') session.execute_graph("schema.vertexLabel('person').partitionKey('neighborhood').clusteringKey('name').properties('age').create()", execution_profile = 'schema') session.execute_graph("schema.edgeLabel('knows').properties('weight').connection('person', 'person').create()", execution_profile = 'schema') # Execute batch batch = DseGraph.batch() batch.add(__.addV('person').property('neighborhood', 0).property('name', 'bob').property('age', 23)) batch.add(__.addV('person').property('neighborhood', 0).property('name', 'alice').property('age', 21)) batch.add(__.addE('knows') .from_(Vertex({ 'neighborhood': 0, 'name': 'bob', '~label' : 'person' }))
import datetime
from datetime import time
from dse.cluster import GraphExecutionProfile, Cluster, EXEC_PROFILE_GRAPH_DEFAULT
from dse.graph.query import GraphOptions, GraphProtocol
from flask import Flask, escape, request, render_template, jsonify
from interaction import Interaction
from rest_model import process_model

# Module-level DSE session: default graph profile bound to the 'prodcat'
# graph, using the GraphSON 3.0 protocol.
ep = GraphExecutionProfile(graph_options=GraphOptions(
    graph_name="prodcat", graph_protocol=GraphProtocol.GRAPHSON_3_0))
cluster = Cluster(execution_profiles={EXEC_PROFILE_GRAPH_DEFAULT: ep})
session = cluster.connect()

# Flask app serving static assets and templates from web/public.
app = Flask(__name__, static_url_path="", static_folder="web/public", template_folder="web/public")


def main():
    # Development entry point only (debug server).
    app.run(debug=True)


@app.route("/")
def index():
    # Serve the single-page app shell.
    return render_template("index.html")


# NOTE(review): the function decorated by this route is defined beyond the
# visible portion of this file.
@app.route("/product")