def getPaths(v1, v2):
    # Execute the Gremlin query and time how long it takes.
    try:
        start = time.time()
        p = g.withSideEffect("Neptune#repeatMode","CHUNKED_DFS").withSack(0).V().hasId(v1). \
            repeat(__.outE().sack(Operator.sum).by('weight').inV().simplePath()).times(3). \
            emit(__.hasId(v2)).hasId(v2).limit(300).order().by(__.sack(),Order.incr). \
            local(__.union(__.path().by(T.id).by('weight'),__.sack()).fold()). \
            toList()
        end = time.time()
        timeDelta = end - start
        return v1, v2, timeDelta
    except Exception as e:
        return "error", str(e)
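# Hedged usage sketch (an assumption, not part of the original script): getPaths()
# relies on a module-level traversal source `g` and the `time` module, both set up
# in the remote-connection snippet later in this file. On success it returns
# (v1, v2, elapsed_seconds); on failure it returns ("error", message).
print(getPaths('5031468', '3140001'))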
def get_known_associates(person_id):
    logging.info('Request Received: Get Known Associates')
    g = setup_graph()
    try:
        params = app.current_request.query_params if app.current_request.query_params else {}
        threshold = float(params.get('threshold', '0.5'))
        originating_person = get_person(person_id=person_id, g=g)

        # DEDUP
        people = g.withSack(1.0).V(originating_person) \
            .repeat(__.outE('knows').sack(Operator.mult).by('weight').inV()) \
            .until(__.sack().is_(P.lt(threshold))).emit() \
            .as_('b').sack().as_('a').select('a', 'b').toList()

        # Unfortunately the above query will include the final node which goes below the threshold.
        # I'm sure there is a way to improve this query to not include it! Until then, handle explicitly.
        # Similarly, I am deduping in Python - but ideally I would push this into the query (it's not a
        # simple dedup since I need to retain the max edge weight to make sure I don't mistakenly filter
        # a dupe with < threshold).
        people = list(set([person['b'] for person in people if person['a'] >= threshold]))
        logging.info("Found People: %s" % str(people))
        results = []
        for person in people:
            results.append(vertex_to_json(vertex=person, g=g))
    except (ValueError, AttributeError, TypeError) as e:
        logging.error(e, exc_info=True)
        raise BadRequestError('Could not retrieve known associates. Error: ' + str(e))
    logging.info("Successfully retrieved known associates")
    return {'known_associates': results}
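# Hedged sketch (an untested assumption, not the original implementation): one way to
# push the threshold filter and the dedup into the traversal itself, as the comment
# above suggests, is to emit only while the sack is still at or above the threshold
# and dedup() server-side. A traverser whose path product has dropped below the
# threshold never emits its person, so a duplicate reached via a qualifying path is
# still kept. Intended as a drop-in replacement for the `people = ...` query inside
# get_known_associates, reusing the same names (g, originating_person, threshold).
people = g.withSack(1.0).V(originating_person) \
    .repeat(__.outE('knows').sack(Operator.mult).by('weight').inV()) \
    .until(__.sack().is_(P.lt(threshold))) \
    .emit(__.sack().is_(P.gte(threshold))) \
    .dedup().toList()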
def get_related_videos(self, video_id, page_size, paging_state):
    # Note: we're building a single graph traversal, but describing in three parts for readability
    # Part 1: finding "relevant users"
    # - find the vertex for the video
    # - what users rated this video highly?
    # - but don't grab too many, or this won't work OLTP, and "by('rating')" favors the higher ratings
    # Part 2: finding videos that were highly rated by users who liked the source video
    # - For those users who rated the video highly, grab N highly rated videos.
    # - Save the rating so we can sum the scores later, and use sack()
    #   because it does not require path information. (as()/select() was slow)
    # - excluding the source video
    # - Filter out videos with no uploaded edge to a user
    # - what are the most popular videos as calculated by the sum of all their ratings
    # Part 3: now that we have that big map of [video: score], let's order it
    # - then grab properties of the video and the user who uploaded each video using project()

    # find users that watched (rated) this video highly
    # for those users, grab N highly rated videos and assemble results
    traversal = self.graph.V().has('video', 'videoId', video_id).as_('^video') \
        .inE('rated').has('rating', gte(MIN_RATING)) \
        .sample(NUM_RATINGS_TO_SAMPLE).by('rating').outV() \
        .local(__.outE('rated').has('rating', gte(MIN_RATING)).limit(LOCAL_USER_RATINGS_TO_SAMPLE)) \
        .sack(Operator.assign).by('rating').inV() \
        .where(neq('^video')) \
        .filter(__.in_('uploaded').hasLabel('user')) \
        .group().by().by(__.sack().sum()) \
        .order(Scope.local).by(Column.values, Order.decr) \
        .limit(Scope.local, NUM_RECOMMENDATIONS).select(Column.keys).unfold() \
        .project('video_id', 'added_date', 'name', 'preview_image_location', 'user_id') \
        .by('videoId').by('added_date').by('name').by('preview_image_location').by(__.in_('uploaded').values('userId'))

    logging.debug('Traversal: ' + str(traversal.bytecode))

    results = traversal.toList()
    logging.debug('Traversal generated ' + str(len(results)) + ' results')

    videos = list()
    for result in results:
        logging.debug('Traversal Result: ' + str(result))
        videos.append(
            VideoPreview(
                video_id=result['video_id'],
                added_date=result['added_date'],
                user_id=result['user_id'],
                name=result['name'],
                preview_image_location=result['preview_image_location']))

    return RelatedVideosResponse(video_id=video_id, videos=videos, paging_state=None)
def get_suggested_for_user(self, user_id, page_size, paging_state):
    # Note: we're building a single graph traversal, but describing in three parts for readability
    # Part 1: finding "similar users"
    # - find the vertex for the user
    # - get all of the videos the user watched and store them
    # - go back to our current user
    # - for the videos I rated highly...
    # - what other users rated those videos highly? (this is like saying "what users share my taste")
    # - but don't grab too many, or this won't work OLTP, and "by('rating')" favors the higher ratings
    # - (except the current user)
    # Part 2: finding videos that were highly rated by similar users
    # - For those users who share my taste, grab N highly rated videos.
    # - Save the rating so we can sum the scores later, and use sack()
    #   because it does not require path information. (as()/select() was slow)
    # - excluding the videos the user has already watched
    # - Filter out the video if for some reason there is no uploaded edge to a user
    # - what are the most popular videos as calculated by the sum of all their ratings
    # Part 3: now that we have that big map of [video: score], let's order it
    # - then grab properties of the video and the user who uploaded each video using project()
    traversal = self.graph.V().has('user', 'userId', user_id).as_('^user') \
        .outE('rated').sideEffect(__.inV().aggregate('^watchedVideos')) \
        .has('rating', gte(MIN_RATING)).inV() \
        .inE('rated').has('rating', gte(MIN_RATING)) \
        .sample(NUM_RATINGS_TO_SAMPLE).by('rating').outV() \
        .where(neq('^user')) \
        .local(__.outE('rated').has('rating', gte(MIN_RATING)).limit(LOCAL_USER_RATINGS_TO_SAMPLE)) \
        .sack(Operator.assign).by('rating').inV() \
        .where(without('^watchedVideos')) \
        .group().by().by(__.sack().sum()) \
        .order(Scope.local).by(Column.values, Order.decr) \
        .limit(Scope.local, NUM_RECOMMENDATIONS).select(Column.keys).unfold() \
        .project('video_id', 'added_date', 'name', 'preview_image_location', 'user_id') \
        .by('videoId').by('added_date').by('name').by('preview_image_location').by(__.in_('uploaded').values('userId'))

    logging.debug('Traversal: ' + str(traversal.bytecode))

    results = traversal.toList()
    logging.debug('Traversal generated ' + str(len(results)) + ' results')

    videos = list()
    for result in results:
        logging.debug('Traversal Result: ' + str(result))
        videos.append(VideoPreview(video_id=result['video_id'],
                                   added_date=result['added_date'],
                                   user_id=result['user_id'],
                                   name=result['name'],
                                   preview_image_location=result['preview_image_location']))

    return SuggestedVideosResponse(user_id=user_id, videos=videos, paging_state=None)
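# The two recommendation traversals above reference tuning constants defined elsewhere
# in the service. The values below are illustrative assumptions only, included so the
# snippets can be read (or experimented with) in isolation; they are not the original
# configuration.
MIN_RATING = 4                    # treat ratings at or above this value as "rated highly"
NUM_RATINGS_TO_SAMPLE = 1000      # cap on sampled raters so the query stays OLTP-friendly
LOCAL_USER_RATINGS_TO_SAMPLE = 5  # per-user cap on highly rated videos to pull
NUM_RECOMMENDATIONS = 100         # maximum number of recommendations to return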
most_runways = g.V().has('runways',P.gte(5)).\
                     order().\
                     by('runways',Order.desc).\
                     local(__.values('code','runways').fold()).\
                     toList()

heading("Airports with the most runways")

for rows in most_runways:
    print(rows[0],rows[1])

# Shortest routes by distance from AUS to WLG.
# Note the use of the Operator enum.
routes = g.withSack(0).\
           V().\
           has('code','AUS').\
           repeat(__.outE().sack(Operator.sum).by('dist').\
                  inV().simplePath()).\
           until(__.has('code','WLG')).\
           limit(10).\
           order().\
           by(__.sack()).\
           local(__.union(__.path().by('code').by('dist'),__.sack()).fold()).\
           toList()

heading("Sack step tests")

for route in routes:
    print(route)

# All done so close the connection
connection.close()
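# The script above assumes a small heading() helper plus an already-established remote
# connection (the traversal source g and the connection object, set up much like the
# next snippet). A minimal sketch of the helper, offered as an assumption rather than
# the original script's definition:
def heading(text):
    # Print an underlined section title so the console output is easier to scan.
    print(text)
    print('-' * len(text))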
# Connect to a Gremlin Server using a remote connection and issue some basic queries.

# Import some classes we will need to talk to our graph
from gremlin_python.driver.driver_remote_connection import DriverRemoteConnection
from gremlin_python.structure.graph import Graph
from gremlin_python import statics
from gremlin_python.process.graph_traversal import __
from gremlin_python.process.strategies import *
from gremlin_python.process.traversal import *

# Endpoint for our graph (an Amazon Neptune cluster endpoint in this case).
# Note how the path is a Web Socket (ws) connection.
endpoint = 'ws://neptunedbcluster-70fbv2otqb11.cluster-c814mxxksbjw.us-east-1.neptune.amazonaws.com:8182/gremlin'

# Obtain a graph traversal source using a remote connection
graph = Graph()
g = graph.traversal().withRemote(DriverRemoteConnection(endpoint, 'g'))

v1 = '5031468'
v2 = '3140001'

# rel-sci specific code to loop through two files.
res = g.withSideEffect("Neptune#repeatMode","CHUNKED_DFS").withSack(0).V().hasId(v1). \
    repeat(__.outE().sack(Operator.sum).by('weight').inV().simplePath()).times(3). \
    emit(__.hasId(v2)).hasId(v2).limit(300).order().by(__.sack(),Order.incr). \
    local(__.union(__.path().by(T.id).by('weight'),__.sack()).fold()). \
    toList()

print(res)
def test_translations(self):
    g = traversal().withGraph(Graph())

    tests = list()
    # 0
    tests.append([g.V(), "g.V()"])
    # 1
    tests.append([g.V('1', '2', '3', '4'), "g.V('1','2','3','4')"])
    # 2
    tests.append([g.V('3').valueMap(True), "g.V('3').valueMap(True)"])
    # 3
    tests.append([g.V().constant(5), "g.V().constant(5)"])
    # 4
    tests.append([g.V().constant(1.5), "g.V().constant(1.5)"])
    # 5
    tests.append([g.V().constant('Hello'), "g.V().constant('Hello')"])
    # 6
    tests.append([g.V().hasLabel('airport').limit(5), "g.V().hasLabel('airport').limit(5)"])
    # 7
    tests.append([g.V().hasLabel(within('a', 'b', 'c')), "g.V().hasLabel(within(['a','b','c']))"])
    # 8
    tests.append([g.V().hasLabel('airport', 'continent').out().limit(5),
                  "g.V().hasLabel('airport','continent').out().limit(5)"])
    # 9
    tests.append([g.V().hasLabel('airport').out().values('code').limit(5),
                  "g.V().hasLabel('airport').out().values('code').limit(5)"])
    # 10
    tests.append([g.V('3').as_('a').out('route').limit(10).where(eq('a')).by('region'),
                  "g.V('3').as('a').out('route').limit(10).where(eq('a')).by('region')"])
    # 11
    tests.append([g.V('3').repeat(__.out('route').simplePath()).times(2).path().by('code'),
                  "g.V('3').repeat(__.out('route').simplePath()).times(2).path().by('code')"])
    # 12
    tests.append([g.V().hasLabel('airport').out().has('region', 'US-TX').values('code').limit(5),
                  "g.V().hasLabel('airport').out().has('region','US-TX').values('code').limit(5)"])
    # 13
    tests.append([g.V().hasLabel('airport').union(__.values('city'), __.values('region')).limit(5),
                  "g.V().hasLabel('airport').union(__.values('city'),__.values('region')).limit(5)"])
    # 14
    tests.append([g.V('3').as_('a').out('route', 'routes'), "g.V('3').as('a').out('route','routes')"])
    # 15
    tests.append([g.V().where(__.values('runways').is_(5)), "g.V().where(__.values('runways').is(5))"])
    # 16
    tests.append([g.V('3').repeat(__.out().simplePath()).until(__.has('code', 'AGR')).path().by('code').limit(5),
                  "g.V('3').repeat(__.out().simplePath()).until(__.has('code','AGR')).path().by('code').limit(5)"])
    # 17
    tests.append([g.V().hasLabel('airport').order().by(__.id()), "g.V().hasLabel('airport').order().by(__.id())"])
    # 18
    tests.append([g.V().hasLabel('airport').order().by(T.id), "g.V().hasLabel('airport').order().by(T.id)"])
    # 19
    tests.append([g.V().hasLabel('airport').order().by(__.id(),Order.desc),
                  "g.V().hasLabel('airport').order().by(__.id(),Order.desc)"])
    # 20
    tests.append([g.V().hasLabel('airport').order().by('code',Order.desc),
                  "g.V().hasLabel('airport').order().by('code',Order.desc)"])
    # 21
    tests.append([g.V('1', '2', '3').local(__.out().out().dedup().fold()),
                  "g.V('1','2','3').local(__.out().out().dedup().fold())"])
    # 22
    tests.append([g.V('3').out().path().count(Scope.local), "g.V('3').out().path().count(Scope.local)"])
    # 23
    tests.append([g.E().count(), "g.E().count()"])
    # 24
    tests.append([g.V('5').outE('route').inV().path().limit(10), "g.V('5').outE('route').inV().path().limit(10)"])
    # 25
    tests.append([g.V('5').propertyMap().select(Column.keys), "g.V('5').propertyMap().select(Column.keys)"])
    # 26
    tests.append([g.V('5').propertyMap().select(Column.values), "g.V('5').propertyMap().select(Column.values)"])
    # 27
    tests.append([g.V('3').values('runways').math('_ + 1'), "g.V('3').values('runways').math('_ + 1')"])
    # 28
    tests.append([g.V('3').emit().repeat(__.out().simplePath()).times(3).limit(5).path(),
                  "g.V('3').emit().repeat(__.out().simplePath()).times(3).limit(5).path()"])
    # 29
    tests.append([g.V().match(__.as_('a').has('code', 'LHR').as_('b')).select('b').by('code'),
                  "g.V().match(__.as('a').has('code','LHR').as('b')).select('b').by('code')"])
    # 30
    tests.append([g.V().has('test-using-keyword-as-property','repeat'),
                  "g.V().has('test-using-keyword-as-property','repeat')"])
    # 31
    tests.append([g.V('1').addE('test').to(__.V('4')), "g.V('1').addE('test').to(__.V('4'))"])
    # 32
    tests.append([g.V().values('runways').max(), "g.V().values('runways').max()"])
    # 33
    tests.append([g.V().values('runways').min(), "g.V().values('runways').min()"])
    # 34
    tests.append([g.V().values('runways').sum(), "g.V().values('runways').sum()"])
    # 35
    tests.append([g.V().values('runways').mean(), "g.V().values('runways').mean()"])
    # 36
    tests.append([g.withSack(0).V('3', '5').sack(Operator.sum).by('runways').sack(),
                  "g.withSack(0).V('3','5').sack(Operator.sum).by('runways').sack()"])
    # 37
    tests.append([g.V('3').values('runways').store('x').V('4').values('runways').store('x').by(__.constant(1)).V('6').store('x').by(__.constant(1)).select('x').unfold().sum(),
                  "g.V('3').values('runways').store('x').V('4').values('runways').store('x').by(__.constant(1)).V('6').store('x').by(__.constant(1)).select('x').unfold().sum()"])
    # 38
    tests.append([g.inject(3, 4, 5), "g.inject(3,4,5)"])
    # 39
    tests.append([g.inject([3, 4, 5]), "g.inject([3, 4, 5])"])
    # 40
    tests.append([g.inject(3, 4, 5).count(), "g.inject(3,4,5).count()"])
    # 41
    tests.append([g.V().has('runways', gt(5)).count(), "g.V().has('runways',gt(5)).count()"])
    # 42
    tests.append([g.V().has('runways', lte(5.3)).count(), "g.V().has('runways',lte(5.3)).count()"])
    # 43
    tests.append([g.V().has('code', within(123,124)), "g.V().has('code',within([123,124]))"])
    # 44
    tests.append([g.V().has('code', within(123, 'abc')), "g.V().has('code',within([123,'abc']))"])
    # 45
    tests.append([g.V().has('code', within('abc', 123)), "g.V().has('code',within(['abc',123]))"])
    # 46
    tests.append([g.V().has('code', within('abc', 'xyz')), "g.V().has('code',within(['abc','xyz']))"])
    # 47
    tests.append([g.V('1', '2').has('region', P.within('US-TX','US-GA')),
                  "g.V('1','2').has('region',within(['US-TX','US-GA']))"])
    # 48
    tests.append([g.V().and_(__.has('runways', P.gt(5)), __.has('region','US-TX')),
                  "g.V().and(__.has('runways',gt(5)),__.has('region','US-TX'))"])
    # 49
    tests.append([g.V().union(__.has('runways', gt(5)), __.has('region','US-TX')),
                  "g.V().union(__.has('runways',gt(5)),__.has('region','US-TX'))"])
    # 50
    tests.append([g.V('3').choose(__.values('runways').is_(3),__.constant('three'),__.constant('not three')),
                  "g.V('3').choose(__.values('runways').is(3),__.constant('three'),__.constant('not three'))"])
    # 51
    tests.append([g.V('3').choose(__.values('runways')).option(1,__.constant('three')).option(2,__.constant('not three')),
                  "g.V('3').choose(__.values('runways')).option(1,__.constant('three')).option(2,__.constant('not three'))"])
    # 52
    tests.append([g.V('3').choose(__.values('runways')).option(1.5,__.constant('one and a half')).option(2,__.constant('not three')),
                  "g.V('3').choose(__.values('runways')).option(1.5,__.constant('one and a half')).option(2,__.constant('not three'))"])
    # 53
    tests.append([g.V('3').repeat(__.out().simplePath()).until(__.loops().is_(1)).count(),
                  "g.V('3').repeat(__.out().simplePath()).until(__.loops().is(1)).count()"])
    # 54
    tests.append([g.V().hasLabel('airport').limit(20).group().by('region').by('code').order(Scope.local).by(Column.keys),
                  "g.V().hasLabel('airport').limit(20).group().by('region').by('code').order(Scope.local).by(Column.keys)"])
    # 55
    tests.append([g.V('1').as_('a').V('2').as_('a').select(Pop.all_, 'a'),
                  "g.V('1').as('a').V('2').as('a').select(Pop.all,'a')"])
    # 56
    tests.append([g.addV('test').property(Cardinality.set_, 'p1', 10),
                  "g.addV('test').property(Cardinality.set,'p1',10)"])
    # 57
    tests.append([g.addV('test').property(Cardinality.list_, 'p1', 10),
                  "g.addV('test').property(Cardinality.list,'p1',10)"])
    # 58
    tests.append([g.addV('test').property(Cardinality.single, 'p1', 10),
                  "g.addV('test').property(Cardinality.single,'p1',10)"])
    # 59
    tests.append([g.V().limit(5).order().by(T.label), "g.V().limit(5).order().by(T.label)"])
    # 60
    tests.append([g.V().range(1, 5), "g.V().range(1,5)"])
    # 61
    tests.append([g.addV('test').property('p1', 123), "g.addV('test').property('p1',123)"])
    # 62
    tests.append([g.addV('test').property('date',datetime(2021, 2, 1, 9, 30)),
                  "g.addV('test').property('date',new Date(121,2,1,9,30,0))"])
    # 63
    tests.append([g.addV('test').property('date',datetime(2021, 2, 1)),
                  "g.addV('test').property('date',new Date(121,2,1,0,0,0))"])
    # 64
    tests.append([g.addE('route').from_(__.V('1')).to(__.V('2')),
                  "g.addE('route').from(__.V('1')).to(__.V('2'))"])
    # 65
    tests.append([g.withSideEffect('a', [1, 2]).V('3').select('a'),
                  "g.withSideEffect('a',[1, 2]).V('3').select('a')"])
    # 66
    tests.append([g.withSideEffect('a', 1).V('3').select('a'), "g.withSideEffect('a',1).V('3').select('a')"])
    # 67
    tests.append([g.withSideEffect('a', 'abc').V('3').select('a'), "g.withSideEffect('a','abc').V('3').select('a')"])
    # 68
    tests.append([g.V().has('airport', 'region', 'US-NM').limit(3).values('elev').fold().index(),
                  "g.V().has('airport','region','US-NM').limit(3).values('elev').fold().index()"])
    # 69
    tests.append([g.V('3').repeat(__.timeLimit(1000).out().simplePath()).until(__.has('code', 'AGR')).path(),
                  "g.V('3').repeat(__.timeLimit(1000).out().simplePath()).until(__.has('code','AGR')).path()"])
    # 70
    tests.append([g.V().hasLabel('airport').where(__.values('elev').is_(gt(14000))),
                  "g.V().hasLabel('airport').where(__.values('elev').is(gt(14000)))"])
    # 71
    tests.append([g.V().hasLabel('airport').where(__.out().count().is_(gt(250))).values('code'),
                  "g.V().hasLabel('airport').where(__.out().count().is(gt(250))).values('code')"])
    # 72
    tests.append([g.V().hasLabel('airport').filter(__.out().count().is_(gt(250))).values('code'),
                  "g.V().hasLabel('airport').filter(__.out().count().is(gt(250))).values('code')"])
    # 73
    tests.append([g.withSack(0).
                  V('3').
                  repeat(__.outE('route').sack(Operator.sum).by('dist').inV()).
                  until(__.has('code', 'AGR').or_().loops().is_(4)).
                  has('code', 'AGR').
                  local(__.union(__.path().by('code').by('dist'),__.sack()).fold()).
                  limit(10),
                  "g.withSack(0).V('3').repeat(__.outE('route').sack(Operator.sum).by('dist').inV()).until(__.has('code','AGR').or().loops().is(4)).has('code','AGR').local(__.union(__.path().by('code').by('dist'),__.sack()).fold()).limit(10)"])
    # 74
    tests.append([g.addV().as_('a').addV().as_('b').addE('knows').from_('a').to('b'),
                  "g.addV().as('a').addV().as('b').addE('knows').from('a').to('b')"])
    # 75
    tests.append([g.addV('Person').as_('a').addV('Person').as_('b').addE('knows').from_('a').to('b'),
                  "g.addV('Person').as('a').addV('Person').as('b').addE('knows').from('a').to('b')"])
    # 76
    tests.append([g.V('3').project('Out','In').by(__.out().count()).by(__.in_().count()),
                  "g.V('3').project('Out','In').by(__.out().count()).by(__.in().count())"])
    # 77
    tests.append([g.V('44').out().aggregate('a').out().where(within('a')).path(),
                  "g.V('44').out().aggregate('a').out().where(within(['a'])).path()"])
    # 78
    tests.append([g.V().has('date', datetime(2021, 2, 22)), "g.V().has('date',new Date(121,2,22,0,0,0))"])
    # 79
    tests.append([g.V().has('date', within(datetime(2021, 2, 22), datetime(2021, 1, 1))),
                  "g.V().has('date',within([new Date(121,2,22,0,0,0),new Date(121,1,1,0,0,0)]))"])
    # 80
    tests.append([g.V().has('date', between(datetime(2021, 1, 1), datetime(2021, 2, 22))),
                  "g.V().has('date',between(new Date(121,1,1,0,0,0),new Date(121,2,22,0,0,0)))"])
    # 81
    tests.append([g.V().has('date', inside(datetime(2021, 1, 1),datetime(2021, 2, 22))),
                  "g.V().has('date',inside(new Date(121,1,1,0,0,0),new Date(121,2,22,0,0,0)))"])
    # 82
    tests.append([g.V().has('date', P.gt(datetime(2021, 1, 1, 9, 30))),
                  "g.V().has('date',gt(new Date(121,1,1,9,30,0)))"])
    # 83
    tests.append([g.V().has('runways', between(3,5)), "g.V().has('runways',between(3,5))"])
    # 84
    tests.append([g.V().has('runways', inside(3,5)), "g.V().has('runways',inside(3,5))"])
    # 85
    tests.append([g.V('44').outE().elementMap(), "g.V('44').outE().elementMap()"])
    # 86
    tests.append([g.V('44').valueMap().by(__.unfold()), "g.V('44').valueMap().by(__.unfold())"])
    # 87
    tests.append([g.V('44').valueMap().with_(WithOptions.tokens,WithOptions.labels),
                  "g.V('44').valueMap().with(WithOptions.tokens,WithOptions.labels)"])
    # 88
    tests.append([g.V('44').valueMap().with_(WithOptions.tokens), "g.V('44').valueMap().with(WithOptions.tokens)"])
    # 89
    tests.append([g.withStrategies(ReadOnlyStrategy()).addV('test'),
                  "g.withStrategies(new ReadOnlyStrategy()).addV('test')"])
    # 90
    strategy = SubgraphStrategy(vertices=__.has('region', 'US-TX'), edges=__.hasLabel('route'))
    tests.append([g.withStrategies(strategy).V().count(),
                  "g.withStrategies(new SubgraphStrategy(vertices:__.has('region','US-TX'),edges:__.hasLabel('route'))).V().count()"])
    # 91
    strategy = SubgraphStrategy(vertex_properties=__.hasNot('runways'))
    tests.append([g.withStrategies(strategy).V().count(),
                  "g.withStrategies(new SubgraphStrategy(vertexProperties:__.hasNot('runways'))).V().count()"])
    # 92
    strategy = SubgraphStrategy(vertices=__.has('region', 'US-TX'),vertex_properties=__.hasNot('runways'))
    tests.append([g.withStrategies(strategy).V().count(),
                  "g.withStrategies(new SubgraphStrategy(vertices:__.has('region','US-TX'),vertexProperties:__.hasNot('runways'))).V().count()"])
    # 93
    strategy = SubgraphStrategy(vertices=__.has('region', 'US-TX'), edges=__.hasLabel('route'))
    tests.append([g.withStrategies(ReadOnlyStrategy(),strategy).V().count(),
                  "g.withStrategies(new ReadOnlyStrategy(),new SubgraphStrategy(vertices:__.has('region','US-TX'),edges:__.hasLabel('route'))).V().count()"])
    # 94
    strategy = SubgraphStrategy(vertices=__.has('region', 'US-TX'))
    tests.append([g.withStrategies(ReadOnlyStrategy(), strategy).V().count(),
                  "g.withStrategies(new ReadOnlyStrategy(),new SubgraphStrategy(vertices:__.has('region','US-TX'))).V().count()"])
    # 95
    tests.append([g.with_('evaluationTimeout', 500).V().count(),
                  "g.withStrategies(new OptionsStrategy(evaluationTimeout:500)).V().count()"])
    # 96
    tests.append([g.withStrategies(OptionsStrategy({'evaluationTimeout': 500})).V().count(),
                  "g.withStrategies(new OptionsStrategy(evaluationTimeout:500)).V().count()"])
    # 97
    tests.append([g.withStrategies(PartitionStrategy(partition_key="partition", write_partition="a", read_partitions=["a"])).addV('test'),
                  "g.withStrategies(new PartitionStrategy(partitionKey:'partition',writePartition:'a',readPartitions:['a'])).addV('test')"])
    # 98
    tests.append([g.withComputer().V().shortestPath().with_(ShortestPath.target, __.has('name','peter')),
                  "g.withStrategies(new VertexProgramStrategy()).V().shortestPath().with('~tinkerpop.shortestPath.target',__.has('name','peter'))"])

    tlr = Translator().of('g')
    for t in range(len(tests)):
        a = tlr.translate(tests[t][0].bytecode)
        assert a == tests[t][1]