def verify_storage(self): global rt1 global rt2 rt1_id = utils.get_node_id(rt1) rt2_id = utils.get_node_id(rt2) assert rt1_id assert rt2_id print "RUNTIMES:", rt1_id, rt2_id _log.analyze("TESTRUN", "+ IDS", {}) caps = utils.get_index(rt1, 'node/capabilities/calvinsys.events.timer') assert rt1_id in caps['result'] _log.analyze("TESTRUN", "+ RT1 CAPS", {}) caps = utils.get_index(rt2, 'node/capabilities/calvinsys.events.timer') assert rt1_id in caps['result'] _log.analyze("TESTRUN", "+ RT2 CAPS", {}) assert utils.get_index( rt1, format_index_string([ 'node_name', { 'organization': 'org.testexample', 'name': 'testNode2' } ])) _log.analyze("TESTRUN", "+ RT1 INDEX", {}) assert utils.get_index( rt2, format_index_string([ 'node_name', { 'organization': 'org.testexample', 'name': 'testNode1' } ])) _log.analyze("TESTRUN", "+ RT2 INDEX", {})
def control_storage(args):
    """Handle storage-related control commands.

    Supports two commands on `args.cmd`:
      * 'get_index'     -- parse `args.index` as JSON, convert it to an index
                           string via format_index_string(), then query.
      * 'raw_get_index' -- parse `args.index` as JSON and query with it as-is.

    Returns the result of utils.get_index() for a known command, None
    otherwise (unchanged from the original behavior).

    Raises:
        Exception: if `args.index` is not valid JSON.
    """
    from calvin.utilities.attribute_resolver import format_index_string
    import json
    if args.cmd in ('get_index', 'raw_get_index'):
        # BUG FIX: was a bare `except:` which also swallowed KeyboardInterrupt
        # and SystemExit. json.loads raises ValueError on malformed JSON and
        # TypeError on non-string input -- catch exactly those.
        try:
            index = json.loads(args.index)
        except (ValueError, TypeError):
            raise Exception("Malformed JSON index string:\n%s" % args.index)
        if args.cmd == 'get_index':
            index = format_index_string(index)
        return utils.get_index(args.node, index)
def testNodeIndexMany(self):
    """ Since storage is eventually consistent, and we don't really know when,
    this test is quite loose on its asserts but shows some warnings when
    inconsistent. It is also extremly slow. """
    # Build ~21 runtimes on even ports 5000..5040; each triple is
    # (calvin uri, control uri, owner name "ownerN").
    self.hosts = [("calvinip://%s:%d" % (ip_addr, d), "http://%s:%d" % (ip_addr, d+1), "owner%d" % ((d-5000)/2)) for d in range(5000, 5041, 2)]
    self.rt = [dispatch_node(h[0], h[1], attributes={'indexed_public': {'owner':{'personOrGroup': h[2]}}})[0] for h in self.hosts]
    # Give storage time to propagate the indexed attributes.
    time.sleep(3)
    owner = []
    # Each owner index entry must map to exactly its own runtime id.
    for i in range(len(self.hosts)):
        res = utils.get_index(self.rt[0], format_index_string({'owner':{'personOrGroup': self.hosts[i][2]}}))
        owner.append(res)
        assert(set(res['result']) == set([self.rt[i].id]))
    # The aggregate owner index must never contain unknown ids; missing ids
    # only warn, since propagation may not have finished.
    owners = utils.get_index(self.rt[0], format_index_string({'owner':{}}))
    assert(set(owners['result']) <= set([r.id for r in self.rt]))
    if not set(owners['result']) >= set([r.id for r in self.rt]):
        warn("Not all nodes manage to reach the index %d of %d" % (len(owners['result']), len(self.rt)))
    # Snapshot before tearing nodes down, so removed ids stay addressable.
    rt = self.rt[:]
    ids = [r.id for r in rt]
    hosts = self.hosts[:]
    # Kill node 10 and check it (eventually) disappears from the index.
    utils.quit(self.rt[10])
    del self.rt[10]
    del self.hosts[10]
    owners = utils.get_index(self.rt[0], format_index_string({'owner':{}}))
    assert(set(owners['result']) <= set(ids))
    if ids[10] in set(owners['result']):
        warn("The removed node is still in the all owners set")
    removed_owner = utils.get_index(self.rt[0], format_index_string({'owner':{'personOrGroup': hosts[10][2]}}))
    # Either already removed, or still exactly the dead node's id.
    assert(not removed_owner['result'] or set(removed_owner['result']) == set([ids[10]]))
    if removed_owner['result']:
        warn("The removed node is still in its own index")
    # Destroy a bunch of the nodes
    for _ in range(7):
        utils.quit(self.rt[10])
        del self.rt[10]
        del self.hosts[10]
    time.sleep(2)
    owners = utils.get_index(self.rt[0], format_index_string({'owner':{}}))
    assert(set(owners['result']) <= set(ids))
    l = len(set(owners['result']))
    # 8 nodes were removed in total (1 + 7); warn if the index still holds more.
    if l > (len(ids)-8):
        warn("Did have %d nodes left even after removal of 8 from %d" % (l, len(ids)))
def testNodeIndexMany(self):
    """ Since storage is eventually consistent, and we don't really know when,
    this test is quite loose on its asserts but shows some warnings when
    inconsistent. It is also extremely slow. """
    # Build ~21 runtimes on even ports 5000..5040; each triple is
    # (calvin uri, control uri, owner name "ownerN").
    self.hosts = [("calvinip://127.0.0.1:%d" % d, "http://localhost:%d" % (d+1), "owner%d" % ((d-5000)/2)) for d in range(5000, 5041, 2)]
    # NOTE(review): the sibling variant of this test uses dispatch_node(...)[0];
    # confirm this dispatch_node returns the runtime object directly, since
    # self.rt[i].id is used below.
    self.rt = [dispatch_node(h[0], h[1], attributes=["node/affiliation/owner/%s" % h[2]]) for h in self.hosts]
    # Give storage time to propagate the indexed attributes.
    time.sleep(3)
    owner = []
    # Each owner index entry must map to exactly its own runtime id.
    for i in range(len(self.hosts)):
        res = utils.get_index(self.rt[0], "node/affiliation/owner/%s" % self.hosts[i][2])
        owner.append(res)
        assert(set(res['result']) == set([self.rt[i].id]))
    owners = utils.get_index(self.rt[0], "node/affiliation/owner")
    assert(set(owners['result']) <= set([r.id for r in self.rt]))
    # BUG FIX: the condition was inverted -- it warned "Not all nodes manage to
    # reach the index" precisely when the sets WERE equal, i.e. when everything
    # had propagated. Warn when some node is missing instead.
    if set(owners['result']) != set([r.id for r in self.rt]):
        warn("Not all nodes manage to reach the index")
    # Snapshot before tearing nodes down, so removed ids stay addressable.
    rt = self.rt[:]
    ids = [r.id for r in rt]
    hosts = self.hosts[:]
    # Kill node 10 and check it (eventually) disappears from the index.
    utils.quit(self.rt[10])
    del self.rt[10]
    del self.hosts[10]
    owners = utils.get_index(self.rt[0], "node/affiliation/owner")
    assert(set(owners['result']) <= set(ids))
    if ids[10] in set(owners['result']):
        warn("The removed node is still in the all owners set")
    removed_owner = utils.get_index(self.rt[0], "node/affiliation/owner/%s" % hosts[10][2])
    # Either already removed, or still exactly the dead node's id.
    assert(not removed_owner['result'] or set(removed_owner['result']) == set([ids[10]]))
    if removed_owner['result']:
        warn("The removed node is still in its own index")
    # Destroy a bunch of the nodes
    for _ in range(7):
        utils.quit(self.rt[10])
        del self.rt[10]
        del self.hosts[10]
    time.sleep(2)
    owners = utils.get_index(self.rt[0], "node/affiliation/owner")
    assert(set(owners['result']) <= set(ids))
    l = len(set(owners['result']))
    # 8 nodes were removed in total (1 + 7); warn if the index still holds more.
    if l > (len(ids)-8):
        warn("Did have %d nodes left even after removal of 8 from %d" % (l, len(ids)))
def testNodeIndexThree(self): time.sleep(4) print self.rt1.id, self.rt2.id, self.rt3.id owner1 = utils.get_index(self.rt1, format_index_string({'owner':{'organization': 'org.testexample', 'personOrGroup': 'testOwner1'}})) assert(set(owner1['result']) == set([self.rt1.id, self.rt2.id])) owner2 = utils.get_index(self.rt1, format_index_string({'owner':{'organization': 'org.testexample', 'personOrGroup': 'testOwner2'}})) assert(set(owner2['result']) == set([self.rt3.id])) owners = utils.get_index(self.rt1, format_index_string({'owner':{'organization': 'org.testexample'}})) assert(set(owners['result']) == set([self.rt1.id, self.rt2.id, self.rt3.id])) names = utils.get_index(self.rt1, format_index_string({'node_name':{}})) assert(set(names['result']) == set([self.rt1.id, self.rt2.id, self.rt3.id])) addr2 = utils.get_index(self.rt1, format_index_string({'address': {'country': 'SE', 'locality': 'testCity', 'street': 'testStreet', 'streetNumber': 2}})) assert(set(addr2['result']) == set([self.rt3.id]))
def verify_storage(self): global rt1 global rt2 rt1_id = utils.get_node_id(rt1) rt2_id = utils.get_node_id(rt2) assert rt1_id assert rt2_id print "RUNTIMES:", rt1_id, rt2_id _log.analyze("TESTRUN", "+ IDS", {}) caps = utils.get_index(rt1, 'node/capabilities/calvinsys.events.timer') assert rt1_id in caps['result'] _log.analyze("TESTRUN", "+ RT1 CAPS", {}) caps = utils.get_index(rt2, 'node/capabilities/calvinsys.events.timer') assert rt1_id in caps['result'] _log.analyze("TESTRUN", "+ RT2 CAPS", {}) assert utils.get_index(rt1, format_index_string(['node_name', {'organization': 'org.testexample', 'name': 'testNode2'}])) _log.analyze("TESTRUN", "+ RT1 INDEX", {}) assert utils.get_index(rt2, format_index_string(['node_name', {'organization': 'org.testexample', 'name': 'testNode1'}])) _log.analyze("TESTRUN", "+ RT2 INDEX", {})
def testNodeIndexThree(self): time.sleep(4) print self.rt1.id, self.rt2.id, self.rt3.id owner1 = utils.get_index(self.rt1, "node/affiliation/owner/org.testexample/testOwner1") assert(set(owner1['result']) == set([self.rt1.id, self.rt2.id])) owner2 = utils.get_index(self.rt1, "node/affiliation/owner/org.testexample/testOwner2") assert(set(owner2['result']) == set([self.rt3.id])) owners = utils.get_index(self.rt1, "node/affiliation/owner/org.testexample") assert(set(owners['result']) == set([self.rt1.id, self.rt2.id, self.rt3.id])) names = utils.get_index(self.rt1, "node/affiliation/name") assert(set(names['result']) == set([self.rt1.id, self.rt2.id, self.rt3.id])) addr2 = utils.get_index(self.rt1, "node/address/testCountry/testCity/testStreet/2") assert(set(addr2['result']) == set([self.rt3.id]))
def verify_storage(self):
    """Wait for both runtimes' control APIs and storage to come up, then
    verify the capability and node_name index entries this test relies on.
    """
    global rt1
    global rt2
    rt1_id = None
    rt2_id = None
    failed = True
    # Try 10 times waiting for control API to be up and running
    for i in range(10):
        try:
            # `or` keeps an id already fetched in an earlier attempt.
            rt1_id = rt1_id or utils.get_node_id(rt1)
            rt2_id = rt2_id or utils.get_node_id(rt2)
            failed = False
            break
        except:
            time.sleep(0.1)
    assert not failed
    assert rt1_id
    assert rt2_id
    print "RUNTIMES:", rt1_id, rt2_id
    _log.analyze("TESTRUN", "+ IDS", {'waited': 0.1*i})
    failed = True
    # Try 20 times waiting for storage to be connected
    caps1 = []
    caps2 = []
    for i in range(20):
        try:
            # Storage counts as connected once both runtimes see both node ids
            # (2 entries) under the always-present python-json capability.
            if len(caps1) != 2:
                caps1 = utils.get_index(rt1, "node/capabilities/calvinsys.native.python-json")['result']
            if len(caps2) != 2:
                caps2 = utils.get_index(rt2, "node/capabilities/calvinsys.native.python-json")['result']
            if len(caps1) == 2 and len(caps2) == 2:
                failed = False
                break
            else:
                time.sleep(0.1)
        except:
            time.sleep(0.1)
    assert not failed
    _log.analyze("TESTRUN", "+ STORAGE", {'waited': 0.1*i})
    # Now check for the values needed by this specific test
    caps = utils.get_index(rt1, 'node/capabilities/calvinsys.events.timer')
    assert rt1_id in caps['result']
    _log.analyze("TESTRUN", "+ RT1 CAPS", {})
    caps = utils.get_index(rt2, 'node/capabilities/calvinsys.events.timer')
    # NOTE(review): queries rt2 but checks rt1_id -- confirm rt2_id was not intended.
    assert rt1_id in caps['result']
    _log.analyze("TESTRUN", "+ RT2 CAPS", {})
    # Each runtime looks up the OTHER runtime's node_name attribute.
    assert utils.get_index(rt1, format_index_string(['node_name', {'organization': 'org.testexample', 'name': 'testNode2'}]))
    _log.analyze("TESTRUN", "+ RT1 INDEX", {})
    assert utils.get_index(rt2, format_index_string(['node_name', {'organization': 'org.testexample', 'name': 'testNode1'}]))
    _log.analyze("TESTRUN", "+ RT2 INDEX", {})
def verify_storage(self):
    """Verify that each of the three runtimes can resolve the node_name
    attribute of the other two through storage (never its own entry,
    forcing the lookup through the shared index).
    """
    global rt1
    global rt2
    global rt3
    rt1_id = utils.get_node_id(rt1)
    rt2_id = utils.get_node_id(rt2)
    rt3_id = utils.get_node_id(rt3)
    assert rt1_id
    assert rt2_id
    assert rt3_id
    print "RUNTIMES:", rt1_id, rt2_id, rt3_id
    # Capability checks disabled in the original -- kept for reference.
    #caps = utils.get_index(rt1, 'node/capabilities/calvinsys.events.timer')
    #assert set(caps['result']) >= set([rt1_id, rt2_id, rt3_id])
    #caps = utils.get_index(rt2, 'node/capabilities/calvinsys.events.timer')
    #assert set(caps['result']) >= set([rt1_id, rt2_id, rt3_id])
    #caps = utils.get_index(rt3, 'node/capabilities/calvinsys.events.timer')
    #assert set(caps['result']) >= set([rt1_id, rt2_id, rt3_id])
    # testNode1 visible from rt2 and rt3.
    assert utils.get_index(rt2, format_index_string(['node_name', {'organization': 'org.testexample', 'name': 'testNode1'}]))
    assert utils.get_index(rt3, format_index_string(['node_name', {'organization': 'org.testexample', 'name': 'testNode1'}]))
    # testNode2 visible from rt1 and rt3.
    assert utils.get_index(rt1, format_index_string(['node_name', {'organization': 'org.testexample', 'name': 'testNode2'}]))
    assert utils.get_index(rt3, format_index_string(['node_name', {'organization': 'org.testexample', 'name': 'testNode2'}]))
    # testNode3 visible from rt1 and rt2.
    assert utils.get_index(rt1, format_index_string(['node_name', {'organization': 'org.testexample', 'name': 'testNode3'}]))
    assert utils.get_index(rt2, format_index_string(['node_name', {'organization': 'org.testexample', 'name': 'testNode3'}]))
def testLocalIndex(self):
    """Exercise add/remove on an owner index while storage is still local.

    Writes go through rt1 only; rt2 is queried to detect whether the DHT has
    started (local-only storage must return no result from rt2). Finally,
    after a grace period, rt2 must see the same state as rt1.
    """
    # We don't have a way of preventing DHT storage from starting,
    # but if this test is run first the storage is not yet up and running
    lindex = {}
    lindex['Harald'] = [calvinuuid.uuid("NODE") for i in range(1,5)]
    lindex['Per'] = [calvinuuid.uuid("NODE") for i in range(1,5)]
    common = calvinuuid.uuid("NODE")
    # Register every node id under its owner's index, via rt1 only.
    for n, node_ids in lindex.items():
        for id_ in node_ids:
            #print "ADD", n, id_
            utils.add_index(self.rt1, "node/affiliation/owner/com.ericsson/" + n, id_)
    h_ = utils.get_index(self.rt2, "node/affiliation/owner/com.ericsson/Harald")
    h = utils.get_index(self.rt1, "node/affiliation/owner/com.ericsson/Harald")
    p = utils.get_index(self.rt1, "node/affiliation/owner/com.ericsson/Per")
    e = utils.get_index(self.rt1, "node/affiliation/owner/com.ericsson")
    assert(h_['result'] is None)  # Test that the storage is local
    assert(set(h['result']) == set(lindex["Harald"]))
    assert(set(p['result']) == set(lindex["Per"]))
    # The parent index aggregates both owners' entries.
    assert(set(e['result']) == set(lindex["Per"] + lindex["Harald"]))
    # Remove the first node id of each owner and re-check all three views.
    for n, node_ids in lindex.items():
        utils.remove_index(self.rt1, "node/affiliation/owner/com.ericsson/" + n, node_ids[0])
    h = utils.get_index(self.rt1, "node/affiliation/owner/com.ericsson/Harald")
    p = utils.get_index(self.rt1, "node/affiliation/owner/com.ericsson/Per")
    e = utils.get_index(self.rt1, "node/affiliation/owner/com.ericsson")
    assert(set(h['result']) == set(lindex["Harald"][1:]))
    assert(set(p['result']) == set(lindex["Per"][1:]))
    assert(set(e['result']) == set(lindex["Per"][1:] + lindex["Harald"][1:]))
    # Add one shared node id under both owners.
    utils.add_index(self.rt1, "node/affiliation/owner/com.ericsson/Harald", common)
    utils.add_index(self.rt1, "node/affiliation/owner/com.ericsson/Per", common)
    h = utils.get_index(self.rt1, "node/affiliation/owner/com.ericsson/Harald")
    p = utils.get_index(self.rt1, "node/affiliation/owner/com.ericsson/Per")
    e = utils.get_index(self.rt1, "node/affiliation/owner/com.ericsson")
    assert(set(h['result']) == set(lindex["Harald"][1:] + [common]))
    assert(set(p['result']) == set(lindex["Per"][1:] + [common]))
    assert(set(e['result']) == set(lindex["Per"][1:] + lindex["Harald"][1:] + [common]))
    # Remove all of Harald's original ids; only `common` should remain there.
    for node_id in lindex['Harald']:
        utils.remove_index(self.rt1, "node/affiliation/owner/com.ericsson/Harald", node_id)
    h = utils.get_index(self.rt1, "node/affiliation/owner/com.ericsson/Harald")
    p = utils.get_index(self.rt1, "node/affiliation/owner/com.ericsson/Per")
    e = utils.get_index(self.rt1, "node/affiliation/owner/com.ericsson")
    h_ = utils.get_index(self.rt2, "node/affiliation/owner/com.ericsson/Harald")
    if h_['result'] is not None:
        # Test that the storage is local
        warn("Storage is no longer only local, it had time to start %s" % h_['result'])
    assert(set(h['result']) == set([common]))
    assert(set(p['result']) == set(lindex["Per"][1:] + [common]))
    assert(set(e['result']) == set(lindex["Per"][1:] + [common]))
    # After a grace period the distributed storage must agree when queried via rt2.
    time.sleep(2)
    h = utils.get_index(self.rt2, "node/affiliation/owner/com.ericsson/Harald")
    p = utils.get_index(self.rt2, "node/affiliation/owner/com.ericsson/Per")
    e = utils.get_index(self.rt2, "node/affiliation/owner/com.ericsson")
    assert(set(h['result']) == set([common]))
    assert(set(p['result']) == set(lindex["Per"][1:] + [common]))
    assert(set(e['result']) == set(lindex["Per"][1:] + [common]))
def testGlobalIndex(self):
    """Exercise add/remove on an owner index through distributed storage.

    All writes go through rt1 and all reads through rt2, so every assert
    exercises the global (DHT) index rather than rt1's local view.
    """
    time.sleep(2)
    lindex = {}
    lindex['Harald'] = [calvinuuid.uuid("NODE") for i in range(1,5)]
    lindex['Per'] = [calvinuuid.uuid("NODE") for i in range(1,5)]
    common = calvinuuid.uuid("NODE")
    # Register every node id under its owner's index, via rt1.
    for n, node_ids in lindex.items():
        for id_ in node_ids:
            #print "ADD", n, id_
            utils.add_index(self.rt1, "node/affiliation/owner/com.ericsson/" + n, id_)
    h = utils.get_index(self.rt2, "node/affiliation/owner/com.ericsson/Harald")
    p = utils.get_index(self.rt2, "node/affiliation/owner/com.ericsson/Per")
    e = utils.get_index(self.rt2, "node/affiliation/owner/com.ericsson")
    assert(set(h['result']) == set(lindex["Harald"]))
    assert(set(p['result']) == set(lindex["Per"]))
    # The parent index aggregates both owners' entries.
    assert(set(e['result']) == set(lindex["Per"] + lindex["Harald"]))
    # Remove the first node id of each owner and re-check via rt2.
    for n, node_ids in lindex.items():
        utils.remove_index(self.rt1, "node/affiliation/owner/com.ericsson/" + n, node_ids[0])
    h = utils.get_index(self.rt2, "node/affiliation/owner/com.ericsson/Harald")
    p = utils.get_index(self.rt2, "node/affiliation/owner/com.ericsson/Per")
    e = utils.get_index(self.rt2, "node/affiliation/owner/com.ericsson")
    assert(set(h['result']) == set(lindex["Harald"][1:]))
    assert(set(p['result']) == set(lindex["Per"][1:]))
    assert(set(e['result']) == set(lindex["Per"][1:] + lindex["Harald"][1:]))
    # Add one shared node id under both owners.
    utils.add_index(self.rt1, "node/affiliation/owner/com.ericsson/Harald", common)
    utils.add_index(self.rt1, "node/affiliation/owner/com.ericsson/Per", common)
    h = utils.get_index(self.rt2, "node/affiliation/owner/com.ericsson/Harald")
    p = utils.get_index(self.rt2, "node/affiliation/owner/com.ericsson/Per")
    e = utils.get_index(self.rt2, "node/affiliation/owner/com.ericsson")
    assert(set(h['result']) == set(lindex["Harald"][1:] + [common]))
    assert(set(p['result']) == set(lindex["Per"][1:] + [common]))
    assert(set(e['result']) == set(lindex["Per"][1:] + lindex["Harald"][1:] + [common]))
    # Remove all of Harald's original ids; only `common` should remain there.
    for node_id in lindex['Harald']:
        utils.remove_index(self.rt1, "node/affiliation/owner/com.ericsson/Harald", node_id)
    h = utils.get_index(self.rt2, "node/affiliation/owner/com.ericsson/Harald")
    p = utils.get_index(self.rt2, "node/affiliation/owner/com.ericsson/Per")
    e = utils.get_index(self.rt2, "node/affiliation/owner/com.ericsson")
    assert(set(h['result']) == set([common]))
    assert(set(p['result']) == set(lindex["Per"][1:] + [common]))
    assert(set(e['result']) == set(lindex["Per"][1:] + [common]))
def setup_module(module):
    """Set up the runtimes used by this test module.

    Two modes, selected by environment variables:
      * Remote (CALVIN_TEST_IP and CALVIN_TEST_UUID set): start one local
        runtime on ephemeral ports and discover two pre-started peer nodes
        through the storage index, matching on the 'purpose' attribute.
      * Local (otherwise): dispatch the peer runtimes and one main runtime
        on fixed ports, then connect them with peer_setup.
    """
    global runtime
    global runtimes
    global peerlist
    global kill_peers
    ip_addr = None
    try:
        ip_addr = os.environ["CALVIN_TEST_IP"]
        purpose = os.environ["CALVIN_TEST_UUID"]
    except KeyError:
        # Missing env vars means local mode; ip_addr stays None.
        pass
    if ip_addr:
        remote_node_count = 2
        kill_peers = False
        test_peers = None
        import socket
        # Let the OS pick two free ports (calvin + control).
        ports=[]
        for a in range(2):
            s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            s.bind(('', 0))
            addr = s.getsockname()
            ports.append(addr[1])
            s.close()
        runtime,_ = dispatch_node("calvinip://%s:%s" % (ip_addr, ports[0]), "http://%s:%s" % (ip_addr, ports[1]))
        _log.debug("First runtime started, control http://%s:%s, calvinip://%s:%s" % (ip_addr, ports[1], ip_addr, ports[0]))
        interval = 0.5
        # Poll the storage index until both remote peers are visible.
        for retries in range(1,20):
            time.sleep(interval)
            _log.debug("Trying to get test nodes for 'purpose' %s" % purpose)
            test_peers = utils.get_index(runtime, format_index_string({'node_name': {'organization': 'com.ericsson', 'purpose': purpose} }))
            if not test_peers is None and not test_peers["result"] is None and \
               len(test_peers["result"]) == remote_node_count:
                test_peers = test_peers["result"]
                break
        if test_peers is None or len(test_peers) != remote_node_count:
            _log.debug("Failed to find all remote nodes within time, peers = %s" % test_peers)
            raise Exception("Not all nodes found dont run tests, peers = %s" % test_peers)
        # Wrap each discovered peer in an RT handle carrying its id and uri.
        test_peer2_id = test_peers[0]
        test_peer2 = utils.get_node(runtime, test_peer2_id)
        if test_peer2:
            runtime2 = utils.RT(test_peer2["control_uri"])
            runtime2.id = test_peer2_id
            runtime2.uri = test_peer2["uri"]
            runtimes.append(runtime2)
        test_peer3_id = test_peers[1]
        if test_peer3_id:
            test_peer3 = utils.get_node(runtime, test_peer3_id)
            if test_peer3:
                runtime3 = utils.RT(test_peer3["control_uri"])
                runtime3.id = test_peer3_id
                runtime3.uri = test_peer3["uri"]
                runtimes.append(runtime3)
    else:
        try:
            ip_addr = os.environ["CALVIN_TEST_LOCALHOST"]
        except:
            import socket
            ip_addr = socket.gethostbyname(socket.gethostname())
        localhost = "calvinip://%s:5000" % (ip_addr,), "http://localhost:5001"
        remotehosts = [("calvinip://%s:%d" % (ip_addr, d), "http://localhost:%d" % (d+1)) for d in range(5002, 5005, 2)]
        # remotehosts = [("calvinip://127.0.0.1:5002", "http://localhost:5003")]
        for host in remotehosts:
            runtimes += [dispatch_node(host[0], host[1])[0]]
        runtime, _ = dispatch_node(localhost[0], localhost[1])
        time.sleep(1)
        # FIXME When storage up and running peersetup not needed, but still useful during testing
        utils.peer_setup(runtime, [i[0] for i in remotehosts])
        time.sleep(0.5)
        """
        # FIXME Does not yet support peerlist
        try:
            self.peerlist = peerlist(
                self.runtime, self.runtime.id, len(remotehosts))
            # Make sure all peers agree on network
            [peerlist(self.runtime, p, len(self.runtimes)) for p in self.peerlist]
        except:
            self.peerlist = []
        """
        peerlist = [rt.control_uri for rt in runtimes]
        print "SETUP DONE ***", peerlist
def setup_module(module):
    """Set up the three runtimes (rt1, rt2, rt3) used by this test module.

    Two modes, selected by environment variables:
      * Remote (CALVIN_TEST_IP and CALVIN_TEST_UUID set): start rt1 on
        ephemeral ports and discover rt2/rt3 through the storage index,
        matching on the 'purpose' attribute.
      * Local (otherwise): dispatch rt1-rt3 on fixed ports 5000-5002
        (control 5003-5005) and fully mesh them with peer_setup.
    """
    global rt1
    global rt2
    global rt3
    global kill_peers
    ip_addr = None
    try:
        ip_addr = os.environ["CALVIN_TEST_IP"]
        purpose = os.environ["CALVIN_TEST_UUID"]
        _log.debug("Running remote tests")
    except KeyError:
        _log.debug("Running lcoal test")
        pass
    if ip_addr:
        remote_node_count = 2
        kill_peers = False
        test_peers = None
        import socket
        # Let the OS pick two free ports (calvin + control).
        ports=[]
        for a in range(2):
            s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            s.bind(('', 0))
            addr = s.getsockname()
            ports.append(addr[1])
            s.close()
        rt1,_ = dispatch_node("calvinip://%s:%s" % (ip_addr, ports[0]), "http://%s:%s" % (ip_addr, ports[1]))
        _log.debug("First runtime started, control http://%s:%s, calvinip://%s:%s" % (ip_addr, ports[1], ip_addr, ports[0]))
        interval = 0.5
        # Poll the storage index until both remote peers are visible.
        for retries in range(1,20):
            time.sleep(interval)
            _log.debug("Trying to get test nodes for 'purpose' %s" % purpose)
            test_peers = utils.get_index(rt1, format_index_string({'node_name': {'organization': 'com.ericsson', 'purpose': purpose} }))
            if not test_peers is None and not test_peers["result"] is None and \
               len(test_peers["result"]) == remote_node_count:
                test_peers = test_peers["result"]
                break
        if test_peers is None or len(test_peers) != remote_node_count:
            _log.debug("Failed to find all remote nodes within time, peers = %s" % test_peers)
            raise Exception("Not all nodes found dont run tests, peers = %s" % test_peers)
        _log.debug("All remote nodes found!")
        # Wrap each discovered peer in an RT handle carrying its id and uri.
        test_peer2_id = test_peers[0]
        test_peer2 = utils.get_node(rt1, test_peer2_id)
        if test_peer2:
            rt2 = utils.RT(test_peer2["control_uri"])
            rt2.id = test_peer2_id
            rt2.uri = test_peer2["uri"]
        test_peer3_id = test_peers[1]
        if test_peer3_id:
            test_peer3 = utils.get_node(rt1, test_peer3_id)
            if test_peer3:
                rt3 = utils.RT(test_peer3["control_uri"])
                rt3.id = test_peer3_id
                rt3.uri = test_peer3["uri"]
    else:
        try:
            ip_addr = os.environ["CALVIN_TEST_LOCALHOST"]
        except:
            import socket
            ip_addr = socket.gethostbyname(socket.gethostname())
        rt1,_ = dispatch_node("calvinip://%s:5000" % (ip_addr,), "http://localhost:5003")
        rt2,_ = dispatch_node("calvinip://%s:5001" % (ip_addr,), "http://localhost:5004")
        rt3,_ = dispatch_node("calvinip://%s:5002" % (ip_addr,), "http://localhost:5005")
        time.sleep(.4)
        # Fully mesh the three runtimes.
        utils.peer_setup(rt1, ["calvinip://%s:5001" % (ip_addr,), "calvinip://%s:5002" % (ip_addr, )])
        utils.peer_setup(rt2, ["calvinip://%s:5000" % (ip_addr,), "calvinip://%s:5002" % (ip_addr, )])
        utils.peer_setup(rt3, ["calvinip://%s:5000" % (ip_addr,), "calvinip://%s:5001" % (ip_addr, )])
        time.sleep(.4)
def verify_storage(self): global rt1 global rt2 rt1_id = None rt2_id = None failed = True # Try 10 times waiting for control API to be up and running for i in range(10): try: rt1_id = rt1_id or utils.get_node_id(rt1) rt2_id = rt2_id or utils.get_node_id(rt2) failed = False break except: time.sleep(0.1) assert not failed assert rt1_id assert rt2_id print "RUNTIMES:", rt1_id, rt2_id _log.analyze("TESTRUN", "+ IDS", {'waited': 0.1 * i}) failed = True # Try 20 times waiting for storage to be connected caps1 = [] caps2 = [] for i in range(20): try: if len(caps1) != 2: caps1 = utils.get_index( rt1, "node/capabilities/calvinsys.native.python-json" )['result'] if len(caps2) != 2: caps2 = utils.get_index( rt2, "node/capabilities/calvinsys.native.python-json" )['result'] if len(caps1) == 2 and len(caps2) == 2: failed = False break else: time.sleep(0.1) except: time.sleep(0.1) assert not failed _log.analyze("TESTRUN", "+ STORAGE", {'waited': 0.1 * i}) # Now check for the values needed by this specific test caps = utils.get_index(rt1, 'node/capabilities/calvinsys.events.timer') assert rt1_id in caps['result'] _log.analyze("TESTRUN", "+ RT1 CAPS", {}) caps = utils.get_index(rt2, 'node/capabilities/calvinsys.events.timer') assert rt1_id in caps['result'] _log.analyze("TESTRUN", "+ RT2 CAPS", {}) assert utils.get_index( rt1, format_index_string([ 'node_name', { 'organization': 'org.testexample', 'name': 'testNode2' } ])) _log.analyze("TESTRUN", "+ RT1 INDEX", {}) assert utils.get_index( rt2, format_index_string([ 'node_name', { 'organization': 'org.testexample', 'name': 'testNode1' } ])) _log.analyze("TESTRUN", "+ RT2 INDEX", {})
def verify_storage(self): global rt1 global rt2 global rt3 rt1_id = utils.get_node_id(rt1) rt2_id = utils.get_node_id(rt2) rt3_id = utils.get_node_id(rt3) assert rt1_id assert rt2_id assert rt3_id print "RUNTIMES:", rt1_id, rt2_id, rt3_id #caps = utils.get_index(rt1, 'node/capabilities/calvinsys.events.timer') #assert set(caps['result']) >= set([rt1_id, rt2_id, rt3_id]) #caps = utils.get_index(rt2, 'node/capabilities/calvinsys.events.timer') #assert set(caps['result']) >= set([rt1_id, rt2_id, rt3_id]) #caps = utils.get_index(rt3, 'node/capabilities/calvinsys.events.timer') #assert set(caps['result']) >= set([rt1_id, rt2_id, rt3_id]) assert utils.get_index( rt2, format_index_string([ 'node_name', { 'organization': 'org.testexample', 'name': 'testNode1' } ])) assert utils.get_index( rt3, format_index_string([ 'node_name', { 'organization': 'org.testexample', 'name': 'testNode1' } ])) assert utils.get_index( rt1, format_index_string([ 'node_name', { 'organization': 'org.testexample', 'name': 'testNode2' } ])) assert utils.get_index( rt3, format_index_string([ 'node_name', { 'organization': 'org.testexample', 'name': 'testNode2' } ])) assert utils.get_index( rt1, format_index_string([ 'node_name', { 'organization': 'org.testexample', 'name': 'testNode3' } ])) assert utils.get_index( rt2, format_index_string([ 'node_name', { 'organization': 'org.testexample', 'name': 'testNode3' } ]))