def setUp(self):
    """Start three localhost runtimes and introduce each one to the other two."""
    calvin_uris = ["calvinip://localhost:%d" % port for port in (5000, 5001, 5002)]
    control_uris = ["http://localhost:%d" % port for port in (5003, 5004, 5005)]
    nodes = [dispatch_node(c_uri, ctrl) for c_uri, ctrl in zip(calvin_uris, control_uris)]
    self.rt1, self.rt2, self.rt3 = nodes
    # Every runtime is told about the two others.
    for idx, node in enumerate(nodes):
        peers = [uri for j, uri in enumerate(calvin_uris) if j != idx]
        utils.peer_setup(node, peers)
def setup(self, request):
    """Dispatch three attributed runtimes (module globals) and register teardown."""
    global rt1
    global rt2
    global rt3
    global test_script_dir

    def _attrs(owner, name, number):
        # Shared indexed_public attribute template for the test nodes.
        return {'indexed_public': {
            'owner': {'organization': 'org.testexample', 'personOrGroup': owner},
            'node_name': {'organization': 'org.testexample', 'name': name},
            'address': {'country': 'SE', 'locality': 'testCity',
                        'street': 'testStreet', 'streetNumber': number}}}

    rt1, _ = dispatch_node("calvinip://%s:5000" % (ip_addr,),
                           "http://%s:5003" % ip_addr,
                           attributes=_attrs('testOwner1', 'testNode1', 1))
    time.sleep(1)  # Fewer storage operations are dropped if we wait a bit
    rt2, _ = dispatch_node("calvinip://%s:5001" % (ip_addr,),
                           "http://%s:5004" % ip_addr,
                           attributes=_attrs('testOwner1', 'testNode2', 1))
    time.sleep(1)  # Fewer storage operations are dropped if we wait a bit
    rt3, _ = dispatch_node("calvinip://%s:5002" % (ip_addr,),
                           "http://%s:5005" % ip_addr,
                           attributes=_attrs('testOwner2', 'testNode3', 2))
    time.sleep(1)  # Fewer storage operations are dropped if we wait a bit
    test_script_dir = absolute_filename('scripts/')
    request.addfinalizer(self.teardown)
def setup_module(module): global runtime global runtimes global peerlist localhost = "calvinip://127.0.0.1:5000", "http://localhost:5001" remotehosts = [("calvinip://127.0.0.1:%d" % d, "http://localhost:%d" % (d+1)) for d in range(5002, 5005, 2)] # remotehosts = [("calvinip://127.0.0.1:5002", "http://localhost:5003")] for host in remotehosts: runtimes += [dispatch_node(host[0], host[1])] runtime = dispatch_node(localhost[0], localhost[1]) # FIXME When storage up and running peersetup not needed, but still useful during testing utils.peer_setup(runtime, [i[0] for i in remotehosts]) time.sleep(0.5) """ # FIXME Does not yet support peerlist try: self.peerlist = peerlist( self.runtime, self.runtime.id, len(remotehosts)) # Make sure all peers agree on network [peerlist(self.runtime, p, len(self.runtimes)) for p in self.peerlist] except: self.peerlist = [] """ peerlist = [rt.id for rt in runtimes] print "SETUP DONE ***", peerlist
def setup(self, request):
    """Dispatch two runtimes; node 2 gets a test actor store and no timers."""
    global rt1
    global rt2
    global test_script_dir

    def _attrs(owner, name, number):
        return {'indexed_public': {
            'owner': {'organization': 'org.testexample', 'personOrGroup': owner},
            'node_name': {'organization': 'org.testexample', 'name': name},
            'address': {'country': 'SE', 'locality': 'testCity',
                        'street': 'testStreet', 'streetNumber': number}}}

    rt1, _ = dispatch_node("calvinip://%s:5000" % (ip_addr,),
                           "http://%s:5003" % ip_addr,
                           attributes=_attrs('testOwner1', 'testNode1', 1))
    # Hack to get different config actorpath in actor store for each runtime
    # and blacklist timers on node2 (since dispatch will use the same global)
    # FIXME do it properly
    import calvin.actorstore.store
    import calvin.calvinsys
    import copy
    calvin.actorstore.store._conf = copy.deepcopy(calvin.actorstore.store._conf)
    calvin.actorstore.store._conf.config['global']['actor_paths'] = [absolute_filename('test_store')]
    # NOTE(review): the calvinsys config is deep-copied from the (already
    # patched) actor-store config rather than from calvin.calvinsys._conf —
    # presumably intentional so node 2 inherits the test actor path; confirm.
    calvin.calvinsys._conf = copy.deepcopy(calvin.actorstore.store._conf)
    calvin.calvinsys._conf.config['global']['capabilities_blacklist'] = ['calvinsys.events.timer']
    time.sleep(1)  # Fewer storage operations are dropped if we wait a bit
    rt2, _ = dispatch_node("calvinip://%s:5001" % (ip_addr,),
                           "http://%s:5004" % ip_addr,
                           attributes=_attrs('testOwner1', 'testNode2', 1))
    time.sleep(1)  # Fewer storage operations are dropped if we wait a bit
    test_script_dir = absolute_filename('scripts/')
    request.addfinalizer(self.teardown)
def setUp(self):
    """Launch three runtimes on ip_addr and cross-connect them as peers."""
    calvin_uris = ["calvinip://%s:%d" % (ip_addr, port) for port in (5000, 5001, 5002)]
    started = []
    for offset, uri in enumerate(calvin_uris):
        node, _ = dispatch_node(uri, "http://%s:%d" % (ip_addr, 5003 + offset))
        started.append(node)
    self.rt1, self.rt2, self.rt3 = started
    # Each runtime learns the calvinip URIs of the two others.
    for idx, node in enumerate(started):
        utils.peer_setup(node, [u for j, u in enumerate(calvin_uris) if j != idx])
def setup_module(module): global runtime global runtimes global peerlist localhost = "calvinip://127.0.0.1:5000", "http://localhost:5001" remotehosts = [("calvinip://127.0.0.1:%d" % d, "http://localhost:%d" % (d + 1)) for d in range(5002, 5005, 2)] # remotehosts = [("calvinip://127.0.0.1:5002", "http://localhost:5003")] for host in remotehosts: runtimes += [dispatch_node(host[0], host[1])] runtime = dispatch_node(localhost[0], localhost[1]) # FIXME When storage up and running peersetup not needed, but still useful during testing utils.peer_setup(runtime, [i[0] for i in remotehosts]) time.sleep(0.5) """ # FIXME Does not yet support peerlist try: self.peerlist = peerlist( self.runtime, self.runtime.id, len(remotehosts)) # Make sure all peers agree on network [peerlist(self.runtime, p, len(self.runtimes)) for p in self.peerlist] except: self.peerlist = [] """ peerlist = [rt.id for rt in runtimes] print "SETUP DONE ***", peerlist
def setUp(self):
    """Create the shared RequestHandler, start three runtimes, peer them up."""
    global request_handler
    request_handler = RequestHandler()
    calvin_uris = ["calvinip://%s:%d" % (ip_addr, port) for port in (5000, 5001, 5002)]
    nodes = []
    for offset, uri in enumerate(calvin_uris):
        # dispatch_node takes a *list* of calvin URIs in this API version.
        node, _ = dispatch_node([uri], "http://%s:%d" % (ip_addr, 5003 + offset))
        nodes.append(node)
    self.rt1, self.rt2, self.rt3 = nodes
    for idx, node in enumerate(nodes):
        request_handler.peer_setup(node, [u for j, u in enumerate(calvin_uris) if j != idx])
def test_dispatch_node(start_node, get_node_id):
    """dispatch_node must only look up the node id when barrier=True."""
    # Without barrier: the node is started but its id is never fetched.
    nodecontrol.dispatch_node(URI, CONTROL_URI, attributes={'a': 1}, barrier=False)
    assert start_node.called
    assert not get_node_id.called
    start_node.reset_mock()
    # With barrier: both the start and the id lookup must happen.
    nodecontrol.dispatch_node(URI, CONTROL_URI, attributes={'a': 1}, barrier=True)
    assert start_node.called
    assert get_node_id.called
def setup_module(module):
    """Start rt1-rt3 on localhost and make each aware of the other two."""
    global rt1
    global rt2
    global rt3
    uris = ["calvinip://localhost:%d" % port for port in (5000, 5001, 5002)]
    rt1 = dispatch_node(uris[0], "http://localhost:5003")
    rt2 = dispatch_node(uris[1], "http://localhost:5004")
    rt3 = dispatch_node(uris[2], "http://localhost:5005")
    for idx, node in enumerate((rt1, rt2, rt3)):
        utils.peer_setup(node, [u for j, u in enumerate(uris) if j != idx])
def setup_module(module):
    """Dispatch three localhost runtimes and fully mesh them as peers."""
    global rt1
    global rt2
    global rt3
    calvin_uris = ["calvinip://localhost:%d" % p for p in (5000, 5001, 5002)]
    nodes = [dispatch_node(calvin_uris[i], "http://localhost:%d" % (5003 + i))
             for i in range(3)]
    rt1, rt2, rt3 = nodes
    # Full mesh: every node learns the URIs of the other two.
    for idx, node in enumerate(nodes):
        utils.peer_setup(node, [u for j, u in enumerate(calvin_uris) if j != idx])
def setUp(self):
    """Start three runtimes registered with owner/name/address index paths."""
    specs = [
        ("calvinip://localhost:5000", "http://localhost:5003",
         ["node/affiliation/owner/org.testexample/testOwner1",
          "node/affiliation/name/org.testexample.testNode1",
          "node/address/testCountry/testCity/testStreet/1"]),
        ("calvinip://localhost:5001", "http://localhost:5004",
         ["node/affiliation/owner/org.testexample/testOwner1",
          "node/affiliation/name/org.testexample.testNode2",
          "node/address/testCountry/testCity/testStreet/1"]),
        ("calvinip://localhost:5002", "http://localhost:5005",
         ["node/affiliation/owner/org.testexample/testOwner2",
          "node/affiliation/name/org.testexample.testNode3",
          "node/address/testCountry/testCity/testStreet/2"]),
    ]
    self.rt1, self.rt2, self.rt3 = [
        dispatch_node(uri, ctrl, attributes=attr_list)
        for uri, ctrl, attr_list in specs
    ]
def runtime(uri, control_uri, attributes=None, dispatch=False):
    """Start a runtime in-process (start_node) or dispatched as a child.

    Returns the dispatched node handle when dispatch is True; start_node
    runs in-process and this function then returns None.
    """
    from calvin.utilities.nodecontrol import dispatch_node, start_node
    kwargs = {}
    if attributes:
        kwargs['attributes'] = attributes
    if not dispatch:
        start_node(uri, control_uri, **kwargs)
    else:
        return dispatch_node(uri=uri, control_uri=control_uri, **kwargs)
def setup_extra_local(ip_addr, request_handler, nbr, proxy_storage):
    """Start one extra test runtime (number `nbr`), optionally on proxy storage."""
    first_calvinip = "calvinip://%s:%d" % (ip_addr, 5200)
    calvin_uri = "calvinip://%s:%d" % (ip_addr, 5198 + nbr * 2)
    control_uri = "http://%s:%d" % (ip_addr, 5199 + nbr * 2)
    host = (calvin_uri, control_uri)
    attr_rest = {u'indexed_public': {u'node_name': {
        u'organization': u'com.ericsson',
        u'purpose': u'distributed-test',
        u'group': u'rest',
        u'name': u'runtime' + str(nbr)}}}
    if proxy_storage:
        # Point storage at the first runtime instead of running it locally.
        import calvin.runtime.north.storage
        calvin.runtime.north.storage._conf.set('global', 'storage_type', 'proxy')
        calvin.runtime.north.storage._conf.set('global', 'storage_proxy', first_calvinip)
    if True:
        # Always register the mock shadow capability for this runtime.
        import calvin.runtime.north.storage
        calvin.runtime.north.storage._conf.update('calvinsys', 'capabilities', {
            "mock.shadow": {
                "module": "mock.MockInputOutput",
                "attributes": {"data": []}
            }})
    _log.info("starting extra runtime %s %s" % host)
    rt, _ = dispatch_node([host[0]], host[1], attributes=attr_rest)
    _log.info("started extra runtime %s %s" % host)
    return rt
def setUp(self):
    """Start three attributed runtimes on ip_addr for the index tests."""
    def _attrs(owner, name, number):
        return {'indexed_public': {
            'owner': {'organization': 'org.testexample', 'personOrGroup': owner},
            'node_name': {'organization': 'org.testexample', 'name': name},
            'address': {'country': 'SE', 'locality': 'testCity',
                        'street': 'testStreet', 'streetNumber': number}}}

    self.rt1, _ = dispatch_node("calvinip://%s:5000" % (ip_addr,),
                                "http://%s:5003" % ip_addr,
                                attributes=_attrs('testOwner1', 'testNode1', 1))
    self.rt2, _ = dispatch_node("calvinip://%s:5001" % (ip_addr,),
                                "http://%s:5004" % ip_addr,
                                attributes=_attrs('testOwner1', 'testNode2', 1))
    self.rt3, _ = dispatch_node("calvinip://%s:5002" % (ip_addr,),
                                "http://%s:5005" % ip_addr,
                                attributes=_attrs('testOwner2', 'testNode3', 2))
def testNodeIndexMany(self):
    """ Since storage is eventually consistent, and we don't really know when,
        this test is quite loose on its asserts but shows some warnings when
        inconsistent. It is also extremly slow. """
    # 21 runtimes on consecutive even ports, each with its own owner attribute.
    self.hosts = [
        ("calvinip://%s:%d" % (ip_addr, d), "http://%s:%d" % (ip_addr, d + 1), "owner%d" % ((d - 5000) / 2))
        for d in range(5000, 5041, 2)
    ]
    self.rt = [
        dispatch_node([h[0]], h[1], attributes={"indexed_public": {"owner": {"personOrGroup": h[2]}}})[0]
        for h in self.hosts
    ]
    time.sleep(3)  # give the eventually-consistent storage time to settle
    owner = []
    # Each node should be findable through its own owner index entry.
    for i in range(len(self.hosts)):
        res = request_handler.get_index(
            self.rt[0], format_index_string({"owner": {"personOrGroup": self.hosts[i][2]}})
        )
        owner.append(res)
        assert set(res["result"]) == set([self.rt[i].id])
    # Aggregate index must contain no strangers; warn (not fail) if incomplete.
    owners = request_handler.get_index(self.rt[0], format_index_string({"owner": {}}))
    assert set(owners["result"]) <= set([r.id for r in self.rt])
    if not set(owners["result"]) >= set([r.id for r in self.rt]):
        warn("Not all nodes manage to reach the index %d of %d" % (len(owners["result"]), len(self.rt)))
    rt = self.rt[:]
    ids = [r.id for r in rt]
    hosts = self.hosts[:]
    # Kill one node; it should (eventually) drop out of both indexes.
    request_handler.quit(self.rt[10])
    del self.rt[10]
    del self.hosts[10]
    owners = request_handler.get_index(self.rt[0], format_index_string({"owner": {}}))
    assert set(owners["result"]) <= set(ids)
    if ids[10] in set(owners["result"]):
        warn("The removed node is still in the all owners set")
    removed_owner = request_handler.get_index(
        self.rt[0], format_index_string({"owner": {"personOrGroup": hosts[10][2]}})
    )
    assert not removed_owner["result"] or set(removed_owner["result"]) == set([ids[10]])
    if removed_owner["result"]:
        warn("The removed node is still in its own index")
    # Destroy a bunch of the nodes
    for _ in range(7):
        request_handler.quit(self.rt[10])
        del self.rt[10]
        del self.hosts[10]
    time.sleep(2)
    owners = request_handler.get_index(self.rt[0], format_index_string({"owner": {}}))
    assert set(owners["result"]) <= set(ids)
    l = len(set(owners["result"]))
    if l > (len(ids) - 8):
        warn("Did have %d nodes left even after removal of 8 from %d" % (l, len(ids)))
def runtime(uris, control_uri, attributes=None, dispatch=False): from calvin.utilities.nodecontrol import dispatch_node, start_node kwargs = {'attributes': attributes} if attributes else {} try: if dispatch: return dispatch_node(uris=uris, control_uri=control_uri, **kwargs) else: start_node(uris, control_uri, **kwargs) except Exception as e: print "Starting runtime failed:", e raise
def setUp(self):
    """Create the RequestHandler and start three attributed runtimes."""
    global request_handler
    request_handler = RequestHandler()

    def _attrs(owner, name, number):
        return {
            "indexed_public": {
                "owner": {"organization": "org.testexample", "personOrGroup": owner},
                "node_name": {"organization": "org.testexample", "name": name},
                "address": {"country": "SE", "locality": "testCity",
                            "street": "testStreet", "streetNumber": number},
            }
        }

    self.rt1, _ = dispatch_node(["calvinip://%s:5000" % (ip_addr,)],
                                "http://%s:5003" % ip_addr,
                                attributes=_attrs("testOwner1", "testNode1", 1))
    self.rt2, _ = dispatch_node(["calvinip://%s:5001" % (ip_addr,)],
                                "http://%s:5004" % ip_addr,
                                attributes=_attrs("testOwner1", "testNode2", 1))
    self.rt3, _ = dispatch_node(["calvinip://%s:5002" % (ip_addr,)],
                                "http://%s:5005" % ip_addr,
                                attributes=_attrs("testOwner2", "testNode3", 2))
def runtime(uri, control_uri, attributes=None, dispatch=False, authz_server=False): from calvin.utilities.nodecontrol import dispatch_node, start_node kwargs = {} if attributes: kwargs['attributes'] = attributes if authz_server: kwargs['authz_server'] = authz_server try: if dispatch: return dispatch_node(uri=uri, control_uri=control_uri, **kwargs) else: start_node(uri, control_uri, **kwargs) except Exception as e: print "Starting runtime failed:\n%s" % e return 1
def testNodeIndexMany(self):
    """ Since storage is eventually consistent, and we don't really know when,
        this test is quite loose on its asserts but shows some warnings when
        inconsistent. It is also extremly slow. """
    # 21 runtimes on consecutive even ports, each with its own owner attribute.
    self.hosts = [("calvinip://%s:%d" % (ip_addr, d), "http://%s:%d" % (ip_addr, d+1), "owner%d" % ((d-5000)/2)) for d in range(5000, 5041, 2)]
    self.rt = [dispatch_node(h[0], h[1], attributes={'indexed_public': {'owner':{'personOrGroup': h[2]}}})[0] for h in self.hosts]
    time.sleep(3)  # give the eventually-consistent storage time to settle
    owner = []
    # Each node should be findable through its own owner index entry.
    for i in range(len(self.hosts)):
        res = utils.get_index(self.rt[0], format_index_string({'owner':{'personOrGroup': self.hosts[i][2]}}))
        owner.append(res)
        assert(set(res['result']) == set([self.rt[i].id]))
    # Aggregate owner index must contain no strangers; warn if incomplete.
    owners = utils.get_index(self.rt[0], format_index_string({'owner':{}}))
    assert(set(owners['result']) <= set([r.id for r in self.rt]))
    if not set(owners['result']) >= set([r.id for r in self.rt]):
        warn("Not all nodes manage to reach the index %d of %d" % (len(owners['result']), len(self.rt)))
    rt = self.rt[:]
    ids = [r.id for r in rt]
    hosts = self.hosts[:]
    # Kill one node; it should eventually drop out of both indexes.
    utils.quit(self.rt[10])
    del self.rt[10]
    del self.hosts[10]
    owners = utils.get_index(self.rt[0], format_index_string({'owner':{}}))
    assert(set(owners['result']) <= set(ids))
    if ids[10] in set(owners['result']):
        warn("The removed node is still in the all owners set")
    removed_owner = utils.get_index(self.rt[0], format_index_string({'owner':{'personOrGroup': hosts[10][2]}}))
    assert(not removed_owner['result'] or set(removed_owner['result']) == set([ids[10]]))
    if removed_owner['result']:
        warn("The removed node is still in its own index")
    # Destroy a bunch of the nodes
    for _ in range(7):
        utils.quit(self.rt[10])
        del self.rt[10]
        del self.hosts[10]
        time.sleep(2)
        owners = utils.get_index(self.rt[0], format_index_string({'owner':{}}))
        assert(set(owners['result']) <= set(ids))
    l = len(set(owners['result']))
    if l > (len(ids)-8):
        warn("Did have %d nodes left even after removal of 8 from %d" % (l, len(ids)))
def testNodeIndexMany(self):
    """ Since storage is eventually consistent, and we don't really know when,
        this test is quite loose on its asserts but shows some warnings when
        inconsistent. It is also extremly slow. """
    # 21 runtimes on consecutive even ports, each tagged with its own owner.
    self.hosts = [("calvinip://127.0.0.1:%d" % d, "http://localhost:%d" % (d+1), "owner%d" % ((d-5000)/2)) for d in range(5000, 5041, 2)]
    self.rt = [dispatch_node(h[0], h[1], attributes=["node/affiliation/owner/%s" % h[2]]) for h in self.hosts]
    time.sleep(3)  # give the eventually-consistent storage time to settle
    owner = []
    # Each node should be findable through its own owner index entry.
    for i in range(len(self.hosts)):
        res = utils.get_index(self.rt[0], "node/affiliation/owner/%s" % self.hosts[i][2])
        owner.append(res)
        assert(set(res['result']) == set([self.rt[i].id]))
    owners = utils.get_index(self.rt[0], "node/affiliation/owner")
    assert(set(owners['result']) <= set([r.id for r in self.rt]))
    # BUG FIX: warn when NOT all nodes are in the aggregate index. The
    # original condition used == and therefore warned exactly when the
    # index was complete (compare the `not ... >=` form used elsewhere).
    if set(owners['result']) != set([r.id for r in self.rt]):
        warn("Not all nodes manage to reach the index")
    rt = self.rt[:]
    ids = [r.id for r in rt]
    hosts = self.hosts[:]
    # Kill one node; it should eventually drop out of both indexes.
    utils.quit(self.rt[10])
    del self.rt[10]
    del self.hosts[10]
    owners = utils.get_index(self.rt[0], "node/affiliation/owner")
    assert(set(owners['result']) <= set(ids))
    if ids[10] in set(owners['result']):
        warn("The removed node is still in the all owners set")
    removed_owner = utils.get_index(self.rt[0], "node/affiliation/owner/%s" % hosts[10][2])
    assert(not removed_owner['result'] or set(removed_owner['result']) == set([ids[10]]))
    if removed_owner['result']:
        warn("The removed node is still in its own index")
    # Destroy a bunch of the nodes
    for _ in range(7):
        utils.quit(self.rt[10])
        del self.rt[10]
        del self.hosts[10]
        time.sleep(2)
        owners = utils.get_index(self.rt[0], "node/affiliation/owner")
        assert(set(owners['result']) <= set(ids))
    l = len(set(owners['result']))
    if l > (len(ids)-8):
        warn("Did have %d nodes left even after removal of 8 from %d" % (l, len(ids)))
def setup_extra_local(ip_addr, request_handler, nbr, proxy_storage):
    """Dispatch one additional local runtime, wired to proxy storage if asked."""
    first_calvinip = "calvinip://%s:%d" % (ip_addr, 5200)
    host = ("calvinip://%s:%d" % (ip_addr, 5198 + nbr * 2),
            "http://%s:%d" % (ip_addr, 5199 + nbr * 2))
    attr_rest = {u'indexed_public': {u'node_name': {
        u'organization': u'com.ericsson',
        u'purpose': u'distributed-test',
        u'group': u'rest',
        u'name': u'runtime' + str(nbr)}}}
    if proxy_storage:
        # Use the first runtime as the storage proxy for this one.
        import calvin.runtime.north.storage
        calvin.runtime.north.storage._conf.set('global', 'storage_type', 'proxy')
        calvin.runtime.north.storage._conf.set('global', 'storage_proxy', first_calvinip)
    if True:
        # Always install the mock shadow capability on this runtime.
        import calvin.runtime.north.storage
        calvin.runtime.north.storage._conf.update('calvinsys', 'capabilities', {
            "mock.shadow": {
                "module": "mock.MockInputOutput",
                "attributes": {"data": []}
            }})
    _log.info("starting extra runtime %s %s" % host)
    rt, _ = dispatch_node([host[0]], host[1], attributes=attr_rest)
    _log.info("started extra runtime %s %s" % host)
    return rt
# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from calvin.utilities.nodecontrol import dispatch_node from calvin.utilities import utils import time # create two nodes, named node-1 and node-2, respectively node_1 = dispatch_node(uri="calvinip://localhost:5000", control_uri="http://localhost:5001", attributes={'name': 'node-1'}) node_2 = dispatch_node(uri="calvinip://localhost:5002", control_uri="http://localhost:5003", attributes={'name': 'node-2'}) # send 'new actor' command to node_2 counter_id = utils.new_actor(node_2, 'std.Counter', 'counter') # send 'new actor' command to node_1 output_id = utils.new_actor(node_1, 'io.StandardOut', 'output') # inform node_1 about peers utils.peer_setup(node_1, ["calvinip://localhost:5002"]) # allow network to stabilize
def runtime(uri, control_uri, attributes=None):
    """Dispatch a runtime, forwarding attributes only when they are truthy."""
    if attributes:
        return dispatch_node(uri=uri, control_uri=control_uri, attributes=attributes)
    return dispatch_node(uri=uri, control_uri=control_uri)
def setup(self, request):
    """Dispatch two runtimes; node 2 gets a test actor store and no timers."""
    global rt1
    global rt2
    global test_script_dir

    def _attrs(owner, name, number):
        return {'indexed_public': {
            'owner': {'organization': 'org.testexample', 'personOrGroup': owner},
            'node_name': {'organization': 'org.testexample', 'name': name},
            'address': {'country': 'SE', 'locality': 'testCity',
                        'street': 'testStreet', 'streetNumber': number}}}

    rt1, _ = dispatch_node("calvinip://%s:5000" % (ip_addr, ),
                           "http://%s:5003" % ip_addr,
                           attributes=_attrs('testOwner1', 'testNode1', 1))
    # Hack to get different config actorpath in actor store for each runtime
    # and blacklist timers on node2 (since dispatch will use the same global)
    # FIXME do it properly
    import calvin.actorstore.store
    import calvin.calvinsys
    import copy
    calvin.actorstore.store._conf = copy.deepcopy(calvin.actorstore.store._conf)
    calvin.actorstore.store._conf.config['global']['actor_paths'] = [
        absolute_filename('test_store')
    ]
    # NOTE(review): calvinsys config is deep-copied from the (already patched)
    # actor-store config rather than calvin.calvinsys._conf — presumably so
    # node 2 inherits the test actor path; confirm.
    calvin.calvinsys._conf = copy.deepcopy(calvin.actorstore.store._conf)
    calvin.calvinsys._conf.config['global']['capabilities_blacklist'] = [
        'calvinsys.events.timer'
    ]
    time.sleep(1)  # Fewer storage operations are dropped if we wait a bit
    rt2, _ = dispatch_node("calvinip://%s:5001" % (ip_addr, ),
                           "http://%s:5004" % ip_addr,
                           attributes=_attrs('testOwner1', 'testNode2', 1))
    time.sleep(1)  # Fewer storage operations are dropped if we wait a bit
    test_script_dir = absolute_filename('scripts/')
    request.addfinalizer(self.teardown)
# Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from calvin.utilities.nodecontrol import dispatch_node from calvin.requests.request_handler import RequestHandler import time # Get the handler for sending the API requests request_handler = RequestHandler() # create one node node_1 = dispatch_node(uri="calvinip://localhost:5000", control_uri="http://localhost:5001", attributes={'indexed_public': {'owner':{'organization': 'org.testexample', 'personOrGroup': 'me'}, 'node_name': {'organization': 'org.testexample', 'name': 'node-1'}}}) # send 'new actor' command to node counter_id = request_handler.new_actor(node_1, 'std.Counter', 'counter') # send 'new actor' command to node output_id = request_handler.new_actor(node_1, 'io.StandardOut', 'output') # send 'connect' command to node request_handler.connect(node_1, output_id, 'token', node_1.id, counter_id, 'integer') # runt app for 3 seconds time.sleep(3) # send quite to node
def setup_module(module):
    # Build the test network. Two modes:
    #  * remote: CALVIN_TEST_IP / CALVIN_TEST_UUID set — start one local
    #    runtime and attach to pre-started remote runtimes found via the index
    #  * local: spin up all runtimes on this host on fixed ports
    global runtime
    global runtimes
    global peerlist
    global kill_peers
    ip_addr = None
    try:
        ip_addr = os.environ["CALVIN_TEST_IP"]
        purpose = os.environ["CALVIN_TEST_UUID"]
    except KeyError:
        pass
    if ip_addr:
        remote_node_count = 2
        kill_peers = False
        test_peers = None
        # Ask the OS for two free TCP ports for the local runtime.
        import socket
        ports = []
        for a in range(2):
            s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            s.bind(('', 0))
            addr = s.getsockname()
            ports.append(addr[1])
            s.close()
        runtime, _ = dispatch_node("calvinip://%s:%s" % (ip_addr, ports[0]), "http://%s:%s" % (ip_addr, ports[1]))
        _log.debug("First runtime started, control http://%s:%s, calvinip://%s:%s" % (ip_addr, ports[1], ip_addr, ports[0]))
        interval = 0.5
        # Poll the index until all remote nodes registered for this test
        # 'purpose' have appeared (about 10 s worth of retries).
        for retries in range(1, 20):
            time.sleep(interval)
            _log.debug("Trying to get test nodes for 'purpose' %s" % purpose)
            test_peers = utils.get_index(runtime, format_index_string({'node_name': {'organization': 'com.ericsson', 'purpose': purpose}}))
            if not test_peers is None and not test_peers["result"] is None and \
               len(test_peers["result"]) == remote_node_count:
                test_peers = test_peers["result"]
                break
        if test_peers is None or len(test_peers) != remote_node_count:
            _log.debug("Failed to find all remote nodes within time, peers = %s" % test_peers)
            raise Exception("Not all nodes found dont run tests, peers = %s" % test_peers)
        # Wrap each discovered peer in an RT handle and add it to runtimes.
        test_peer2_id = test_peers[0]
        test_peer2 = utils.get_node(runtime, test_peer2_id)
        if test_peer2:
            runtime2 = utils.RT(test_peer2["control_uri"])
            runtime2.id = test_peer2_id
            runtime2.uri = test_peer2["uri"]
            runtimes.append(runtime2)
        test_peer3_id = test_peers[1]
        if test_peer3_id:
            test_peer3 = utils.get_node(runtime, test_peer3_id)
            if test_peer3:
                runtime3 = utils.RT(test_peer3["control_uri"])
                runtime3.id = test_peer3_id
                runtime3.uri = test_peer3["uri"]
                runtimes.append(runtime3)
    else:
        # Local mode: determine our address, then start helpers + main runtime.
        try:
            ip_addr = os.environ["CALVIN_TEST_LOCALHOST"]
        except:
            import socket
            ip_addr = socket.gethostbyname(socket.gethostname())
        localhost = "calvinip://%s:5000" % (ip_addr,), "http://localhost:5001"
        remotehosts = [("calvinip://%s:%d" % (ip_addr, d), "http://localhost:%d" % (d+1)) for d in range(5002, 5005, 2)]
        # remotehosts = [("calvinip://127.0.0.1:5002", "http://localhost:5003")]
        for host in remotehosts:
            runtimes += [dispatch_node(host[0], host[1])[0]]
        runtime, _ = dispatch_node(localhost[0], localhost[1])
        time.sleep(1)
        # FIXME When storage up and running peersetup not needed, but still useful during testing
        utils.peer_setup(runtime, [i[0] for i in remotehosts])
        time.sleep(0.5)
    """
    # FIXME Does not yet support peerlist
    try:
        self.peerlist = peerlist(self.runtime, self.runtime.id, len(remotehosts))
        # Make sure all peers agree on network
        [peerlist(self.runtime, p, len(self.runtimes)) for p in self.peerlist]
    except:
        self.peerlist = []
    """
    peerlist = [rt.control_uri for rt in runtimes]
    print "SETUP DONE ***", peerlist
def setup(self, request):
    """Dispatch three attributed runtimes (module globals) and register teardown."""
    global rt1
    global rt2
    global rt3
    global test_script_dir

    def _attrs(owner, name, number):
        # Common indexed_public attribute template for the three nodes.
        return {'indexed_public': {
            'owner': {'organization': 'org.testexample', 'personOrGroup': owner},
            'node_name': {'organization': 'org.testexample', 'name': name},
            'address': {'country': 'SE', 'locality': 'testCity',
                        'street': 'testStreet', 'streetNumber': number}}}

    rt1, _ = dispatch_node("calvinip://%s:5000" % (ip_addr, ),
                           "http://%s:5003" % ip_addr,
                           attributes=_attrs('testOwner1', 'testNode1', 1))
    time.sleep(1)  # Fewer storage operations are dropped if we wait a bit
    rt2, _ = dispatch_node("calvinip://%s:5001" % (ip_addr, ),
                           "http://%s:5004" % ip_addr,
                           attributes=_attrs('testOwner1', 'testNode2', 1))
    time.sleep(1)  # Fewer storage operations are dropped if we wait a bit
    rt3, _ = dispatch_node("calvinip://%s:5002" % (ip_addr, ),
                           "http://%s:5005" % ip_addr,
                           attributes=_attrs('testOwner2', 'testNode3', 2))
    time.sleep(1)  # Fewer storage operations are dropped if we wait a bit
    test_script_dir = absolute_filename('scripts/')
    request.addfinalizer(self.teardown)
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from calvin.utilities.nodecontrol import dispatch_node from calvin.utilities import utils import time # create two nodes, named node-1 and node-2, respectively node_1 = dispatch_node(uri="calvinip://localhost:5000", control_uri="http://localhost:5001", attributes={ 'indexed_public': { 'owner': { 'organization': 'org.testexample', 'personOrGroup': 'me' }, 'node_name': { 'organization': 'org.testexample', 'name': 'node-1' } } }) node_2 = dispatch_node(uri="calvinip://localhost:5002", control_uri="http://localhost:5003", attributes={ 'indexed_public': { 'owner': { 'organization': 'org.testexample', 'personOrGroup': 'me' }, 'node_name': {
def runtime(uri, control_uri, start_new):
    """Dispatch a brand-new runtime, or attach to one already running."""
    if not start_new:
        return node_control(control_uri)
    return dispatch_node(uri=uri, control_uri=control_uri)
def setup_local(ip_addr, request_handler, nbr, proxy_storage):
    # Start `nbr` local runtimes; the first keeps local storage (and acts as
    # storage proxy for the rest when proxy_storage is set), then peer them.
    def check_storage(rt, n, index):
        # Poll the runtime's index until at least n peers are visible,
        # treating a 404 as "no peers yet" and pricing timeouts heavily.
        index_string = format_index_string(index)
        retries = 0
        while retries < 120:
            try:
                retries += 1
                peers = request_handler.get_index(rt, index_string, timeout=60)
            except Exception as e:
                try:
                    notfound = e.message.startswith("404")
                except:
                    notfound = False
                if notfound:
                    peers = {'result': []}
                else:
                    _log.info("Timed out when finding peers retrying")
                    retries += 39  # A timeout counts more we don't want to wait 60*100 seconds
                    continue
            if len(peers['result']) >= n:
                _log.info("Found %d peers (%r)", len(peers['result']), peers['result'])
                return
            _log.info("Only %d peers found (%r)", len(peers['result']), peers['result'])
            time.sleep(1)
        # No more retrying
        raise Exception("Storage check failed, could not find peers.")

    hosts = [("calvinip://%s:%d" % (ip_addr, d), "http://%s:%d" % (ip_addr, d + 1))
             for d in range(5200, 5200 + 2 * nbr, 2)]
    runtimes = []
    host = hosts[0]
    attr = {u'indexed_public': {u'node_name': {u'organization': u'com.ericsson',
                                               u'purpose': u'distributed-test'}}}
    attr_first = copy.deepcopy(attr)
    attr_first['indexed_public']['node_name']['group'] = u'first'
    attr_first['indexed_public']['node_name']['name'] = u'runtime1'
    attr_rest = copy.deepcopy(attr)
    attr_rest['indexed_public']['node_name']['group'] = u'rest'
    _log.info("starting runtime %s %s" % host)
    if proxy_storage:
        # First runtime keeps local storage; the rest proxy through it.
        import calvin.runtime.north.storage
        calvin.runtime.north.storage._conf.set('global', 'storage_type', 'local')
    rt, _ = dispatch_node([host[0]], host[1], attributes=attr_first)
    check_storage(rt, len(runtimes) + 1, attr['indexed_public'])
    runtimes += [rt]
    if proxy_storage:
        import calvin.runtime.north.storage
        calvin.runtime.north.storage._conf.set('global', 'storage_type', 'proxy')
        calvin.runtime.north.storage._conf.set('global', 'storage_proxy', host[0])
    _log.info("started runtime %s %s" % host)
    count = 2
    for host in hosts[1:]:
        if nbr > 3:
            # Improve likelihood of success if runtimes started with a time interval
            time.sleep(10.0)
        _log.info("starting runtime %s %s" % host)
        attr_rt = copy.deepcopy(attr_rest)
        attr_rt['indexed_public']['node_name']['name'] = u'runtime' + str(count)
        count += 1
        rt, _ = dispatch_node([host[0]], host[1], attributes=attr_rt)
        check_storage(rt, len(runtimes) + 1, attr['indexed_public'])
        _log.info("started runtime %s %s" % host)
        runtimes += [rt]
    # Verify every runtime can see all the others in storage before peering.
    for host in hosts:
        check_storage(RT(host[1]), nbr, attr['indexed_public'])
    for host in hosts:
        request_handler.peer_setup(RT(host[1]), [h[0] for h in hosts if h != host])
    return runtimes
def setup_module(module):
    """pytest module-level setup: bind rt1/rt2/rt3 to three runtimes.

    Two modes, selected by the CALVIN_TEST_IP environment variable:

    * Remote: start one local runtime (rt1) on two ephemeral ports, then
      discover two already-running remote test nodes (tagged with the UUID in
      CALVIN_TEST_UUID) through storage and expose them as rt2/rt3.
      `kill_peers` is set False since those nodes are not ours to kill.
    * Local: dispatch all three runtimes on localhost ports 5000-5005 and
      peer them up with each other.
    """
    global rt1
    global rt2
    global rt3
    global kill_peers
    ip_addr = None
    try:
        ip_addr = os.environ["CALVIN_TEST_IP"]
        purpose = os.environ["CALVIN_TEST_UUID"]
        _log.debug("Running remote tests")
    except KeyError:
        _log.debug("Running local test")  # fixed typo: was "lcoal"
    if ip_addr:
        remote_node_count = 2
        kill_peers = False
        test_peers = None

        # Grab two free ephemeral ports by binding and immediately closing.
        # NOTE(review): the port could be reclaimed by another process before
        # dispatch_node binds it — inherent race, accepted for test code.
        import socket
        ports = []
        for a in range(2):
            s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            s.bind(('', 0))
            addr = s.getsockname()
            ports.append(addr[1])
            s.close()

        rt1, _ = dispatch_node("calvinip://%s:%s" % (ip_addr, ports[0]),
                               "http://%s:%s" % (ip_addr, ports[1]))
        _log.debug("First runtime started, control http://%s:%s, calvinip://%s:%s" %
                   (ip_addr, ports[1], ip_addr, ports[0]))

        # Poll storage until both remote test nodes have registered.
        interval = 0.5
        for retries in range(1, 20):
            time.sleep(interval)
            _log.debug("Trying to get test nodes for 'purpose' %s" % purpose)
            test_peers = utils.get_index(rt1, format_index_string({
                'node_name': {'organization': 'com.ericsson', 'purpose': purpose}
            }))
            if test_peers is not None and test_peers["result"] is not None and \
                    len(test_peers["result"]) == remote_node_count:
                test_peers = test_peers["result"]
                break
        if test_peers is None or len(test_peers) != remote_node_count:
            _log.debug("Failed to find all remote nodes within time, peers = %s" % test_peers)
            raise Exception("Not all nodes found dont run tests, peers = %s" % test_peers)
        _log.debug("All remote nodes found!")

        # Wrap the discovered nodes' control uris as RT handles.
        test_peer2_id = test_peers[0]
        test_peer2 = utils.get_node(rt1, test_peer2_id)
        if test_peer2:
            rt2 = utils.RT(test_peer2["control_uri"])
            rt2.id = test_peer2_id
            rt2.uri = test_peer2["uri"]
        test_peer3_id = test_peers[1]
        if test_peer3_id:
            test_peer3 = utils.get_node(rt1, test_peer3_id)
            if test_peer3:
                rt3 = utils.RT(test_peer3["control_uri"])
                rt3.id = test_peer3_id
                rt3.uri = test_peer3["uri"]
    else:
        try:
            ip_addr = os.environ["CALVIN_TEST_LOCALHOST"]
        except KeyError:  # was a bare except:
            import socket
            ip_addr = socket.gethostbyname(socket.gethostname())
        rt1, _ = dispatch_node("calvinip://%s:5000" % (ip_addr,), "http://localhost:5003")
        rt2, _ = dispatch_node("calvinip://%s:5001" % (ip_addr,), "http://localhost:5004")
        rt3, _ = dispatch_node("calvinip://%s:5002" % (ip_addr,), "http://localhost:5005")
        time.sleep(.4)
        # Fully mesh the three runtimes.
        utils.peer_setup(rt1, ["calvinip://%s:5001" % (ip_addr,), "calvinip://%s:5002" % (ip_addr, )])
        utils.peer_setup(rt2, ["calvinip://%s:5000" % (ip_addr,), "calvinip://%s:5002" % (ip_addr, )])
        utils.peer_setup(rt3, ["calvinip://%s:5000" % (ip_addr,), "calvinip://%s:5001" % (ip_addr, )])
        time.sleep(.4)
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from calvin.utilities.nodecontrol import dispatch_node from calvin.utilities import utils import time # create two nodes, named node-1 and node-2, respectively node_1 = dispatch_node(uri="calvinip://localhost:5000", control_uri="http://localhost:5001", attributes={'name': 'node-1'}) node_2 = dispatch_node(uri="calvinip://localhost:5002", control_uri="http://localhost:5003", attributes={'name': 'node-2'}) # send 'new actor' command to node_2 counter_id = utils.new_actor(node_2, 'std.Counter', 'counter') # send 'new actor' command to node_1 output_id = utils.new_actor(node_1, 'io.StandardOut', 'output') # inform node_1 about peers utils.peer_setup(node_1, ["calvinip://localhost:5002"]) # allow network to stabilize time.sleep(1.0) # send connect command to node_1
def setup_local(ip_addr, request_handler, nbr, proxy_storage):
    """Start `nbr` local runtimes on consecutive ports and mesh them together.

    Ports: calvinip on 5200, 5202, ... and the control uri on the calvinip
    port + 1, all bound to `ip_addr`.

    The first runtime always uses local storage; when `proxy_storage` is true
    the remaining runtimes proxy their storage through the first one.  After
    the first runtime is started the mock.shadow capability is registered in
    the calvinsys configuration for subsequently dispatched runtimes.

    Returns the list of started runtimes.  Raises Exception if any runtime
    fails to show up in storage within the retry budget.
    """
    def check_storage(rt, n, index):
        # Poll storage through `rt` until at least `n` peers are registered
        # under `index`, or the retry budget (~120 units) is exhausted.
        index_string = format_index_string(index)
        retries = 0
        while retries < 120:
            try:
                retries += 1
                peers = request_handler.get_index(rt, index_string, timeout=60)
            except Exception as e:
                # A 404 just means the index is not populated yet; anything
                # else is treated as a timeout.
                try:
                    notfound = e.message.startswith("404")
                except AttributeError:
                    # e.message missing or not a string (was a bare except:)
                    notfound = False
                if notfound:
                    peers = {'result': []}
                else:
                    _log.info("Timed out when finding peers retrying")
                    retries += 39  # A timeout counts more we don't want to wait 60*100 seconds
                    continue
            if len(peers['result']) >= n:
                _log.info("Found %d peers (%r)", len(peers['result']), peers['result'])
                return
            _log.info("Only %d peers found (%r)", len(peers['result']), peers['result'])
            time.sleep(1)
        # No more retrying
        raise Exception("Storage check failed, could not find peers.")

    # (calvinip_uri, control_uri) pairs; control port is calvinip port + 1.
    hosts = [
        ("calvinip://%s:%d" % (ip_addr, d), "http://%s:%d" % (ip_addr, d+1))
        for d in range(5200, 5200 + 2 * nbr, 2)
    ]
    runtimes = []
    host = hosts[0]

    # Shared attribute template; the first runtime gets group 'first', the
    # rest get group 'rest' and a per-runtime name.
    attr = {u'indexed_public': {u'node_name': {u'organization': u'com.ericsson',
                                               u'purpose': u'distributed-test'}}}
    attr_first = copy.deepcopy(attr)
    attr_first['indexed_public']['node_name']['group'] = u'first'
    attr_first['indexed_public']['node_name']['name'] = u'runtime1'
    attr_rest = copy.deepcopy(attr)
    attr_rest['indexed_public']['node_name']['group'] = u'rest'

    _log.info("starting runtime %s %s" % host)
    if proxy_storage:
        # First node keeps a local store; the others will proxy through it.
        import calvin.runtime.north.storage
        calvin.runtime.north.storage._conf.set('global', 'storage_type', 'local')
    rt, _ = dispatch_node([host[0]], host[1], attributes=attr_first)
    check_storage(rt, len(runtimes)+1, attr['indexed_public'])
    runtimes += [rt]
    if proxy_storage:
        # Point all subsequently dispatched runtimes at the first node's storage.
        import calvin.runtime.north.storage
        calvin.runtime.north.storage._conf.set('global', 'storage_type', 'proxy')
        calvin.runtime.north.storage._conf.set('global', 'storage_proxy', host[0])
    _log.info("started runtime %s %s" % host)

    # Register the mock.shadow capability for subsequently dispatched
    # runtimes (was wrapped in a vacuous `if True:` block).
    import calvin.runtime.north.storage
    calvin.runtime.north.storage._conf.update('calvinsys', 'capabilities',
        {"mock.shadow": {
            "module": "mock.MockInputOutput",
            "attributes": {"data": []}
        }})

    count = 2
    for host in hosts[1:]:
        if nbr > 3:
            # Improve likelihood of success if runtimes started with a time interval
            time.sleep(10.0)
        _log.info("starting runtime %s %s" % host)
        attr_rt = copy.deepcopy(attr_rest)
        attr_rt['indexed_public']['node_name']['name'] = u'runtime' + str(count)
        count += 1
        rt, _ = dispatch_node([host[0]], host[1], attributes=attr_rt)
        check_storage(rt, len(runtimes)+1, attr['indexed_public'])
        _log.info("started runtime %s %s" % host)
        runtimes += [rt]

    # Verify every runtime sees the full mesh, then introduce each runtime
    # to all of its peers (skipped in the single-runtime case).
    if len(hosts) > 1:
        for host in hosts:
            _log.info("Checking storage for {}".format(host))
            check_storage(RT(host[1]), nbr, attr['indexed_public'])
        for host in hosts:
            _log.info("Peer setup for {}".format(host))
            request_handler.peer_setup(RT(host[1]), [h[0] for h in hosts if h != host])
    return runtimes
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from calvin.utilities.nodecontrol import dispatch_node from calvin.utilities import utils import time # create two nodes, named node-1 and node-2, respectively node_1 = dispatch_node(uri="calvinip://localhost:5000", control_uri="http://localhost:5001", attributes=["node/affiliation/owner/me", "node/affiliation/name/node-1"]) node_2 = dispatch_node(uri="calvinip://localhost:5002", control_uri="http://localhost:5003", attributes=["node/affiliation/owner/me", "node/affiliation/name/node-2"]) # send 'new actor' command to node_2 counter_id = utils.new_actor(node_2, 'std.Counter', 'counter') # send 'new actor' command to node_1 output_id = utils.new_actor(node_1, 'io.StandardOut', 'output') # inform node_1 about peers utils.peer_setup(node_1, ["calvinip://localhost:5002"]) # allow network to stabilize time.sleep(1.0)