Example 1
 def verify_storage(self):
     global rt1
     global rt2
     rt1_id = utils.get_node_id(rt1)
     rt2_id = utils.get_node_id(rt2)
     assert rt1_id
     assert rt2_id
     print "RUNTIMES:", rt1_id, rt2_id
     _log.analyze("TESTRUN", "+ IDS", {})
     caps = utils.get_index(rt1, 'node/capabilities/calvinsys.events.timer')
     assert rt1_id in caps['result']
     _log.analyze("TESTRUN", "+ RT1 CAPS", {})
     caps = utils.get_index(rt2, 'node/capabilities/calvinsys.events.timer')
     assert rt1_id in caps['result']
     _log.analyze("TESTRUN", "+ RT2 CAPS", {})
     assert utils.get_index(
         rt1,
         format_index_string([
             'node_name', {
                 'organization': 'org.testexample',
                 'name': 'testNode2'
             }
         ]))
     _log.analyze("TESTRUN", "+ RT1 INDEX", {})
     assert utils.get_index(
         rt2,
         format_index_string([
             'node_name', {
                 'organization': 'org.testexample',
                 'name': 'testNode1'
             }
         ]))
     _log.analyze("TESTRUN", "+ RT2 INDEX", {})
Example 2
    def testNodeIndexThree(self):
        time.sleep(4)

        print self.rt1.id, self.rt2.id, self.rt3.id

        owner1 = request_handler.get_index(
            self.rt1, format_index_string({"owner": {"organization": "org.testexample", "personOrGroup": "testOwner1"}})
        )
        assert set(owner1["result"]) == set([self.rt1.id, self.rt2.id])

        owner2 = request_handler.get_index(
            self.rt1, format_index_string({"owner": {"organization": "org.testexample", "personOrGroup": "testOwner2"}})
        )
        assert set(owner2["result"]) == set([self.rt3.id])

        owners = request_handler.get_index(
            self.rt1, format_index_string({"owner": {"organization": "org.testexample"}})
        )
        assert set(owners["result"]) == set([self.rt1.id, self.rt2.id, self.rt3.id])

        names = request_handler.get_index(self.rt1, format_index_string({"node_name": {}}))
        assert set(names["result"]) == set([self.rt1.id, self.rt2.id, self.rt3.id])

        addr2 = request_handler.get_index(
            self.rt1,
            format_index_string(
                {"address": {"country": "SE", "locality": "testCity", "street": "testStreet", "streetNumber": 2}}
            ),
        )
        assert set(addr2["result"]) == set([self.rt3.id])
Example 3
    def testNodeIndexMany(self):
        """ Since storage is eventually consistent, and we don't really know when,
            this test is quite loose on its asserts but shows some warnings when
            inconsistent. It is also extremly slow.
        """
        self.hosts = [
            ("calvinip://%s:%d" % (ip_addr, d), "http://%s:%d" % (ip_addr, d + 1), "owner%d" % ((d - 5000) / 2))
            for d in range(5000, 5041, 2)
        ]
        self.rt = [
            dispatch_node([h[0]], h[1], attributes={"indexed_public": {"owner": {"personOrGroup": h[2]}}})[0]
            for h in self.hosts
        ]
        time.sleep(3)
        owner = []
        for i in range(len(self.hosts)):
            res = request_handler.get_index(
                self.rt[0], format_index_string({"owner": {"personOrGroup": self.hosts[i][2]}})
            )
            owner.append(res)
            assert set(res["result"]) == set([self.rt[i].id])

        owners = request_handler.get_index(self.rt[0], format_index_string({"owner": {}}))
        assert set(owners["result"]) <= set([r.id for r in self.rt])
        if not set(owners["result"]) >= set([r.id for r in self.rt]):
            warn("Not all nodes manage to reach the index %d of %d" % (len(owners["result"]), len(self.rt)))
        rt = self.rt[:]
        ids = [r.id for r in rt]
        hosts = self.hosts[:]
        request_handler.quit(self.rt[10])
        del self.rt[10]
        del self.hosts[10]
        owners = request_handler.get_index(self.rt[0], format_index_string({"owner": {}}))
        assert set(owners["result"]) <= set(ids)
        if ids[10] in set(owners["result"]):
            warn("The removed node is still in the all owners set")

        removed_owner = request_handler.get_index(
            self.rt[0], format_index_string({"owner": {"personOrGroup": hosts[10][2]}})
        )
        assert not removed_owner["result"] or set(removed_owner["result"]) == set([ids[10]])
        if removed_owner["result"]:
            warn("The removed node is still in its own index")

        # Destroy a bunch of the nodes
        for _ in range(7):
            request_handler.quit(self.rt[10])
            del self.rt[10]
            del self.hosts[10]

        time.sleep(2)
        owners = request_handler.get_index(self.rt[0], format_index_string({"owner": {}}))
        assert set(owners["result"]) <= set(ids)
        l = len(set(owners["result"]))
        if l > (len(ids) - 8):
            warn("Did have %d nodes left even after removal of 8 from %d" % (l, len(ids)))
Example 4
 def verify_storage(self):
     global rt1
     global rt2
     global rt3
     rt1_id = None
     rt2_id = None
     rt3_id = None
     failed = True
     # Try 30 times waiting for control API to be up and running
     for i in range(30):
         try:
             rt1_id = rt1_id or request_handler.get_node_id(rt1)
             rt2_id = rt2_id or request_handler.get_node_id(rt2)
             rt3_id = rt3_id or request_handler.get_node_id(rt3)
             failed = False
             break
         except:
             time.sleep(0.1)
     assert not failed
     assert rt1_id
     assert rt2_id
     assert rt3_id
     print "RUNTIMES:", rt1_id, rt2_id, rt3_id
     _log.analyze("TESTRUN", "+ IDS", {'waited': 0.1*i})
     failed = True
     # Try 30 times waiting for storage to be connected
     caps1 = []
     caps2 = []
     caps3 = []
     rt_ids = set([rt1_id, rt2_id, rt3_id])
     for i in range(30):
         try:
             if not (rt1_id in caps1 and rt2_id in caps1 and rt3_id in caps1):
                 caps1 = request_handler.get_index(rt1, "node/capabilities/calvinsys.native.python-json")['result']
             if not (rt1_id in caps2 and rt2_id in caps2 and rt3_id in caps2):
                 caps2 = request_handler.get_index(rt2, "node/capabilities/calvinsys.native.python-json")['result']
             if not (rt1_id in caps3 and rt2_id in caps3 and rt3_id in caps3):
                 caps3 = request_handler.get_index(rt3, "node/capabilities/calvinsys.native.python-json")['result']
             if rt_ids <= set(caps1) and rt_ids <= set(caps2) and rt_ids <= set(caps3):
                 failed = False
                 break
             else:
                 time.sleep(0.1)
         except:
             time.sleep(0.1)
     assert not failed
     _log.analyze("TESTRUN", "+ STORAGE", {'waited': 0.1*i})
     assert request_handler.get_index(rt1, format_index_string(['node_name', {'organization': 'org.testexample', 'name': 'testNode2'}]))
     _log.analyze("TESTRUN", "+ RT1 INDEX", {})
     assert request_handler.get_index(rt2, format_index_string(['node_name', {'organization': 'org.testexample', 'name': 'testNode3'}]))
     _log.analyze("TESTRUN", "+ rt2 INDEX", {})
     assert request_handler.get_index(rt3, format_index_string(['node_name', {'organization': 'org.testexample', 'name': 'testNode1'}]))
     _log.analyze("TESTRUN", "+ rt3 INDEX", {})
Example 5
 def verify_storage(self):
     global rt1
     global rt2
     rt1_id = None
     rt2_id = None
     failed = True
     # Try 10 times waiting for control API to be up and running
     for i in range(10):
         try:
             rt1_id = rt1_id or utils.get_node_id(rt1)
             rt2_id = rt2_id or utils.get_node_id(rt2)
             failed = False
             break
         except:
             time.sleep(0.1)
     assert not failed
     assert rt1_id
     assert rt2_id
     print "RUNTIMES:", rt1_id, rt2_id
     _log.analyze("TESTRUN", "+ IDS", {'waited': 0.1*i})
     failed = True
     # Try 20 times waiting for storage to be connected
     caps1 = []
     caps2 = []
     for i in range(20):
         try:
             if len(caps1) != 2:
                 caps1 = utils.get_index(rt1, "node/capabilities/calvinsys.native.python-json")['result']
             if len(caps2) != 2:
                 caps2 = utils.get_index(rt2, "node/capabilities/calvinsys.native.python-json")['result']
             if len(caps1) == 2 and len(caps2) == 2:
                 failed = False
                 break
             else:
                 time.sleep(0.1)
         except:
             time.sleep(0.1)
     assert not failed
     _log.analyze("TESTRUN", "+ STORAGE", {'waited': 0.1*i})
     # Now check for the values needed by this specific test
     caps = utils.get_index(rt1, 'node/capabilities/calvinsys.events.timer')
     assert rt1_id in caps['result']
     _log.analyze("TESTRUN", "+ RT1 CAPS", {})
     caps = utils.get_index(rt2, 'node/capabilities/calvinsys.events.timer')
     assert rt1_id in caps['result']
     _log.analyze("TESTRUN", "+ RT2 CAPS", {})
     assert utils.get_index(rt1, format_index_string(['node_name', {'organization': 'org.testexample', 'name': 'testNode2'}]))
     _log.analyze("TESTRUN", "+ RT1 INDEX", {})
     assert utils.get_index(rt2, format_index_string(['node_name', {'organization': 'org.testexample', 'name': 'testNode1'}]))
     _log.analyze("TESTRUN", "+ RT2 INDEX", {})
Example 6
    def testNodeIndexThree(self):
        time.sleep(4)

        print self.rt1.id, self.rt2.id, self.rt3.id

        owner1 = request_handler.get_index(
            self.rt1,
            format_index_string({
                'owner': {
                    'organization': 'org.testexample',
                    'personOrGroup': 'testOwner1'
                }
            }))
        assert (set(owner1['result']) == set([self.rt1.id, self.rt2.id]))

        owner2 = request_handler.get_index(
            self.rt1,
            format_index_string({
                'owner': {
                    'organization': 'org.testexample',
                    'personOrGroup': 'testOwner2'
                }
            }))
        assert (set(owner2['result']) == set([self.rt3.id]))

        owners = request_handler.get_index(
            self.rt1,
            format_index_string({'owner': {
                'organization': 'org.testexample'
            }}))
        assert (set(owners['result']) == set(
            [self.rt1.id, self.rt2.id, self.rt3.id]))

        names = request_handler.get_index(
            self.rt1, format_index_string({'node_name': {}}))
        assert (set(names['result']) == set(
            [self.rt1.id, self.rt2.id, self.rt3.id]))

        addr2 = request_handler.get_index(
            self.rt1,
            format_index_string({
                'address': {
                    'country': 'SE',
                    'locality': 'testCity',
                    'street': 'testStreet',
                    'streetNumber': 2
                }
            }))
        assert (set(addr2['result']) == set([self.rt3.id]))
Example 7
def setup_distributed(control_uri, purpose, request_handler):
    from functools import partial

    remote_node_count = 3
    test_peers = None
    runtimes = []

    runtime = RT(control_uri)
    index = {"node_name": {"organization": "com.ericsson", "purpose": purpose}}
    index_string = format_index_string(index)

    get_index = partial(request_handler.get_index, runtime, index_string)

    def criteria(peers):
        return peers and peers.get("result", None) and len(peers["result"]) >= remote_node_count

    test_peers = retry(10, get_index, criteria, "Not all nodes found")
    test_peers = test_peers["result"]

    for peer_id in test_peers:
        peer = request_handler.get_node(runtime, peer_id)
        if not peer:
            _log.warning("Runtime '%r' peer '%r' does not exist" % (runtime, peer_id, ))
            continue
        rt = RT(peer["control_uri"])
        rt.id = peer_id
        rt.uris = peer["uri"]
        runtimes.append(rt)

    return runtimes
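The retry helper used here comes from the test utilities and is not shown; judging from the call site, retry(10, get_index, criteria, "Not all nodes found"), it keeps calling the zero-argument function until the criteria function accepts the result or the attempts run out. A rough, hypothetical equivalent, where the delay and exception handling are assumptions, might look like:

import time

def retry(retries, func, criteria, error_msg, delay=1):
    # Call func until criteria(result) is truthy; raise with error_msg when retries are exhausted.
    result = None
    for _ in range(retries):
        try:
            result = func()
            if criteria(result):
                return result
        except Exception:
            pass
        time.sleep(delay)
    raise Exception("%s (last result: %r)" % (error_msg, result))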
Example 8
 def check_storage(rt, n, index):
     index_string = format_index_string(index)
     retries = 0
     while retries < 120:
         try:
             retries += 1
             peers = request_handler.get_index(rt, index_string, timeout=60)
         except Exception as e:
             try:
                 notfound = e.message.startswith("404")
             except:
                 notfound = False
             if notfound:
                 peers = {'result': []}
             else:
                 _log.info("Timed out when finding peers retrying")
                 retries += 39  # A timeout counts more we don't want to wait 60*100 seconds
                 continue
         if len(peers['result']) >= n:
             _log.info("Found %d peers (%r)", len(peers['result']),
                       peers['result'])
             return
         _log.info("Only %d peers found (%r)", len(peers['result']),
                   peers['result'])
         time.sleep(1)
     # No more retrying
     raise Exception("Storage check failed, could not find peers.")
Example 9
def setup_distributed(control_uri, purpose, request_handler):
    from functools import partial

    remote_node_count = 3
    test_peers = None
    runtimes = []

    runtime = RT(control_uri)
    index = {"node_name": {"organization": "com.ericsson", "purpose": purpose}}
    index_string = format_index_string(index)

    get_index = partial(request_handler.get_index, runtime, index_string)

    def criteria(peers):
        return peers and peers.get(
            "result", None) and len(peers["result"]) >= remote_node_count

    test_peers = retry(10, get_index, criteria, "Not all nodes found")
    test_peers = test_peers["result"]

    for peer_id in test_peers:
        peer = request_handler.get_node(runtime, peer_id)
        if not peer:
            _log.warning("Runtime '%r' peer '%r' does not exist" % (
                runtime,
                peer_id,
            ))
            continue
        rt = RT(peer["control_uri"])
        rt.id = peer_id
        rt.uris = peer["uri"]
        runtimes.append(rt)

    return runtimes
Example 10
def req_op(node, cb, index, actor_id=None, component=None):
    """ Lockup index if found callback cb is called with the node ids else None
        actor_id is the actor that this is requested for
        component contains a list of all actor_ids of the component if the actor belongs to a component else None
    """
    index_str = format_index_string(index)
    # Utilize a cache of the storage response for components
    # This is mainly to illustrate how to utilize the component and actor_id params for rules where this is needed
    if component:
        cache_key = (index_str, tuple(component))
        if cache_key in _cache:
            # Add in our callback
            _cache[cache_key][0][actor_id] = cb
            if _cache[cache_key][1]:
                # Already got value call cb directly
                value = _cache[cache_key][2]
                cb(value if value else None)
                if len(_cache[cache_key][0]) == len(component):
                    # Done then clean cache
                    _cache.pop(cache_key) 
            return
        else:
            _cache[cache_key] = ({actor_id:cb}, False, None)
    else:
        cache_key = None

    node.storage.get_index(index_str, CalvinCB(_req_op_cb, cache_key=cache_key,
                                                                actor_id=actor_id, cb=cb, component=component))
Example 11
 def check_storage(rt, n, index):
     index_string = format_index_string(index)
     retries = 0
     while retries < 120:
         try:
             retries += 1
             peers = request_handler.get_index(rt, index_string, timeout=60)
         except Exception as e:
             try:
                 notfound = e.message.startswith("404")
             except:
                 notfound = False
             if notfound:
                 peers={'result':[]}
             else:
                 _log.info("Timed out when finding peers retrying")
                 retries += 39  # A timeout counts more we don't want to wait 60*100 seconds
                 continue
         if len(peers['result']) >= n:
             _log.info("Found %d peers (%r)", len(peers['result']), peers['result'])
             return
         _log.info("Only %d peers found (%r)", len(peers['result']), peers['result'])
         time.sleep(1)
     # No more retrying
     raise Exception("Storage check failed, could not find peers.")
Example 12
    def testNodeIndexMany(self):
        """ Since storage is eventually consistent, and we don't really know when,
            this test is quite loose on its asserts but shows some warnings when
            inconsistent. It is also extremly slow.
        """
        self.hosts = [("calvinip://%s:%d" % (ip_addr, d), "http://%s:%d" % (ip_addr, d+1), "owner%d" % ((d-5000)/2)) for d in range(5000, 5041, 2)]
        self.rt = [dispatch_node(h[0], h[1], attributes={'indexed_public': {'owner':{'personOrGroup': h[2]}}})[0] for h in self.hosts]
        time.sleep(3)
        owner = []
        for i in range(len(self.hosts)):
            res = utils.get_index(self.rt[0], format_index_string({'owner':{'personOrGroup': self.hosts[i][2]}}))
            owner.append(res)
            assert(set(res['result']) == set([self.rt[i].id]))

        owners = utils.get_index(self.rt[0], format_index_string({'owner':{}}))
        assert(set(owners['result']) <= set([r.id for r in self.rt]))
        if not set(owners['result']) >= set([r.id for r in self.rt]):
            warn("Not all nodes manage to reach the index %d of %d" % (len(owners['result']), len(self.rt)))
        rt = self.rt[:]
        ids = [r.id for r in rt]
        hosts = self.hosts[:]
        utils.quit(self.rt[10])
        del self.rt[10]
        del self.hosts[10]
        owners = utils.get_index(self.rt[0], format_index_string({'owner':{}}))
        assert(set(owners['result']) <= set(ids))
        if ids[10] in set(owners['result']):
            warn("The removed node is still in the all owners set")

        removed_owner = utils.get_index(self.rt[0], format_index_string({'owner':{'personOrGroup': hosts[10][2]}}))
        assert(not removed_owner['result'] or set(removed_owner['result']) == set([ids[10]]))
        if removed_owner['result']:
            warn("The removed node is still in its own index")

        # Destroy a bunch of the nodes
        for _ in range(7):
            utils.quit(self.rt[10])
            del self.rt[10]
            del self.hosts[10]

        time.sleep(2)
        owners = utils.get_index(self.rt[0], format_index_string({'owner':{}}))
        assert(set(owners['result']) <= set(ids))
        l = len(set(owners['result']))
        if l > (len(ids)-8):
            warn("Did have %d nodes left even after removal of 8 from %d" % (l, len(ids)))
Example 13
def req_op(node, index, actor_id=None, component=None):
    """ Lockup index returns a dynamic iterable which 
        actor_id is the actor that this is requested for
        component contains a list of all actor_ids of the component if the actor belongs to a component else None
    """
    index_str = format_index_string(index)
    it = node.storage.get_index_iter(index_str)
    it.set_name("attr_match")
    return it
Example 14
def req_op(node, index, actor_id=None, component=None):
    """ Lockup index returns a dynamic iterable which 
        actor_id is the actor that this is requested for
        component contains a list of all actor_ids of the component if the actor belongs to a component else None
    """
    index_str = format_index_string(index)
    it = node.storage.get_index_iter(index_str)
    it.set_name("attr_match")
    return it
Example 15
 def verify_storage(self):
     global rt1
     global rt2
     rt1_id = utils.get_node_id(rt1)
     rt2_id = utils.get_node_id(rt2)
     assert rt1_id
     assert rt2_id
     print "RUNTIMES:", rt1_id, rt2_id
     _log.analyze("TESTRUN", "+ IDS", {})
     caps = utils.get_index(rt1, 'node/capabilities/calvinsys.events.timer')
     assert rt1_id in caps['result']
     _log.analyze("TESTRUN", "+ RT1 CAPS", {})
     caps = utils.get_index(rt2, 'node/capabilities/calvinsys.events.timer')
     assert rt1_id in caps['result']
     _log.analyze("TESTRUN", "+ RT2 CAPS", {})
     assert utils.get_index(rt1, format_index_string(['node_name', {'organization': 'org.testexample', 'name': 'testNode2'}]))
     _log.analyze("TESTRUN", "+ RT1 INDEX", {})
     assert utils.get_index(rt2, format_index_string(['node_name', {'organization': 'org.testexample', 'name': 'testNode1'}]))
     _log.analyze("TESTRUN", "+ RT2 INDEX", {})
Example 16
    def testNodeIndexThree(self):
        time.sleep(4)

        print self.rt1.id, self.rt2.id, self.rt3.id

        owner1 = utils.get_index(self.rt1, format_index_string({'owner':{'organization': 'org.testexample', 'personOrGroup': 'testOwner1'}}))
        assert(set(owner1['result']) == set([self.rt1.id, self.rt2.id]))

        owner2 = utils.get_index(self.rt1, format_index_string({'owner':{'organization': 'org.testexample', 'personOrGroup': 'testOwner2'}}))
        assert(set(owner2['result']) == set([self.rt3.id]))

        owners = utils.get_index(self.rt1, format_index_string({'owner':{'organization': 'org.testexample'}}))
        assert(set(owners['result']) == set([self.rt1.id, self.rt2.id, self.rt3.id]))

        names = utils.get_index(self.rt1, format_index_string({'node_name':{}}))
        assert(set(names['result']) == set([self.rt1.id, self.rt2.id, self.rt3.id]))

        addr2 = utils.get_index(self.rt1, format_index_string({'address': {'country': 'SE', 'locality': 'testCity', 'street': 'testStreet', 'streetNumber': 2}}))
        assert(set(addr2['result']) == set([self.rt3.id]))
Example 17
def handle_post_node_attribute_indexed_public_cb(self, key, value, handle, connection, attributes):
    try:
        indexed_public = []
        for attr in attributes.items():
            indexed_string = format_index_string(attr)
            indexed_public.append(indexed_string)
            self.node.storage.add_index(indexed_string, key)
        value['attributes']['indexed_public'] = indexed_public
        self.node.storage.set(prefix="node-", key=key, value=value,
            cb=CalvinCB(self.index_cb, handle, connection))
    except Exception as e:
        _log.error("Failed to update node %s", e)
        self.send_response(handle, connection, None, status=calvinresponse.INTERNAL_ERROR)
Example 18
def control_storage(args):
    from calvin.utilities.attribute_resolver import format_index_string
    import json
    if args.cmd == 'get_index':
        try:
            index = json.loads(args.index)
        except:
            raise Exception("Malformed JSON index string:\n%s" % args.index)
        formated_index = format_index_string(index)
        return utils.get_index(args.node, formated_index)
    elif args.cmd == 'raw_get_index':
        try:
            index = json.loads(args.index)
        except:
            raise Exception("Malformed JSON index string:\n%s" % args.index)
        return utils.get_index(args.node, index)
Example 19
def control_storage(args):
    from calvin.utilities.attribute_resolver import format_index_string
    import json
    if args.cmd == 'get_index':
        try:
            index = json.loads(args.index)
        except:
            raise Exception("Malformed JSON index string:\n%s" % args.index)
        formated_index = format_index_string(index)
        return utils.get_index(args.node, formated_index)
    elif args.cmd == 'raw_get_index':
        try:
            index = json.loads(args.index)
        except:
            raise Exception("Malformed JSON index string:\n%s" % args.index)
        return utils.get_index(args.node, index)
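As a hypothetical illustration of the two command paths (rt1 stands for whatever runtime handle utils.get_index expects; the Namespace fields simply mirror the args attributes used above):

from argparse import Namespace

# rt1 is assumed to be an already running runtime handle, as in the other examples.
# 'get_index' takes a JSON attribute structure and formats it before the lookup:
print control_storage(Namespace(cmd='get_index', node=rt1,
                                index='{"owner": {"organization": "org.testexample"}}'))
# 'raw_get_index' passes the decoded JSON value to get_index unformatted:
print control_storage(Namespace(cmd='raw_get_index', node=rt1,
                                index='"node/capabilities/calvinsys.events.timer"'))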
Example 20
def security_verify_storage(rt, request_handler):
    from functools import partial
    _log.info("Let's verify storage, rt={}".format(rt))
    rt_id = [None] * len(rt)
    # Wait for control API to be up and running
    for j in range(len(rt)):
        rt_id[j] = retry(30, partial(request_handler.get_node_id, rt[j]),
                         lambda _: True, "Failed to get node id")
    _log.info("RUNTIMES:{}".format(rt_id))
    # Wait for storage to be connected
    index = "node/capabilities/calvinsys.native.python-json"
    failed = True

    def test():
        count = [0] * len(rt)
        caps = [0] * len(rt)
        # Loop through all runtimes to ask them which runtime nodes they know with calvinsys.native.python-json
        for j in range(len(rt)):
            caps[j] = retry(30, partial(request_handler.get_index, rt[j],
                                        index), lambda _: True,
                            "Failed to get index")['result']
            # Add the known nodes to the statistics of how many responses contain keys from that node
            for k in range(len(rt)):
                count[k] = count[k] + caps[j].count(rt_id[k])
        _log.info("rt_ids={}\n\tcount={}".format(rt_id, count))
        return count

    criterion = lambda count: all(x >= min(5, len(rt)) for x in count)
    retry(30, test, criterion, "Storage has not spread as it should")
    # Loop through all runtimes and make sure they can look up all other runtimes
    for runtime1 in rt:
        for runtime2 in rt:
            node_name = runtime2.attributes['indexed_public']['node_name']
            index = format_index_string(['node_name', node_name])
            retry(10, partial(request_handler.get_index, runtime1, index),
                  lambda _: True, "Failed to get index")
    return True
Example 21
 def verify_storage(self):
     global rt1
     global rt2
     global rt3
     rt1_id = utils.get_node_id(rt1)
     rt2_id = utils.get_node_id(rt2)
     rt3_id = utils.get_node_id(rt3)
     assert rt1_id
     assert rt2_id
     assert rt3_id
     print "RUNTIMES:", rt1_id, rt2_id, rt3_id
     #caps = utils.get_index(rt1, 'node/capabilities/calvinsys.events.timer')
     #assert set(caps['result']) >= set([rt1_id, rt2_id, rt3_id])
     #caps = utils.get_index(rt2, 'node/capabilities/calvinsys.events.timer')
     #assert set(caps['result']) >= set([rt1_id, rt2_id, rt3_id])
     #caps = utils.get_index(rt3, 'node/capabilities/calvinsys.events.timer')
     #assert set(caps['result']) >= set([rt1_id, rt2_id, rt3_id])
     assert utils.get_index(rt2, format_index_string(['node_name', {'organization': 'org.testexample', 'name': 'testNode1'}]))
     assert utils.get_index(rt3, format_index_string(['node_name', {'organization': 'org.testexample', 'name': 'testNode1'}]))
     assert utils.get_index(rt1, format_index_string(['node_name', {'organization': 'org.testexample', 'name': 'testNode2'}]))
     assert utils.get_index(rt3, format_index_string(['node_name', {'organization': 'org.testexample', 'name': 'testNode2'}]))
     assert utils.get_index(rt1, format_index_string(['node_name', {'organization': 'org.testexample', 'name': 'testNode3'}]))
     assert utils.get_index(rt2, format_index_string(['node_name', {'organization': 'org.testexample', 'name': 'testNode3'}]))
Example 22
def req_op(node, cb, actor_id=None, component=None):
    """ Lockup all nodes that have registered a node_name """
    node.storage.get_index(format_index_string(("node_name", {})),
                           CalvinCB(_req_op_cb, cb=cb))
Example 23
    def verify_storage(self):
        global rt
        global request_handler
        global storage_verified
        _log.info("storage_verified={}".format(storage_verified))
        if not storage_verified:
            _log.info("Let's verify storage, rt={}".format(rt))
            rt_id = [None] * len(rt)
            failed = True
            # Try 30 times waiting for control API to be up and running
            for i in range(30):
                try:
                    for j in range(len(rt)):
                        rt_id[j] = rt_id[j] or request_handler.get_node_id(
                            rt[j])
                    failed = False
                    break
                except Exception as err:
                    _log.error(
                        "request handler failed getting node_id from runtime, attempt={}, err={}"
                        .format(j, err))
                    time.sleep(0.1)
            assert not failed
            for id in rt_id:
                assert id
            _log.info("RUNTIMES:{}".format(rt_id))
            _log.analyze("TESTRUN", "+ IDS", {'waited': 0.1 * i})
            failed = True
            # Try 10 times waiting for storage to be connected
            for i in range(10):
                _log.info(
                    "-----------------Round {}-----------------".format(i))
                count = [0] * len(rt)
                try:
                    caps = [0] * len(rt)
                    for j in range(len(rt)):
                        caps[j] = request_handler.get_index(
                            rt[j],
                            "node/capabilities/calvinsys.native.python-json"
                        )['result']
                        for k in range(len(rt)):
                            count[k] = count[k] + caps[j].count(rt_id[k])
                    _log.info(
                        "\n\trt_ids={}\n\tcount={}\n\tcaps0={}\n\tcaps1={}\n\tcaps2={}\n\tcaps3={}\n\tcaps4={}\n\tcaps5={}"
                        .format(rt_id, count, caps[0], caps[1], caps[2],
                                caps[3], caps[4], caps[5]))
                    if all(x >= 4 for x in count):
                        failed = False
                        break
                    else:
                        time.sleep(0.2)
                except Exception as err:
                    _log.error(
                        "exception from request_handler.get_index, err={}".
                        format(err))
                    time.sleep(0.1)
            assert not failed

            # Loop through all runtimes and check that they can look up the node_name of all other runtimes
            try:
                for runtime in rt:
                    for rt_attribute in rt_attributes:
                        node_name = rt_attribute['indexed_public']['node_name']
                        _log.debug("get_index node_name={} from rt={}".format(
                            node_name, runtime))
                        response = request_handler.get_index(
                            runtime,
                            format_index_string(['node_name', node_name]))
                        _log.info("\tresponse={}".format(response))
                        assert (response)
                storage_verified = True
            except Exception as err:
                _log.error("Exception err={}".format(err))
                raise
        else:
            _log.info("Storage has already been verified")
Example 24
def req_op(node, actor_id=None, component=None):
    """ Returns an infinite dynamic iterable """
    index_str = format_index_string(("node_name", {}))
    it = node.storage.get_index_iter(index_str)
    it.set_name("all_match")
    return it
Example 25
def req_op(node, actor_id=None, component=None):
    """ Returns an infinite dynamic iterable """
    index_str = format_index_string(("node_name", {}))
    it = node.storage.get_index_iter(index_str)
    it.set_name("all_match")
    return it
Example 26
 def verify_storage(self):
     global rt1
     global rt2
     global rt3
     global rt1_id
     global rt2_id
     global rt3_id
     rt1_id = None
     rt2_id = None
     rt3_id = None
     failed = True
     # Try 30 times waiting for control API to be up and running
     for i in range(30):
         try:
             rt1_id = rt1_id or request_handler.get_node_id(rt1)
             rt2_id = rt2_id or request_handler.get_node_id(rt2)
             rt3_id = rt3_id or request_handler.get_node_id(rt3)
             failed = False
             break
         except:
             time.sleep(0.1)
     assert not failed
     assert rt1_id
     assert rt2_id
     assert rt3_id
     print "RUNTIMES:", rt1_id, rt2_id, rt3_id
     _log.analyze("TESTRUN", "+ IDS", {'waited': 0.1 * i})
     failed = True
     # Try 30 times waiting for storage to be connected
     caps1 = []
     caps2 = []
     caps3 = []
     rt_ids = set([rt1_id, rt2_id, rt3_id])
     for i in range(300):
         try:
             if not (rt1_id in caps1 and rt2_id in caps1
                     and rt3_id in caps1):
                 caps1 = request_handler.get_index(
                     rt1, "node/capabilities/calvinsys.native.python-json"
                 )['result']
             if not (rt1_id in caps2 and rt2_id in caps2
                     and rt3_id in caps2):
                 caps2 = request_handler.get_index(
                     rt2, "node/capabilities/calvinsys.native.python-json"
                 )['result']
             if not (rt1_id in caps3 and rt2_id in caps3
                     and rt3_id in caps3):
                 caps3 = request_handler.get_index(
                     rt3, "node/capabilities/calvinsys.native.python-json"
                 )['result']
             if rt_ids <= set(caps1) and rt_ids <= set(
                     caps2) and rt_ids <= set(caps3):
                 failed = False
                 break
             else:
                 time.sleep(0.1)
         except:
             time.sleep(0.1)
     if failed:
         _log.analyze("TESTRUN", "+ Failed connecting secure DHT", {
             'caps1': caps1,
             'caps2': caps2,
             'caps3': caps3
         })
     assert not failed
     _log.analyze("TESTRUN", "+ STORAGE", {'waited': 0.1 * i})
     # Now check for the values needed by this specific test
     caps = request_handler.get_index(
         rt1, 'node/capabilities/calvinsys.events.timer')
     assert rt1_id in caps['result']
     _log.analyze("TESTRUN", "+ RT1 CAPS", {})
     caps = request_handler.get_index(
         rt2, 'node/capabilities/calvinsys.events.timer')
     assert rt1_id in caps['result']
     _log.analyze("TESTRUN", "+ RT2 CAPS", {})
     assert request_handler.get_index(
         rt1, format_index_string(['node_name', {
             'name': 'node2'
         }]))
     _log.analyze("TESTRUN", "+ RT1 INDEX", {})
     assert request_handler.get_index(
         rt2, format_index_string(['node_name', {
             'name': 'node1'
         }]))
     _log.analyze("TESTRUN", "+ RT2 INDEX", {})
Example 27
def setup_module(module):
    global rt1
    global rt2
    global rt3
    global kill_peers
    ip_addr = None

    try:
        ip_addr = os.environ["CALVIN_TEST_IP"]
        purpose = os.environ["CALVIN_TEST_UUID"]
        _log.debug("Running remote tests")
    except KeyError:
        _log.debug("Running lcoal test")
        pass

    if ip_addr:
        remote_node_count = 2
        kill_peers = False
        test_peers = None

        import socket
        ports=[]
        for a in range(2):
            s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            s.bind(('', 0))
            addr = s.getsockname()
            ports.append(addr[1])
            s.close()

        rt1,_ = dispatch_node("calvinip://%s:%s" % (ip_addr, ports[0]), "http://%s:%s" % (ip_addr, ports[1]))

        _log.debug("First runtime started, control http://%s:%s, calvinip://%s:%s" % (ip_addr, ports[1], ip_addr, ports[0]))

        interval = 0.5
        for retries in range(1,20):
            time.sleep(interval)
            _log.debug("Trying to get test nodes for 'purpose' %s" % purpose)
            test_peers = utils.get_index(rt1, format_index_string({'node_name':
                                                                    {'organization': 'com.ericsson',
                                                                     'purpose': purpose}
                                                                  }))
            if not test_peers is None and not test_peers["result"] is None and \
                    len(test_peers["result"]) == remote_node_count:
                test_peers = test_peers["result"]
                break

        if test_peers is None or len(test_peers) != remote_node_count:
            _log.debug("Failed to find all remote nodes within time, peers = %s" % test_peers)
            raise Exception("Not all nodes found dont run tests, peers = %s" % test_peers)

        _log.debug("All remote nodes found!")

        test_peer2_id = test_peers[0]
        test_peer2 = utils.get_node(rt1, test_peer2_id)
        if test_peer2:
            rt2 = utils.RT(test_peer2["control_uri"])
            rt2.id = test_peer2_id
            rt2.uri = test_peer2["uri"]
        test_peer3_id = test_peers[1]
        if test_peer3_id:
            test_peer3 = utils.get_node(rt1, test_peer3_id)
            if test_peer3:
                rt3 = utils.RT(test_peer3["control_uri"])
                rt3.id = test_peer3_id
                rt3.uri = test_peer3["uri"]
    else:
        try:
            ip_addr = os.environ["CALVIN_TEST_LOCALHOST"]
        except:
            import socket
            ip_addr = socket.gethostbyname(socket.gethostname())
        rt1,_ = dispatch_node("calvinip://%s:5000" % (ip_addr,), "http://localhost:5003")
        rt2,_ = dispatch_node("calvinip://%s:5001" % (ip_addr,), "http://localhost:5004")
        rt3,_ = dispatch_node("calvinip://%s:5002" % (ip_addr,), "http://localhost:5005")
        time.sleep(.4)
        utils.peer_setup(rt1, ["calvinip://%s:5001" % (ip_addr,), "calvinip://%s:5002" % (ip_addr, )])
        utils.peer_setup(rt2, ["calvinip://%s:5000" % (ip_addr,), "calvinip://%s:5002" % (ip_addr, )])
        utils.peer_setup(rt3, ["calvinip://%s:5000" % (ip_addr,), "calvinip://%s:5001" % (ip_addr, )])
        time.sleep(.4)
Example 28
    def verify_storage(self):
        global rt
        global request_handler
        global storage_verified
        _log.info("storage_verified={}".format(storage_verified))
        if not storage_verified:
            _log.info("Let's verify storage, rt={}".format(rt))
            rt_id = [None] * len(rt)
            failed = True
            # Try 30 times waiting for control API to be up and running
            for i in range(30):
                try:
                    for j in range(len(rt)):
                        rt_id[j] = rt_id[j] or request_handler.get_node_id(
                            rt[j])
                    failed = False
                    break
                except Exception as err:
                    _log.error(
                        "request handler failed getting node_id from runtime, attempt={}, err={}"
                        .format(j, err))
                    time.sleep(0.1)
            assert not failed
            for id in rt_id:
                assert id
            _log.info("RUNTIMES:{}".format(rt_id))
            _log.analyze("TESTRUN", "+ IDS", {'waited': 0.1 * i})
            failed = True
            # Try 100 times waiting for storage to be connected
            for i in range(100):
                _log.info(
                    "-----------------Round {}-----------------".format(i))
                count = [0] * len(rt)
                try:
                    caps = [0] * len(rt)
                    for j in range(len(rt)):
                        caps[j] = request_handler.get_index(
                            rt[j],
                            "node/capabilities/calvinsys.native.python-json"
                        )['result']
                        for k in range(len(rt)):
                            count[k] = count[k] + caps[j].count(rt_id[k])
                    _log.info(
                        "\n\trt_ids={}\n\tcount={}\n\tcaps0={}\n\tcaps1={}\n\tcaps2={}\n\tcaps3={}\n\tcaps4={}\n\tcaps5={}"
                        .format(rt_id, count, caps[0], caps[1], caps[2],
                                caps[3], caps[4], caps[5]))
                    if all(x >= 4 for x in count):
                        failed = False
                        break
                    else:
                        time.sleep(0.2)
                except Exception as err:
                    _log.error(
                        "exception from request_handler.get_index, err={}".
                        format(err))
                    time.sleep(0.1)
            assert not failed
            try:
                _log.analyze("TESTRUN", "+ STORAGE", {'waited': 0.1 * i})
                assert request_handler.get_index(
                    rt[0],
                    format_index_string([
                        'node_name', {
                            'organization': 'org.testexample',
                            'name': 'CA'
                        }
                    ]))
                _log.analyze("TESTRUN", "+ RT0 INDEX", {})
                assert request_handler.get_index(
                    rt[1],
                    format_index_string([
                        'node_name', {
                            'organization': 'org.testexample',
                            'name': 'testNode1'
                        }
                    ]))
                _log.analyze("TESTRUN", "+ RT1 INDEX", {})
                assert request_handler.get_index(
                    rt[2],
                    format_index_string([
                        'node_name', {
                            'organization': 'org.testexample',
                            'name': 'testNode2'
                        }
                    ]))
                _log.analyze("TESTRUN", "+ RT2 INDEX", {})
                assert request_handler.get_index(
                    rt[3],
                    format_index_string([
                        'node_name', {
                            'organization': 'org.testexample',
                            'name': 'testNode3'
                        }
                    ]))
                _log.analyze("TESTRUN", "+ RT3 INDEX", {})
                assert request_handler.get_index(
                    rt[4],
                    format_index_string([
                        'node_name', {
                            'organization': 'org.testexample',
                            'name': 'testNode4'
                        }
                    ]))
                _log.analyze("TESTRUN", "+ RT4 INDEX", {})
                assert request_handler.get_index(
                    rt[5],
                    format_index_string([
                        'node_name', {
                            'organization': 'org.testexample',
                            'name': 'testNode5'
                        }
                    ]))
                _log.analyze("TESTRUN", "+ RT5 INDEX", {})

                storage_verified = True
            except Exception as err:
                _log.error("Exception err={}".format(err))
                raise
        else:
            _log.info("Storage has already been verified")
Example 29
 def verify_storage(self):
     global rt1
     global rt2
     rt1_id = None
     rt2_id = None
     failed = True
     # Try 10 times waiting for control API to be up and running
     for i in range(10):
         try:
             rt1_id = rt1_id or utils.get_node_id(rt1)
             rt2_id = rt2_id or utils.get_node_id(rt2)
             failed = False
             break
         except:
             time.sleep(0.1)
     assert not failed
     assert rt1_id
     assert rt2_id
     print "RUNTIMES:", rt1_id, rt2_id
     _log.analyze("TESTRUN", "+ IDS", {'waited': 0.1 * i})
     failed = True
     # Try 20 times waiting for storage to be connected
     caps1 = []
     caps2 = []
     for i in range(20):
         try:
             if len(caps1) != 2:
                 caps1 = utils.get_index(
                     rt1, "node/capabilities/calvinsys.native.python-json"
                 )['result']
             if len(caps2) != 2:
                 caps2 = utils.get_index(
                     rt2, "node/capabilities/calvinsys.native.python-json"
                 )['result']
             if len(caps1) == 2 and len(caps2) == 2:
                 failed = False
                 break
             else:
                 time.sleep(0.1)
         except:
             time.sleep(0.1)
     assert not failed
     _log.analyze("TESTRUN", "+ STORAGE", {'waited': 0.1 * i})
     # Now check for the values needed by this specific test
     caps = utils.get_index(rt1, 'node/capabilities/calvinsys.events.timer')
     assert rt1_id in caps['result']
     _log.analyze("TESTRUN", "+ RT1 CAPS", {})
     caps = utils.get_index(rt2, 'node/capabilities/calvinsys.events.timer')
     assert rt1_id in caps['result']
     _log.analyze("TESTRUN", "+ RT2 CAPS", {})
     assert utils.get_index(
         rt1,
         format_index_string([
             'node_name', {
                 'organization': 'org.testexample',
                 'name': 'testNode2'
             }
         ]))
     _log.analyze("TESTRUN", "+ RT1 INDEX", {})
     assert utils.get_index(
         rt2,
         format_index_string([
             'node_name', {
                 'organization': 'org.testexample',
                 'name': 'testNode1'
             }
         ]))
     _log.analyze("TESTRUN", "+ RT2 INDEX", {})
Example 30
def req_op(node, cb, actor_id=None, component=None):
    """ Lockup all nodes that have registered a node_name """
    node.storage.get_index(format_index_string(("node_name", {})), CalvinCB(_req_op_cb, cb=cb))
Example 31
def setup_module(module):
    global runtime
    global runtimes
    global peerlist
    global kill_peers
    ip_addr = None

    try:
        ip_addr = os.environ["CALVIN_TEST_IP"]
        purpose = os.environ["CALVIN_TEST_UUID"]
    except KeyError:
        pass

    if ip_addr:
        remote_node_count = 2
        kill_peers = False
        test_peers = None


        import socket
        ports=[]
        for a in range(2):
            s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            s.bind(('', 0))
            addr = s.getsockname()
            ports.append(addr[1])
            s.close()

        runtime,_ = dispatch_node("calvinip://%s:%s" % (ip_addr, ports[0]), "http://%s:%s" % (ip_addr, ports[1]))

        _log.debug("First runtime started, control http://%s:%s, calvinip://%s:%s" % (ip_addr, ports[1], ip_addr, ports[0]))

        interval = 0.5
        for retries in range(1,20):
            time.sleep(interval)
            _log.debug("Trying to get test nodes for 'purpose' %s" % purpose)
            test_peers = utils.get_index(runtime, format_index_string({'node_name':
                                                                         {'organization': 'com.ericsson',
                                                                          'purpose': purpose}
                                                                      }))
            if not test_peers is None and not test_peers["result"] is None and \
                    len(test_peers["result"]) == remote_node_count:
                test_peers = test_peers["result"]
                break

        if test_peers is None or len(test_peers) != remote_node_count:
            _log.debug("Failed to find all remote nodes within time, peers = %s" % test_peers)
            raise Exception("Not all nodes found dont run tests, peers = %s" % test_peers)

        test_peer2_id = test_peers[0]
        test_peer2 = utils.get_node(runtime, test_peer2_id)
        if test_peer2:
            runtime2 = utils.RT(test_peer2["control_uri"])
            runtime2.id = test_peer2_id
            runtime2.uri = test_peer2["uri"]
            runtimes.append(runtime2)
        test_peer3_id = test_peers[1]
        if test_peer3_id:
            test_peer3 = utils.get_node(runtime, test_peer3_id)
            if test_peer3:
                runtime3 = utils.RT(test_peer3["control_uri"])
                runtime3.id = test_peer3_id
                runtime3.uri = test_peer3["uri"]
                runtimes.append(runtime3)
    else:
        try:
            ip_addr = os.environ["CALVIN_TEST_LOCALHOST"]
        except:
            import socket
            ip_addr = socket.gethostbyname(socket.gethostname())
        localhost = "calvinip://%s:5000" % (ip_addr,), "http://localhost:5001"
        remotehosts = [("calvinip://%s:%d" % (ip_addr, d), "http://localhost:%d" % (d+1)) for d in range(5002, 5005, 2)]
        # remotehosts = [("calvinip://127.0.0.1:5002", "http://localhost:5003")]

        for host in remotehosts:
            runtimes += [dispatch_node(host[0], host[1])[0]]

        runtime, _ = dispatch_node(localhost[0], localhost[1])

        time.sleep(1)

        # FIXME When storage is up and running, peer setup is not needed, but it is still useful during testing
        utils.peer_setup(runtime, [i[0] for i in remotehosts])

        time.sleep(0.5)
        """

        # FIXME Does not yet support peerlist
        try:
            self.peerlist = peerlist(
                self.runtime, self.runtime.id, len(remotehosts))

            # Make sure all peers agree on network
            [peerlist(self.runtime, p, len(self.runtimes)) for p in self.peerlist]
        except:
            self.peerlist = []
        """

    peerlist = [rt.control_uri for rt in runtimes]
    print "SETUP DONE ***", peerlist
Example 32
 def verify_storage(self):
     global rt1
     global rt2
     global rt3
     global rt1_id
     global rt2_id
     global rt3_id
     rt1_id = None
     rt2_id = None
     rt3_id = None
     failed = True
     # Try 30 times waiting for control API to be up and running
     for i in range(30):
         try:
             rt1_id = rt1_id or request_handler.get_node_id(rt1)
             rt2_id = rt2_id or request_handler.get_node_id(rt2)
             rt3_id = rt3_id or request_handler.get_node_id(rt3)
             failed = False
             break
         except:
             time.sleep(0.1)
     assert not failed
     assert rt1_id
     assert rt2_id
     assert rt3_id
     print "RUNTIMES:", rt1_id, rt2_id, rt3_id
     _log.analyze("TESTRUN", "+ IDS", {'waited': 0.1*i})
     failed = True
     # Try 30 times waiting for storage to be connected
     caps1 = []
     caps2 = []
     caps3 = []
     rt_ids = set([rt1_id, rt2_id, rt3_id])
     for i in range(300):
         try:
             if not (rt1_id in caps1 and rt2_id in caps1 and rt3_id in caps1):
                 caps1 = request_handler.get_index(rt1, "node/capabilities/calvinsys.native.python-json")['result']
             if not (rt1_id in caps2 and rt2_id in caps2 and rt3_id in caps2):
                 caps2 = request_handler.get_index(rt2, "node/capabilities/calvinsys.native.python-json")['result']
             if not (rt1_id in caps3 and rt2_id in caps3 and rt3_id in caps3):
                 caps3 = request_handler.get_index(rt3, "node/capabilities/calvinsys.native.python-json")['result']
             if rt_ids <= set(caps1) and rt_ids <= set(caps2) and rt_ids <= set(caps3):
                 failed = False
                 break
             else:
                 time.sleep(0.1)
         except:
             time.sleep(0.1)
     if failed:
         _log.analyze("TESTRUN", "+ Failed connecting secure DHT", {'caps1': caps1, 'caps2': caps2, 'caps3': caps3})
     assert not failed
     _log.analyze("TESTRUN", "+ STORAGE", {'waited': 0.1*i})
     # Now check for the values needed by this specific test
     caps = request_handler.get_index(rt1, 'node/capabilities/calvinsys.events.timer')
     assert rt1_id in caps['result']
     _log.analyze("TESTRUN", "+ RT1 CAPS", {})
     caps = request_handler.get_index(rt2, 'node/capabilities/calvinsys.events.timer')
     assert rt1_id in caps['result']
     _log.analyze("TESTRUN", "+ RT2 CAPS", {})
     assert request_handler.get_index(rt1, format_index_string(['node_name', {'name': 'node2'}]))
     _log.analyze("TESTRUN", "+ RT1 INDEX", {})
     assert request_handler.get_index(rt2, format_index_string(['node_name', {'name': 'node1'}]))
     _log.analyze("TESTRUN", "+ RT2 INDEX", {})
Example 33
 def verify_storage(self):
     global rt1
     global rt2
     global rt3
     rt1_id = utils.get_node_id(rt1)
     rt2_id = utils.get_node_id(rt2)
     rt3_id = utils.get_node_id(rt3)
     assert rt1_id
     assert rt2_id
     assert rt3_id
     print "RUNTIMES:", rt1_id, rt2_id, rt3_id
     #caps = utils.get_index(rt1, 'node/capabilities/calvinsys.events.timer')
     #assert set(caps['result']) >= set([rt1_id, rt2_id, rt3_id])
     #caps = utils.get_index(rt2, 'node/capabilities/calvinsys.events.timer')
     #assert set(caps['result']) >= set([rt1_id, rt2_id, rt3_id])
     #caps = utils.get_index(rt3, 'node/capabilities/calvinsys.events.timer')
     #assert set(caps['result']) >= set([rt1_id, rt2_id, rt3_id])
     assert utils.get_index(
         rt2,
         format_index_string([
             'node_name', {
                 'organization': 'org.testexample',
                 'name': 'testNode1'
             }
         ]))
     assert utils.get_index(
         rt3,
         format_index_string([
             'node_name', {
                 'organization': 'org.testexample',
                 'name': 'testNode1'
             }
         ]))
     assert utils.get_index(
         rt1,
         format_index_string([
             'node_name', {
                 'organization': 'org.testexample',
                 'name': 'testNode2'
             }
         ]))
     assert utils.get_index(
         rt3,
         format_index_string([
             'node_name', {
                 'organization': 'org.testexample',
                 'name': 'testNode2'
             }
         ]))
     assert utils.get_index(
         rt1,
         format_index_string([
             'node_name', {
                 'organization': 'org.testexample',
                 'name': 'testNode3'
             }
         ]))
     assert utils.get_index(
         rt2,
         format_index_string([
             'node_name', {
                 'organization': 'org.testexample',
                 'name': 'testNode3'
             }
         ]))
Example 34
 def verify_storage(self):
     global rt1
     global rt2
     global rt3
     global rt4
     rt1_id = None
     rt2_id = None
     rt3_id = None
     rt4_id = None
     failed = True
     # Try 30 times waiting for control API to be up and running
     for i in range(30):
         try:
             rt1_id = rt1_id or request_handler.get_node_id(rt1)
             rt2_id = rt2_id or request_handler.get_node_id(rt2)
             rt3_id = rt3_id or request_handler.get_node_id(rt3)
             rt4_id = rt4_id or request_handler.get_node_id(rt4)
             failed = False
             break
         except:
             time.sleep(0.1)
     assert not failed
     assert rt1_id
     assert rt2_id
     assert rt3_id
     assert rt4_id
     print "RUNTIMES:", rt1_id, rt2_id, rt3_id, rt4_id
     _log.analyze("TESTRUN", "+ IDS", {'waited': 0.1*i})
     failed = True
     # Try 30 times waiting for storage to be connected
     caps1 = []
     caps2 = []
     caps3 = []
     caps4 = []
     rt_ids = set([rt1_id, rt2_id, rt3_id, rt4_id])
     for i in range(30):
         try:
             if not (rt1_id in caps1 and rt2_id in caps1 and rt3_id in caps1 and rt4_id in caps1):
                 caps1 = request_handler.get_index(rt1, "node/capabilities/calvinsys.native.python-json")['result']
             if not (rt1_id in caps2 and rt2_id in caps2 and rt3_id in caps2 and rt4_id in caps2):
                 caps2 = request_handler.get_index(rt2, "node/capabilities/calvinsys.native.python-json")['result']
             if not (rt1_id in caps3 and rt2_id in caps3 and rt3_id in caps3 and rt4_id in caps3):
                 caps3 = request_handler.get_index(rt3, "node/capabilities/calvinsys.native.python-json")['result']
             if not (rt1_id in caps4 and rt2_id in caps4 and rt3_id in caps4 and rt4_id in caps4):
                 caps4 = request_handler.get_index(rt4, "node/capabilities/calvinsys.native.python-json")['result']
             if rt_ids <= set(caps1) and rt_ids <= set(caps2) and rt_ids <= set(caps3) and rt_ids <= set(caps4):
                 failed = False
                 break
             else:
                 time.sleep(0.1)
         except:
             time.sleep(0.1)
     assert not failed
     _log.analyze("TESTRUN", "+ STORAGE", {'waited': 0.1*i})
     assert request_handler.get_index(rt1, format_index_string(['node_name', {'organization': 'org.testexample', 'name': 'testNode1'}]))
     _log.analyze("TESTRUN", "+ RT1 INDEX", {})
     assert request_handler.get_index(rt2, format_index_string(['node_name', {'organization': 'org.testexample', 'name': 'testNode2'}]))
     _log.analyze("TESTRUN", "+ RT2 INDEX", {})
     assert request_handler.get_index(rt3, format_index_string(['node_name', {'organization': 'org.testexample', 'name': 'testNode3'}]))
     _log.analyze("TESTRUN", "+ RT3 INDEX", {})
     assert request_handler.get_index(rt4, format_index_string(['node_name', {'organization': 'org.testexample', 'name': 'testNode4'}]))
     _log.analyze("TESTRUN", "+ RT4 INDEX", {})