Code Example #1
 def tearDown(self):
     for r in self.rt:
         utils.quit(r)
     time.sleep(0.2)
     for p in multiprocessing.active_children():
         p.terminate()
     time.sleep(0.2)
Code Example #2
 def teardown(self):
     global rt1
     global rt2
     utils.quit(rt1)
     utils.quit(rt2)
     time.sleep(0.2)
     for p in multiprocessing.active_children():
         p.terminate()
     time.sleep(0.2)
Code Example #3
File: test_calvin.py Project: LlsDimple/calvin-base
def teardown_module(module):
    global runtime
    global runtimes

    for peer in runtimes:
        utils.quit(peer)
        time.sleep(0.2)
    utils.quit(runtime)
    time.sleep(0.2)
    for p in multiprocessing.active_children():
        p.terminate()
        time.sleep(0.2)
Code Example #4
 def teardown(self):
     global rt1
     global rt2
     utils.quit(rt1)
     utils.quit(rt2)
     time.sleep(0.2)
     for p in multiprocessing.active_children():
         p.terminate()
     # They will die eventually (about 5 seconds) in most cases, but this makes sure without wasting time
     os.system("pkill -9 -f -l 'csruntime -n %s -p 5000'" % (ip_addr, ))
     os.system("pkill -9 -f -l 'csruntime -n %s -p 5001'" % (ip_addr, ))
     time.sleep(0.2)
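The teardown examples above (and the teardown_module variants further down) all repeat essentially the same shutdown sequence: send quit to each runtime over the control API, give it a moment, then terminate any child processes that are still alive, with pkill as a last resort in the example just above. A small helper could factor that pattern out. The sketch below is only an illustration; the shutdown_runtimes name and its force_kill_pattern argument are hypothetical and not part of calvin-base, while utils.quit() is the same call used throughout these examples.

import os
import time
import multiprocessing

from calvin.utilities import utils


def shutdown_runtimes(runtimes, grace=0.2, force_kill_pattern=None):
    # Ask every runtime to quit; a runtime that is already gone should not
    # abort the rest of the teardown.
    for rt in runtimes:
        try:
            utils.quit(rt)
        except Exception:
            pass
    time.sleep(grace)
    # Terminate any child processes that did not exit on their own.
    for p in multiprocessing.active_children():
        p.terminate()
    # Optional last resort, mirroring the pkill fallback in the example above.
    if force_kill_pattern:
        os.system("pkill -9 -f '%s'" % force_kill_pattern)
    time.sleep(grace)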
Code Example #5
File: csruntime.py Project: PStahl/calvin-base
def dispatch_and_deploy(app_info, wait, uri, control_uri, attr):
    from calvin.utilities import utils
    rt, process = runtime(uri, control_uri, attr, dispatch=True)
    app_id = None
    app_id = deploy(rt, app_info)
    print "Deployed application", app_id

    timeout = wait if wait else None
    if timeout:
        process.join(timeout)
        utils.quit(rt)
        time.sleep(0.1)
    else:
        process.join()
Code Example #6
File: csruntime.py Project: yeshbourne/calvin-base
def dispatch_and_deploy(app_info, wait, uri, control_uri, attr):
    from calvin.utilities import utils
    rt, process = runtime(uri, control_uri, attr, dispatch=True)
    app_id = None
    app_id = deploy(rt, app_info)
    print "Deployed application", app_id

    timeout = wait if wait else None
    if timeout:
        process.join(timeout)
        utils.quit(rt)
        time.sleep(0.1)
    else:
        process.join()
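Both dispatch_and_deploy variants above follow the same pattern: join the runtime process, optionally with a timeout, and then send quit through the control API. If such a run is interrupted with Ctrl-C, the runtime would be left without its quit call. The wrapper below is only a sketch of how that could be handled; wait_and_quit is a hypothetical name, while utils.quit() and the (rt, process) pair come from the examples above.

import time

from calvin.utilities import utils


def wait_and_quit(rt, process, wait=None):
    # Block until the runtime process exits, or until `wait` seconds have passed.
    try:
        process.join(wait if wait else None)
    except KeyboardInterrupt:
        pass
    finally:
        # Always give the runtime a chance to shut down cleanly.
        utils.quit(rt)
        time.sleep(0.1)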
Code Example #7
    def testNodeIndexMany(self):
        """ Since storage is eventually consistent, and we don't really know when,
            this test is quite loose on its asserts but shows some warnings when
            inconsistent. It is also extremely slow.
        """
        self.hosts = [("calvinip://%s:%d" % (ip_addr, d), "http://%s:%d" % (ip_addr, d+1), "owner%d" % ((d-5000)/2)) for d in range(5000, 5041, 2)]
        self.rt = [dispatch_node(h[0], h[1], attributes={'indexed_public': {'owner':{'personOrGroup': h[2]}}})[0] for h in self.hosts]
        time.sleep(3)
        owner = []
        for i in range(len(self.hosts)):
            res = utils.get_index(self.rt[0], format_index_string({'owner':{'personOrGroup': self.hosts[i][2]}}))
            owner.append(res)
            assert(set(res['result']) == set([self.rt[i].id]))

        owners = utils.get_index(self.rt[0], format_index_string({'owner':{}}))
        assert(set(owners['result']) <= set([r.id for r in self.rt]))
        if not set(owners['result']) >= set([r.id for r in self.rt]):
            warn("Not all nodes manage to reach the index %d of %d" % (len(owners['result']), len(self.rt)))
        rt = self.rt[:]
        ids = [r.id for r in rt]
        hosts = self.hosts[:]
        utils.quit(self.rt[10])
        del self.rt[10]
        del self.hosts[10]
        owners = utils.get_index(self.rt[0], format_index_string({'owner':{}}))
        assert(set(owners['result']) <= set(ids))
        if ids[10] in set(owners['result']):
            warn("The removed node is still in the all owners set")

        removed_owner = utils.get_index(self.rt[0], format_index_string({'owner':{'personOrGroup': hosts[10][2]}}))
        assert(not removed_owner['result'] or set(removed_owner['result']) == set([ids[10]]))
        if removed_owner['result']:
            warn("The removed node is still in its own index")

        # Destroy a bunch of the nodes
        for _ in range(7):
            utils.quit(self.rt[10])
            del self.rt[10]
            del self.hosts[10]

        time.sleep(2)
        owners = utils.get_index(self.rt[0], format_index_string({'owner':{}}))
        assert(set(owners['result']) <= set(ids))
        l = len(set(owners['result']))
        if l > (len(ids)-8):
            warn("Did have %d nodes left even after removal of 8 from %d" % (l, len(ids)))
Code Example #8
    def testNodeIndexMany(self):
        """ Since storage is eventually consistent, and we don't really know when,
            this test is quite loose on its asserts but shows some warnings when
            inconsistent. It is also extremely slow.
        """
        self.hosts = [("calvinip://127.0.0.1:%d" % d, "http://localhost:%d" % (d+1), "owner%d" % ((d-5000)/2)) for d in range(5000, 5041, 2)]
        self.rt = [dispatch_node(h[0], h[1], attributes=["node/affiliation/owner/%s" % h[2]]) for h in self.hosts]
        time.sleep(3)
        owner = []
        for i in range(len(self.hosts)):
            res = utils.get_index(self.rt[0], "node/affiliation/owner/%s" % self.hosts[i][2])
            owner.append(res)
            assert(set(res['result']) == set([self.rt[i].id]))

        owners = utils.get_index(self.rt[0], "node/affiliation/owner")
        assert(set(owners['result']) <= set([r.id for r in self.rt]))
        if set(owners['result']) != set([r.id for r in self.rt]):
            warn("Not all nodes manage to reach the index")
        rt = self.rt[:]
        ids = [r.id for r in rt]
        hosts = self.hosts[:]
        utils.quit(self.rt[10])
        del self.rt[10]
        del self.hosts[10]
        owners = utils.get_index(self.rt[0], "node/affiliation/owner")
        assert(set(owners['result']) <= set(ids))
        if ids[10] in set(owners['result']):
            warn("The removed node is still in the all owners set")

        removed_owner = utils.get_index(self.rt[0], "node/affiliation/owner/%s" % hosts[10][2])
        assert(not removed_owner['result'] or set(removed_owner['result']) == set([ids[10]]))
        if removed_owner['result']:
            warn("The removed node is still in its own index")

        # Destroy a bunch of the nodes
        for _ in range(7):
            utils.quit(self.rt[10])
            del self.rt[10]
            del self.hosts[10]

        time.sleep(2)
        owners = utils.get_index(self.rt[0], "node/affiliation/owner")
        assert(set(owners['result']) <= set(ids))
        l = len(set(owners['result']))
        if l > (len(ids)-8):
            warn("Did have %d nodes left even after removal of 8 from %d" % (l, len(ids)))
Code Example #9
File: deploy_app.py Project: LlsDimple/calvin-base
def main():
    args = parse_arguments()

    set_loglevel(args.verbose, args.quiet)

    add_peer = args.peer
    kill_app = args.appid and not add_peer
    start_runtime = args.runtime and not kill_app and not add_peer
    deploy_app = args.deploy and args.file and not kill_app

    app_info = None
    if deploy_app:
        app_info = compile(args.file)
        if not app_info:
            return 1

    uri = "calvinip://%s:%d" % (args.host, args.port)
    control_uri = "http://%s:%d" % (args.host, args.controlport)

    rt = runtime(uri, control_uri, start_runtime)

    if add_peer:
        res = utils.peer_setup(rt, [args.peer])
        print res
        return 0

    if args.appid:
        res = utils.delete_application(rt, args.appid)
        print res['result']
        return 0

    app_id = None
    if deploy_app:
        app_id = deploy(rt, app_info, args.verbose)

    if start_runtime:
        timeout = int(args.wait) if int(args.wait) else None
        select.select([], [], [], timeout)
        utils.quit(rt)
        time.sleep(0.1)
    if app_id:
        print "Deployed application", app_id
Code Example #10
File: deploy_app.py Project: zhangsj0608/calvin-base
def main():
    args = parse_arguments()

    set_loglevel(args.verbose, args.quiet)

    add_peer = args.peer
    kill_app = args.appid and not add_peer
    start_runtime = args.runtime and not kill_app and not add_peer
    deploy_app = args.deploy and args.file and not kill_app

    app_info = None
    if deploy_app:
        app_info = compile(args.file)
        if not app_info:
            return 1

    uri = "calvinip://%s:%d" % (args.host, args.port)
    control_uri = "http://%s:%d" % (args.host, args.controlport)

    rt = runtime(uri, control_uri, start_runtime)

    if add_peer:
        res = utils.peer_setup(rt, [args.peer])
        print res
        return 0

    if args.appid:
        res = utils.delete_application(rt, args.appid)
        print res['result']
        return 0

    app_id = None
    if deploy_app:
        app_id = deploy(rt, app_info, args.verbose)

    if start_runtime:
        timeout = int(args.wait) if int(args.wait) else None
        select.select([], [], [], timeout)
        utils.quit(rt)
        time.sleep(0.1)
    if app_id:
        print "Deployed application", app_id
Code Example #11
File: cscontrol.py Project: yeshbourne/calvin-base
def control_nodes(args):
    from requests.exceptions import ConnectionError
    if args.cmd == 'list':
        return utils.get_nodes(args.node)
    elif args.cmd == 'add':
        return utils.peer_setup(args.node, *args.peerlist)
    elif args.cmd == 'stop':
        try:
            return utils.quit(args.node)
        except ConnectionError as e:
            # If the connection goes down before response that is OK
            return None
Code Example #12
File: csruntime.py Project: carlostrom/calvin-base
def main():
    args = parse_arguments()

    if args.debug:
        import pdb
        pdb.set_trace()

    set_loglevel(args.loglevel)

    deploy_app = args.file

    app_info = None
    if deploy_app:
        app_info = compile(args.file)
        if not app_info:
            return 1

    uri = "calvinip://%s:%d" % (args.host, args.port)
    control_uri = "http://%s:%d" % (args.host, args.controlport)

    attr_list = None
    if args.attr:
        attr_list = args.attr.split(',')

    rt = runtime(uri, control_uri, attr_list)

    app_id = None
    if deploy_app:
        app_id = deploy(rt, app_info, args.loglevel)

    # FIXME: This is a weird construct that is required since python's
    #        multiprocessing will reap all its children on exit, regardless
    #        of the daemon attribute value
    timeout = int(args.wait) if int(args.wait) else None
    select.select([], [], [], timeout)
    utils.quit(rt)
    time.sleep(0.1)

    if app_id:
        print "Deployed application", app_id
Code Example #13
File: cscontrol.py Project: Tim-SHOOSAN/calvin-base
def control_nodes(args):
    from requests.exceptions import ConnectionError

    if args.cmd == "list":
        return utils.get_nodes(args.node)
    elif args.cmd == "add":
        return utils.peer_setup(args.node, *args.peerlist)
    elif args.cmd == "stop":
        try:
            return utils.quit(args.node)
        except ConnectionError as e:
            # If the connection goes down before response that is OK
            return None
Code Example #14
File: cscontrol.py Project: PStahl/calvin-base
def control_nodes(args):
    from requests.exceptions import ConnectionError
    if args.cmd == 'info':
        if not args.peerlist:
            raise Exception("No node id given")
        return utils.get_node(args.node, args.peerlist[0])
    elif args.cmd == 'list':
        return utils.get_nodes(args.node)
    elif args.cmd == 'add':
        return utils.peer_setup(args.node, *args.peerlist)
    elif args.cmd == 'stop':
        try:
            return utils.quit(args.node)
        except ConnectionError:
            # If the connection goes down before response that is OK
            return None
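control_nodes() only inspects three attributes of its argument (cmd, node and peerlist), so it can be exercised without the real command-line parser. The calls below are a hedged usage sketch; rt stands for whatever node reference the surrounding tooling passes as args.node, and the sketch assumes it is the same kind of reference that utils.quit() and utils.get_nodes() accept in the other examples on this page.

from argparse import Namespace

# Stop a node; a ConnectionError raised because the node goes away before
# replying is swallowed inside control_nodes and simply yields None.
result = control_nodes(Namespace(cmd='stop', node=rt, peerlist=[]))

# List the nodes known to a runtime.
nodes = control_nodes(Namespace(cmd='list', node=rt, peerlist=[]))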
Code Example #15
def teardown_module(module):
    global rt1
    global rt2
    global rt3
    utils.quit(rt1)
    utils.quit(rt2)
    utils.quit(rt3)
    time.sleep(0.2)
    for p in multiprocessing.active_children():
        p.terminate()
    time.sleep(0.2)
Code Example #16
def teardown_module(module):
    global rt1
    global rt2
    global rt3
    utils.quit(rt1)
    utils.quit(rt2)
    utils.quit(rt3)
    time.sleep(0.4)
    for p in multiprocessing.active_children():
        p.terminate()
    time.sleep(0.4)
Code Example #17
def teardown_module(module):
    global rt1
    global rt2
    global rt3
    global kill_peers

    utils.quit(rt1)
    if kill_peers:
        utils.quit(rt2)
        utils.quit(rt3)
    time.sleep(0.4)
    for p in multiprocessing.active_children():
        p.terminate()
    time.sleep(0.4)
Code Example #18
File: dist-2.py Project: Tim-SHOOSAN/calvin-base
node_1 = dispatch_node(uri="calvinip://localhost:5000", control_uri="http://localhost:5001",
                       attributes={'indexed_public':
                            {'owner':{'organization': 'org.testexample', 'personOrGroup': 'me'},
                             'node_name': {'organization': 'org.testexample', 'name': 'node-1'}}})
node_2 = dispatch_node(uri="calvinip://localhost:5002", control_uri="http://localhost:5003",
                       attributes={'indexed_public':
                            {'owner':{'organization': 'org.testexample', 'personOrGroup': 'me'},
                             'node_name': {'organization': 'org.testexample', 'name': 'node-2'}}})

# send 'new actor' command to node_2
counter_id = utils.new_actor(node_2, 'std.Counter', 'counter')

# send 'new actor' command to node_1
output_id = utils.new_actor(node_1, 'io.StandardOut', 'output')

# inform node_1 about peers
utils.peer_setup(node_1, ["calvinip://localhost:5002"])

# allow network to stabilize
time.sleep(1.0)

# send connect command to node_1
utils.connect(node_1, output_id, 'token', node_2.id, counter_id, 'integer')

# run app for 3 seconds
time.sleep(3.0)

# send quit to nodes
utils.quit(node_1)
utils.quit(node_2)
Code Example #19
from calvin.utilities import utils
import time

# create two nodes, named node-1 and node-2, respectively
node_1 = dispatch_node(uri="calvinip://localhost:5000", control_uri="http://localhost:5001",
                       attributes=["node/affiliation/owner/me", "node/affiliation/name/node-1"])
node_2 = dispatch_node(uri="calvinip://localhost:5002", control_uri="http://localhost:5003",
                       attributes=["node/affiliation/owner/me", "node/affiliation/name/node-2"])

# send 'new actor' command to node_2
counter_id = utils.new_actor(node_2, 'std.Counter', 'counter')

# send 'new actor' command to node_1
output_id = utils.new_actor(node_1, 'io.StandardOut', 'output')

# inform node_1 about peers
utils.peer_setup(node_1, ["calvinip://localhost:5002"])

# allow network to stabilize
time.sleep(1.0)

# send connect command to node_1
utils.connect(node_1, output_id, 'token', node_2.id, counter_id, 'integer')

# run app for 3 seconds
time.sleep(3.0)

# send quit to nodes
utils.quit(node_1)
utils.quit(node_2)
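In the two scripted examples above, the final quit calls run unconditionally at the end of the script, so an exception in an earlier step (a failed connect, for instance) would leave both runtimes alive, and a node that closes its control connection before answering quit would raise ConnectionError. The snippet below is a sketch of a more defensive ending, in the spirit of the 'stop' branch of the control_nodes examples; the quit_quietly name is hypothetical.

from requests.exceptions import ConnectionError

from calvin.utilities import utils


def quit_quietly(node):
    # Send quit and tolerate the node dropping the connection before replying.
    try:
        utils.quit(node)
    except ConnectionError:
        pass

# Hypothetical usage, with node_1 and node_2 dispatched as in the example above:
# try:
#     ...  # create actors, set up peers, connect ports, let the app run
# finally:
#     quit_quietly(node_1)
#     quit_quietly(node_2)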