Example #1
def GetDist(sw):
    """
    find the node distance
    """

    taskname = sw.GetName()
    taskindex = int(taskname.split("-")[1])

    sw.cum_timer.cum_start("disk")

    msglist = sw.GetMsgList()
    sw.log.debug("msglist: %s" % msglist)

    with perf.Timer(sw.log, "LoadState-GetDistCpp"):
        ds = LoadState(sw)

    sw.cum_timer.cum_stop("disk")

    # process initialization
    if ds is None:

        # first iteration: input is the start node
        with perf.Timer(sw.log, "InitState-GetDistCpp"):
            ds = InitState(sw, taskindex, msglist)

    else:
        # successive iterations: input are the new nodes
        with perf.Timer(sw.log, "AddNewNodes-GetDistCpp"):
            AddNewNodes(taskindex, sw, ds, msglist)

    with perf.Timer(sw.log, "SaveState-GetDistCpp"):
        SaveState(sw, ds)
Example #2
def GetNbr(sw):
    """
    provide graph neighbors
    """

    taskname = sw.GetName()
    # tindex = sw.GetIndex()

    msglist = sw.GetMsgList()
    sw.log.debug("msglist %s" % msglist)

    with perf.Timer(sw.log, "LoadState-GetNbrCpp"):
        AdjLists = LoadState(sw)

    if AdjLists:
        # state is available, process requests for neighbors
        sw.log.debug('[%s] state available, length %d' %
                     (sw.GetName(), AdjLists.Len()))
        for item in msglist:
            name = sw.GetMsgName(item)

            # read the input nodes
            FIn = Snap.TFIn(Snap.TStr(name))
            msg = Snap.TIntV(FIn)

            GetNeighbors(sw, AdjLists, msg)
        return

    # state not found, initialize it with neighbors
    sw.log.debug('[%s] adjlist not found, initializing' % sw.GetName())
    Edges = Snap.TIntIntVV()

    for item in msglist:
        name = sw.GetMsgName(item)

        FIn = Snap.TFIn(Snap.TStr(name))
        Vec = Snap.TIntIntVV(FIn)

        Snap.AddVec64(Edges, Vec)

    # first iteration: input are edges, save the state
    AdjLists = GetEdges(sw, Edges)

    sw.log.debug('[%s] saving adjlist of size %d now' %
                 (sw.GetName(), AdjLists.Len()))

    with perf.Timer(sw.log, "SaveState-GetNbrCpp"):
        SaveState(sw, AdjLists)

    dmsgout = {}
    dmsgout["src"] = sw.GetName()
    dmsgout["cmd"] = "targets"
    dmsgout["body"] = {}
    sw.Send(0, dmsgout, "2")
Example #3
def GetNbr(sw):
    """
    provide graph neighbors
    """

    # taskname = sw.GetName()

    msglist = sw.GetMsgList()
    sw.log.debug("msglist %s" % msglist)

    with perf.Timer(sw.log, "LoadState-GetNbrCpp"):
        AdjLists = LoadState(sw)

    if AdjLists:
        # state is available, process requests for neighbors
        for item in msglist:
            name = sw.GetMsgName(item)

            # read the input nodes
            FIn = Snap.TFIn(Snap.TStr(name))
            msg = Snap.TIntV(FIn)

            GetNeighbors(sw, AdjLists, msg)
        return

    # state not found, initialize it with neighbors
    Edges = Snap.TIntV()

    for item in msglist:
        name = sw.GetMsgName(item)

        FIn = Snap.TFIn(Snap.TStr(name))
        Vec = Snap.TIntV(FIn)

        Edges.AddV(Vec)

    # first iteration: input are edges, save the state
    AdjLists = GetEdges(sw, Edges)
    sw.log.debug("state: %d" % AdjLists.Len())

    with perf.Timer(sw.log, "SaveState-GetNbrCpp"):
        SaveState(sw, AdjLists)

    dmsgout = {}
    dmsgout["src"] = sw.GetName()
    dmsgout["cmd"] = "targets"
    dmsgout["body"] = {}
    sw.Send(0, dmsgout, "2")
Example #4
    def execute_single_task(task):
        # get the executables
        bunch = get_task_name(task)
        execlist = []
        try:
            execlist = args.config["bunch"][bunch]["exec"].split(",")
        except:
            pass

        timer = perf.Timer(logging)
        timer.start('provision-execlist')
        # provision execlist on disk
        for item in execlist:
            execpath = os.path.join(execdir, item)
            # check if the program exists and its mtime
            mtime = None
            try:
                stat = os.stat(execpath)
                mtime = int(stat.st_mtime)
            except:
                pass
     
            if not mtime or mtime < tnow:
                # if the file does not exist or it is older than current time,
                # contact the head task
     
                content = client.getexec(args.master,item,mtime)
                swc = "None"
                if content:
                    swc = str(len(content))
     
                logging.debug("Host received %s" % (swc))
                if content:
                    if len(content) > 0:
                        logging.debug("Host saving to %s" % (execpath))
                        with open(execpath, "w") as f:
                            f.write(content)
       
                    os.utime(execpath,(tnow, tnow))
        timer.stop('provision-execlist')
     
        prog = execlist[0]
        logging.debug("Task %s, exec %s" % (prog, execlist))
        progpath = os.path.join(execdir, prog)
     
        if not os.path.exists(progpath):
            logging.error("task %s not started, program %s not found" % (task, progpath))
            return
     
        taskdir = "snapw.%d/tasks/%s" % (args.pid, task)
        config.mkdir_p(taskdir)
     
        qdir = os.path.join(args.workdir, args.qactname, task)
        tdir = os.path.join(args.workdir, taskdir)
     
        logging.info("starting task %s, prog %s, workdir %s, qdir %s\n" % (task, prog, tdir, qdir))
             
        # get server information
        host = args.server.host
        port = args.server.port
     
        # construct a command line for worker
        cmd = python + " %s -t %s -h %s:%d -q %s" % (
            progpath, task, host, port, qdir)
        logging.info("starting cmd %s" % (cmd))
     
        # start the work process
        p = subprocess.Popen(cmd.split(), cwd=tdir, close_fds=True)
        return p, prog
Example #5
def Execute(args):
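    # run all active tasks for this superstep with bounded parallelism,
    # then notify the master that this host is done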
    logging.info("Execute " + str(args.active) + "")
    tnow = time.time()

    overall_timer = perf.Timer(logging)

    task_name = "overall: "
    if len(args.active) > 0:
        # get the task name
        task_name = "overall: %s" % get_task_name(args.active[0])

    overall_timer.start("superstep-%d-%s" % \
            (args.server.superstep_count, task_name))

    if len(args.active) > 0:
        execdir = os.path.join(args.workdir, "snapw.%d/exec" % (args.pid))
        config.mkdir_p(execdir)
     
    def execute_single_task(task):
        # get the executables
        bunch = get_task_name(task)
        execlist = []
        try:
            execlist = args.config["bunch"][bunch]["exec"].split(",")
        except:
            pass

        timer = perf.Timer(logging)
        timer.start('provision-execlist')
        # provision execlist on disk
        for item in execlist:
            execpath = os.path.join(execdir, item)
            # check if the program exists and its mtime
            mtime = None
            try:
                stat = os.stat(execpath)
                mtime = int(stat.st_mtime)
            except:
                pass
     
            if not mtime or mtime < tnow:
                # if the file does not exist or it is older than current time,
                # contact the head task
     
                content = client.getexec(args.master,item,mtime)
                swc = "None"
                if content:
                    swc = str(len(content))
     
                logging.debug("Host received %s" % (swc))
                if content:
                    if len(content) > 0:
                        logging.debug("Host saving to %s" % (execpath))
                        with open(execpath, "w") as f:
                            f.write(content)
       
                    os.utime(execpath,(tnow, tnow))
        timer.stop('provision-execlist')
     
        prog = execlist[0]
        logging.debug("Task %s, exec %s" % (prog, execlist))
        progpath = os.path.join(execdir, prog)
     
        if not os.path.exists(progpath):
            logging.error("task %s not started, program %s not found" % (task, progpath))
            return
     
        taskdir = "snapw.%d/tasks/%s" % (args.pid, task)
        config.mkdir_p(taskdir)
     
        qdir = os.path.join(args.workdir, args.qactname, task)
        tdir = os.path.join(args.workdir, taskdir)
     
        logging.info("starting task %s, prog %s, workdir %s, qdir %s\n" % (task, prog, tdir, qdir))
             
        # get server information
        host = args.server.host
        port = args.server.port
     
        # construct a command line for worker
        cmd = python + " %s -t %s -h %s:%d -q %s" % (
            progpath, task, host, port, qdir)
        logging.info("starting cmd %s" % (cmd))
     
        # start the work process
        p = subprocess.Popen(cmd.split(), cwd=tdir, close_fds=True)
        return p, prog
     
    # Dynamically check the number of processors available on this host.
    # On any error, default to 1.
    max_tasks = 1
    var_par_tasks = int(args.config['par_tasks'])
    if var_par_tasks <= 0:
        try:
            max_tasks = os.sysconf('SC_NPROCESSORS_ONLN')
        except:
            max_tasks = 1
    else:
        max_tasks = var_par_tasks

    # execute the tasks in a parallel fashion by running
    # at most max_tasks processes at any point.
    task_list = args.active[:]
    procs = []
    logging.info("Running %d tasks with %d-way parallelism: %s" % \
            (len(task_list), max_tasks, str(task_list)))

    timer = perf.Timer(logging)
    pcounter = 0
    counter_map = {}
    while True:
        while task_list and len(procs) < max_tasks:
            task = task_list.pop()
            timer.start("prog-%d" % pcounter)
            p, prog = execute_single_task(task)

            timer.update_extra("prog-%d" % pcounter, "step: %d, pid: %d, prog: %s" \
                    % (args.server.superstep_count, p.pid, prog))

            counter_map[p.pid] = pcounter
            pcounter += 1
            procs.append(p)

        for p in procs:
            # wait for the process to complete
            
            pid = p.pid
            logging.debug("polling %d" % pid)
            status = p.poll()
            if status is not None:
                timer.stop("prog-%d" % counter_map[p.pid])
                del counter_map[p.pid]

                logging.debug("finished %d with status %s" % (pid, str(status)))
                # error reporting
                if status != 0:
                    msg = "Pid %d terminated unexpectedly with status %d" % (pid, status)
                    logging.error(msg)
                    client.error(args.master, args.id, msg)

                procs.remove(p) 
 
        if not procs and not task_list:
            break
        else:
            time.sleep(0.1)

    overall_timer.stop("superstep-%d-%s" % \
            (args.server.superstep_count, task_name))

    # send done to master
    client.done(args.master, args.id)
Example #6
    def do_GET(self):
        # SYS_STATS is assumed to be a module-level flag; without this declaration
        # the assignments in the "/quit" branches below only create a local
        global SYS_STATS
        parsed_path = urlparse.urlparse(self.path)
        message_parts = [
                'CLIENT VALUES:',
                'client_address=%s (%s)' % (self.client_address,
                                            self.address_string()),
                'command=%s' % self.command,
                'path=%s' % self.path,
                'real path=%s' % parsed_path.path,
                'query=%s' % parsed_path.query,
                'request_version=%s' % self.request_version,
                '',
                'SERVER VALUES:',
                'server_type=%s' % "host server",
                'server_version=%s' % self.server_version,
                'sys_version=%s' % self.sys_version,
                'protocol_version=%s' % self.protocol_version,
                '',
                'HEADERS RECEIVED:',
                ]
        for name, value in sorted(self.headers.items()):
            message_parts.append('%s=%s' % (name, value.rstrip()))
        message_parts.append('')
        message = '\r\n'.join(message_parts)

        subpath = self.path.split("/")

        if self.path == "/prepare":
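            # prepare the next superstep: rotate qin to qact, count the active
            # tasks, and report readiness to the master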

            prepare_timer = perf.Timer(logging)
            self.server.superstep_count += 1
            prepare_timer.start("prepare-%d" % self.server.superstep_count)

            # move qin to qact
            qinname = "snapw.%d/qin" % (self.pid)
            qactname = "snapw.%d/qact" % (self.pid)

            # rename an existing qact
            if os.path.exists(qactname):

                removed = False
                if not self.config['debug']:
                    try:
                        shutil.rmtree(qactname)
                        logging.debug("removed dir %s" % qactname)
                        removed = True
                    except:
                        logging.error("error on removing dir %s" % qactname)

                if not removed:
                    t = time.time()
                    s = time.strftime("%Y%m%d-%H%M%S", time.localtime(t))
                    mus = "%06d" % (t*1000000 - int(t)*1000000)
                    qactnewname = "%s-%s-%s" % (qactname, s, mus)
                    os.rename(qactname, qactnewname)
                    logging.debug("renamed %s to %s" % (qactname, qactnewname))
                    
            # get the number of active tasks, rename existing qin
            numtasks = 0
            if os.path.exists(qinname):
                os.rename(qinname, qactname)
                active = os.listdir(qactname)
                numtasks = len(active)

            # create new qin
            config.mkdir_p(qinname)
    
            logging.info("preparing next step: %s, %s" % \
                    (qinname, qactname))

            # send ready to master
            client.ready(self.master, self.id, numtasks)

            self.send_response(200)
            self.send_header('Content-Length', 0)
            self.end_headers()

            prepare_timer.stop("prepare-%d" % self.server.superstep_count)

            return

        elif self.path == "/quit":
            self.send_response(200)
            self.send_header('Content-Length', 0)
            self.end_headers()
            SYS_STATS = False

            # set the flag to terminate the server
            self.server.running = False
            self.server.self_dummy()
            return

        elif self.path == "/getkv":
            logging.debug("getting kv file")

            self.send_response(200)
            if self.server.superstep_count > 1:
                body = json.dumps(get_kv_file("supervisor"))
                self.send_header('Content-Length', len(body))
                self.end_headers()
                self.wfile.write(body)
            else:
                self.send_header('Content-Length', len("None"))
                self.end_headers()
                self.wfile.write("None")
            return

        elif self.path == "/dummy":
            logging.debug("dummy request")

            self.send_response(200)
            self.send_header('Content-Length', 0)
            self.end_headers()
            return

        elif self.path == "/step":
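            # run the next superstep: collect the active tasks from qact and
            # launch Execute for them in a background thread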

            logging.info("execute next step")

            self.send_response(200)
            self.send_header('Content-Length', 0)
            self.end_headers()

            # TODO, implement null action,
            #   skip execution if there are no tasks to execute,
            #   qact does not exist

            # get the tasks to execute
            qactname = "snapw.%d/qact" % (self.pid)
            active = []
            if os.path.exists(qactname):
                active = os.listdir(qactname)

            logging.debug("active tasks %s" % (str(active)))

            self.qactname = qactname
            self.active = active # the task list
            # start a thread to execute the work tasks
            t = threading.Thread(target=Execute, args=(self, ))
            t.start()
            return

        elif self.path == "/config":
    
            logging.debug("get configuration")

            body = json.dumps(self.config)
            self.send_response(200)
            self.send_header('Content-Length', len(body))
            self.end_headers()
            self.wfile.write(body)

            return

        elif self.path == "/quit":
            # NOTE: unreachable; "/quit" is already handled by the branch above
    
            logging.info("terminate execution")
            SYS_STATS = False
            self.send_response(200)
            self.send_header('Content-Length', 0)
            self.end_headers()
            sys.exit(0)

        self.send_response(200)
        self.end_headers()
        self.wfile.write(message)
        return
Example #7
def AddNewNodes(taskindex, sw, ds, msglist):
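    # advance the distance computation by one hop: mark unvisited nodes from the
    # incoming fringe with the new distance and forward them to their owner tasks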

    ds["dist"] += 1
    distance = ds["dist"]
    Visited = ds["visit"]

    timer = perf.Timer(sw.log)
    
    # nodes to add are on the input
    NewNodes = Snap.TIntV()

    timer.start("dist-msglist-iter")

    perf.DirSize(sw.log, sw.qin, "GetDist-qin")

    for item in msglist:

        sw.cum_timer.cum_start("disk")

        name = sw.GetMsgName(item)

        # read the input nodes
        FIn = Snap.TFIn(Snap.TStr(name))
        Vec = Snap.TIntV(FIn)

        sw.cum_timer.cum_stop("disk")

        # print "len", Vec.Len()
        # get new nodes, not visited before
        # timer.start("dist-nodes-iter")
        Snap.GetNewNodes1(Vec, Visited, NewNodes, distance)
        # timer.stop("dist-nodes-iter")

    timer.stop("dist-msglist-iter")

    # done, no new nodes
    if NewNodes.Len() <= 0:

        Visited.GetVal(ds["start"]).Val = 0    # reset start node to 0

        timer.start("dist-get-distribution")

        # get distance distribution, assume 1000 is the max
        DistCount = Snap.TIntV(1000)
        Snap.ZeroVec(DistCount)
        Snap.GetDistances(Visited,DistCount)

        l = []
        for i in xrange(0, DistCount.Len()):
            if DistCount.GetVal(i).Val <= 0:
                break
            l.append(DistCount.GetVal(i).Val)

        dmsg = {}
        dmsg["start"] = ds["start"]
        dmsg["dist"] = l

        dmsgout = {}
        dmsgout["src"] = sw.GetName()
        dmsgout["cmd"] = "results"
        dmsgout["body"] = dmsg

        sw.cum_timer.cum_start("network")
        sw.Send(0,dmsgout,"2")
        sw.cum_timer.cum_stop("network")

        sw.log.info("final: %s %s" % (str(ds["start"]), str(distance)))
        sw.log.info("distances: %s" % str(l))

        timer.stop("dist-get-distribution")
        return

    # nodes in each task
    tsize = sw.GetRange()

    timer.start("dist-collect-nodes")

    # collect nodes for the same task
    ntasks = int(sw.GetVar("gen_tasks"))
    Tasks = Snap.TIntIntVV(ntasks)

    # assign nodes to tasks
    Snap.Nodes2Tasks1(NewNodes, Tasks, tsize)

    timer.stop("dist-collect-nodes")

    # for i in range(0,Tasks.Len()):
    #     print "sending task %d, len %d" % (i, Tasks.GetVal(i).Len())

    # send the messages
    timer.start("dist-send-all")
    for i in range(0,Tasks.Len()):
        Vec1 = Tasks.GetVal(i)
        if Vec1.Len() <= 0:
            continue

        # add task# at the end
        Vec1.Add(taskindex)
        sw.cum_timer.cum_start("network")
        sw.Send(i,Vec1,swsnap=True)
        sw.cum_timer.cum_stop("network")
    timer.stop("dist-send-all")
Example #8
def Worker(sw):
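    # run one GetDist step and report the cumulative disk and network timings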
    sw.cum_timer = perf.Timer(sw.log)
    GetDist(sw)
    sw.cum_timer.cum_print("disk")
    sw.cum_timer.cum_print("network")
Example #9
def AddNewNodes(taskindex, sw, ds, msglist):

    # all the nodes were visited already
    if ds["count"] >= ds["range"]:
        return

    distance = -1
    Visited = ds["visit"]

    timer = perf.Timer(sw.log)

    # nodes to add are on the input
    NewNodes = Snap.TIntV()

    timer.start("dist-msglist-iter")

    perf.DirSize(sw.log, sw.qin, "GetDist-qin")

    for item in msglist:

        sw.cum_timer.cum_start("disk")

        name = sw.GetMsgName(item)

        # read the input nodes
        FIn = Snap.TFIn(Snap.TStr(name))
        Vec = Snap.TIntV(FIn)

        sw.cum_timer.cum_stop("disk")

        # update current distance for fringe nodes
        distance = Vec.Last().Val + 1
        Vec.DelLast()

        # subtract the starting index
        Snap.IncVal(Vec, -ds["first"])

        # print "len", Vec.Len()
        # get new nodes, not visited before
        # timer.start("dist-nodes-iter")
        Snap.GetNewNodes1(Vec, Visited, NewNodes, distance)
        # timer.stop("dist-nodes-iter")

    timer.stop("dist-msglist-iter")

    ds["dist"] = distance

    nnodes = ds["range"]
    ds["count"] += NewNodes.Len()


    sw.log.info("distance: %d, new: %d, count: %d, nodes: %d" % \
            (distance, NewNodes.Len(), ds["count"], nnodes))

    # done, no new nodes
    sw.log.debug("testing: %d %d %d" % \
            (ds["count"], nnodes, ds["count"] >= nnodes))

    if ds["count"] >= nnodes:

        sw.log.info("sending finish output")

        if ds["start"] >= 0:
            # reset start node to 0
            Visited.GetVal(ds["start"] - ds["first"]).Val = 0

        # get distance distribution, assume 1000 is the max
        DistCount = Snap.TIntV(1000)
        Snap.ZeroVec(DistCount)
        Snap.GetDistances(Visited, DistCount)

        # get the maximum positive distance
        maxdist = DistCount.Len() - 1
        while (maxdist > 0) and (DistCount.GetVal(maxdist).Val == 0):
            maxdist -= 1

        maxdist += 1

        sw.log.info("maxdist: %d" % maxdist)

        # collect the distances
        l = []
        for i in xrange(maxdist):
            # if DistCount.GetVal(i).Val <= 0: break
            l.append(DistCount.GetVal(i).Val)

        dmsg = {}
        dmsg["start"] = ds["start"]
        dmsg["dist"] = l

        dmsgout = {}
        dmsgout["src"] = sw.GetName()
        dmsgout["cmd"] = "results"
        dmsgout["body"] = dmsg

        sw.cum_timer.cum_start("network")
        sw.Send(0, dmsgout, "2")
        sw.cum_timer.cum_stop("network")

        sw.log.info("final: %s %s" % (str(ds["start"]), str(distance)))
        sw.log.info("distances: %s" % str(l))

    # nodes in each task
    tsize = sw.GetRange()

    timer.start("dist-collect-nodes")

    # collect nodes for the same task
    ntasks = int(sw.GetVar("gen_tasks"))
    Tasks = Snap.TIntIntVV(ntasks)

    Snap.IncVal(NewNodes, ds["first"])

    # assign nodes to tasks
    Snap.Nodes2Tasks1(NewNodes, Tasks, tsize)

    timer.stop("dist-collect-nodes")

    # send the messages
    timer.start("dist-send-all")
    for i in range(0, Tasks.Len()):
        Vec1 = Tasks.GetVal(i)
        if Vec1.Len() <= 0:
            continue

        # add task# at the end
        Vec1.Add(distance)
        sw.cum_timer.cum_start("network")
        sw.Send(i, Vec1, swsnap=True)
        sw.cum_timer.cum_stop("network")
    timer.stop("dist-send-all")
Example #10
def AddNewNodes(taskindex, sw, ds, msglist):

    ds["dist"] += 1
    distance = ds["dist"]
    Visited = ds["visit"]
    # print "Visited", type(Visited)

    timer = perf.Timer(sw.log)
    
    # nodes to add are on the input
    NewNodes = Snap.TIntH() 

    timer.start("dist-msglist-iter")

    perf.DirSize(sw.log, sw.qin, "GetDist-qin")

    for item in msglist:

        sw.cum_timer.cum_start("disk")
        name = sw.GetMsgName(item)

        # print "input", name
        # read the input nodes
        FIn = Snap.TFIn(Snap.TStr(name))
        Vec = Snap.TIntV(FIn)
        sw.cum_timer.cum_stop("disk")

        # print "len", Vec.Len()
        # get new nodes, not visited before
        timer.start("dist-nodes-iter")
        Snap.GetNewNodes(Vec, Visited, NewNodes, distance)
        timer.stop("dist-nodes-iter")

    timer.stop("dist-msglist-iter")

    # done, no new nodes
    if NewNodes.Len() <= 0:
        timer.start("dist-get-distribution")
        # get distance distribution
        dcount = {}
        # TODO move this loop to SNAP C++
        VIter = Visited.BegI()
        while not VIter.IsEnd():
            # snode = VIter.GetKey().Val
            distance = VIter.GetDat().Val
            if distance not in dcount:
                dcount[distance] = 0
            dcount[distance] += 1

            VIter.Next()

        nnodes = int(sw.GetVar("nodes"))
        l = []
        for i in xrange(0, nnodes):
            if i not in dcount:
                break
            l.append(dcount[i])

        dmsg = {}
        dmsg["start"] = ds["start"]
        dmsg["dist"] = l

        dmsgout = {}
        dmsgout["src"] = sw.GetName()
        dmsgout["cmd"] = "results"
        dmsgout["body"] = dmsg

        sw.cum_timer.cum_start("network")
        sw.Send(0,dmsgout,"2")
        sw.cum_timer.cum_stop("network")

        sw.log.info("final %s %s" % (str(ds["start"]), str(distance)))
        sw.log.info("distances %s" % str(l))

        timer.stop("dist-get-distribution")
        return

    # nodes in each task
    tsize = sw.GetRange()

    timer.start("dist-collect-nodes")

    # collect nodes for the same task
    ntasks = int(sw.GetVar("gen_tasks"))
    Tasks = Snap.TIntIntVV(ntasks)

    # assign nodes to tasks
    Snap.Nodes2Tasks(NewNodes, Tasks, tsize)

    timer.stop("dist-collect-nodes")

    # for i in range(0,Tasks.Len()):
    #     print "sending task %d, len %d" % (i, Tasks.GetVal(i).Len())

    # send the messages
    timer.start("dist-send-all")
    for i in range(0,Tasks.Len()):
        Vec1 = Tasks.GetVal(i)
        if Vec1.Len() <= 0:
            continue

        # add task# at the end
        Vec1.Add(taskindex)
        sw.cum_timer.cum_start("network")
        sw.Send(i,Vec1,swsnap=True)
        sw.cum_timer.cum_stop("network")
    timer.stop("dist-send-all")
Example #11
def AddNewNodes(taskindex, sw, ds, msglist):
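    # segmented variant of AddNewNodes: incoming fringe ids are within-segment
    # values, converted to task-local indexes for visiting and back before sending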

    # all the nodes were visited already
    if ds["count"] >= ds["range"]:
        return

    distance = -1  # this should get overwritten if we have messages.
    # logger gives a warning if that doesn't happen

    Visited = ds["visit"]

    seg_bits = int(sw.GetVar('seg_bits'))
    this_segment_start = Snap.zeroLowOrderBits(ds['first'], seg_bits)
    sw.log.debug('this task starts at node %d' % ds['first'])
    sw.log.debug('this segment starts at node %d' % this_segment_start)

    timer = perf.Timer(sw.log)

    # nodes to add are on the input
    NewNodes = Snap.TIntV()  # TODO (smacke): I think this is fine non-segmented

    timer.start("dist-msglist-iter")

    perf.DirSize(sw.log, sw.qin, "GetDist-qin")

    for item in msglist:

        sw.cum_timer.cum_start("disk")

        name = sw.GetMsgName(item)

        # read the input nodes
        FIn = Snap.TFIn(Snap.TStr(name))
        FringeSubset = Snap.TIntV(FIn)

        sw.cum_timer.cum_stop("disk")

        # it's okay to reassign and then use this later outside of the loop
        # since BSP should guarantee that this is the same at every loop iteration
        # last value has prev distance, so we inc by 1
        distance = FringeSubset.Last().Val + 1
        FringeSubset.DelLast()

        # subtract the starting index
        sw.log.debug('[%s] offsetting by first node id' % sw.GetName())
        Snap.IncVal(FringeSubset, -(ds["first"] - this_segment_start))

        # get new nodes, not visited before
        # timer.start("dist-nodes-iter")

        # NewNodes will each have the segmented bits zero'd out, as well as
        # the high-order bits for this task
        sw.log.debug('[%s] calling GetNewNodes1' % sw.GetName())
        Snap.GetNewNodes1(FringeSubset, Visited, NewNodes, distance)
        # timer.stop("dist-nodes-iter")

    timer.stop("dist-msglist-iter")

    ds["dist"] = distance
    # This should never be -1 after processing message

    if distance == -1:
        sw.log.warn("[%s] thinks that the current distance is -1, "
                    "this almost certainly means that things are broken!" % sw.GetName())

    nnodes = ds["range"]
    ds["count"] += NewNodes.Len()  # no. of new things we visited


    sw.log.info("distance: %d, new: %d, count: %d, nodes: %d" % \
            (distance, NewNodes.Len(), ds["count"], nnodes))

    sw.log.debug("testing: %d %d %d" % \
            (ds["count"], nnodes, ds["count"] >= nnodes))

    # done, no new nodes
    if ds["count"] >= nnodes:

        sw.log.debug("sending finish output")

        if ds["source"] >= 0:
            # reset start node to 0
            Visited.GetVal(ds["source"] -
                           ds["first"]).Val = 0  # SMACKE: should be ok

        # get distance distribution, assume 1000 is the max
        DistCount = Snap.TIntV(1000)
        Snap.ZeroVec(DistCount)
        Snap.GetDistances(Visited, DistCount)

        # get the maximum positive distance
        maxdist = DistCount.Len() - 1
        while (maxdist > 0) and (DistCount.GetVal(maxdist).Val == 0):
            maxdist -= 1

        maxdist += 1

        sw.log.debug("maxdist: %d" % maxdist)

        # collect the distances
        l = []
        for i in xrange(maxdist):
            # if DistCount.GetVal(i).Val <= 0: break
            l.append(DistCount.GetVal(i).Val)

        dmsg = {}
        dmsg["start"] = ds["source"]
        dmsg["dist"] = l

        dmsgout = {}
        dmsgout["src"] = sw.GetName()
        dmsgout["cmd"] = "results"
        dmsgout["body"] = dmsg

        sw.cum_timer.cum_start("network")
        sw.Send(0, dmsgout, "2")
        sw.cum_timer.cum_stop("network")

        sw.log.debug("final: %s %s" % (str(ds["source"]), str(distance)))
        sw.log.debug("distances: %s" % str(l))
        return

    # nodes in each task
    tsize = sw.GetRange()

    timer.start("dist-collect-nodes")

    # collect nodes for the same task
    #ntasks = int(sw.GetVar("gen_tasks"))
    # reduce number to only tasks within same segment
    ntasks = (1 << seg_bits) / tsize
    Tasks = Snap.TIntIntVV(ntasks)

    # we will only ever send to tasks in the same segment, but
    # the wasted space shouldn't hurt that much
    # TODO (smacke): not wasting space anymore

    sw.log.debug('[%s] increase nodes to within-segment values now' %
                 sw.GetName())
    Snap.IncVal(NewNodes, ds["first"] - this_segment_start)

    # assign nodes to tasks
    sw.log.debug('[%s] calling Nodes2Tasks1' % sw.GetName())
    Snap.Nodes2Tasks1(NewNodes, Tasks, tsize)
    # All of the GetNbr tasks are in the same segment as this task
    # so this should still work; we just have to find the base task for
    # this segment and add it to all of the task indexes in Tasks

    timer.stop("dist-collect-nodes")

    # send the messages
    timer.start("dist-send-all")
    for i in xrange(Tasks.Len()):
        Vec1 = Tasks.GetVal(i)
        sw.log.debug('Vec1 length: %d' % Vec1.Len())
        if Vec1.Len() <= 0:
            continue

        # add task# at the end # TODO (smacke): I still don't understand this terminology
        Vec1.Add(distance)
        sw.cum_timer.cum_start("network")
        # we need to send to the correct segment, from which our
        # tasks are offset
        sw.Send(i + this_segment_start / tsize, Vec1, swsnap=True)
        sw.cum_timer.cum_stop("network")

    timer.stop("dist-send-all")
Example #12
    # set of hosts that have completed the current step
    server.done = set()
    server.done_lock = threading.Lock()
    # set of hosts ready for the next step
    server.ready = set()
    server.ready_lock = threading.Lock()
    # indicator, if an application has started yet
    server.start = False
    # indicator that the head service is running
    server.running = True
    # indicator that an application is running
    server.executing = False
    # indicator that an application should execute the next step
    server.iterate = False
    # One lock to rule em all
    server.global_lock = threading.Lock()
    # snapshot settings and number of snapshots made
    server.snapshot_enabled = dconf["snapshot"]
    server.snapshot_counter = 0
    server.superstep_count = 0
    # Timer
    server.timer = perf.Timer(logging, thread_safe=True)

    handler = BaseHTTPServer.BaseHTTPRequestHandler
    handler.config = dconf

    dconf["tasks"] = config.assign(dconf)

    logging.info('Starting head server on port %d, use <Ctrl-C> to stop' %
                 (port))
    server.execute()