Example #1
def run_parkinglot_expt(net, n):
    "Run experiment"

    seconds = args.time

    # Start the bandwidth and cwnd monitors in the background
    monitor = Process(target=monitor_devs_ng,
                      args=('%s/bwm.txt' % args.dir, 1.0))
    monitor.start()
    start_tcpprobe()

    # Get receiver and clients
    recvr = net.getNodeByName('receiver')
    #sender1 = net.getNodeByName('h1')

    # Start the receiver
    port = 5001
    recvr.cmd('iperf -s -p', port, '> %s/iperf_server.txt' % args.dir, '&')

    #waitListening(sender1, recvr, port)

    # TODO: start the sender iperf processes and wait for the flows to finish
    # Hint: Use getNodeByName() to get a handle on each sender.
    # Hint: Use sendCmd() and waitOutput() to start iperf and wait for them to finish
    # Hint: waitOutput waits for the command to finish allowing you to wait on a particular process on the host
    # iperf command to start flow: 'iperf -c %s -p %s -t %d -i 1 -yc > %s/iperf_%s.txt' % (recvr.IP(), 5001, seconds, args.dir, node_name)
    # Hint (not important): You may use progress(t) to track your experiment progress

    ### begin my code

    # get list of all hosts
    h = []  # Python list of hosts
    for i in range(n):
        #print "DEBUG", 'h%s' % (i+1)
        h.append(net.getNodeByName('h%s' % (i + 1)))

    # make sure the receiver's iperf port is listening, checked from each sender
    for i in range(n):
        waitListening(h[i], recvr, port)

    # send iperf cmd to all hosts
    for i in range(n):
        node_name = 'h%s' % (i + 1)
        h[i].sendCmd('iperf -c %s -p %s -t %d -i 1 -yc > %s/iperf_%s.txt' %
                     (recvr.IP(), port, seconds, args.dir, node_name))

    # wait for commands to finish
    iperf_results = {}
    progress(seconds)  # show progress while waiting
    for i in range(n):
        iperf_results[h[i].name] = h[i].waitOutput()

    ### end my code

    recvr.cmd('kill %iperf')

    # Shut down monitors
    monitor.terminate()
    stop_tcpprobe()
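
For reference, the sendCmd()/waitOutput() pattern the hints describe, shown in isolation; a minimal sketch assuming a running Mininet `net` with hosts h1 and h2 (names illustrative):

# Minimal sketch of the non-blocking sendCmd()/waitOutput() pattern,
# assuming a running Mininet `net` with hosts h1 and h2.
h1 = net.getNodeByName('h1')
h2 = net.getNodeByName('h2')
h2.cmd('iperf -s -p 5001 &')                        # start server, return at once
h1.sendCmd('iperf -c %s -p 5001 -t 10' % h2.IP())   # non-blocking client start
output = h1.waitOutput()                            # block until the client exits
h2.cmd('kill %iperf')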
Example #2
def process_message(m, local_message_i, max_run_time, run_dir, aws_region,
                    server_name, log_stream_prefix):
    event = json.loads(m.body)

    # run the job handler in a child process (wraps pywren.wrenhandler.generic_handler(event))
    p = Process(target=job_handler,
                args=(event, local_message_i, run_dir, aws_region, server_name,
                      log_stream_prefix))
    # the loop below polls p.exitcode to see when the child finishes
    p.start()
    start_time = time.time()

    response = m.change_visibility(
        VisibilityTimeout=SQS_VISIBILITY_INCREMENT_SEC)

    # keep extending visibility while the job runs, renewing at ~90% of the increment
    run_time = time.time() - start_time
    last_visibility_update_time = time.time()
    while run_time < max_run_time:
        if (time.time() - last_visibility_update_time) > (
                SQS_VISIBILITY_INCREMENT_SEC * 0.9):
            response = m.change_visibility(
                VisibilityTimeout=SQS_VISIBILITY_INCREMENT_SEC)
            last_visibility_update_time = time.time()
            logger.debug("incrementing visibility timeout by {} sec".format(
                SQS_VISIBILITY_INCREMENT_SEC))
        if p.exitcode is not None:
            logger.debug("attempting to join process")
            # FIXME will this join ever hang?
            p.join()
            break
        else:
            print "sleeping"
            time.sleep(PROCESS_SLEEP_DUR_SEC)

        run_time = time.time() - start_time

    if p.exitcode is None:
        p.terminate()  # job exceeded max_run_time; terminating may log noisy errors

    m.delete()
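
The heart of this example is the visibility heartbeat: the SQS message lease is renewed before roughly 90% of it elapses, so the message is not redelivered while the job is still running. A stripped-down sketch of just that idea, assuming boto3 (queue name and timings are illustrative):

import time
import boto3

SQS_VISIBILITY_INCREMENT_SEC = 60   # illustrative lease length

sqs = boto3.resource('sqs')
queue = sqs.get_queue_by_name(QueueName='jobs')   # hypothetical queue name
for m in queue.receive_messages(WaitTimeSeconds=10):
    deadline = time.time() + 300   # give the job at most 5 minutes
    while time.time() < deadline:
        # Renew the lease well before it expires
        m.change_visibility(VisibilityTimeout=SQS_VISIBILITY_INCREMENT_SEC)
        time.sleep(SQS_VISIBILITY_INCREMENT_SEC * 0.9)
    m.delete()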
Example #3
## Start weewx engine with options/args and pass in shared data dict
p_weewx = Process(target=weewx.engine.main, args=(options, args, sdata))
p_weewx.start()

## -------------------------------------------------------------
#  Forever loop for data transform, check process health
last_ts = sdata["t"]    # Timestamp
last_clear_time = last_ts
while True:
    time.sleep(0.1)  # Weather data post interval >= 2.5s, so take it easy.
    # Check for new weather data from weewx (it arrives as a string, not a dict)
    if sdata["t"] != last_ts and isinstance(sdata["r"], str):
        # Transform weather data into a dict
        sdata["r"] = format_weather_data(copy.deepcopy(sdata["r"]))
        sdata["r"]["status"] = "Station running..."
        last_ts = sdata["t"]

    # Check processes and kill this script if one dies
    if not p_weewx.is_alive():
        logger.error("start.py: weewx process died: exit code = %s\n" % (p_weewx.exitcode))
        p_flask.terminate()
        sys.exit(1)

    if not p_flask.is_alive():
        logger.error("start.py: flask process died: exit code = %s\n" % (p_flask.exitcode))
        p_weewx.terminate()
        sys.exit(1)

logger.error("start.py: Script exited from run loop unexpectedly")
sys.exit(1)
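
Note that p_flask is never defined in this snippet; it is presumably started alongside p_weewx earlier in the script. A hypothetical sketch of that assumed setup (module and port are invented for illustration):

# Hypothetical: the Flask UI assumed by the watchdog loop, run in its own
# process so both children can be health-checked and cross-terminated.
from multiprocessing import Process
from webapp import app   # hypothetical module exposing a Flask `app`

p_flask = Process(target=app.run, kwargs={'host': '0.0.0.0', 'port': 5000})
p_flask.start()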

Example #4
File: do_queue.py  Project: Gamain/PyCode
import os
import random
import time
from multiprocessing import Process, Queue

# Code executed by the writer process:
def write(q, l):
    print('Process to write: %s' % os.getpid())
    for value in ['A', 'B', 'C', 'D']:
        print('Put %s to queue...' % value)
        q.put(value)
        l.append(value)
        print(l)
        time.sleep(random.random())

# Code executed by the reader process:
def read(q, l):
    print('Process to read: %s' % os.getpid())
    while True:
        value = q.get(True)
        print('Get %s from queue.' % value)
        print(l)

if __name__ == '__main__':
    # The parent process creates the Queue and passes it to each child:
    q = Queue()
    l = []
    pw = Process(target=write, args=(q, l))
    pr = Process(target=read, args=(q, l))
    # Start child process pw (writer):
    pw.start()
    # Start child process pr (reader):
    pr.start()
    # Wait for pw to finish:
    pw.join()
    # pr loops forever, so it cannot be joined; terminate it instead:
    pr.terminate()
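
One caveat: the plain list l is not shared memory. Each child gets its own copy at fork time, so the reader's print(l) never reflects the writer's appends; only the Queue actually moves data between the processes. If a genuinely shared list is wanted, multiprocessing.Manager provides one; a minimal sketch:

# Minimal sketch: a Manager-backed list is visible across processes,
# unlike the plain `l` above.
from multiprocessing import Manager, Process

def append_items(shared):
    shared.extend(['A', 'B', 'C'])

if __name__ == '__main__':
    with Manager() as mgr:
        shared = mgr.list()
        p = Process(target=append_items, args=(shared,))
        p.start()
        p.join()
        print(list(shared))   # prints ['A', 'B', 'C']; appends are visible here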
Example #5
class _Task(object):
    def __init__(self,
                 name,
                 weight,
                 target,
                 args=None,
                 kwargs=None,
                 daemon=None,
                 force_quit=False):
        self.name = name
        self.weight = 0 - weight
        self._target = target
        self._args = args
        self._kwargs = kwargs
        self.force_quit = force_quit
        self._daemon = daemon
        self._lock = threading.Lock()
        self._task = None
        self._stop = False
        self._start_new()

    def _start_new(self):
        if self._task is not None and self._task.is_alive():
            logging.info("%s is alive, no need to start a new one", self.name)
            return
        self._task = Process(target=self._target,
                             name=self.name,
                             args=self._args,
                             kwargs=self._kwargs,
                             daemon=self._daemon)
        # Clear any stale export from a previous run before restarting
        if self._args and isinstance(self._args[0], Args):
            logging.info("delete %s", self._args[0].export_path)
            if gfile.Exists(self._args[0].export_path):
                logging.info(" deleting")
                gfile.DeleteRecursively(self._args[0].export_path)
        self._task.start()
        logging.info("Task %s started", self.name)
        time.sleep(10)

    def __gt__(self, o):
        return self.weight > o.weight

    def kill(self, force=False):
        with self._lock:
            logging.info("Kill task %s", self.name)
            if self._task is None or not self._task.is_alive():
                return
            if force or self.force_quit:
                self._task.terminate()
            else:
                # Still alive, but not allowed to terminate it
                raise ValueError("cannot kill %s without force" % self.name)

    def start(self):
        logging.info("begin to start")
        with self._lock:
            if self._stop:
                return
            if self._task.is_alive():
                logging.info("  %s is alive, no need to start" % self.name)
                return
            self._start_new()
            time.sleep(2)

    def is_alive(self):
        with self._lock:
            if self._task is None:
                return True
            return self._task.is_alive()
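
The negated weight together with __gt__ hints at how these tasks are meant to be ordered: in a min-heap, storing 0 - weight makes the highest-weight task pop first. An illustrative sketch of the same trick with plain tuples:

# Illustrative only: heapq pops the smallest element, so negating the
# weight surfaces the highest-priority item first.
import heapq

heap = []
for name, weight in [('low', 1), ('high', 9), ('mid', 5)]:
    heapq.heappush(heap, (0 - weight, name))   # same trick as _Task.weight

print(heapq.heappop(heap))   # (-9, 'high'): highest weight comes out first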
Example #6
class Job:
    def __init__(self, args):
        self.prefix = time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime())
        self.file = args.file

        self.in_dir = args.in_dir if args.in_dir else args.turkey_home
        self.out_dir = args.out_dir if args.out_dir is not None else \
            os.path.join(args.turkey_home, 'out', self.prefix +
                         '_' + self.file.split('/')[-1].split('.')[0] + '.out')

        os.system('mkdir -p %s' % self.out_dir)

        # Copy over job file to out directory for easy parsing later
        os.system('cp %s %s' % (self.file, self.out_dir))

        with open(self.file, 'r') as f:
            self.tasks = [
                Task(task,
                     out_dir=self.out_dir,
                     in_dir=self.in_dir,
                     turkey_home=args.turkey_home) for task in json.load(f)
            ]

        self.pool_size = args.pool_size
        self.num_cpus = args.num_cpus

        # TODO: might want a better name (i.e., moldable Linux)
        self.intelligent = args.intelligent

    def run(self, stdout=False):
        # Set up the system stat collector
        self.stat_process = Process(target=write_stats,
                                    args=(os.path.join(self.out_dir,
                                                       'stats.csv'), ))
        self.stat_process.start()

        pool_size = min(len(self.tasks), self.pool_size, mp.cpu_count())
        pool = mp.Pool(pool_size)

        args = {}

        # Initialize the number of tasks remaining
        config.num_tasks_remaining.set(len(self.tasks))

        for task in self.tasks:
            # TODO: We should discuss the details of intelligent. For example, in
            # the case where tasks are pinned to fewer than cpu_count number of
            # cores
            if self.intelligent:
                args['nthreads'] = int(self.num_cpus /
                                       config.num_tasks_in_system)

            # Wait before we deliver the next task
            # TODO: Not great that this happens on the main thread
            task.delay()

            apply_args = {
                'args': args,
                'stdout': stdout,
                'wait': True,
                'count': True
            }
            pool.apply_async(task.run, (), apply_args)

        # Normally we'd use os.wait(), but between wanting to wait for the
        # async applies to finish and os.wait() also depending on the stat
        # process, we have a deadlock
        while config.num_tasks_remaining.value > 0:
            time.sleep(5)

        self.stat_process.terminate()
        os.system('stty sane')
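
write_stats is referenced but not shown in this snippet; a hypothetical stand-in that samples system-wide CPU usage into the expected stats.csv (an assumption using psutil, not the project's actual collector):

# Hypothetical stand-in for the undisplayed write_stats target: sample
# system CPU usage to CSV until Job.run() terminates this process.
import csv
import time
import psutil

def write_stats(path, interval=1.0):
    with open(path, 'w', newline='') as f:
        writer = csv.writer(f)
        writer.writerow(['timestamp', 'cpu_percent'])
        while True:
            writer.writerow([time.time(), psutil.cpu_percent(interval=interval)])
            f.flush()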