Example No. 1
def startParallel(dispQ=dispQ, count=100, num=[]):

    print("Spawning processes!")

    numdevices = num
    if len(num) == 0: numdevices = range(len(devices))
    thread_list = []
    statQ = PQueue()
    movP = Process(target=movProc, args=(count, dispQ, statQ, num))
    movP.start()
    # *****************************************************************
    # Make text Windows for both displays.
    # *****************************************************************
    root = Tk()
    T = Text(root, height=50, width=30)
    T.pack()
    root1 = Toplevel()
    if len(numdevices) > 1:
        T1 = Text(root1, height=50, width=30)
        T1.pack()
    root2 = Toplevel()
    T2 = Text(root2, height=50, width=50)
    T2.pack()

    q = PQueue()
    dtp = Process(target=displayThreadProcess, args=(q, ))
    dtp.start()

    # *****************************************************************
    # Collect cpu stats to display
    # *****************************************************************
    pid = statQ.get()
    dpid = str(getpid())
    print("got pid!", pid)
    statP = Process(target=statProc, args=((pid, dpid), statQ))
    statP.start()
    statT = Thread(target=statThread, args=(statQ, T2))
    statT.start()

    thread_list.append(
        Thread(target=displaythread,
               args=(dispQ, q, T, numdevices[:1], count, False)))
    if len(numdevices) > 1:
        thread_list.append(
            Thread(target=displaythread,
                   args=(dispQ, q, T1, numdevices[1:], count, False)))

    for thread in thread_list:
        thread.start()
    root.mainloop()
    for thread in thread_list:
        thread.join()
Example No. 2
def setupConcurrency(settings, sess, coord, globalEpisodes):
    from queue import Queue
    from multiprocessing import Queue as PQueue

    import Concurrency
    from Worker import Worker
    from Trainer import Trainer

    trainingQueues = []
    trainerThreads = []
    for i in range(settings.trainerThreads):
        queue = Queue(100)
        trainingQueues.append(queue)
        trainerThreads.append(Concurrency.TrainerRunner(coord, queue))

    gameDataQueues = []
    workerThreads = []
    for i in range(settings.workerThreads):
        gameDataQueue = PQueue(100)
        gameDataQueues.append(gameDataQueue)
        workerThreads.append(Concurrency.WorkerRunner(coord, gameDataQueue))

    gameProcesses = []
    for i in range(settings.gameProcesses):
        gameProcesses.append(Concurrency.GameRunner(settings))

    trainers = []
    for i in range(settings.trainers):
        trainer = Trainer(settings, sess, i, coord, globalEpisodes)
        trainers.append(trainer)
        trainerThreads[i % len(trainerThreads)].addTrainer(trainer)

    for i in range(settings.workers):
        playerActionQueue = PQueue(100)

        queues = {
            "trainer": trainingQueues[i % len(trainingQueues)],
            "gameData": gameDataQueue,
            "playerAction": playerActionQueue
        }
        trainer = trainers[i % len(trainers)]
        worker = Worker(settings, sess, i, trainer.number, trainer.localAC,
                        queues, coord)
        workerThreads[i % len(workerThreads)].addWorker(worker)
        gameProcesses[i % len(gameProcesses)].addGame(
            gameDataQueues[i % len(gameDataQueues)], playerActionQueue)

    return trainerThreads, workerThreads, gameProcesses
Example No. 3
def grid_index_parallel(fltfile, parfile, tmp, gridpars, translations):
    """
    fltfile : file containing the peaks
    parfile : file containing the instrument geometry and unit cell
    tmp : base name for scratch files and results
    gridpars : dictionary of control parameters (rings to use, etc.)
    translations : list of translation positions to try

    Runs a grid-index algorithm using Python's multiprocessing module,
    splitting the workload across processes (a block of translations per
    process). The calling thread collects the results via a queue.
    """
    gridpars = initgrid(fltfile, parfile, tmp, gridpars)
    print("Done init")
    if 'NPROC' not in gridpars or gridpars['NPROC'] is None:
        NPR = multiprocessing.cpu_count() - 1
        cImageD11.cimaged11_omp_set_num_threads(
            2)  # assume hyperthreading is useful?
    else:
        NPR = int(gridpars['NPROC'])
    if 'NTHREAD' in gridpars:
        cImageD11.cimaged11_omp_set_num_threads(int(gridpars['NTHREAD']))
    elif NPR > 1:
        cImageD11.cimaged11_omp_set_num_threads(1)
    tsplit = [translations[i::NPR] for i in range(NPR)]
    args = [("%s.flt" % (tmp), parfile, t, gridpars)
            for i, t in enumerate(tsplit)]
    q = PQueue()
    p = Pool(processes=NPR,
             initializer=wrap_test_many_points_init,
             initargs=[q])
    print("Using a pool of", NPR, "processes")
    pa = p.map_async(wrap_test_many_points, args)
    ul = uniq_grain_list(gridpars['SYMMETRY'], gridpars['toldist'],
                         gridpars['tolangle'])
    lastsave = 0

    while True:
        # Wait up to 10 seconds for a result; on timeout, check whether the pool has finished
        try:
            grs = q.get(True, 10)
            gb4 = len(ul.uniqgrains)
            ul.add(grs)
            gnow = len(ul.uniqgrains)
            print("Got % 5d new %d from %d" % (gnow, gnow - gb4, len(grs)))
            if len(ul.uniqgrains) > lastsave:
                lastsave = len(ul.uniqgrains)
                grain.write_grain_file("all" + tmp + ".map", ul.uniqgrains)
            if pa._number_left == 0:
                break
        except Queue.Empty:
            sys.stderr.write(" Caught queue empty exception\n")
            if pa._number_left == 0:
                break
        except KeyboardInterrupt:
            break
    # write here to be on the safe side ....
    grain.write_grain_file("all" + tmp + ".map", ul.uniqgrains)
    p.close()
    p.join()
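For reference, a minimal self-contained sketch (not part of ImageD11) of the same pattern: a Pool whose initializer installs a shared multiprocessing.Queue in every worker, workers push results onto it, and the parent drains it with a timeout. The helper names _init_worker and _work below are hypothetical stand-ins for wrap_test_many_points_init and wrap_test_many_points.

import queue as queue_module
from multiprocessing import Pool, Queue as PQueue

_result_q = None  # set in every worker process by the initializer

def _init_worker(q):
    global _result_q
    _result_q = q

def _work(block):
    _result_q.put(sum(block))  # one "result" per block of work
    return len(block)

if __name__ == "__main__":
    blocks = [list(range(i, i + 5)) for i in range(0, 20, 5)]
    q = PQueue()
    with Pool(processes=2, initializer=_init_worker, initargs=[q]) as pool:
        async_result = pool.map_async(_work, blocks)
        received = 0
        while received < len(blocks):
            try:
                print("got result", q.get(True, 10))
                received += 1
            except queue_module.Empty:
                if async_result.ready():  # public alternative to pa._number_left
                    break
        pool.close()
        pool.join()

Draining the queue in the parent, instead of relying only on the pool's return values, keeps partial results available while workers are still running, which is why the original code also saves the grain map inside the loop.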
Example No. 4
def startAndWaitForZ3Instance() -> Tuple[Process, Pyro4.URI]:
    q = PQueue()  # type: PQueue

    def runDaemon(q: PQueue) -> None:
        import os

        out = "z3_child.%d.out" % os.getpid()
        err = "z3_child.%d.err" % os.getpid()

        error("Redirecting child", os.getpid(), "streams to", out, err)

        sys.stdout.close()
        sys.stderr.close()

        sys.stdout = open(out, "w")
        sys.stderr = open(err, "w")

        daemon = Pyro4.Daemon()
        uri = daemon.register(Z3ServerInstance)
        sys.stderr.write("Notify parent of my uri: " + str(uri) + "\n")
        sys.stderr.flush()
        q.put(uri)
        # Small window for racing
        daemon.requestLoop()

    p = Process(target=runDaemon, args=(q, ))
    p.start()
    uri = q.get()
    return p, uri
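The queue in this example is only a one-shot handshake: the child publishes the URI it registered before entering requestLoop, and the parent blocks on q.get() until it arrives. A minimal sketch of the same handshake without Pyro4, where the address string is a made-up placeholder:

from multiprocessing import Process, Queue as PQueue

def child_main(q):
    address = "placeholder-address"  # stands in for daemon.register(...) above
    q.put(address)  # tell the parent where to reach us before serving requests
    # a real child would now block in its request loop

if __name__ == "__main__":
    q = PQueue()
    p = Process(target=child_main, args=(q,))
    p.start()
    print("child is reachable at", q.get())  # blocks until the child is ready
    p.join()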
Example No. 5
def runNCS():
    global path_to_networks, graph_filename
    #mvnc.SetGlobalOption(mvnc.GlobalOption.LOGLEVEL, 2)
    devHandle = []
    graphHandle = []
    dispQ = []

    devices = mvnc.EnumerateDevices()
    if len(devices) == 0:
        print('No devices found')
        quit()
    else:
        print("There was found {}".format(len(devices)))

    # Create n Queues based on the number of sticks plugged in
    for devnum in range(len(devices)):
        dispQ.append(PQueue())

    #Load graph
    with open(path_to_networks + graph_filename, mode='rb') as f:
        graphfile = f.read()

    # *****************************************************************
    # Open the device and load the graph into each of the devices
    # *****************************************************************
    for devnum in range(len(devices)):
        print("***********************************************")
        devHandle.append(mvnc.Device(devices[devnum]))
        devHandle[devnum].OpenDevice()

        opt = devHandle[devnum].GetDeviceOption(
            mvnc.DeviceOption.OPTIMISATIONLIST)
        print("Optimisations:")
        print(opt)

        graphHandle.append(devHandle[devnum].AllocateGraph(graphfile))
        graphHandle[devnum].SetGraphOption(mvnc.GraphOption.ITERATIONS, 1)
        iterations = graphHandle[devnum].GetGraphOption(
            mvnc.GraphOption.ITERATIONS)
        print("Iterations:", iterations)

    print("***********************************************")
    print("Loaded Graphs")
    print("***********************************************\n\n\n")
    return devices, graphHandle, dispQ
Example No. 6
    def schedule(self, daemon: bool = False):
        """
        计划
        :return:
        """
        jobs_module = __import__(self._jobs_module)
        module_path = getattr(jobs_module, '__path__')[0]
        module_names = [
            v.replace('.py', '') for v in os.listdir(module_path)
            if 'job.py' in v
        ]
        pprint(module_names)
        job_queue = PQueue(20)
        print("daemon: %d" % daemon)
        for v in module_names:
            try:
                job_module = __import__(self._jobs_module + '.' + v)
                cls_name = underline2hump(v)
                job = getattr(getattr(job_module, v), cls_name)
                job_obj = job(self._logger)
                schedule_obj = job_obj.schedule()
                if schedule_obj is not None:
                    schedule_obj(job_queue.put, v)
            except (ImportError, AttributeError) as e:
                print(e.__repr__())
            except Exception as e:
                print(e.__repr__())
        workers = []
        for i in range(0, self._worker_num):
            workers.append(
                Process(target=__class__._worker_main,
                        daemon=daemon,
                        kwargs={
                            'worker_no': i,
                            'jobs_module_name': self._jobs_module,
                            'queue': job_queue,
                            'logger': self._logger
                        }))
            workers[i].start()

        while 1:
            schedule.run_pending()
            time.sleep(1)
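A minimal sketch of the dispatch pattern this method sets up: the scheduler puts job names onto a bounded multiprocessing.Queue and worker processes pull them off. The worker body and job names below are hypothetical stand-ins for _worker_main and the discovered job modules.

from multiprocessing import Process, Queue as PQueue

def worker_main(worker_no, queue):
    while True:
        job_name = queue.get()  # blocks until the scheduler enqueues a job
        if job_name is None:    # sentinel: shut this worker down
            break
        print("worker %d running %s" % (worker_no, job_name))

if __name__ == "__main__":
    job_queue = PQueue(20)
    workers = [Process(target=worker_main, args=(i, job_queue)) for i in range(2)]
    for w in workers:
        w.start()
    for name in ("cleanup_job", "report_job"):  # stand-ins for module names
        job_queue.put(name)
    for _ in workers:
        job_queue.put(None)
    for w in workers:
        w.join()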
Example No. 7
# total number of packets to be generated
num_requests = 20000
#simulation_time = 100
look_ahead_time = 0

# Packet arrival rate
lam = 500000

# Scalar multipliers applied to the packet arrival rate at the various leaf nodes
arrival_rates = [1, 2, 3, 2, 4]
arrival_rates = [lam * i for i in arrival_rates]

# Not used
server_req_queue = []
for i in range(num_server):
    q = PQueue()
    server_req_queue.append(q)


# Returns the packets that are due to leave in the current iteration of the
# event-based approach. (Not actually used.)
def count_less_time_stamps(l1, threshold):
    #l2 = [i for i in l1 if i > threshold]
    l2 = []
    ini_len = len(l1)
    for i in list(l1):  # iterate over a copy so removing from l1 is safe
        if i > threshold:
            l2.append(i)
            l1.remove(i)

    return (l2, l1, ini_len - len(l1))
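A quick illustration of the helper above with a threshold of 4: timestamps greater than the threshold are split off into the first list and removed from the original.

stamps = [1, 5, 9, 3]
due, remaining, n_removed = count_less_time_stamps(stamps, 4)
print(due, remaining, n_removed)  # [5, 9] [1, 3] 2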
Example No. 8
    def __init__(self, config: dict, queue=PQueue()) -> None:
        # Note: the default PQueue() is evaluated once, at class-definition time,
        # so every instance constructed without an explicit queue shares that queue.
        super().__init__(config, queue)
Example No. 9
def work(data):
    with lock:
        if pool.empty():
            for n in range(10):
                qin, quot, pin, pout = PQueue(), PQueue(), PQueue(), PQueue()
                p = Process(target=process.process,
                            args=(qin, quot, pin, pout))
                p.start()
                Thread(target=connector, args=(pin, pout, data, p)).start()
                pool.put((qin, quot, pin, pout, p))

    if q not in data['subproxy']:
        data['subproxy'].append(q)
    while 1:
        try:
            host, x = q.get()
            if host and x['n'] == 'exec':
                qin, quot, pin, pout, p = pool.get()
                qin.put((x.get('c'), x['v'], x['r'], x['j']))
                v = quot.get()
                pool.put((qin, quot, pin, pout, p))
                data['send'].put((host, {
                    'n': 'executed',
                    'v': v,
                    'r': x['r'],
                    'j': x['j']
                }))
            if x['n'] == 'run':
                if x.get('r'):
                    run_id = x['r']
                else:
                    run_id = str(data['x']['jobs'][x['v']]['last_build_id'])
                    data['x']['jobs'][x['v']]['last_build_id'] += 1
                data['x']['jobs'][x['v']]['history'][run_id] = {}
                data['x']['jobs'][
                    x['v']]['history'][run_id]['status'] = 'sheduled'
                send_history(data, x)
                # v=run(data['x']['jobs'][x['v']]['code'],data,x['vars'],run_id,x['v'])
                qin, quot, pin, pout, p = pool.get()
                data['x']['jobs'][
                    x['v']]['history'][run_id]['status'] = 'running'
                send_history(data, x)
                start_time = datetime.now()
                data['x']['jobs'][x['v']]['history'][run_id]['start'] = str(
                    start_time).split('.')[0]
                qin.put((data['x']['jobs'][x['v']]['code'], x['vars'], run_id,
                         x['v']))
                v = quot.get()
                pool.put((qin, quot, pin, pout, p))
                end_time = datetime.now()
                data['x']['jobs'][x['v']]['history'][run_id]['end'] = str(
                    end_time).split('.')[0]
                data['x']['jobs'][x['v']]['history'][run_id]['delta'] = str(
                    end_time - start_time).split('.')[0]
                data['x']['jobs'][x['v']]['history'][run_id][
                    'status'] = 'failed' if v.get('trace') else 'success'
                data['x']['jobs'][x['v']]['status'] = [
                    'failed' if v.get('trace') else 'success'
                ]
                send_history(data, x)
                # print(job['history'])
                # print(data['x']['jobs'][x['v']]['history'])
        except Exception:
            with open('err.log', 'a') as ff:
                traceback.print_exc()
                traceback.print_exc(file=ff)


# patch(open('exe/test.py',encoding='utf-8').read())
Example No. 10
from multiprocessing import Process
from multiprocessing import Queue as PQueue
from Queue import Queue as TQueue
import time
import multiprocessing
from connection import downLoad
from lxml import etree
import urlPattern
from urlExtractor import Extractor
from urlExtractor import Transmit
from urlExtractor import PTTransmit
from urlExtractor import DataExtractor
from mySQLInstance import initDbConnection

URL_QUEUE = PQueue()
LOG_QUEUE = PQueue()

CONTENT_CRAWLER_NUM = 4
CONTENT_CRAWLER_THREAD_NUM = 4

def urlCrawler(urlQueue, logQueue):
    rooturl =  {
        'lawfirm':  'http://www.legalminer.com/search/lawfirm?t=',
        'lawyer':   'http://www.legalminer.com/search/lawyer?t=',
        'court':    'http://www.legalminer.com/search/court?t=',
        'judge':    'http://www.legalminer.com/search/judge?t=',
        'corporate':'http://www.legalminer.com/search/corporate?t='
    }
    rootNum = {}
    for key, url in rooturl.items():
Example No. 11
# *****************************************************************
# Get a list of devices
# *****************************************************************
devices = mvnc.EnumerateDevices()
if len(devices) == 0:
    print("No devices found")
    quit()
print(devices)
devHandle = []
graphHandle = []
dispQ = []

# Create n Queues based on the number of sticks plugged in
for devnum in range(len(devices)):
    dispQ.append(PQueue())

# *****************************************************************
# Read graph file, mean subtraction file and Categories
# *****************************************************************
#Load graph - this is the converted model from caffe
with open(graph_folder + "/graph", mode="rb") as f:
    graph = f.read()

##RR  #Load the Mean subtraction file from BVLC Caffe area
##RR  ilsvrc_mean = numpy.load(mean_file).mean(1).mean(1) #loading the mean file

#Load preprocessing data
mean = 118
std = 1 / 128
Example No. 12
        elapsed = time.time() - start
        print "MSE %.06f; took %.1f seconds" % (mse, elapsed)
        # Give the worker thread a chance to do stuff.
        time.sleep(0.05)


@app.route('/training_example', methods=['POST'])
def training_example():
    payload = request.json
    for example in payload:
        x = synthgrad.json_to_ndarray(example['x'])
        y = synthgrad.json_to_ndarray(example['y'])

        current_app.q.put((example['i'], x, y))

    return jsonify(ok=True)


if __name__ == '__main__':
    training_queue = PQueue()
    with app.app_context():
        current_app.q = training_queue

    trainer_proc = Process(target=train_forever, args=(training_queue, ))
    trainer_proc.start()
    try:
        app.run('localhost', 5000, debug=False)
    finally:
        training_queue.put(None)
        trainer_proc.join()
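The snippet only shows the producer side. A minimal sketch of what a train_forever-style consumer might look like, assuming it receives the (i, x, y) tuples queued above and stops on the None sentinel pushed in the finally block:

def train_forever(q):
    while True:
        item = q.get()    # blocks until the web process enqueues an example
        if item is None:  # sentinel: the server is shutting down
            break
        i, x, y = item
        # a real trainer would fit its local model on (x, y) here
        print("received training example", i)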
Example No. 13
from multiprocessing.pool import ThreadPool as Pool
from multiprocessing import Queue as PQueue
import Queue

my_dict = {
    'url1': 'url2',
    'url3': 'url4',
}

my_q = PQueue()


def test_p(uq):
    q, url = uq[0], uq[1]
    q.put(url, False)


def main():
    global my_dict
    global my_q
    print "Going to process (%d)" % len(my_dict.keys() + my_dict.values())
    p = Pool(processes=8)
    print p.map(test_p, [(my_q, url) for url in my_dict.keys() + my_dict.values()])

    its = []
    while True:

        # If we go more than 5 seconds without something, stop waiting
        try:
            print "Waiting for item from queue for up to 5 seconds"
            i = my_q.get(True, 5)
Example No. 14
    def __init__(self):

        self.task_queue = PQueue()
        self.speed_test = pyspeedtest.SpeedTest()
        self.procs = list()