Example #1
def checkmultiprocess(ipqueue,cacheResult):
    if ipqueue.qsize() == 0:
        return
    processlist = []
    "如果ip数小于512,只使用一个子进程,否则则使用指定进程数,每个进程处理平均值的数量ip"
    max_threads = g_maxthreads
    maxprocess = g_useprocess
    if ipqueue.qsize() < g_maxthreads:
        max_threads = ipqueue.qsize()
        maxprocess = 1
    else:
        max_threads = (ipqueue.qsize() + g_useprocess) / g_useprocess
        if max_threads > g_maxthreads:
            max_threads = g_maxthreads
    #multiprocessing.log_to_stderr(logging.DEBUG)
    for i in xrange(0,maxprocess):
        p = Process(target=callsingleprocess,args=(ipqueue,cacheResult,max_threads))
        p.daemon = True
        processlist.append(p)
        p.start()
    
    try:
        for p in processlist:
            p.join()
    except KeyboardInterrupt:
        PRINT("need wait all process end...")
        for p in processlist:
            if p.is_alive():
                p.terminate()  
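callsingleprocess and the g_* globals are not shown above. A minimal Python 3 sketch of the same fan-out pattern, several worker processes draining one shared queue, with illustrative names that are not part of the original API:

from multiprocessing import Process, Queue

SENTINEL = None

def drain_worker(task_queue, result_queue):
    # take IPs until the poison pill is seen, then exit
    for ip in iter(task_queue.get, SENTINEL):
        result_queue.put((ip, "checked"))      # placeholder for the real check

def check_all(ips, num_procs=4):
    task_queue, result_queue = Queue(), Queue()
    for ip in ips:
        task_queue.put(ip)
    for _ in range(num_procs):                 # one pill per worker
        task_queue.put(SENTINEL)
    procs = [Process(target=drain_worker, args=(task_queue, result_queue), daemon=True)
             for _ in range(num_procs)]
    for p in procs:
        p.start()
    # drain the result queue before joining to avoid blocking on a full pipe
    results = [result_queue.get() for _ in range(len(ips))]
    for p in procs:
        p.join()
    return results

if __name__ == "__main__":
    print(check_all(["1.1.1.1", "8.8.8.8"]))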
Example #2
    def run(self):
        parent_pipe, child_pipe = Pipe(False)
        p = Process(target = run_metaheuristic,
                    args = (child_pipe, self.model, self.pt, self.aa,
                            self.algo, self.n, self.use_heur,
                            self.worst, self.best))
        p.start()

        for i in range(self.n + 1):
            if self.is_stopped() is True:
                parent_pipe.close()
                p.terminate()
                return

            try:
                result = parent_pipe.recv()
            except:
                break

            self.results.append(result[0])
            self.fitness.append(result[1])
            self.emit(QtCore.SIGNAL('update(int)'), i)

            if result[1] == 1:
                break

        parent_pipe.close()
        p.join()
Example #3
def load_link(browser, link):
    ''' Return true if load successful, false otherwise. '''

    while True:

        p = Process(target=browser_get, args=(browser, link))
        p.start()
        p.join(LOAD_TIME)
        if p.is_alive():
            p.terminate()
        else:
            break

    # initialise the wait time once, outside the loop, so the back-off
    # below can actually grow between attempts
    wait_time = READY_TIME

    while True:

        start_time = time.time()

        ''' Wait for page to have completely loaded. '''
        while True:
            state = browser.execute_script('return document.readyState;')
            if state == 'complete':
                return True
            if time.time() - start_time > wait_time:
                logging.info("Document %s not ready after %ds", link, wait_time)
                break
            time.sleep(1)

        wait_time = wait_time * READY_RATIO
        if wait_time > MAX_READY_TIME * READY_RATIO:
            logging.error("Skipping document %s.  Was never ready.", link)
            return False
        else:
            logging.info("Increasing wait time to %ds", wait_time)
Example #4
def nct_tagging(index_name, host, port_no, process_ids,
                stopwords, umls, pos, nprocs=1):

    # open the clinical trial ids file to process
    nct_ids = []
    for line in open(process_ids, 'rb'):
        nct_ids.append(line.strip())

    # Check if index exists
    index = es_index.ElasticSearch_Index(index_name, host=host, port=port_no)
    index.add_field('ec_tags_umls', term_vector=True)

    # process each clinical trial and store it to an XML file
    log.info('processing clinical trials')
    procs = []
    chunksize = int(math.ceil(len(nct_ids) / float(nprocs)))
    for i in xrange(nprocs):
        p = Process(target=_worker, args=(nct_ids[chunksize * i:chunksize * (i + 1)],
                                          index_name, host, port_no,
                                          stopwords, umls, pos, (i + 1)))
        procs.append(p)
        p.start()

    for p in procs:
        p.join()
Example #5
def _get_output_shape(model_fn):
    if K.backend() == 'cntk':
        # Create model in a subprocess so that
        # the memory consumed by InceptionResNetV2 will be
        # released back to the system after this test
        # (to deal with OOM error on CNTK backend).
        # TODO: remove the use of multiprocessing from these tests
        # once a memory clearing mechanism
        # is implemented in the CNTK backend.
        def target(queue):
            model = model_fn()
            queue.put(model.output_shape)
        queue = Queue()
        p = Process(target=target, args=(queue,))
        p.start()
        p.join()
        # The error in a subprocess won't propagate
        # to the main process, so we check if the model
        # is successfully created by checking if the output shape
        # has been put into the queue
        assert not queue.empty(), 'Model creation failed.'
        return queue.get_nowait()
    else:
        model = model_fn()
        return model.output_shape
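The helper above is an instance of a general trick: run a function in a throwaway process so that its memory is returned to the OS, and pass the (small) result back through a Queue. A hedged, generic sketch of that idea, with hypothetical names:

from multiprocessing import Process, Queue

def _runner(q, fn, args):
    # child side: compute the result and hand it back to the parent
    q.put(fn(*args))

def call_in_subprocess(fn, *args):
    """Run fn(*args) in a throwaway child process and return its result.

    Intended for small return values; the point is that any memory the call
    allocates is released back to the OS as soon as the child exits.
    """
    q = Queue()
    p = Process(target=_runner, args=(q, fn, args))
    p.start()
    p.join()
    # a crash in the child does not propagate, so detect it via the queue
    assert not q.empty(), "subprocess call failed"
    return q.get_nowait()

if __name__ == "__main__":
    print(call_in_subprocess(sum, [1, 2, 3]))   # 6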
Example #6
def run_stock_parser():
    symbol_q = Queue()
    price_q = Queue()

    stock_symbols = []
    with open('symbols.txt', 'r') as symfile:
        for n, line in enumerate(symfile):
            sym = line.strip()
            if sym:
                stock_symbols.append(sym)

    ncpu = len([x for x in open('/proc/cpuinfo').read().split('\n')\
                if x.find('processor') == 0])

    pool = [Process(target=read_stock_worker, args=(symbol_q, price_q, )) for _ in range(ncpu * 4)]

    for p in pool:
        p.start()
    output = Process(target=write_output_file, args=(price_q, ))
    output.start()

    for symbol in stock_symbols:
        symbol_q.put(symbol)
    symbol_q.put(_sentinel)
    for p in pool:
        p.join()
    price_q.put(_sentinel)
    output.join()
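read_stock_worker and write_output_file are not shown, and only one _sentinel is queued for ncpu * 4 workers, so each worker presumably re-queues the pill before exiting. A hypothetical sketch consistent with the driver above:

_sentinel = None                     # assumed: a simple poison-pill marker

def get_price(symbol):
    return 0.0                       # placeholder for the real price lookup

def read_stock_worker(symbol_q, price_q):
    while True:
        symbol = symbol_q.get()
        if symbol is _sentinel:
            symbol_q.put(_sentinel)  # pass the pill on so every worker sees it
            break
        price_q.put((symbol, get_price(symbol)))

def write_output_file(price_q):
    with open('prices.csv', 'w') as out:
        for item in iter(price_q.get, _sentinel):
            out.write('%s,%s\n' % item)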
Example #7
def webgui(args):
    os.environ["FWDB_CONFIG"] = json.dumps(get_lp(args).to_dict())
    from fireworks.flask_site.app import app
    if args.wflowquery:
        app.BASE_Q_WF = json.loads(args.wflowquery)
    if args.fwquery:
        app.BASE_Q = json.loads(args.fwquery)
        if "state" in app.BASE_Q:
            app.BASE_Q_WF["state"] = app.BASE_Q["state"]

    if not args.server_mode:
        from multiprocessing import Process
        p1 = Process(
            target=app.run,
            kwargs={"host": args.host, "port": args.port, "debug": args.debug})
        p1.start()
        import webbrowser
        time.sleep(2)
        webbrowser.open("http://{}:{}".format(args.host, args.port))
        p1.join()
    else:
        from fireworks.flask_site.app import bootstrap_app
        try:
            from fireworks.flask_site.gunicorn import (
                StandaloneApplication, number_of_workers)
        except ImportError:
            import sys
            sys.exit("Gunicorn is required for server mode. "
                     "Install using `pip install gunicorn`.")
        options = {
            'bind': '%s:%s' % (args.host, args.port),
            'workers': number_of_workers(),
        }
        StandaloneApplication(bootstrap_app, options).run()
Example #8
def start_schedulers(options):
    apps = [app.strip() for app in options.scheduler.split(',')]
    try:
        from multiprocessing import Process
    except:
        sys.stderr.write('Sorry, -K only supported for python 2.6-2.7\n')
        return
    processes = []
    code = "from gluon import current;current._scheduler.loop()"
    for app in apps:
        if not check_existent_app(options, app):
            print "Application '%s' doesn't exist, skipping" % (app)
            continue
        print 'starting scheduler for "%s"...' % app
        args = (app,True,True,None,False,code)
        logging.getLogger().setLevel(options.debuglevel)
        p = Process(target=run, args=args)
        processes.append(p)
        print "Currently running %s scheduler processes" % (len(processes))
        p.start()
        print "Processes started"
    for p in processes:
        try:
            p.join()
        except (KeyboardInterrupt, SystemExit):
            print "Processes stopped"
        except:
            p.terminate()
            p.join()
Example #9
class ArtBox(object):
    def __init__(self, width, height):
        self._pen_comms = Pipe()
        self._paper_comms = Pipe()
        self._pen_ear, self._pen_mouth = Pipe()
        self._paper_ear, self._paper_mouth = Pipe()
        self._pen = pen.Pen()
        self._paper = paper.Paper(width=width, height=height)
        self._proc = Process(target=self._pen, args=(self._pen_comms, self._paper_comms))
        self._proc.daemon = True

    def add_resource_folder(self, folder_name):
        pyglet.resource.path.append(folder_name)
        pyglet.resource.reindex()

    def precache(self, asset_dict):
        for key in asset_dict:
            attributes = asset_dict[key]
            if len(attributes) == 1:
                self._paper._handle_command(Nibs.Cache(key, attributes[0]))
            elif len(attributes) == 2:
                self._paper._handle_command(Nibs.Cache(key, attributes[0], attributes[1]))

    def open(self):
        self._proc.start()
        self._paper.unfurl(self._pen_comms, self._paper_comms)
        self._proc.join(1)
        if self._proc.exitcode is None:
            self._proc.terminate()
Example #10
def apply_update(fname, status):
    # As soon as python-apt closes its opened files on object deletion
    # we can drop this fork workaround. As long as it keeps its files
    # open, we run the code in a separate fork; the files are then closed
    # on process termination and we can remount the filesystem read-only
    # without errors.
    p = Process(target=_apply_update, args=(fname, status))
    with rw_access("/", status):
        try:
            t_ver = get_target_version(fname)
        except BaseException:
            status.log('Reading xml-file failed!')
            return

        try:
            c_ver = get_current_version()
        except IOError as e:
            status.log('get current version failed: ' + str(e))
            c_ver = ""

        pre_sh(c_ver, t_ver, status)
        p.start()
        p.join()
        status.log("cleanup /var/cache/apt/archives")
        # don't use execute() here, it results in an error that the apt-cache
        # is locked. We currently don't understand this behaviour :(
        os.system("apt-get clean")
        if p.exitcode != 0:
            raise Exception(
                "Applying update failed. See logfile for more information")
        post_sh(c_ver, t_ver, status)
Example #11
def work_stop(local_ses,new_rcp_point):
	proto_all=logic.all_stops(local_ses)
	all_stops=api.use_api(new_rcp_point,proto_all)
	maps = pokemon_pb2.maps()
	maps.ParseFromString(all_stops)
	data_list=location.get_near(maps)
	data_list = sorted(data_list, key = lambda x: x[1])
	if len(data_list)>0:
		print '[+] found: %s Pokestops near'%(len(data_list))
		if local_ses is not None and data_list is not None:
			print '[+] starting show'
			if multi:
				a,b=split_list(data_list)
				p = Process(target=work_half_list, args=(a,local_ses.ses,new_rcp_point))
				o = Process(target=work_half_list, args=(b,local_ses.ses,new_rcp_point))
				p.start()
				o.start()
				p.join()
				o.join()
				print '[!] farming done..'
			else:
				for t in data_list:
					if config.debug:
						print '[!] farming pokestop..'
					work_with_stops(t,local_ses.ses,new_rcp_point)
	else:
		walk_random()
		work_stop(local_ses,new_rcp_point)
Example #12
        def fn_with_timeout(*args, **kwargs):
            conn1, conn2 = Pipe()
            kwargs['_conn'] = conn2
            th = Process(target=fn, args=args, kwargs=kwargs)
            th.start()
            if conn1.poll(self.trial_timeout):
                fn_rval = conn1.recv()
                th.join()
            else:
                print 'TERMINATING DUE TO TIMEOUT'
                th.terminate()
                th.join()
                fn_rval = 'return', {
                    'status': hyperopt.STATUS_FAIL,
                    'failure': 'TimeOut'
                }

            assert fn_rval[0] in ('raise', 'return')
            if fn_rval[0] == 'raise':
                raise fn_rval[1]

            # -- remove potentially large objects from the rval
            #    so that the Trials() object below stays small
            #    We can recompute them if necessary, and it's usually
            #    not necessary at all.
            if fn_rval[1]['status'] == hyperopt.STATUS_OK:
                fn_loss = float(fn_rval[1].get('loss'))
                fn_preprocs = fn_rval[1].pop('preprocs')
                fn_classif = fn_rval[1].pop('classifier')
                if fn_loss < self._best_loss:
                    self._best_preprocs = fn_preprocs
                    self._best_classif = fn_classif
                    self._best_loss = fn_loss
            return fn_rval[1]
Example #13
def main():
    # create session (listener)
    session = BaseXClient.Session('localhost', 1984, 'admin', 'admin')

    # create event
    session.execute("create event MY_EVENT");

    try:
        # register event watcher
        session.watch("MY_EVENT", dump_my_event)

        # fork child
        chp = Process(target=child, args=())
        chp.start()
        chp.join()

        # unregister event watcher
        session.unwatch("MY_EVENT")

    finally:
        # drop event
        session.execute("drop event MY_EVENT");

    # close session
    session.close()
Example #14
def main():
    """
    Creates instances of the above methods and occasionally checks for crashed
    worker processes & relaunches.
    """
    worker_process = list()
    get_update_process = Process(target=get_updates)
    get_update_process.start()
    for i in range(0, int(CONFIG['BOT_CONFIG']['workers'])):
        worker_process.append(Process(target=process_updates))
        worker_process[i].start()
    time_worker = ThreadProcess(target=check_time_args)
    time_worker.start()
    while RUNNING.value:
        time.sleep(30)
        for index, worker in enumerate(worker_process):
            if not worker.is_alive():
                # replace the dead worker in place instead of deleting from the
                # list while iterating over it (which would skip the next entry)
                worker_process[index] = Process(target=process_updates)
                worker_process[index].start()
        if not time_worker.is_alive():
            time_worker = ThreadProcess(target=check_time_args)
            time_worker.start()
        if not get_update_process.is_alive():
            get_update_process = Process(target=get_updates)
            get_update_process.start()
    get_update_process.join()
    time_worker.join()
    for worker in worker_process:
        worker.join()
Example #15
    def test():

        queue = Queue()

        proc = Process(target=doNothing, args=(queue, ))
        proc.start()

        _logger.info("Started dummy process with PID %d", proc.pid)
        startCodeCheckerServerAttachedToPid(proc.pid)
        time.sleep(3)
        _logger.info("Allowing the dummy process to finish")
        queue.put(1)
        proc.join()

        if utils.isProcessRunning(proc.pid):
            _logger.warning("Dummy process %d was still running", proc.pid)
            proc.terminate()
            time.sleep(1)
            it.assertFalse(utils.isProcessRunning(proc.pid),
                           "Process %d is still running after terminating "
                           "it!" % proc.pid)

        time.sleep(1)
        _logger.info("Server should have died by now")

        with it.assertRaises(requests.ConnectionError):
            requests.post(it._url + '/get_diagnose_info')
Example #16
class emailSubsystem(object):
    def __init__(self):
        ### will move to Celery eventually; with Celery, the app would be able to periodically
        # wake up and check on replyQueue to see which emails were sent, which were not, and
        # what to do ...

        self.emailQueue = JoinableQueue()
        self.replyQueue = JoinableQueue()

        self.worker = Process(target=sendEmailWorker, args=(self.emailQueue, self.replyQueue))

    def start(self):
        # temporarily comment out starting a new process as it seems to leave zombies
        # and causes app not to start as max process limit is reached.
        #self.worker.start()
        return

    def shutdown(self):
        # post poison pill
        # wait on the queue to be done; ie join on emailQueue
        # wait on the worker process to die; ie join on worker

        self.emailQueue.put(None)
        self.emailQueue.join()
        self.worker.join()
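sendEmailWorker is not shown; the comments describe a poison-pill shutdown (put None, join the queue, then join the worker). A hypothetical worker that fits that protocol:

def send_email(job):
    pass                                # placeholder for the real SMTP delivery

def sendEmailWorker(emailQueue, replyQueue):
    while True:
        job = emailQueue.get()
        if job is None:                 # poison pill: acknowledge it and stop
            emailQueue.task_done()
            break
        try:
            send_email(job)
            replyQueue.put((job, 'sent'))
        except Exception as exc:
            replyQueue.put((job, 'failed: %s' % exc))
        finally:
            emailQueue.task_done()      # lets emailQueue.join() return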
Example #17
def submission():
	from CRABClient.UserUtilities import config
	config = config()
	config.General.workArea = '/nfs/dust/cms/user/%s/crab_kappa_skim-%s'%(getUsernameFromSiteDB(), date)
	check_path(config.General.workArea)
	config.General.transferOutputs = True
	config.General.transferLogs = True
	config.User.voGroup = 'dcms'
	
	config.JobType.pluginName = 'Analysis'
	config.JobType.psetName = 'kSkimming_run2_cfg.py'
	#config.JobType.inputFiles = ['Summer15_V5_MC.db']
	config.JobType.allowUndistributedCMSSW = True
	config.Site.blacklist = ["T2_BR_SPRACE"]
	config.Data.inputDBS = 'global'
	config.Data.splitting = 'FileBased'
	config.Data.unitsPerJob = 1
	config.Data.outLFNDirBase = '/store/user/%s/higgs-kit/skimming/%s'%(getUsernameFromSiteDB(), date)
	config.Data.publication = False
	
	config.Site.storageSite = "T2_DE_DESY"
	# load nicknames from gc-style config files and write them to a flat nicknames list
	nicknames = read_grid_control_includes(["samples/13TeV/Fall15_SM_Analysis.conf"])
	#nicknames = ['SUSYGluGluToHToTauTauM160_RunIIFall15MiniAODv2_76X_13TeV_MINIAOD_pythia8']

	# loop over datasets and get respective nicks
	for nickname in nicknames:
		config.General.requestName = nickname
		config.JobType.pyCfgParams = ['globalTag=76X_dataRun2_16Dec2015_v0' if isData(nickname) else 'globalTag=76X_mcRun2_asymptotic_RunIIFall15DR76_v1' ,'kappaTag=KAPPA_2_1_0','nickname=%s'%(nickname),'outputfilename=kappa_%s.root'%(nickname),'testsuite=False']
		config.JobType.outputFiles = ['kappa_%s.root'%(nickname)]
		config.Data.inputDataset = get_sample_by_nick(nickname)
		p = Process(target=submit, args=(config,))
		p.start()
		p.join()
Example #18
class ServerProc(object):
    def __init__(self):
        self.proc = None
        self.daemon = None
        self.stop = Event()

    def start(self, init_func, config, paths, port):
        self.proc = Process(target=self.create_daemon, args=(init_func, config, paths, port))
        self.proc.daemon = True
        self.proc.start()

    def create_daemon(self, init_func, config, paths, port):
        try:
            self.daemon = init_func(config, paths, port)
        except socket.error:
            logger.error("Socket error on port %s" % port)
            raise

        if self.daemon:
            self.daemon.start(block=False)
            try:
                self.stop.wait()
            except KeyboardInterrupt:
                pass

    def wait(self):
        self.stop.set()
        self.proc.join()

    def kill(self):
        self.stop.set()
        self.proc.terminate()
        self.proc.join()
Example #19
def createProcess2():  # Works on Windows
    print('Parent process %s.' % os.getpid())
    p = Process(target=run_process, args=('demo_process',))
    print('Child process start.')
    p.start()
    p.join()
    print('Child process end.')
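On Windows (or any spawn-based start method) the child re-imports the module, so the process-creating call must sit under an `if __name__ == '__main__':` guard. A minimal complete script around the example above; the body of run_process is assumed:

from multiprocessing import Process
import os

def run_process(name):
    # assumed child body: just report who we are
    print('Run child process %s (pid %s)...' % (name, os.getpid()))

def createProcess2():
    print('Parent process %s.' % os.getpid())
    p = Process(target=run_process, args=('demo_process',))
    print('Child process start.')
    p.start()
    p.join()
    print('Child process end.')

if __name__ == '__main__':   # required on Windows, where the child re-imports this module
    createProcess2()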
Example #20
class PluginRunner:
  def __init__(self, plugin):
    self.name = plugin
    self.proc = None
    self.running = False
    self.local_pipe, self.remote_pipe = Pipe()

  def getConnection(self):
    return self.local_pipe

  def start(self):
    assert not self.running, "Already running."
    self.running = True
    self.thread = Thread(target=self.run)
    self.thread.start()

  def restart(self):
    self.proc.terminate()

  def stop(self):
    assert self.running, "Not running."
    self.running = False
    self.proc.terminate()
    self.thread.join()
    self.remote_pipe.close()
    self.local_pipe.close()

  def run(self):
    while self.running:
      self.proc = Process(target=launch, args=('repeat', self.remote_pipe))
      self.proc.start()
      print("Waiting on proc to end")
      self.proc.join()
Example #21
    def __parseClub(self):
        """
        get all posts urls in a club
        url - the root url of a  club
        initDeadline - the default timestamp when there is no file existed
        """

        while not self.__PageUrlQueue.empty() and \
            self.__parsePage(self.__PageUrlQueue.get()):
            if len(self.__PostUrlList) > 50:
                break

        print("Length of List:%d", len(self.__PostUrlList))

        processes = []
        while len(self.__PostUrlList):
            listLen=len(self.__PostUrlList)
            if MaxProcessNum > listLen:
                processNum = listLen
            else:
                processNum = MaxProcessNum
            batch = []
            for i in range(processNum):
                url = self.__PostUrlList.pop()
                postParser = PostParser(self.__url, url)
                subProcess = Process(target=postParser.parse)
                processes.append(subProcess)
                batch.append(subProcess)
                subProcess.start()
            # join after the whole batch has started so the parsers actually
            # run in parallel rather than one at a time
            for subProcess in batch:
                subProcess.join()

        print('Done retrieving all posts of club url : %s' % self.__url)
        return True
Example #22
def recoverPRNGState(cookie,timeMillisEstimate,PRNGMillisEstimate,IPAddr,serverPort,numWorkers,chunkSize):
    global PRNGMillisDelta
    global initalSeek
    
    q = Queue(0)
    i = 0
    
    if PRNGMillisDelta%chunkSize > 0:
        q.put((PRNGMillisEstimate+PRNGMillisDelta-PRNGMillisDelta%chunkSize,PRNGMillisEstimate+PRNGMillisDelta,initalSeek))
    
    for i in range(PRNGMillisEstimate,PRNGMillisEstimate+PRNGMillisDelta-PRNGMillisDelta%chunkSize,chunkSize):
        q.put((i,i+chunkSize,initalSeek))
        
    desc = []

    seedValue = Value('d', 0)

    # Start worker processes and assign work.                     
    for i in range(numWorkers):
        p = Process(target=recoverPRNGStateWorker, args=(cookie,timeMillisEstimate,q,IPAddr,serverPort,seedValue))
        p.start()
        desc.append(p)
        
    # Wait for worker processes to finish.
    for p in desc:
        p.join()

    return long(seedValue.value)
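The workers above report their answer through a shared multiprocessing.Value('d', 0). A minimal Python 3 sketch of that mechanism on its own (names are illustrative):

from multiprocessing import Process, Value

def search_chunk(start, stop, found):
    # pretend the value we are brute-forcing is 42
    for candidate in range(start, stop):
        if candidate == 42:
            found.value = candidate      # write into shared memory
            return

if __name__ == '__main__':
    found = Value('d', 0)                # 'd' = double, visible to all children
    workers = [Process(target=search_chunk, args=(i * 25, (i + 1) * 25, found))
               for i in range(4)]
    for w in workers:
        w.start()
    for w in workers:
        w.join()
    print(int(found.value))              # 42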
Example #23
class New_Process_Actor(Actor):
    '''Create an Actor in a new process. Connected as usual with scipysim 
    channels. When this Actor is started, it launches a new process, creates
    an instance of the Actor class passed to it in a second thread, and starts
    that actor.
    '''
    def __init__(self, cls, *args, **kwargs):
        super(New_Process_Actor, self).__init__()
        self.cls = cls
        self.args = list(args)
        self.kwargs = kwargs
        self.mqueue = MQueue()
        self.mevent = MEvent()
        
        if 'input_channel' not in kwargs:
            kwargs['input_channel'] = self.args[0]
        
        chan = kwargs['input_channel']
        kwargs['input_channel'] = self.mqueue
        
        
        print 'chan: ', chan
        self.c2p = Channel2Process(chan, self.mevent, self.mqueue)
        
        self.c2p.start()


    def run(self):
        self.t = Process(target=target, args=(self.cls, self.args, self.kwargs))
        self.t.start()
        self.mevent.set() # signal that process is ready to receive
        self.c2p.join()
        self.t.join()
Example #24
 def wrapper(*args, **kwargs):
     process = Process(None, func, None, args, kwargs)
     process.start()
     process.join(seconds)
     if process.is_alive():
         process.terminate()
         raise TimeoutError(error_message)
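The snippet above is the inner function of what looks like a timeout decorator; seconds, func, error_message and TimeoutError come from the enclosing scope. A hedged reconstruction of that outer decorator (assumed, not the original source; with the default fork start method the wrapped function does not need to be picklable):

import functools
import time
from multiprocessing import Process

class TimeoutError(Exception):
    # assumed: the original module defines (or imports) its own TimeoutError
    pass

def timeout(seconds, error_message='call timed out'):
    """Assumed outer decorator: run the wrapped call in a child process and
    terminate it if it runs longer than `seconds`."""
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            process = Process(None, func, None, args, kwargs)
            process.start()
            process.join(seconds)
            if process.is_alive():
                process.terminate()
                raise TimeoutError(error_message)
        return wrapper
    return decorator

@timeout(1)
def slow():
    time.sleep(10)

if __name__ == '__main__':
    try:
        slow()
    except TimeoutError as exc:
        print(exc)                       # call timed out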
Example #25
def send_probe_requests(interface=None, ssid=None):

    # initialize shared memory
    results = Queue()

    # start sniffer before sending out probe requests
    p = Process(target=sniffer, args=(interface, results,))
    p.start()

    # give sniffer a chance to initialize so that we don't miss
    # probe responses
    time.sleep(3)

    # send out probe requests... sniffer will catch any responses
    ProbeReq(ssid=ssid, interface=interface)

    # make sure to get results from shared memory before allowing 
    # sniffer to join with parent process 
    probe_responses = results.get()

    # join sniffer with its parent process
    p.join()

    # return results
    return probe_responses
Example #26
def start_schedulers(apps='w2p_tvseries'):
    try:
        from multiprocessing import Process
    except:
        sys.stderr.write('Sorry, -K only supported for python 2.6-2.7\n')
        return
    processes = []
    apps = [app.strip() for app in apps.split(',')]
    code = "from gluon import current; current._scheduler.max_empty_runs=10; current._scheduler.loop()"
    logging.getLogger().setLevel(logging.INFO)
    if len(apps) == 1:
        print 'starting single-scheduler for "%s"...' % apps[0]
        run(apps[0], True, True, None, False, code)
        return
    for app in apps:
        print 'starting scheduler for "%s"...' % app
        args = (app, True, True, None, False, code)
        p = Process(target=run, args=args)
        processes.append(p)
        print "Currently running %s scheduler processes" % (len(processes))
        p.start()
        print "Processes started"
    for p in processes:
        try:
            p.join()
        except (KeyboardInterrupt, SystemExit):
            print "Processes stopped"
        except:
            p.terminate()
            p.join()
Example #27
def make_time_series_plot_wrapper(input_file='', prefix='temp'):
    ''' wrapper around make_time_series_plot '''
    from .audio_utils import make_time_series_plot
    tmp_ = Process(target=make_time_series_plot, args=(input_file, prefix,))
    tmp_.start()
    tmp_.join()
    return 'Done'
Example #28
def watcher():
    """This little code snippet is from
    http://greenteapress.com/semaphores/threading_cleanup.py (2012-07-31)
    It's now possible to interrupt the testrunner via ctrl-c at any time
    in a platform neutral way."""
    if sys.platform == 'win32':
        p = Process(target=main, name="MainProcess")
        p.start()
        try:
            p.join()
            rc = p.exitcode
            if rc > 0:
                sys.exit(rc)
        except KeyboardInterrupt:
            print 'KeyBoardInterrupt'
            p.terminate()
    else:
        child = os.fork()
        if child == 0:
            main() # child runs test
        try:
            rc = os.waitpid(child, 0)[1] /256 # exit status is the high order byte of second member of the tuple
            if rc > 0:
                sys.exit( rc )
        except KeyboardInterrupt:
            print 'KeyBoardInterrupt'
            try:
                os.kill(child, signal.SIGKILL)
            except OSError:
                pass
        except OSError:
            pass

    sys.exit()
Example #29
File: parse.py Project: clly/strmr
def pullMusic(folders):	
	""" 
		Walk through the music folders and create song objects.  
		Return an array
	"""
	print "Start Parsing Folders!"
	lock = Lock()
	dbQueue = Queue()
	
	# create a process for each music folder in the configuration file
	for folder in folders:
		walker = Process(target=worker.walker, args=(folder, dbQueue, lock,))
		walker.start()
	while dbQueue.empty():
		pass
	
	# create a process to enter files from the dbQueue into the database
	enterdb = Process(target=worker.enterDB, args=(dbQueue, lock))
	enterdb.start()

	# wait until enterDB is finished before starting
	# This can be taken out later.  I want complete information for testing
	enterdb.join()
	
	print "Done!"
Example #30
def main(): 
    parca =[]
    counter = 0
    fo = open("C:\Users\Toshiba-PC\Desktop\Dagitik\metin.txt","r")
    text = fo.read(l)
    while text != "":
       text = text.lower()
       parca.append(text)
       counter += l
       fo.seek(counter)
       text = fo.read(l)
    fo.close()
        
    #print parca

    work_queue = Queue()
    done_queue = Queue()
    processes = []
    
    for data in parca: 
        work_queue.put(data)
        
    for w in xrange(n): 
        p = Process(target=worker, args=(work_queue, done_queue)) 
        p.start() 
        processes.append(p) 
        work_queue.put('STOP')
    print processes
    
    for p in processes: 
        p.join()
    done_queue.put('STOP')
    for status in iter(done_queue.get, 'STOP'): 
        print status
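worker, l and n are defined elsewhere in that script; a hypothetical worker compatible with the 'STOP' pill and the done_queue reporting used above might look like:

import os

def worker(work_queue, done_queue):
    # count words in each text chunk until the 'STOP' pill arrives,
    # then report a one-line summary on done_queue
    counted = 0
    for text in iter(work_queue.get, 'STOP'):
        counted += len(text.split())
    done_queue.put('worker %s counted %d words' % (os.getpid(), counted))
    return True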
Example #31
def main():

    # Internal function to get help message
    def get_help_message():
        msg = ""
        msg = msg + "To transition ground station into these modes, enter commands: " + "\n"
        msg = msg + "Contact mode:                      [C] " + "\n"
        # msg = msg + "Downlink mode: [D] " + "\n"
        msg = msg + "Keep beacons quiet:                [Q] " + "\n"
        msg = msg + "Turn on beacons:                   [U] " + "\n"
        msg = msg + "Terminate Script:                  [Z] " + "\n"
        msg = msg + "Display this help message:         [H]" + "\n"
        return msg

    try:
        # Check for mission folder
        if not os.path.exists(GROUND_STN_MISSION_FOLDER_PATH):
            os.makedirs(GROUND_STN_MISSION_FOLDER_PATH)

        # Check for hk logs folder
        if not os.path.exists(GROUND_STN_OBC_HK_FOLDER_PATH):
            os.makedirs(GROUND_STN_OBC_HK_FOLDER_PATH)

        # Initialize serial ports for TT&C transceiver
        ttnc_port = input("Enter COM port for TT&C transceiver: ")
        serial_ttnc = serial.Serial(ttnc_port, 9600, timeout=10)

        # Create pipes to communicate with beacon process
        conn_process_beacon, conn_main_process = Pipe(duplex=True)

        # Initialize serial ports for payload transceiver
        payload_port = input("Enter COM port for Payload transceiver: ")
        serial_payload = serial.Serial(payload_port, 115200, timeout=None)

        # Initialize background scheduler for Downlink task
        scheduler = BackgroundScheduler()
        scheduler.start()

        # Enter Autonomous mode to wait for beacons
        process_beacon_collection = Process(target=handle_incoming_beacons,
                                            args=(serial_ttnc,
                                                  conn_process_beacon),
                                            daemon=True)

        run_flag = True

        while run_flag:

            # Initial begin
            print()
            print("---- GROUND STATION ----")
            init_response = input("To begin, enter [Y]... ")
            if init_response.lower() == 'y':
                # Carry on running script
                print()
                pass
            else:
                print()
                print("Exiting script...")
                break

            # Begin Autonomous Mode
            print("Entering Autonomous Mode...")
            print()
            process_beacon_collection.start()

            # Wait for trigger to enter other modes
            print("---- WAITING FOR COMMANDS ----")
            print(get_help_message())

            while run_flag:
                choice = input()
                print()

                if choice.lower() == 'h':
                    print(get_help_message())

                elif choice.lower() == 'c':

                    # Stop beacon receiving process
                    conn_main_process.send("stop")
                    process_beacon_collection.join()

                    # Start contact mode process
                    print("Start Contact mode process")
                    telecommand_type, ts = handle_contact_mode(serial_ttnc)

                    # Schedule downlink task
                    if telecommand_type == TELECOMMAND_TYPE_MISSION_DOWNLINK:
                        # Subtract 2 mins from time stamp
                        ts = ts - timedelta(minutes=2)

                        scheduler.add_job(handle_downlink_task,
                                          next_run_time=ts,
                                          args=[serial_payload])

                        print("Scheduled downlink job")
                        print()

                    # Resume beacon collection after contact mode process ends
                    print("Restart beacon collection process")
                    print()
                    process_beacon_collection = Process(
                        target=handle_incoming_beacons,
                        args=(serial_ttnc, conn_process_beacon),
                        daemon=True)
                    process_beacon_collection.start()

                elif choice.lower() == 'q':
                    print("Verbose mode now\n")
                    conn_main_process.send("verbose on")
                    pass

                elif choice.lower() == 'u':
                    print("Verbose mode off\n")
                    conn_main_process.send("verbose off")
                    pass

                elif choice.lower() == 'z':
                    conn_main_process.send("stop")
                    process_beacon_collection.join()
                    run_flag = False

                else:
                    print("Command not found...")
                    print()

    except KeyboardInterrupt:
        run_flag = False

    except serial.serialutil.SerialException:
        print("Invalid Serial port!")
        sys.exit()

    serial_payload.close()
    serial_ttnc.close()

    conn_main_process.close()
    conn_process_beacon.close()

    print("Terminated script")
    sys.exit()
Example #32
def FLS_TrainFun_parallel_1(Rule_num,Antecedents_num,InitialSetup_List,Xtrain,Ytrain,Xpredict,Ypredict=None,\
    modeName='Mamdani',modeType=2,predictMode=True,optimizer=tf.keras.optimizers.Adam(0.05),\
    lossFunction=tf.keras.losses.mean_squared_error,batchSIZE=1,epoch=5,subMode_learningRate=tf.constant(0.01),processesNum=None):

    startime = time.time()

    Mode_Name = 'ST' + str(modeType) + 'FLS_' + modeName
    Mode = eval(Mode_Name +
                str((Rule_num, Antecedents_num, InitialSetup_List)))

    print('******************************************************************')
    #print(Mode_Name+'.variables',Mode.variables)
    print('******************************************************************')
    print(Mode_Name + '.trainable_variables:', Mode.trainable_variables)
    print('******************************************************************')

    if len(Xtrain) < batchSIZE or len(Xtrain) < processesNum:
        print(
            'Warning! The number of training samples must be greater than the batch size and the number of processes!'
        )

    Block_SizeOfProcesses = len(Xtrain) // processesNum

    Loss_save = np.zeros(epoch)
    for epoch_id in range(epoch):
        print('>>>>>>>>>>>>>>>>>>>>>>>epoch:{}/{}<<<<<<<<<<<<<<<<<<<<<<<<<<<'.
              format(epoch_id + 1, epoch))
        Epoch_sample_id = random.sample(range(0, len(Xtrain)), len(Xtrain))
        Xtrain_epoch = Xtrain[Epoch_sample_id, :]
        Ytrain_epoch = Ytrain[Epoch_sample_id]

        Grade_subMode = Queue()
        subModes = []
        for subMode_id in range(processesNum):
            submode = Process(target= SubMode_train ,args = (Mode,lossFunction,\
                Xtrain_epoch[subMode_id*Block_SizeOfProcesses:(subMode_id+1)*Block_SizeOfProcesses,:],\
                Ytrain_epoch[subMode_id*Block_SizeOfProcesses:(subMode_id+1)*Block_SizeOfProcesses], \
                batchSIZE,Grade_subMode,subMode_learningRate))
            subModes.append(submode)
            submode.start()

        for submode in subModes:
            submode.join()

        Grades_set = Grade_subMode.get()
        saveloss = Grades_set[1]
        for i_num in range(1, processesNum, 1):
            q_g = Grade_subMode.get()
            for j_num in range(len(q_g[0])):
                Grades_set[0][j_num] = Grades_set[0][j_num] + q_g[0][j_num]
                Grades_set[2][j_num] = Grades_set[2][j_num] + q_g[2][j_num]
            saveloss += q_g[1]

        for j_num in range(len(Grades_set[0])):
            Grades_set[0][j_num] = Grades_set[0][j_num] / processesNum
            Grades_set[2][j_num] = Grades_set[2][j_num] / processesNum

        Mode.Setting_parameters(Grades_set[0])
        optimizer.apply_gradients(zip(Grades_set[2], Mode.trainable_variables))

        Loss_save[epoch_id] = tf.sqrt(saveloss / len(Xtrain))

        print('epoch:{}/{},loss:{}'.format(epoch_id + 1, epoch, saveloss))

    endtime = time.time()
    dtime = endtime - startime

    outputPredict = Mode(Xpredict)
    Loss_predict = tf.sqrt(
        lossFunction(Ypredict, outputPredict) / len(Xpredict))

    print(
        '>>>>>>>>>>>>>>>>>>>>>>> The program has ended! Total time:%.8f <<<<<<<<<<<<<<<<<<<<<<<<'
        % dtime)

    return Loss_save, Loss_predict, dtime
Example #33
        print("dataset %s has %d files" % (adataset, len(lfnList)))

        config.Data.userInputFiles = lfnList
        config.Data.splitting = 'FileBased'
        config.Data.unitsPerJob = 50

        config.JobType.outputFiles = ['ttbarreshad_predfile.root'
                                      ]  #, 'ttbarreshad_nanoskim.root' ]

        requestname = adataset.split('/')[-1]
        print 'requestname = ', requestname
        config.General.requestName = requestname
        config.Data.outputDatasetTag = requestname + '_' + options.tag
        print 'Submitting ' + config.General.requestName + ', dataset = ' + job
        print 'Configuration :'
        print config
        try:
            from multiprocessing import Process
            print 'submitting...'

            p = Process(target=submit, args=(config, ))
            p.start()
            p.join()
            #submit(config)
        except:
            print 'Not submitted.'


if __name__ == '__main__':
    main()
Example #34
#     results.append(result_)
#     results.append( ( min(width_list), dimensions, int(min_m), int(max_m), max_b ) )

diag = np.array([1] * (dimensions)).astype('i')
diag[-1] = max_minor
hermit = np.diag(diag)
# print hermit

from multiprocessing import Process, Value, Array, Manager

manager = Manager()
results = manager.list()

for pre_last_row in permutations(xrange(dimensions), dimensions - 1):
    hermit[dimensions - 1][:-1] = list(pre_last_row)

    for restriction in permutations(xrange(-max_minor, max_minor + 1),
                                    dimensions):
        p = Process(target=bruteforce, args=(results, restriction))
        p.start()
        p.join(600)
        if p.is_alive():
            print "running... let's kill it..."
            # Terminate
            p.terminate()
            p.join()

with open('res_new.csv', 'a') as f:
    f.write('This is the end... My dear friend' + '\r\n')
Example #35
        time.sleep(random.randint(1, 3))
        print('\033[43m%s ate %s\033[0m' % (name, res))
        q.task_done()


if __name__ == '__main__':
    q = JoinableQueue(5)
    p1 = Process(target=producer, args=(q, "liusir", "baozi"))  # producer
    p2 = Process(target=producer, args=(q, "miller", "cake"))  # producer
    p3 = Process(target=producer, args=(q, "yaqi", "slop"))  # producer

    c1 = Process(target=consumer, args=(q, "小白"))  # consumer
    c2 = Process(target=consumer, args=(q, "建波1号"))  # consumer
    c3 = Process(target=consumer, args=(q, "建波2号"))  # consumer
    c1.daemon = True
    c2.daemon = True
    c3.daemon = True

    c1.start()
    c2.start()
    c3.start()

    p1.start()
    p2.start()
    p3.start()

    p1.join()
    p2.join()
    p3.join()

    print("主")
Example #36
        help='use the roberta-large model instead of roberta-base')
    parser.add_argument('--learning-rate', type=float, default=2e-5)
    parser.add_argument('--weight-decay', type=float, default=0)
    args = parser.parse_args()

    nproc = int(
        subprocess.check_output([
            sys.executable, '-c', "import torch;"
            "print(torch.cuda.device_count() if torch.cuda.is_available() else 1)"
        ]))
    if nproc > 1:
        print(f'Launching {nproc} processes ...', file=sys.stderr)

        os.environ["MASTER_ADDR"] = '127.0.0.1'
        os.environ["MASTER_PORT"] = str(29500)
        os.environ['WORLD_SIZE'] = str(nproc)
        os.environ['OMP_NUM_THREADS'] = str(1)
        subprocesses = []

        for i in range(nproc):
            os.environ['RANK'] = str(i)
            os.environ['LOCAL_RANK'] = str(i)
            process = Process(target=run, kwargs=vars(args))
            process.start()
            subprocesses.append(process)

        for process in subprocesses:
            process.join()
    else:
        run(**vars(args))
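run() itself is not shown; under the environment variables exported above, a per-process entry point would typically initialise torch.distributed from the environment and select its GPU by LOCAL_RANK. A hedged sketch, not the project's actual run():

import os
import torch
import torch.distributed as dist

def run(**kwargs):
    rank = int(os.environ.get('RANK', 0))
    local_rank = int(os.environ.get('LOCAL_RANK', rank))
    # init_method='env://' reads MASTER_ADDR, MASTER_PORT, RANK and WORLD_SIZE
    backend = 'nccl' if torch.cuda.is_available() else 'gloo'
    dist.init_process_group(backend=backend, init_method='env://')
    if torch.cuda.is_available():
        torch.cuda.set_device(local_rank)
    # ... build the model, wrap it in DistributedDataParallel, and train with
    # the learning-rate / weight-decay arguments passed in via kwargs ...
    dist.destroy_process_group()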
Example #37
class TestRabbitQueueConsumerProducer(unittest.TestCase):
    @staticmethod
    def consume_filter(message: Dict) -> Tuple[List[Dict], bool]:
        if message["type"] == "A":
            return [message], False
        return [], False

    @staticmethod
    def publish_multiple(message: Dict, idempotency_set=None) -> Tuple[List[Dict], bool]:
        if message and idempotency_set and message in idempotency_set:
            return [], False
        if idempotency_set and message:
            idempotency_set.prepare(message)
            idempotency_set.commit()
        if isinstance(message["value"], int) or isinstance(message["value"], float):
            return [{"type": message["type"]}] * int(message["value"]), False
        return [], False

    @staticmethod
    def republish_and_stop_with_key_z(message: Dict, idempotency_set=None) -> Tuple[List[Dict], bool]:
        if message and idempotency_set and message in idempotency_set:
            return [], False
        if idempotency_set and message:
            idempotency_set.prepare(message)
        if message['key'] != 'Z':
            return [message], False
        else:
            return [message], True

    def _start_process(self, func: StateCommiter, messages_to_group=1):
        RabbitQueueConsumerProducer("localhost", CONSUME_QUEUE, [RESPONSE_QUEUE], func,
                                    messages_to_group=messages_to_group)()

    @staticmethod
    def _read_process(write_pipe: Pipe):

        def consume(write_pipe, ch, method, properties, body):
            write_pipe.send(body)

        connection = pika.BlockingConnection(pika.ConnectionParameters(host="localhost"))
        channel = connection.channel()
        channel.basic_consume(queue=RESPONSE_QUEUE,
                              on_message_callback=partial(consume, write_pipe),
                              auto_ack=True)
        channel.start_consuming()

    def setUp(self) -> None:
        try:
            from pytest_cov.embed import cleanup_on_sigterm
        except ImportError:
            pass
        else:
            cleanup_on_sigterm()
        shutil.rmtree('/tmp/message_set', ignore_errors=True)
        os.mkdir('/tmp/message_set')
        self.message_set = DiskMessageSet('/tmp/message_set', recover_state_on_init=True)
        self.recv_pipe, self.write_pipe = Pipe(False)
        self.connection = pika.BlockingConnection(pika.ConnectionParameters(host="localhost"))
        self.channel = self.connection.channel()
        self.channel.queue_declare(queue=CONSUME_QUEUE)
        self.channel.queue_declare(queue=RESPONSE_QUEUE)
        self.channel.queue_purge(CONSUME_QUEUE)
        self.channel.queue_purge(RESPONSE_QUEUE)
        self.test_process = None
        self.consume_process = Process(target=self._read_process, args=(self.write_pipe,))
        self.consume_process.start()

    def tearDown(self) -> None:
        self.channel.queue_purge(CONSUME_QUEUE)
        self.channel.queue_purge(RESPONSE_QUEUE)
        if self.test_process:
            self.test_process.terminate()
        self.consume_process.terminate()
        shutil.rmtree('/tmp/message_set', ignore_errors=True)

    def test_simple_filter(self):
        self.test_process = Process(target=self._start_process, args=(DummyStateCommiter(self.consume_filter),))
        self.test_process.start()
        self.channel.queue_declare(queue=CONSUME_QUEUE)
        self.channel.basic_publish(exchange='', routing_key=CONSUME_QUEUE,
                                   body=json.dumps({"type": "A", "value": 4.2}))
        self.channel.basic_publish(exchange='', routing_key=CONSUME_QUEUE,
                                   body=json.dumps({"type": "B", "value": 5}))
        self.channel.basic_publish(exchange='', routing_key=CONSUME_QUEUE,
                                   body=json.dumps({"type": "C", "value": "a"}))
        self.channel.basic_publish(exchange='', routing_key=CONSUME_QUEUE,
                                   body=json.dumps({"type": "D", "value": 4}))
        self.channel.basic_publish(exchange='', routing_key=CONSUME_QUEUE,
                                   body=json.dumps({"type": "A", "value": 2.2}))
        self.channel.basic_publish(exchange='', routing_key=CONSUME_QUEUE,
                                   body=json.dumps({"type": "A", "value": 4.1}))
        self.channel.basic_publish(exchange='', routing_key=CONSUME_QUEUE,
                                   body=json.dumps([{"type": "A", "value": None},
                                                    {"type": "V", "value": None}]))
        processed_data = []
        for _ in range(4):
            processed_data.append(json.loads(self.recv_pipe.recv()))
        self.assertFalse(self.recv_pipe.poll(1))
        self.assertEqual(processed_data[0], {"type": "A", "value": 4.2})
        self.assertEqual(processed_data[1], {"type": "A", "value": 2.2})
        self.assertEqual(processed_data[2], {"type": "A", "value": 4.1})
        self.assertEqual(processed_data[3], {"type": "A", "value": None})

    def test_simple_filter_with_grouping(self):
        self.test_process = Process(target=self._start_process, args=(DummyStateCommiter(self.consume_filter), 2))
        self.test_process.start()
        self.channel.queue_declare(queue=CONSUME_QUEUE)
        self.channel.basic_publish(exchange='', routing_key=CONSUME_QUEUE,
                                   body=json.dumps({"type": "A", "value": 4.2}))
        self.channel.basic_publish(exchange='', routing_key=CONSUME_QUEUE,
                                   body=json.dumps({"type": "B", "value": 85}))
        self.channel.basic_publish(exchange='', routing_key=CONSUME_QUEUE,
                                   body=json.dumps({"type": "C", "value": "a"}))
        self.channel.basic_publish(exchange='', routing_key=CONSUME_QUEUE,
                                   body=json.dumps({"type": "D", "value": 4}))
        self.channel.basic_publish(exchange='', routing_key=CONSUME_QUEUE,
                                   body=json.dumps({"type": "A", "value": 2.2}))
        self.channel.basic_publish(exchange='', routing_key=CONSUME_QUEUE,
                                   body=json.dumps({"type": "A", "value": 4.1}))
        self.channel.basic_publish(exchange='', routing_key=CONSUME_QUEUE,
                                   body=json.dumps([{"type": "A", "value": None},
                                                    {"type": "V", "value": None}]))
        processed_data = []
        for _ in range(4):
            processed_data.append(json.loads(self.recv_pipe.recv()))
        self.assertFalse(self.recv_pipe.poll(1))
        self.assertEqual(processed_data[0], [{"type": "A", "value": 4.2}])
        self.assertEqual(processed_data[1], [{"type": "A", "value": 2.2}])
        self.assertEqual(processed_data[2], [{"type": "A", "value": 4.1}])
        self.assertEqual(processed_data[3], [{"type": "A", "value": None}])

    def test_simple_multipy_message_with_grouping(self):
        self.test_process = Process(target=self._start_process, args=(DummyStateCommiter(self.publish_multiple), 2))
        self.test_process.start()
        self.channel.queue_declare(queue=CONSUME_QUEUE)
        self.channel.basic_publish(exchange='', routing_key=CONSUME_QUEUE,
                                   body=json.dumps({"type": "A", "value": 4.2}))
        self.channel.basic_publish(exchange='', routing_key=CONSUME_QUEUE,
                                   body=json.dumps({"type": "B", "value": 7}))
        self.channel.basic_publish(exchange='', routing_key=CONSUME_QUEUE,
                                   body=json.dumps({"type": "C", "value": "a"}))
        self.channel.basic_publish(exchange='', routing_key=CONSUME_QUEUE,
                                   body=json.dumps({"type": "D", "value": 1}))
        self.channel.basic_publish(exchange='', routing_key=CONSUME_QUEUE,
                                   body=json.dumps([{"type": "A", "value": None},
                                                    {"type": "V", "value": None}]))
        processed_data = []
        for _ in range(7):
            processed_data.append(json.loads(self.recv_pipe.recv()))
        self.assertFalse(self.recv_pipe.poll(1))
        self.assertEqual(processed_data[0], [{"type": "A"}, {"type": "A"}])
        self.assertEqual(processed_data[1], [{"type": "A"}, {"type": "A"}])
        self.assertEqual(processed_data[2], [{"type": "B"}, {"type": "B"}])
        self.assertEqual(processed_data[3], [{"type": "B"}, {"type": "B"}])
        self.assertEqual(processed_data[4], [{"type": "B"}, {"type": "B"}])
        self.assertEqual(processed_data[5], [{"type": "B"}])
        self.assertEqual(processed_data[6], [{"type": "D"}])

    def test_idempotency_set_integration(self):
        self.test_process = Process(target=self._start_process,
                                    args=(DummyStateCommiter(lambda m: self.publish_multiple(m, self.message_set)), 2))
        self.test_process.start()
        self.channel.queue_declare(queue=CONSUME_QUEUE)
        self.channel.basic_publish(exchange='', routing_key=CONSUME_QUEUE,
                                   body=json.dumps({"type": "A", "value": 4.2}))
        self.channel.basic_publish(exchange='', routing_key=CONSUME_QUEUE,
                                   body=json.dumps({"type": "B", "value": 7}))
        self.channel.basic_publish(exchange='', routing_key=CONSUME_QUEUE,
                                   body=json.dumps({"type": "C", "value": "a"}))
        self.channel.basic_publish(exchange='', routing_key=CONSUME_QUEUE,
                                   body=json.dumps({"type": "D", "value": 1}))
        self.channel.basic_publish(exchange='', routing_key=CONSUME_QUEUE,
                                   body=json.dumps([{"type": "A", "value": 4.2},
                                                    {"type": "V", "value": 1}]))
        self.channel.basic_publish(exchange='', routing_key=CONSUME_QUEUE,
                                   body=json.dumps({"type": "D", "value": 1}))
        processed_data = []
        for _ in range(8):
            processed_data.append(json.loads(self.recv_pipe.recv()))
        self.assertFalse(self.recv_pipe.poll(1))
        self.assertEqual(processed_data[0], [{"type": "A"}, {"type": "A"}])
        self.assertEqual(processed_data[1], [{"type": "A"}, {"type": "A"}])
        self.assertEqual(processed_data[2], [{"type": "B"}, {"type": "B"}])
        self.assertEqual(processed_data[3], [{"type": "B"}, {"type": "B"}])
        self.assertEqual(processed_data[4], [{"type": "B"}, {"type": "B"}])
        self.assertEqual(processed_data[5], [{"type": "B"}])
        self.assertEqual(processed_data[6], [{"type": "D"}])
        self.assertEqual(processed_data[7], [{"type": "V"}])

    def test_simple_stop(self):
        self.test_process = Process(target=self._start_process,
                                    args=(DummyStateCommiter(
                                        lambda m: self.republish_and_stop_with_key_z(m, self.message_set)), 2))
        self.test_process.start()
        self.channel.queue_declare(queue=CONSUME_QUEUE)
        self.channel.basic_publish(exchange='', routing_key=CONSUME_QUEUE,
                                   body=json.dumps({"key": "A", "value": 4.2}))
        self.channel.basic_publish(exchange='', routing_key=CONSUME_QUEUE,
                                   body=json.dumps({"key": "Z", "value": 7}))
        self.channel.basic_publish(exchange='', routing_key=CONSUME_QUEUE,
                                   body=json.dumps({"key": "C", "value": "a"}))
        self.test_process.join()
        processed_data = []
        for _ in range(2):
            processed_data.append(json.loads(self.recv_pipe.recv()))
        self.assertFalse(self.recv_pipe.poll(1))
        self.assertEqual(processed_data[0], [{"key": "A", "value": 4.2}])
        self.assertEqual(processed_data[1], [{"key": "Z", "value": 7}])

    def test_stop_on_first_message(self):
        def return_stop(msg):
            return [BroadcastMessage({})] * 10, True

        self.test_process = Process(target=self._start_process,
                                    args=(return_stop, 1))
        self.test_process.start()
        self.channel.queue_declare(queue=CONSUME_QUEUE)
        self.channel.basic_publish(exchange='', routing_key=CONSUME_QUEUE,
                                   body=json.dumps({"key": "A", "value": 4.2}))
        self.test_process.join()
Example #38
def main():
    # Create DXL Reacher1D environment
    env = DxlReacher1DEnv(
        setup='dxl_gripper_default',
        idn=1,
        baudrate=1000000,
        obs_history=1,
        dt=0.04,
        gripper_dt=0.01,
        rllab_box=False,
        episode_length_step=None,
        episode_length_time=2,
        max_torque_mag=100,
        control_type='torque',
        target_type='position',
        reset_type='zero',
        reward_type='linear',
        use_ctypes_driver=True,
    )

    # The outputs of the policy function are sampled from a Gaussian. However, the actions in terms of torque
    # commands are in the range [-max_torque_mag, max_torque_mag]. NormalizedEnv wrapper scales action accordingly.
    # By default, it does not normalize observations or rewards.
    env = NormalizedEnv(env)

    # Start environment processes
    env.start()

    # Create baselines trpo policy function
    sess = U.single_threaded_session()
    sess.__enter__()

    def policy_fn(name, ob_space, ac_space):
        return MlpPolicy(name=name,
                         ob_space=ob_space,
                         ac_space=ac_space,
                         hid_size=32,
                         num_hid_layers=2)

    # create and start plotting process
    plot_running = Value('i', 1)
    shared_returns = Manager().dict({
        "write_lock": False,
        "episodic_returns": [],
        "episodic_lengths": [],
    })
    # Plotting process
    pp = Process(target=plot_dxl_reacher,
                 args=(env, 2048, shared_returns, plot_running))
    pp.start()

    # Create callback function for logging data from baselines PPO learn
    kindred_callback = create_callback(shared_returns)

    # Train baselines TRPO
    learn(
        env,
        policy_fn,
        max_timesteps=50000,
        timesteps_per_batch=2048,
        max_kl=0.05,
        cg_iters=10,
        cg_damping=0.1,
        vf_iters=5,
        vf_stepsize=0.001,
        gamma=0.995,
        lam=0.995,
        callback=kindred_callback,
    )

    # Safely terminate plotter process
    plot_running.value = 0  # shut down the plotting process
    time.sleep(2)
    pp.join()

    # Shutdown the environment
    env.close()
Example #39
            else:

                print "# I: [Type] <Shared>"
                print "# I: [Flow] <Jobs and Resumes as different processes>"
                print "# I: [No. of Processes] <2>"
                print "# I: [No. of Processes] <{processes}> (For each flow)".format(processes=settings.NO_OF_PROCESSES)

                print "# I: [Total Job Links] <{total}> (Pending)".format(total=total_jobs)
                print "# I: [Total Resume Links] <{total}> (Pending)".format(total=total_resumes)
                job_process = Process(target=html_scrapper.fetch_html, args=(settings.TASKS[0], scrap_jobs,))
                resume_process = Process(target=html_scrapper.fetch_html, args=(settings.TASKS[1], scrap_resumes,))

                job_process.start()
                resume_process.start()
                job_process.join()
                resume_process.join()

        else:

            print "# I: [Multiprocessing] <False>"
            print "# I: [Flow] <Jobs -> Resumes>"

            print "# I: [Total Job Links] <{total}> (Pending)".format(total=total_jobs)
            html_scrapper.fetch_html(settings.TASKS[0], scrap_jobs)
            print "# I: [Total Resume Links] <{total}> (Pending)".format(total=total_resumes)
            html_scrapper.fetch_html(settings.TASKS[1], scrap_resumes)

    elif settings.FOR_JOB or settings.FOR_RESUME:

        if settings.FOR_JOB:
Example #40
    def get_all_draws(self, nprocs=20):
        meidq = Queue()
        dq = Queue()

        # Create and feed reader procs
        pget_mvid_draws = partial(qget_mvid_draws,
                                  env=self.env,
                                  lid=self.lid,
                                  yid=self.yid,
                                  sid=self.sid)
        read_procs = []
        for i in range(nprocs):
            p = Process(target=pget_mvid_draws, args=(meidq, dq))
            read_procs.append(p)
            p.start()

        # Feed and close the meid queue
        seq_list = self.cv.mvid_list.merge(self.cv.seq_map,
                                           on='modelable_entity_id')
        st_inj_clist = self.cv.mvid_list.merge(self.cv.st_injury_by_cause(),
                                               on='modelable_entity_id')
        st_inj_slist = self.cv.mvid_list.merge(self.cv.st_injury_by_sequela(),
                                               on='modelable_entity_id')
        inj_cprev = self.cv.mvid_list.merge(self.cv.injury_prev_by_cause(),
                                            on='modelable_entity_id')
        memv_list = pd.concat(
            [seq_list, st_inj_clist, st_inj_slist,
             inj_cprev])[['modelable_entity_id', 'model_version_id']]
        arglist = zip(list(memv_list.modelable_entity_id),
                      list(memv_list.model_version_id))
        arglist = list(set(arglist))
        for meid in arglist:
            meidq.put(meid)
        for p in read_procs:
            meidq.put(sentinel)

        # Build output df from reader procs
        df = [dq.get() for i in arglist]
        for p in read_procs:
            p.join()
        errs = [e for e in df if isinstance(e, dict)]
        print errs
        df = pd.concat([d for d in df if not isinstance(d, dict)])
        df = df[[
            'modelable_entity_id', 'location_id', 'year_id', 'age_group_id',
            'sex_id', 'measure_id'
        ] + ['draw_%s' % d for d in range(1000)]]
        self.st_inj_by_cause = df.merge(st_inj_clist, on='modelable_entity_id')
        self.st_inj_by_cause = self.st_inj_by_cause.query('measure_id == 3')
        self.st_inj_by_seq = df.merge(st_inj_slist, on='modelable_entity_id')
        self.st_inj_by_seq = self.st_inj_by_seq.query('measure_id == 3')
        self.inj_cprev = df.merge(inj_cprev, on='modelable_entity_id')
        self.inj_cprev = self.inj_cprev.query('measure_id == 5')
        self.inj_cprev = self.inj_cprev[
            ['location_id', 'year_id', 'age_group_id', 'sex_id', 'cause_id'] +
            self.drawcols].groupby([
                'location_id', 'year_id', 'age_group_id', 'sex_id', 'cause_id'
            ]).sum().reset_index()
        df = df[df.modelable_entity_id.isin(seq_list.modelable_entity_id)]
        self.gen_reports(df)
        df = self.fill_expected_me_age_sets(df)
        df = self.cast_ids_to_ints(df)
        return df
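qget_mvid_draws (wrapped with functools.partial above) is not shown. A minimal sketch of the worker loop it is assumed to implement: pull (modelable_entity_id, model_version_id) pairs until the sentinel arrives, and put either a result or an error dict on the output queue (fetch_draws is a hypothetical helper):

# Hedged sketch of the assumed reader worker used by get_all_draws above.
def qget_mvid_draws(meidq, dq, env=None, lid=None, yid=None, sid=None):
    while True:
        item = meidq.get()
        if item == sentinel:          # sentinel tells this reader to exit
            break
        meid, mvid = item
        try:
            dq.put(fetch_draws(meid, mvid, env, lid, yid, sid))  # hypothetical fetch
        except Exception as e:
            dq.put({'modelable_entity_id': meid, 'error': str(e)})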
Example #41
		tiltD = manager.Value("f", 0.002)

		# we have 4 independent processes
		# 1. objectCenter  - finds/localizes the object
		# 2. panning       - PID control loop determines panning angle
		# 3. tilting       - PID control loop determines tilting angle
		# 4. setServos     - drives the servos to proper angles based
		#                    on PID feedback to keep object in center
		processObjectCenter = Process(target=obj_center,
			args=(args, objX, objY, centerX, centerY))
		processPanning = Process(target=pid_process,
			args=(pan, panP, panI, panD, objX, centerX))
		processTilting = Process(target=pid_process,
			args=(tlt, tiltP, tiltI, tiltD, objY, centerY))
		processSetServos = Process(target=go, args=(pan, tlt))

		# start all 4 processes
		processObjectCenter.start()
		processPanning.start()
		processTilting.start()
		processSetServos.start()

		# join all 4 processes
		processObjectCenter.join()
		processPanning.join()
		processTilting.join()
		processSetServos.join()

		# disable the servos
		pth.servo_enable(1, False)
		pth.servo_enable(2, False)
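obj_center, pid_process and go are not included in the snippet above. Under the assumption of a simple PID helper class (hypothetical here), the pid_process worker typically reads the shared Manager Values in a loop and writes its output back for the servo process to consume:

# Hedged sketch of a PID worker for the pan/tilt example above.
import time

def pid_process(output, p, i, d, obj_coord, center_coord):
    pid = PID(p.value, i.value, d.value)              # hypothetical PID helper class
    pid.initialize()
    while True:
        error = center_coord.value - obj_coord.value  # signed pixel error
        output.value = pid.update(error)              # new pan or tilt angle
        time.sleep(0.01)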
Example #42
        value = q.get(True)
        print 'read %s from queue.' % value
        if value == 'Done':
            break


if __name__ == '__main__':
    # The parent process creates the Queue and passes it to each child process
    #Queue(maxsize=0)
    #    Returns a queue object
    q = Queue()

    #__init__(self, group=None, target=None, name=None, args=(), kwargs={})
    pw = Process(target=write, args=(q, ))
    pr = Process(target=read, args=(q, ))

    # Start child process pw (writer)
    # Start child process
    pw.start()

    # Start child process pr (reader)
    pr.start()

    # Wait for pw to finish
    # Wait until child process terminates
    pw.join()

    # pr runs an infinite loop, so we cannot wait for it to finish; it would have to be terminated
    # Terminate process; sends SIGTERM signal or uses TerminateProcess()
    #pr.terminate()
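The write and read functions handed to the two Process objects above are not included. Assuming the writer finishes by putting a 'Done' sentinel (matching the reader fragment at the top of this example), they could look roughly like this:

# Hedged sketch of the assumed writer/reader pair for the Queue example above.
def write(q):
    for value in ['A', 'B', 'C']:
        q.put(value)
    q.put('Done')        # sentinel so the reader knows when to stop

def read(q):
    while True:
        value = q.get(True)
        if value == 'Done':
            break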
Example #43
class BaseManager(object):
    '''
    Base class for managers
    '''
    _registry = {}
    _Server = Server

    def __init__(self, address=None, authkey=None, serializer='pickle'):
        if authkey is None:
            authkey = current_process().authkey
        self._address = address  # XXX not final address if eg ('', 0)
        self._authkey = AuthenticationString(authkey)
        self._state = State()
        self._state.value = State.INITIAL
        self._serializer = serializer
        self._Listener, self._Client = listener_client[serializer]

    def __reduce__(self):
        return type(self).from_address, \
               (self._address, self._authkey, self._serializer)

    def get_server(self):
        '''
        Return server object with serve_forever() method and address attribute
        '''
        assert self._state.value == State.INITIAL
        return Server(self._registry, self._address, self._authkey,
                      self._serializer)

    def connect(self):
        '''
        Connect manager object to the server process
        '''
        Listener, Client = listener_client[self._serializer]
        conn = Client(self._address, authkey=self._authkey)
        dispatch(conn, None, 'dummy')
        self._state.value = State.STARTED

    def start(self, initializer=None, initargs=()):
        '''
        Spawn a server process for this manager object
        '''
        assert self._state.value == State.INITIAL

        if initializer is not None and not hasattr(initializer, '__call__'):
            raise TypeError('initializer must be a callable')

        # pipe over which we will retrieve address of server
        reader, writer = connection.Pipe(duplex=False)

        # spawn process which runs a server
        self._process = Process(
            target=type(self)._run_server,
            args=(self._registry, self._address, self._authkey,
                  self._serializer, writer, initializer, initargs),
        )
        ident = ':'.join(str(i) for i in self._process._identity)
        self._process.name = type(self).__name__ + '-' + ident
        self._process.start()

        # get address of server
        writer.close()
        self._address = reader.recv()
        reader.close()

        # register a finalizer
        self._state.value = State.STARTED
        self.shutdown = util.Finalize(self,
                                      type(self)._finalize_manager,
                                      args=(self._process, self._address,
                                            self._authkey, self._state,
                                            self._Client),
                                      exitpriority=0)

    @classmethod
    def _run_server(cls,
                    registry,
                    address,
                    authkey,
                    serializer,
                    writer,
                    initializer=None,
                    initargs=()):
        '''
        Create a server, report its address and run it
        '''
        if initializer is not None:
            initializer(*initargs)

        # create server
        server = cls._Server(registry, address, authkey, serializer)

        # inform parent process of the server's address
        writer.send(server.address)
        writer.close()

        # run the manager
        util.info('manager serving at %r', server.address)
        server.serve_forever()

    def _create(self, typeid, *args, **kwds):
        '''
        Create a new shared object; return the token and exposed tuple
        '''
        assert self._state.value == State.STARTED, 'server not yet started'
        conn = self._Client(self._address, authkey=self._authkey)
        try:
            id, exposed = dispatch(conn, None, 'create', (typeid, ) + args,
                                   kwds)
        finally:
            conn.close()
        return Token(typeid, self._address, id), exposed

    def join(self, timeout=None):
        '''
        Join the manager process (if it has been spawned)
        '''
        self._process.join(timeout)

    def _debug_info(self):
        '''
        Return some info about the servers shared objects and connections
        '''
        conn = self._Client(self._address, authkey=self._authkey)
        try:
            return dispatch(conn, None, 'debug_info')
        finally:
            conn.close()

    def _number_of_objects(self):
        '''
        Return the number of shared objects
        '''
        conn = self._Client(self._address, authkey=self._authkey)
        try:
            return dispatch(conn, None, 'number_of_objects')
        finally:
            conn.close()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.shutdown()

    @staticmethod
    def _finalize_manager(process, address, authkey, state, _Client):
        '''
        Shutdown the manager process; will be registered as a finalizer
        '''
        if process.is_alive():
            util.info('sending shutdown message to manager')
            try:
                conn = _Client(address, authkey=authkey)
                try:
                    dispatch(conn, None, 'shutdown')
                finally:
                    conn.close()
            except Exception:
                pass

            process.join(timeout=0.2)
            if process.is_alive():
                util.info('manager still alive')
                if hasattr(process, 'terminate'):
                    util.info('trying to `terminate()` manager process')
                    process.terminate()
                    process.join(timeout=0.1)
                    if process.is_alive():
                        util.info('manager still alive after terminate')

        state.value = State.SHUTDOWN
        try:
            del BaseProxy._address_to_local[address]
        except KeyError:
            pass

    address = property(lambda self: self._address)

    @classmethod
    def register(cls,
                 typeid,
                 callable=None,
                 proxytype=None,
                 exposed=None,
                 method_to_typeid=None,
                 create_method=True):
        '''
        Register a typeid with the manager type
        '''
        if '_registry' not in cls.__dict__:
            cls._registry = cls._registry.copy()

        if proxytype is None:
            proxytype = AutoProxy

        exposed = exposed or getattr(proxytype, '_exposed_', None)

        method_to_typeid = method_to_typeid or \
                           getattr(proxytype, '_method_to_typeid_', None)

        if method_to_typeid:
            for key, value in list(method_to_typeid.items()):
                assert type(key) is str, '%r is not a string' % key
                assert type(value) is str, '%r is not a string' % value

        cls._registry[typeid] = (callable, exposed, method_to_typeid,
                                 proxytype)

        if create_method:

            def temp(self, *args, **kwds):
                util.debug('requesting creation of a shared %r object', typeid)
                token, exp = self._create(typeid, *args, **kwds)
                proxy = proxytype(token,
                                  self._serializer,
                                  manager=self,
                                  authkey=self._authkey,
                                  exposed=exp)
                conn = self._Client(token.address, authkey=self._authkey)
                dispatch(conn, None, 'decref', (token.id, ))
                return proxy

            temp.__name__ = typeid
            setattr(cls, typeid, temp)
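A short usage sketch for the register() classmethod above: expose an ordinary class through a custom manager so other processes can work with it via proxies (standard multiprocessing.managers usage; Counter is only illustrative):

from multiprocessing.managers import BaseManager

class Counter(object):
    def __init__(self):
        self.n = 0

    def increment(self):
        self.n += 1
        return self.n

class MyManager(BaseManager):
    pass

# typeid 'Counter' -> callable; a proxy type is generated automatically
MyManager.register('Counter', Counter)

if __name__ == '__main__':
    manager = MyManager()
    manager.start()               # spawns the manager's server process
    counter = manager.Counter()   # returns a proxy to a Counter living in that server
    print(counter.increment())
    manager.shutdown()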
Example #44
from multiprocessing import Process
import os


# Code to run in the child process
def run_proc(name):
    print('Child process running, name=%s, pid=%d...ppid=%d' %
          (name, os.getpid(), os.getppid()))


if __name__ == '__main__':
    print('Parent process %d.' % os.getpid())
    p = Process(target=run_proc, args=('test', ))
    print('Child process about to start')
    p.start()
    p.join()
    print('Child process finished')

# To create a child process, just pass a target function and its arguments,
# build a Process instance and launch it with start(); this is even simpler than fork()
# join() waits for the child process to finish before continuing, and is typically used for inter-process synchronization

# Process signature

# Process([group [, target [, name [, args [, kwargs]]]]])
# group: rarely needed, almost always left as None;
# target: the callable object this process instance will run;
# name: an alias for this process instance;
# args: tuple of positional arguments for the target callable;
# kwargs: dict of keyword arguments for the target callable;
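A minimal sketch exercising the parameters listed above (target, name, args, kwargs) together with the name and pid attributes:

from multiprocessing import Process

def greet(greeting, punctuation='!'):
    print(greeting + punctuation)

if __name__ == '__main__':
    p = Process(target=greet, name='greeter',
                args=('hello',), kwargs={'punctuation': '?'})
    p.start()
    print(p.name, p.pid)
    p.join()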
Example #45
    shared_array = shared_array.reshape(1, 1024 * 2)
    print shared_array

    s = Event()

    tonePlayer_p = Process(target=tonePlayer, args=(
        int(sys.argv[1]),
        s,
    ))
    tonePlayer_p.daemon = True

    recorder_p = Process(target=recorder,
                         args=(
                             shared_array,
                             int(sys.argv[1]),
                             int(sys.argv[2]),
                             s,
                         ))
    recorder_p.daemon = True

    plotter_p = Process(target=plotter, args=(shared_array, ))
    plotter_p.daemon = True

    recorder_p.start()
    tonePlayer_p.start()
    plotter_p.start()

    tonePlayer_p.join()
    recorder_p.join()
    plotter_p.join()
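shared_array above arrives already allocated; it is presumably built from a multiprocessing Array wrapped as a NumPy view, roughly like this (a hedged sketch of the missing setup, not the original code):

import numpy as np
from multiprocessing import Array

# Raw shared buffer of doubles, then a zero-copy NumPy view over it so the
# recorder and plotter processes all see the same samples.
shared_base = Array('d', 1024 * 2)
shared_array = np.frombuffer(shared_base.get_obj())
shared_array = shared_array.reshape(1, 1024 * 2)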
Example #46
                    if start_thermalize_timer:
                        print('Starting thermalization wait time ...')
                        time.sleep(therm_time)
    
                        # Launch the resonance measurement (power sweep, etc.)
                        # Wait for the process to return
                        print(f'Starting PNA measurement ...')
                        mng = Manager()
                        out = mng.dict()
                        pmeas = Process(target=MeasurementProcess,
                                args=('meas', measurement_path, out))
                        ptemp = Process(target=TemperatureController,
                                args=('temp', t, pid, fid, sck, out))
                        pmeas.start()
                        ptemp.start()
                        pmeas.join()
                        ptemp.join()

                        # Read the time and measurement return code
                        meas_ret = out['meas']
                        t, T = out['temp']

            except KeyboardInterrupt:
                SetCurrent(sck, 0)
                sck.close()
                fid.close()
    
        # Close the file, just in case the context manager does not free it
        fid.close()
        
    # Set the heater current back to 0 mA
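MeasurementProcess and TemperatureController are not shown. The pattern above only requires that each worker writes its result into the shared Manager dict under the key it was given; a hedged sketch with hypothetical helpers:

def MeasurementProcess(key, measurement_path, out):
    out[key] = run_power_sweep(measurement_path)              # hypothetical; read back as out['meas']

def TemperatureController(key, t, pid, fid, sck, out):
    out[key] = (t, regulate_temperature(t, pid, fid, sck))    # hypothetical; read back as out['temp']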
Example #47
    def run(self):
        try:
            if self.parse_args() != 0:
                return

            if self._args.tail:
                start_time = UTCTimestampUsec() - 10 * pow(10, 6)
                while True:
                    self._start_time = start_time
                    self._end_time = UTCTimestampUsec()
                    start_time = self._end_time + 1
                    time.sleep(3)
                    result = self.query()
                    if result == -1:
                        return
                    self.display(result)
            else:
                start_time = self._args.start_time
                end_time = self._args.end_time
                if not self._args.start_time:
                    start_time = "now-10m"
                if not self._args.end_time:
                    end_time = "now"
                try:
                    self._start_time, self._end_time = \
                        OpServerUtils.parse_start_end_time(
                            start_time = start_time,
                            end_time = end_time,
                            last = self._args.last)
                except:
                    return -1

                start_time = self._start_time
                end_time = self._end_time

                result_list = []
                while int(end_time) - int(start_time) > 0:
                    if not self._args.reverse:
                        self._start_time = start_time
                        self._end_time = start_time + 10 * 60 * pow(10, 6) if (
                            start_time + 10 * 60 * pow(10, 6) <= int(end_time)
                        ) else int(end_time)
                    else:
                        self._end_time = end_time
                        self._start_time = end_time - 10 * 60 * pow(10, 6) if (
                            end_time - 10 * 60 * pow(10, 6) >= int(start_time)
                        ) else int(start_time)

                    p = Process(target=self.display, args=(result_list, ))
                    p.start()
                    result = self.query()
                    if result == -1:
                        return
                    # Accumulate the result before processing it as the
                    # formatting of result can be cpu intensive and hence would
                    # affect the overall time taken to fetch the result from the
                    # analytics-api. Since the query result ttl is set to 5 min
                    # in redis, it is necessary to improve the read throughput.
                    result_list = self.read_result(result)
                    p.join()
                    if not self._args.reverse:
                        start_time = self._end_time + 1
                    else:
                        end_time = self._start_time - 1
                self.display(result_list)

        except KeyboardInterrupt:
            return
Example #48
class ObserverSimulator:
    def __init__(self, n_states, n_inputs, n_outputs):
        self.n_states = n_states
        self.n_inputs = n_inputs
        self.n_outputs = n_outputs

        self.running = True
        self.d_time = 0.0001

        self.sim_thread = Process(target=self.simulate, args=())

        self.shared_A_hat = Array('d', np.zeros(n_states**2))
        self.shared_K = Array('d', np.zeros(n_states * n_outputs))
        self.shared_H = Array('d', np.zeros(n_states * n_inputs))
        self.shared_C = Array('d', np.zeros(n_outputs * n_states))
        self.shared_D = Array('d', np.zeros(n_outputs * n_inputs))
        self.shared_u = Array('d', np.zeros(n_inputs))
        self.shared_state_trans = Array('d', np.zeros(n_states**2))

        # Shared (State Transition Matrix + I (Identity)) * d_time
        self.shared_st_I_dt = Array('d', np.zeros(n_states**2))

        self.A_hat = np.frombuffer(self.shared_A_hat.get_obj()).reshape(
            (n_states, n_states))
        self.K = np.frombuffer(self.shared_K.get_obj()).reshape(
            (n_states, n_outputs))
        self.H = np.frombuffer(self.shared_H.get_obj()).reshape(
            (n_states, n_inputs))
        self.C = np.frombuffer(self.shared_C.get_obj()).reshape(
            (n_outputs, n_states))
        self.D = np.frombuffer(self.shared_D.get_obj()).reshape(
            (n_outputs, n_inputs))
        self.u = np.frombuffer(self.shared_u.get_obj()).reshape((n_inputs, 1))
        self.state_trans = np.frombuffer(
            self.shared_state_trans.get_obj()).reshape((n_states, n_states))
        self.st_I_dt = np.frombuffer(self.shared_st_I_dt.get_obj()).reshape(
            (n_states, n_states))

        # Default window name
        self.title = "Observer Simulator"

    def __del__(self):
        self.sim_thread.join()

    def signalHandler(self, sig, frame):
        self.running = False
        # self.sim_thread.join()

    def setInput(self, _u):
        assert _u.shape == (self.n_inputs, 1)
        np.copyto(self.u, _u)

    def setStateSpace(self, _A_hat, _K, _H, _C, _D):
        assert _A_hat.shape == (self.n_states, self.n_states)
        assert _K.shape == (self.n_states, self.n_outputs)
        assert _H.shape == (self.n_states, self.n_inputs)
        assert _C.shape == (self.n_outputs, self.n_states)
        assert _D.shape == (self.n_outputs, self.n_inputs)

        np.copyto(self.A_hat, _A_hat)
        np.copyto(self.K, _K)
        np.copyto(self.H, _H)
        np.copyto(self.C, _C)
        np.copyto(self.D, _D)

        # State Transition Matrix
        np.copyto(self.state_trans, expm(self.A_hat * self.d_time))
        np.copyto(self.st_I_dt,
                  (np.eye(self.n_states) + self.state_trans) * self.d_time)

    def setTitle(self, _title):
        self.title = _title

    def setPlotName(self, _plot_name):
        self.plot_name = _plot_name

    def setDelay(self, _delay):
        self.delay = _delay

    def beginSimulation(self):
        print('Starting Observer simulation...')
        signal.signal(signal.SIGINT, self.signalHandler)
        self.sim_thread.start()

    def pltCloseHandle(self, event):
        self.running = False
        # self.sim_thread.join()
        print('Plotter closed.')

    def simulate(self):

        # Time interval (seconds) & Maximum data
        view_interval = 0.01
        max_data = view_interval / self.d_time

        # Time
        t = 0

        # Initial State
        init_state = np.zeros((self.n_states, 1))

        # State
        state = init_state
        last_state = state

        # Input
        last_u = self.u

        # Output
        output = np.zeros((self.n_outputs, 1))

        # Data
        signal_data = []

        for i in range(0, self.n_states):
            signal_data.append([state[i]])

        for i in range(0, self.n_inputs):
            signal_data.append([self.u[i]])

        for i in range(0, self.n_outputs):
            signal_data.append([output[i]])

        time_data = [0]

        # Other stuff
        total_signal = np.zeros(self.n_states + self.n_inputs + self.n_outputs)

        # Plotter setup
        fig = plt.figure(facecolor='black', edgecolor='white')
        fig.canvas.set_window_title(self.title)
        fig.canvas.mpl_connect('close_event', self.pltCloseHandle)

        n_signals = self.n_states + self.n_inputs + self.n_outputs
        n_sp_rows, n_sp_cols = int(math.ceil(n_signals / 3)), min(3, n_signals)
        sp = []
        plot = []

        for i in range(0, n_signals):
            sp.append(fig.add_subplot(n_sp_rows, n_sp_cols, i + 1))
            sp[i].set_title(self.plot_name['signal{}'.format(i)],
                            color='white')
            sp[i].set_xlabel('Time (s)')
            sp[i].grid(True)
            sp[i].set_facecolor(
                (0.294117647, 0.294117647, 0.294117647))  # Gray
            sp[i].tick_params(color='white', labelcolor='white')
            for spine in sp[i].spines.values():
                spine.set_edgecolor('white')

            color = 'b'
            if i < self.n_states:
                color = 'r'
            elif i < self.n_states + self.n_inputs:
                color = 'g'

            line, = sp[i].plot(time_data, signal_data[i], color=color)
            plot.append(line)

        plt.tight_layout()

        # Manual adjustment
        plt.subplots_adjust(left=0.05,
                            bottom=0.05,
                            right=0.99,
                            top=0.96,
                            wspace=0.21,
                            hspace=0.3)

        plt.grid(True)

        fig.canvas.draw()
        fig.show()

        while self.running:

            # Approximation of State Space solution
            term1 = np.dot(self.state_trans, last_state)
            term2 = np.dot(self.K, output)
            term3 = np.dot(self.H, self.u)
            term4 = np.dot(self.st_I_dt, term2 + term3) * .5

            state = term1 + term4

            # Output calculation based on that state approximation
            output = np.dot(self.C, state) + np.dot(self.D, self.u)

            last_u = self.u
            last_state = state

            pres_signal = np.concatenate(
                (state, np.concatenate((self.u, output), axis=0)), axis=0)

            time_data.append(t)

            time_data_len = len(time_data)

            signal_avg = []

            for i in range(0, n_signals):

                # Summing signal magnitude
                # for calculating their average
                total_signal[i] += np.abs(pres_signal[i])

                # Replace data
                signal_data[i].append(pres_signal[i])
                plot[i].set_ydata(signal_data[i])
                plot[i].set_xdata(time_data)

                # Draw into the figure
                sp[i].draw_artist(sp[i].patch)
                sp[i].draw_artist(plot[i])

                # To maintain sight of signal in plotter
                sp[i].set_xlim(left=max(0, t - 0.75 * view_interval),
                               right=max(view_interval,
                                         t + 0.25 * view_interval))

                # To adjust the signal interval
                signal_avg.append(total_signal[i] / time_data_len)
                sp[i].set_ylim(bottom=min(-0.001, -signal_avg[i] * 1.75),
                               top=max(0.001, signal_avg[i] * 1.75))

            fig.canvas.draw_idle()
            fig.canvas.flush_events()

            t += self.d_time

            time.sleep(self.delay)

            # Debugging part
            # print('Time Data Size : {}'.format(len(time_data)))
            # print('State Data Size : {}'.format(len(state_data)))
            # print('Output Data Size : {}'.format(len(output_data)))
            # print('State : {}'.format(state))
            # print('Output : {}'.format(output))
            # print('Time : {}'.format(t))

            # Remove unnecessary old data
            if len(time_data) >= max_data:

                site = int(max_data * 0.25)

                for i in range(0, n_signals):

                    total_signal[i] -= signal_avg[i] * site
                    del signal_data[i][0:site]

                del time_data[0:site]
Example #49
        retR, frameR = CamR.read()
        cv2.imshow('imgR', frameR)

        if cv2.waitKey(1) & 0xFF == ord(' '):
            break

    Cam.release()
    cv2.destroyAllWindows()


if __name__ == '__main__':
    Cam = cv2.VideoCapture(0)
    init_Cams(Cam)

    gp.output(7, False)
    gp.output(11, False)
    gp.output(12, True)
    pL = Process(target=activate_CamL, args=(Cam, ))
    pL.start()

    time.sleep(1)

    gp.output(7, False)
    gp.output(11, True)
    gp.output(12, False)
    pR = Process(target=activate_CamR, args=(Cam, ))
    pR.start()

    pL.join()
    pR.join()
Example #50
            print("Home", i, "Sell if no takers (", num_policy, ")")

        Homes.append(h)

    t = Process(target=terminal,
                args=(q, count, clock_ok, queue_echange, term_conn,
                      term_conn2))
    t.start()

    m = Process(target=Market,
                args=(queue, count, market_OK, clock_ok, temp, wind,
                      markt_conn, weather_ok))
    m.start()

    w = Process(target=Weather,
                args=(temp, wind, clock_ok, weather_ok, weather_conn))
    w.start()

    c = Process(target=Clock, args=(clock_ok, ))
    c.start()

    c.join()
    m.join()
    w.join()
    t.join()

    for h in Homes:
        h.join()

    print("temps de la simulation:", time.time() - ti)
Example #51
def creat_process():
    start = time.time()
    p1 = Process(target=works, args=(1, 2))  # with two values in args, no trailing comma is needed
    p1.start()
    p1.join()  # wait for the child to finish before continuing; used for inter-process synchronization
    print("single process took: %8.f s" % (time.time() - start))
Example #52
                              offset['end_width'], offset['end_height']))
            if not os.path.exists(path + 'extracted'):
                os.makedirs(path + 'extracted')
            page.save(
                path + 'extracted/{}.jpg'.format(
                    file.replace('.pdf', '').replace('.PDF', '')), 'JPEG')
            break
    for file in os.listdir(path + 'extracted'):
        if not os.path.exists(path + 'crop'):
            os.makedirs(path + 'crop')
        center_image(path + 'extracted/{}'.format(file),
                     path + 'crop/{}'.format(file))


def f():
    extract_sig(True)


def g():
    extract_sig(False)


if __name__ == '__main__':
    p = Process(target=f)
    # use a distinct name so the Process object does not shadow the function g
    q = Process(target=g)
    p.start()
    q.start()
    p.join()
    q.join()
    print('extraction: finished')
Example #53
    def test_delayed_download(self):
        """
        Tests handling of race conditions where separate processes attempt to
        cache the same dataset
        """
        cache_dir = os.path.join(self.base_cache_path,
                                 'delayed-download-cache')
        DATASET_NAME = 'source1'
        target_path = os.path.join(self.session_cache(cache_dir),
                                   DATASET_NAME + nifti_gz_format.extension)
        tmp_dir = target_path + '.download'
        shutil.rmtree(cache_dir, ignore_errors=True)
        os.makedirs(cache_dir)
        archive = XnatArchive(server=SERVER,
                              cache_dir=cache_dir,
                              project_id=self.PROJECT)
        study = DummyStudy(
            self.STUDY_NAME,
            archive,
            LinearRunner('ad'),
            inputs=[DatasetMatch(DATASET_NAME, nifti_gz_format, DATASET_NAME)])
        source = archive.source([study.input(DATASET_NAME)],
                                name='delayed_source',
                                study_name='delayed_study')
        source.inputs.subject_id = self.SUBJECT
        source.inputs.visit_id = self.VISIT
        result1 = source.run()
        source1_path = result1.outputs.source1_path
        self.assertTrue(os.path.exists(source1_path))
        self.assertEqual(
            source1_path, target_path,
            "Output file path '{}' not equal to target path '{}'".format(
                source1_path, target_path))
        # Clear cache to start again
        shutil.rmtree(cache_dir, ignore_errors=True)
        # Create tmp_dir before running interface, this time should wait for 1
        # second, check to see that the session hasn't been created and then
        # clear it and redownload the dataset.
        os.makedirs(tmp_dir)
        source.inputs.race_cond_delay = 1
        result2 = source.run()
        source1_path = result2.outputs.source1_path
        # Clear cache to start again
        shutil.rmtree(cache_dir, ignore_errors=True)
        # Create tmp_dir before running interface, this time should wait for 1
        # second, check to see that the session hasn't been created and then
        # clear it and redownload the dataset.
        internal_dir = os.path.join(tmp_dir, 'internal')
        deleted_tmp_dir = tmp_dir + '.deleted'

        def simulate_download():
            "Simulates a download in a separate process"
            os.makedirs(internal_dir)
            time.sleep(5)
            # Modify a file in the temp dir to make the source download keep
            # waiting
            logger.info('Updating simulated download directory')
            with open(os.path.join(internal_dir, 'download'), 'a') as f:
                f.write('downloading')
            time.sleep(10)
            # Simulate the finalising of the download by copying the previously
            # downloaded file into place and deleting the temp dir.
            logger.info('Finalising simulated download')
            with open(target_path, 'a') as f:
                f.write('simulated')
            shutil.move(tmp_dir, deleted_tmp_dir)

        source.inputs.race_cond_delay = 10
        p = Process(target=simulate_download)
        p.start()  # Start the simulated download in separate process
        time.sleep(1)
        source.run()  # Run the local download
        p.join()
        with open(os.path.join(deleted_tmp_dir, 'internal', 'download')) as f:
            d = f.read()
        self.assertEqual(d, 'downloading')
        with open(target_path) as f:
            d = f.read()
        self.assertEqual(d, 'simulated')
Example #54
    for i in range(0,10):
        print 'Loop',i
        queue  = Queue()
        rqueue = Queue()

        Pids = []

        for i in range(0,NumProcs):
            Pids.append(Process(target=WorkerProcess, args=(TargetDir,queue,rqueue)))
    
        rth = Process(target=RemoveProcess, args=(TargetDir,rqueue))

        for th in Pids:
            th.start()

        rth.start()
    
        for FName in glob.iglob(Source):    
            queue.put(FName) 
    
        for th in Pids:
            queue.put(False)
    
        for th in Pids:
            th.join()
    
        rqueue.put(False)
        rth.join()
    
    end_timer("Multiprocessing:")
Example #55
                    encode = l2_normalizer.transform(
                        np.expand_dims(encode, axis=0))[0]
                    encoding_dict[person_name] = encode

            for key in encoding_dict.keys():
                print(key)

            with open(encodings_path, 'bw') as file:
                pickle.dump(encoding_dict, file)
        cv2.line(frame, (int(width / 2), 0), (int(width / 2), int(height)),
                 (0, 0, 255), 1)
        cv2.line(frame, (0, int(height / 2)), (int(width), int(height / 2)),
                 (0, 0, 255), 1)
        cv2.putText(frame, fps_r, (50, 50), font, 0.5, (0, 255, 255), 2)
        cv2.putText(frame,
                    repr(int(width / 2)) + ',' + repr(int(height / 2)),
                    (500, 400), font, 0.5, (0, 0, 255), 2)
        cv2.imshow('camera', frame)
        ##########################
    cap.release()
    # close the parent and child ends of the pipe
    parent_conn.close()
    cv2.destroyAllWindows()
    # the parent waits here until the child process finishes, then exits once the child has ended
    proc.join()

    #face_thread.join()
    #print("FACE END")
    #hi_thread.join()
    print("HI END")
    print("MAIN END")
Example #56
    # th19.start()
    # th20.start()
    # th21.start()
    # th22.start()
    # th23.start()
    # th24.start()
    # th25.start()
    # th26.start()
    # th27.start()
    # th28.start()
    # th29.start()
    # th30.start()
    # th31.start()
    # th32.start()

    th1.join()
    th2.join()
    th3.join()
    th4.join()
    th5.join()
    th6.join()
    th7.join()
    th8.join()
    th9.join()
    th10.join()
    th11.join()
    th12.join()
    th13.join()
    th14.join()
    th15.join()
    th16.join()
Example #57
def child_2(interval):
    print("子进程{}开始执行,父进程为{}".format(os.getpid(), os.getppid()))
    t_start = time.time()
    time.sleep(interval)
    t_end = time.time()
    print("子进程{}执行时间{:0.2f}".format(os.getpid(), t_end - t_start))


if __name__ == '__main__':
    print("-----------父进程开始执行-------------")
    print('父进程PID:{}'.format(os.getpid()))
    p1 = Process(target=child_1, args=(1, ))
    p2 = Process(target=child_2, name='mrsort', args=(2, ))
    p1.start()
    p2.start()
    # Meanwhile the parent keeps running; if process p2 is still executing, is_alive() returns True
    print("p1.is_alive={}".format(p1.is_alive()))
    print("p2.is_alive={}".format(p2.is_alive()))
    # Print the aliases and PIDs of processes p1 and p2
    print("p1.name = {}".format(p1.name))
    print("p1.pid = {}".format(p1.pid))
    print("p2.name = {}".format(p2.name))
    print("p2.pid = {}".format(p2.pid))
    print("---------waiting children process-------")
    p1.join()  #p1.join()是等到P1进程结束
    p2.join()
    print("---------父进程执行结束----------")
"""
1. Difference between multiprocessing and coroutines: coroutines can run concurrently within a single process via context switching, while here each process must wait for the previous one to finish (because of join)
"""
Example #58
    def run(self):
        '''
        Run the client and perform all the operations:
         * Connect to the server.
         * Receive video while sniffing packets.
         * Close connection.
         * Process data and extract information.
         * Run measures.

        :returns: A dictionary of video files received (see :attr:`VideoTester.gstreamer.RTSPClient.files`), a dictionary of caps (see :attr:`VideoTester.gstreamer.RTSPClient.caps`) and a list of results
        :rtype: list
        '''
        VTLOG.info('Client running!')
        try:
            tempdir, num = self.__get_tempdir()
            server = ServerProxy('http://%s:%s' % (self.conf['ip'], self.port))
            rtspport = server.run(self.conf['bitrate'], self.conf['framerate'])
        except Exception as e:
            VTLOG.error(e)
            return None
        VTLOG.info('Connected to XMLRPC Server at %s:%s' %
                   (self.conf['ip'], self.port))
        VTLOG.info(
            'Evaluating: %s, %s, %s kbps, %s fps, %s' %
            (self.conf['video'], self.conf['codec'], self.conf['bitrate'],
             self.conf['framerate'], self.conf['protocol']))

        sniffer = Sniffer(self.conf['iface'], self.conf['ip'],
                          '%s%s.cap' % (tempdir, num))
        rtspclient = RTSPClient(tempdir + num, self.conf['codec'],
                                self.conf['bitrate'], self.conf['framerate'])
        url = 'rtsp://%s:%s/%s.%s' % (self.conf['ip'], rtspport,
                                      self.conf['video'], self.conf['codec'])
        child = Process(target=sniffer.run)
        ret = True
        try:
            child.start()
            VTLOG.info('PID: %s | Sniffer started' % child.pid)
            time.sleep(1)
            rtspclient.receive(url, self.conf['protocol'])
        except KeyboardInterrupt:
            VTLOG.warning('Keyboard interrupt!')
        except Exception as e:
            VTLOG.error(e)
        else:
            ret = False
        server.stop(self.conf['bitrate'], self.conf['framerate'])
        child.terminate()
        child.join()
        VTLOG.info('PID: %s | Sniffer stopped' % child.pid)
        if ret:
            return None

        video = '/'.join([self.path, dict(self.videos)[self.conf['video']]])
        rtspclient.makeReference(video)
        conf = {
            'codec': self.conf['codec'],
            'bitrate': self.conf['bitrate'],
            'framerate': self.conf['framerate'],
            'caps': rtspclient.caps
        }
        packetdata = sniffer.parsePkts(self.conf['protocol'], rtspclient.caps)
        codecdata, rawdata = self.__parseVideo(rtspclient.files,
                                               rtspclient.caps,
                                               self.conf['codec'])

        results = []
        results.extend(QoSmeter(self.conf['qos'], packetdata).run())
        results.extend(BSmeter(self.conf['bs'], codecdata).run())
        results.extend(
            VQmeter(self.conf['vq'],
                    (conf, rawdata, codecdata, packetdata)).run())

        VTLOG.info('Saving measures...')
        for measure in results:
            f = open(tempdir + num + '_' + measure['name'] + '.pkl', 'wb')
            pickle.dump(measure, f)
            f.close()
        VTLOG.info('Client stopped!')

        return rtspclient.files, rtspclient.caps, results
Example #59
def main():
    import requests
    import argparse
    import json

    def get_ip_by_sproxy_conn(
        socks_host,
        socks_port,
        _timeout=10,
    ):
        proxy_conn = 'socks5://{host}:{port}'. \
            format(host=socks_host, port=socks_port, )
        headers = {
            'User-Agent':
            'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36',
            'Accept':
            'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
            'Accept-Encoding': 'gzip, deflate, br',
            'Accept-Language': 'en-US,en;q=0.9,q=0.8',
        }
        try:
            resp = requests.get(
                'https://lumtest.com/myip.json',
                proxies=dict(http=proxy_conn, https=proxy_conn),
                timeout=_timeout,
                headers=headers,
            )
        except (requests.exceptions.ConnectionError,
                requests.exceptions.Timeout, requests.exceptions.ReadTimeout):
            logging.error("FAILED %s" % proxy_conn)
            return False
        if resp.status_code != 200:
            logging.error("FAILED CODE %s" % proxy_conn)
            resp.close()
            return False
        resp.close()
        try:
            data = json.loads(resp.content)
        except ValueError:
            logging.error("FAILED JSON %s" % proxy_conn)
            return False
        return data

    parser = argparse.ArgumentParser()
    parser.add_argument('-ssh', help='ssh', dest='ssh_host', required=True)
    parser.add_argument('-p',
                        '-P',
                        dest='ssh_port',
                        help='port',
                        type=int,
                        default=22)
    parser.add_argument('-l', dest='ssh_user', help='login', required=True)
    parser.add_argument('-pw', dest='ssh_password', help='password')
    parser.add_argument('-D',
                        dest='socks',
                        help='dynamic SOCKS',
                        default='127.0.0.1:7000')
    args, unknown = parser.parse_known_args(sys.argv[1:])

    socks_host, socks_port = args.socks.split(':')
    socks_host = socks_host.strip()
    socks_port = int(socks_port)
    proc = Process(target=main_worker,
                   args=(args.ssh_host, args.ssh_user, args.ssh_password,
                         args.ssh_port, socks_host, socks_port))
    proc.start()
    # sleep before test
    time.sleep(5)
    if proc.is_alive():
        external_ip = get_ip_by_sproxy_conn(socks_host=socks_host,
                                            socks_port=socks_port)
        if not external_ip:
            logging.error('Cannot access external IP address, exiting...')
            proc.terminate()
            return
        else:
            logging.info(external_ip)
        try:
            proc.join()
        except (KeyboardInterrupt, SystemExit):
            pass
Example #60
    def run(self, text):

        text = text.strip()

        # Verify text is a link
        if not text.startswith("http"):
            self.tauon.gui.show_message("Could not identify text as link")
            return
        if " " in text:
            self.tauon.gui.show_message("Could not verify text as link")
            return

        downloaders = self.get_downloaders_list()
        dl_dir = self.dl_dir

        # Clear old download cache folder and make anew
        if os.path.exists(dl_dir):
            shutil.rmtree(dl_dir)
        os.makedirs(dl_dir)

        # Youtube downloader
        if "youtube.com" in text or "youtu.be" in text:
            if "youtube-dl" not in downloaders:
                self.tauon.gui.show_message("Downloading Youtube tracks requires youtube-dl")
                return

            self.tauon.gui.show_message("Type detected: Youtube", 'Link: ' + text)

            youtube_dir = os.path.join(dl_dir, text.rstrip("/").split("/")[-1])
            #youtube_dir = dl_dir
            os.makedirs(youtube_dir)

            line = self.tauon.launch_prefix + "youtube-dl -f bestaudio -o \"" + youtube_dir + "/%(title)s.%(ext)s\" --extract-audio --embed-thumbnail --add-metadata --audio-quality 160K --audio-format mp3 " + text
            self.downloading = True
            subprocess.run(shlex.split(line))

            # For every mp3 in folder, crop the embedded image
            for item in os.listdir(youtube_dir):
                if item.endswith(".mp3"):
                    path = os.path.join(youtube_dir, item)
                else:
                    continue

                # Get image from mp3 file
                tag = stagger.read_tag(path)
                tt = tag[stagger.id3.APIC][0]
                s = io.BytesIO(tt.data)
                im = Image.open(s)

                # Check for side bars (hacky, but should work most of the time)
                w, h = im.size
                p1 = im.getpixel((1, 1))
                p2 = im.getpixel((30, 30))
                p3 = im.getpixel((w - 30, h - 30))
                p4 = im.getpixel((w - 1, h - 1))
                if p1 == p2 == p3 == p4:

                    # Crop to square
                    m = min(w, h)
                    im = im.crop((
                        (w - m) / 2,
                        (h - m) / 2,
                        (w + m) / 2,
                        (h + m) / 2,
                    ))

                    # Convert to bytes string
                    g = io.BytesIO()
                    g.seek(0)
                    im.save(g, 'JPEG')
                    g.seek(0)
                    string = g.getvalue()

                    # Embed back into mp3
                    # Workaround for unix signals issue
                    p = Process(target=self.embed_image, args=(path, string))
                    p.start()
                    p.join()

        # Spotify -> Youtube downloader
        elif "open.spotify.com/album/" in text:
            if "spotdl" not in downloaders:
                self.tauon.gui.show_message("Downloading Spotify (via Youtube) albums requires spotdl")
                return

            if "youtube-dl" not in downloaders:
                self.tauon.gui.show_message("Downloading Youtube songs (from Spotify) requires youtube-dl")
                return

            track_list_file = os.path.join(dl_dir, "tracklist.txt")
            code = text.split("/")[-1]

            line = self.tauon.launch_prefix + "spotdl --write-to=\"" + track_list_file + "\" --album " + text
            self.downloading = True
            subprocess.run(shlex.split(line))

            if not os.path.isfile(track_list_file):
                self.downloading = False
                self.tauon.gui.show_message("Could not get tracklist from Spotify")
                return

            self.downloading = True
            self.tauon.gui.show_message("Type detected: Spotify", 'Link: ' + text)
            os.makedirs(os.path.join(dl_dir, code))
            line = self.tauon.launch_prefix + "spotdl --list=\"" + track_list_file + "\" --folder=\"" + os.path.join(dl_dir, code) + "\""
            subprocess.run(shlex.split(line))

            if os.path.isfile(track_list_file):
                os.remove(track_list_file)

        # Bandcamp downloader
        elif ".bandcamp.com" in text:
            if "bandcamp-dl" not in downloaders:
                self.tauon.gui.show_message("Downloading Bandcamp albums requires bandcamp-dl")
                return

            self.tauon.gui.show_message("Type detected: Bandcamp", 'Link: ' + text)
            line = self.tauon.launch_prefix + "bandcamp-dl -e --base-dir=\"" + dl_dir + "\" --template=\"%{artist} - %{album}/%{track} - %{title}\" " + text
            self.downloading = True
            subprocess.run(shlex.split(line))

        # Soundcloud downloader
        elif "soundcloud.com" in text:
            if "scdl" not in downloaders:
                self.tauon.gui.show_message("Downloading Soundcloud playlists requires scdl")
                return

            self.tauon.gui.show_message("Type detected: Soundcloud", 'Link: ' + text)
            line = self.tauon.launch_prefix + "scdl -l " + text + " --path=\"" + dl_dir + "\""
            self.downloading = True
            subprocess.run(shlex.split(line))

        # No downloader for link found
        else:
            self.tauon.gui.show_message("Not compatible with this type of link")

        # Move downloads from cache folder to music folder
        for item in os.listdir(dl_dir):
            item_path = os.path.join(dl_dir, item)
            target_path = os.path.join(self.tauon.music_directory, item)

            if target_path not in self.tauon.dl_mon.done:
                self.tauon.dl_mon.done.add(target_path)
            try:
                shutil.move(item_path, self.tauon.music_directory)
                self.import_item(target_path)
            except:
                self.downloading = False
                self.tauon.gui.show_message("File already exists", mode='error')
                raise

        # Give warning about spotdl
        if 'open.spotify.com' in text:
            self.tauon.gui.show_message("Download complete",
                                        "Warning: This method is unreliable, found tracks may not match", mode='done')

        self.downloading = False