Code example #1
File: worker.py Project: cobain/iSpider
def get_gif_url_list(timeout):
    gif_url_list_q = Queue()
    for i in range(1,6):
        process_list = []
        for j in range(1 + (i - 1) * 10,1 + i * 10):
            process_list.append(Process(target=fetch_gif_url,args=(data,j,gif_url_list_q)))
        for j in range(1 + (i - 1) * 10,1 + i * 10):
            process_list[j - (i - 1) * 10 - 1].start()
            # print '任务 {0} 开始...'.format(j)

        time_sum = 0
        while 1:
            for process in process_list[:]:  # iterate over a copy so removing is safe
                if not process.is_alive():
                    process_list.remove(process)
            if len(process_list) == 0:
                break

            elif time_sum == timeout:
                for process in process_list[:]:  # iterate over a copy so removing is safe
                    process.terminate()
                    print '{0} hung and was killed...'.format(process.name)
                    process_list.remove(process)
                break
            else:
                time_sum += 1
                sleep(1)
                print str(time_sum) + ' seconds'
                continue
    print (gif_url_list_q.qsize()), 'results'
    gif_url_list = []
    for i in range(gif_url_list_q.qsize()):
        gif_url_list.append(gif_url_list_q.get())
    return gif_url_list
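A note on the draining loop at the end of this example: qsize() on a multiprocessing.Queue is only an approximate count (and raises NotImplementedError on macOS), so draining by calling get() until the queue reports Empty is often more robust. A minimal sketch under that assumption; the helper name drain_queue is illustrative and not part of the project above:

from Queue import Empty  # Python 2; on Python 3 use "from queue import Empty"

def drain_queue(q, timeout=1):
    # Collect everything currently in the queue without trusting qsize().
    items = []
    while True:
        try:
            items.append(q.get(timeout=timeout))
        except Empty:
            break
    return items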
Code example #2
File: vcf_to_matrix.py Project: nate-d-olson/NASP
def parse_input_files( input_files, num_threads, genomes, min_coverage, min_proportion ):
    from multiprocessing import Process, Queue
    #from queue import Queue
    from time import sleep
    input_q = Queue()
    output_q = Queue()
    for input_file in input_files:
        input_q.put( input_file )
    if num_threads > input_q.qsize():
        num_threads = input_q.qsize()
    sleep( 1 )
    thread_list = []
    for current_thread in range( num_threads ):
        input_q.put( None )
        current_thread = Process( target=manage_input_thread, args=[ genomes.reference(), min_coverage, min_proportion, input_q, output_q ] )
        current_thread.start()
        #manage_input_thread( genomes.reference(), min_coverage, min_proportion, input_q, output_q )
        thread_list.append( current_thread )
    sleep( 1 )
    while num_threads > 0:
        new_genome = output_q.get()
        if new_genome is None:
            num_threads -= 1
        elif isinstance( new_genome, str ):
            genomes.add_failed_genome( new_genome )
        else:
            genomes.add_genome( new_genome )
    sleep( 1 )
    for current_thread in thread_list:
        current_thread.join()
Code example #3
def test_transfert_queue():
    t1 = "testTopic"
    topic = Topics()
    q = Queue()

    topic.process(t1,123)
    topic.process(t1,456)
    topic.process(t1,789)

    assert q.empty()

    topic.transfer(t1,q)

    assert q.qsize() > 0

    assert q.get() == [0, 123]
    assert q.get() == [1, 456]
    assert q.get() == [2, 789]

    topic.process(t1,111)
    topic.process(t1,222)

    assert q.qsize() > 0

    assert q.get() == [3, 111]
    assert q.get() == [4, 222]
Code example #4
File: horizon-agent.py Project: ftdysa/skyline
    def run(self):
        logger.info('starting horizon agent')
        listen_queue = Queue(maxsize=settings.MAX_QUEUE_SIZE)
        pid = getpid()

        # Start the workers
        for i in range(settings.WORKER_PROCESSES):
            Worker(listen_queue, pid).start()

        # Start the listeners
        Listen(settings.PICKLE_PORT, listen_queue, pid, type="pickle").start()
        Listen(settings.UDP_PORT, listen_queue, pid, type="udp").start()

        # Start the roomba
        Roomba(pid).start()

        # Warn the Mac users
        try:
            listen_queue.qsize()
        except NotImplementedError:
            logger.info('WARNING: Queue().qsize() not implemented on Unix platforms like Mac OS X. Queue size logging will be unavailable.')

        # Keep yourself occupied, sucka
        while 1:
            time.sleep(100)
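The try/except around listen_queue.qsize() above exists because multiprocessing.Queue.qsize() raises NotImplementedError on macOS, where the underlying sem_getvalue() is unavailable. If the queue size is logged in more than one place, the same guard can be factored out; a minimal sketch (safe_qsize is an illustrative helper, not part of skyline):

def safe_qsize(q):
    # Return the approximate queue size, or -1 where qsize() is unsupported (e.g. macOS).
    try:
        return q.qsize()
    except NotImplementedError:
        return -1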
Code example #5
File: horizon-agent.py Project: B-Rich/skyline
    def run(self):
        logger.info('starting horizon agent')
        listen_queue = Queue(maxsize=settings.MAX_QUEUE_SIZE)
        pid = getpid()

        #If we're not using oculus, don't bother writing to mini
        try:
            skip_mini = True if settings.OCULUS_HOST == '' else False
        except Exception:
            skip_mini = True

        # Start the workers
        for i in range(settings.WORKER_PROCESSES):
            if i == 0:
                Worker(listen_queue, pid, skip_mini, canary=True).start()
            else:
                Worker(listen_queue, pid, skip_mini).start()

        # Start the listeners
        Listen(settings.PICKLE_PORT, listen_queue, pid, type="pickle").start()
        Listen(settings.UDP_PORT, listen_queue, pid, type="udp").start()

        # Start the roomba
        Roomba(pid, skip_mini).start()

        # Warn the Mac users
        try:
            listen_queue.qsize()
        except NotImplementedError:
            logger.info('WARNING: Queue().qsize() not implemented on Unix platforms like Mac OS X. Queue size logging will be unavailable.')

        # Keep yourself occupied, sucka
        while 1:
            time.sleep(100)
Code example #6
File: test_cached.py Project: schnitzelbub/django-q
def test_cached(broker):
    broker.purge_queue()
    broker.cache.clear()
    group = 'cache_test'
    # queue the tests
    task_id = async('math.copysign', 1, -1, cached=True, broker=broker)
    async('math.copysign', 1, -1, cached=True, broker=broker, group=group)
    async('math.copysign', 1, -1, cached=True, broker=broker, group=group)
    async('math.copysign', 1, -1, cached=True, broker=broker, group=group)
    async('math.copysign', 1, -1, cached=True, broker=broker, group=group)
    async('math.copysign', 1, -1, cached=True, broker=broker, group=group)
    async('math.popysign', 1, -1, cached=True, broker=broker, group=group)
    iter_id = async_iter('math.floor', [i for i in range(10)], cached=True)
    # test wait on cache
    # test wait timeout
    assert result(task_id, wait=10, cached=True) is None
    assert fetch(task_id, wait=10, cached=True) is None
    assert result_group(group, wait=10, cached=True) is None
    assert result_group(group, count=2, wait=10, cached=True) is None
    assert fetch_group(group, wait=10, cached=True) is None
    assert fetch_group(group, count=2, wait=10, cached=True) is None
    # run a single inline cluster
    task_count = 17
    assert broker.queue_size() == task_count
    task_queue = Queue()
    stop_event = Event()
    stop_event.set()
    for i in range(task_count):
        pusher(task_queue, stop_event, broker=broker)
    assert broker.queue_size() == 0
    assert task_queue.qsize() == task_count
    task_queue.put('STOP')
    result_queue = Queue()
    worker(task_queue, result_queue, Value('f', -1))
    assert result_queue.qsize() == task_count
    result_queue.put('STOP')
    monitor(result_queue)
    assert result_queue.qsize() == 0
    # assert results
    assert result(task_id, wait=500, cached=True) == -1
    assert fetch(task_id, wait=500, cached=True).result == -1
    # make sure it's not in the db backend
    assert fetch(task_id) is None
    # assert group
    assert count_group(group, cached=True) == 6
    assert count_group(group, cached=True, failures=True) == 1
    assert result_group(group, cached=True) == [-1, -1, -1, -1, -1]
    assert len(result_group(group, cached=True, failures=True)) == 6
    assert len(fetch_group(group, cached=True)) == 6
    assert len(fetch_group(group, cached=True, failures=False)) == 5
    delete_group(group, cached=True)
    assert count_group(group, cached=True) is None
    delete_cached(task_id)
    assert result(task_id, cached=True) is None
    assert fetch(task_id, cached=True) is None
    # iter cached
    assert result(iter_id) is None
    assert result(iter_id, cached=True) is not None
    broker.cache.clear()
Code example #7
File: path_evaluator.py Project: hoergems/LQG
 def evaluate_paths(self, paths, P_t, current_step, horizon=-1):
     jobs = collections.deque() 
     eval_queue = Queue()
     evaluated_paths = []        
     paths_tmp = [(i, paths[i]) for i in xrange(len(paths)) if len(paths[i][0]) != 0]
     for i in xrange(len(paths_tmp)):            
         logging.info("PathEvaluator: Evaluate path " + str(i))            
         p = Process(target=self.evaluate, args=(paths_tmp[i][0], 
                                                 paths_tmp[i][1], 
                                                 P_t, 
                                                 current_step, 
                                                 horizon, 
                                                 self.robot, 
                                                 eval_queue,))
         p.start()
         jobs.append(p)           
             
         if len(jobs) == self.num_cores - 1 or i == len(paths_tmp) - 1:
             if i == len(paths_tmp) - 1 and not len(jobs) == self.num_cores - 1:
                 while not eval_queue.qsize() == len(paths_tmp) % (self.num_cores - 1):                        
                     time.sleep(0.00001)
             else:
                 while not eval_queue.qsize() == self.num_cores - 1:                        
                     time.sleep(0.00001)
             jobs.clear()
             q_size = eval_queue.qsize()
             for j in xrange(q_size):
                 eval_elem = eval_queue.get()
                 if eval_elem != None:                   
                     evaluated_paths.append(eval_elem)
                 else:
                     print "Path could not be evaluated"                    
     path_rewards = [evaluated_paths[i][1] for i in xrange(len(evaluated_paths))]               
     if len(path_rewards) == 0:
         return (None, None, None, None, None, None, None, None, None)
     best_index = evaluated_paths[0][0]
     best_path = evaluated_paths[0][2]        
     best_objective = path_rewards[0] 
     s_covariances = evaluated_paths[0][4] 
     deviation_covariances = evaluated_paths[0][5]
     estimated_deviation_covariances = evaluated_paths[0][6]
     for i in xrange(1, len(path_rewards)):                        
         if path_rewards[i] > best_objective:
             best_index = evaluated_paths[i][0]                
             best_objective = path_rewards[i]                
             best_path = evaluated_paths[i][2]
             s_covariances = evaluated_paths[i][4]
             deviation_covariances = evaluated_paths[i][5]
             estimated_deviation_covariances = evaluated_paths[i][6]
     logging.info("PathEvaluator: Objective value for the best path is " + str(best_objective))
     return (best_index,
             best_path[0], 
             best_path[1], 
             best_path[2], 
             best_path[3], 
             best_objective, 
             s_covariances,
             deviation_covariances,
             estimated_deviation_covariances)
Code example #8
class CommandQueue():

    _STATE_NORMAL = 'normal'
    _STATE_FLUSHING = 'flushing'
    _STATE_FINISHED = 'finished'

    def __init__(self):
        self._in = Queue()
        self._out = Queue()
        self._state = self._STATE_NORMAL
        self._state_lock = Lock()
        self._terminating_commands = (
                'notify_kunquat_exception', 'notify_libkunquat_error')

    def update(self):
        in_count = self._in.qsize()
        with self._state_lock:
            if self._state == self._STATE_FLUSHING:
                get_counter = repeat(True)
            else:
                get_counter = range(in_count)

        for _ in get_counter:
            try:
                command_data = self._in.get_nowait()
            except Empty:
                return

            command, _ = command_data
            if command in self._terminating_commands:
                # Make sure we won't block the UI before the terminating command is sent
                with self._state_lock:
                    self._state = self._STATE_FLUSHING

            self._out.put(command_data)

    def put(self, command, *args):
        with self._state_lock:
            is_state_normal = (self._state == self._STATE_NORMAL)
        if is_state_normal:
            self._in.put((command, args))

    def get(self):
        command_data = self._out.get_nowait()
        command, _ = command_data
        if command in self._terminating_commands:
            with self._state_lock:
                self._state = self._STATE_FINISHED
        return command_data

    def get_command_count(self):
        return self._out.qsize()
Code example #9
File: Stress.py Project: bigobject-inc/StressTest
def runTest(num_proc, query_num):
    print "Simulate concurrent user: {}".format(num_proc)

    jobs = []
    queue = Queue()
    try:
        for num in range(num_proc):      
            p = Process(target=runCmdWorker, args=(query_num, queue))
            jobs.append(p)
            p.start()

        for j in jobs:
            j.join()

            if j.exitcode != 0:
                    print '%s.exitcode = %s' % (j.name, j.exitcode)

    finally:
        total_avg = 0.0
        for i in range(queue.qsize()):
            result = queue.get()
            proc_avg = float(result.split(':')[1])
            total_avg = total_avg + proc_avg
            #print result

        print "average execution time: {:.8f}s".format(total_avg/num_proc)
        print 
Code example #10
File: test_cluster.py Project: fle-internal/django-q
def test_recycle(broker, monkeypatch):
    # set up the Sentinel
    broker.list_key = 'test_recycle_test:q'
    async('django_q.tests.tasks.multiply', 2, 2, broker=broker)
    async('django_q.tests.tasks.multiply', 2, 2, broker=broker)
    async('django_q.tests.tasks.multiply', 2, 2, broker=broker)
    start_event = Event()
    stop_event = Event()
    # override settings
    monkeypatch.setattr(Conf, 'RECYCLE', 2)
    monkeypatch.setattr(Conf, 'WORKERS', 1)
    # set a timer to stop the Sentinel
    threading.Timer(3, stop_event.set).start()
    s = Sentinel(stop_event, start_event, broker=broker)
    assert start_event.is_set()
    assert s.status() == Conf.STOPPED
    assert s.reincarnations == 1
    async('django_q.tests.tasks.multiply', 2, 2, broker=broker)
    async('django_q.tests.tasks.multiply', 2, 2, broker=broker)
    task_queue = Queue()
    result_queue = Queue()
    # push two tasks
    pusher(task_queue, stop_event, broker=broker)
    pusher(task_queue, stop_event, broker=broker)
    # worker should exit on recycle
    worker(task_queue, result_queue, Value('f', -1))
    # check if the work has been done
    assert result_queue.qsize() == 2
    # save_limit test
    monkeypatch.setattr(Conf, 'SAVE_LIMIT', 1)
    result_queue.put('STOP')
    # run monitor
    monitor(result_queue)
    assert Success.objects.count() == Conf.SAVE_LIMIT
    broker.delete_queue()
Code example #11
File: client.py Project: garrylachman/info-manip
def main():
  # Join hub.py 
  try:
    COMM = Communicator(MACHINEID, SERVER, "logAdGrabber")
    LOG.info('Joining hub as %s' % MACHINEID) 
    response = COMM.send_request({}, 'join')
  except Exception as e:
    LOG.error(e)
    return

  LOG.debug('Init data structs')
  pr = {}
  input_queue = Queue(maxsize=NUMADGRABBERS)

  # clean up adsFolder
  for f in os.listdir(ADSFOLDER):
    os.unlink(os.path.join(ADSFOLDER, f))

  for i in range(NUMADGRABBERS):
    if os.path.exists('%d.txt' % i):
      os.unlink('%d.txt' % i)
    LOG.info("Creating phantomjs process %d" % i)
    pr[i] = Process(target=spawn_workers, args=(i, input_queue, COMM))
    pr[i].start()
  
  while True :
    LOG.info('Telling hub I have %d idle slots' % (NUMADGRABBERS -
        input_queue.qsize()))
    job = COMM.send_request({}, 'idle')
    if not job:
      LOG.info("No jobs received") 
      sleep(5)
      continue
    else:
      input_queue.put(job)
Code example #12
File: test_cmds_logs.py Project: carriercomm/paasta
def test_scribe_tail_log_everything():
    env = "fake_env"
    stream_name = "fake_stream"
    service = "fake_service"
    levels = ["fake_level1", "fake_level2"]
    components = ["build", "deploy"]
    clusters = ["fake_cluster1", "fake_cluster2"]
    instance = "fake_instance"
    queue = Queue()
    filter_fn = mock.Mock(return_value=True)

    tailer = iter(
        [
            format_log_line(levels[0], clusters, instance, "build", "level: first. component: build."),
            format_log_line(levels[1], clusters, instance, "deploy", "level: second. component: deploy."),
        ]
    )
    with contextlib.nested(mock.patch("paasta_tools.cli.cmds.logs.scribereader", autospec=True)) as (
        mock_scribereader,
    ):
        mock_scribereader.get_env_scribe_host.return_value = {"host": "fake_host", "port": "fake_port"}
        mock_scribereader.get_stream_tailer.return_value = tailer
        logs.scribe_tail(env, stream_name, service, levels, components, clusters, queue, filter_fn)
        assert mock_scribereader.get_env_scribe_host.call_count == 1
        mock_scribereader.get_stream_tailer.assert_called_once_with(stream_name, "fake_host", "fake_port")
        assert queue.qsize() == 2
        # Sadly, fetching with a timeout seems to be needed with
        # multiprocessing.Queue (this was not the case with Queue.Queue). It
        # failed 8/10 times with a get_nowait() vs 0/10 times with a 0.1s
        # timeout.
        first_line = queue.get(True, 0.1)
        assert "level: first. component: build." in first_line
        second_line = queue.get(True, 0.1)
        assert "level: second. component: deploy." in second_line
Code example #13
File: tracerbull.py Project: atbrox/tracerbull
    def start_services(services, codegen, importcode, tornadoapp, forker, boot_function, template_path="."):

        # loop through all services
        # create hosts file
        # update kill-file for them (processes)
        # create all files (websocket server, and js/python client files)
        host_file = {}
        kill_file = {}
        queue = Queue()
        for service in services:
            websocket_server_code = codegen(
                service, "websocket_server_template.tpl", loader=template.Loader(template_path)
            )
            websocket_server_module = importcode(websocket_server_code)
            websocket_server_class_name = "%s_websocket" % (service["servicename"])
            websocket_server_application = tornadoapp(
                [(r"/", getattr(websocket_server_module, websocket_server_class_name))]
            )
            websocket_server_process = forker(
                0, queue, boot_function, websocket_server_application, service["servicename"], 0, service
            )
            print websocket_server_process
            print "WSS, ", queue.qsize()

        return queue
Code example #14
class QueueEventsSub:

    def __init__(self, maxsize=0):
        self._maxsize = maxsize
        self._q = Queue(maxsize=maxsize)

    def put_event(self, e):
        if self._q.qsize() == self._maxsize and self._maxsize != 0:
            self._q.get_nowait()

        self._q.put(e)

    def get_event(self):
        if self._q.qsize() == 0:
            return None
        return self._q.get()
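A short usage sketch of the bounded subscriber above, showing the drop-oldest policy; the brief sleeps are only there because multiprocessing.Queue passes items through a feeder thread, so qsize() can lag immediately after a put:

import time

sub = QueueEventsSub(maxsize=2)
sub.put_event('a')
sub.put_event('b')
time.sleep(0.1)            # let the feeder thread flush so qsize() is accurate
sub.put_event('c')         # queue is full, so the oldest event ('a') is dropped
time.sleep(0.1)
print(sub.get_event())     # 'b'
print(sub.get_event())     # 'c'
print(sub.get_event())     # None once the queue is empty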
Code example #15
 def execute(self):
     """
     Executing every forest in collection, activating their networks.
     By the way collecting data about best fitness function.
     """
     process_list = []
     forests_queue = Queue(self.power)
     iterational = 0
     print '| |-starting evaluation, training and validation'
     for one_forest in self._forests:
         process_list.append(
             Process(target=main_async_method,
                     args=(forests_queue, copy(one_forest.to_portal()), iterational, self.settings)))
         iterational += 1
     for proc in process_list:
         proc.start()
     for proc in process_list:
         proc.join()
     for smth in range(forests_queue.qsize()):
         tmp = forests_queue.get()
         self._forests[tmp['place']].fitness = tmp['fitness']
     fitness_summ = sum(map(lambda forest: forest.fitness, self._forests))
     fss = map(lambda x: x.fitness, self._forests)
     print 'avg = ', str(sum(fss) / len(fss)), 'max = ', max(fss)
     self.roulet = map(lambda x: x.fitness / fitness_summ, self._forests)
Code example #16
File: THU_LSI.py Project: multiangle/PyNLP
def genDict(path_parent, path_dict_folder):
    # First pass: build the dictionary and gather word-frequency and document-frequency statistics
    p_pool = []
    dict_queue = Queue()
    p_num = 3
    for i in range(p_num):
        p = Process(target=generate_dict_subprocess,
                    args=(i,
                          p_num,
                          dict_queue,
                          cut_all,
                          # '/mnt/D/multiangle/DataSet/THUCNews'
                          path_parent
                          ))
        p_pool.append(p)

    for p in p_pool: # start the worker processes
        # p = Process(p)
        p.start()

    while True: # check whether all workers have finished
        if dict_queue.qsize() >= p_num:
            break
        time.sleep(1)

    dictionary = corpora.Dictionary()
    for i in range(p_num):
        q_dict = dict_queue.get()
        dictionary.merge_with(q_dict)

    for p in p_pool:
        p.terminate()

    dictionary.save(os.path.join(path_dict_folder,'THUNews.dict'))
    dictionary.save_as_text(os.path.join(path_dict_folder,'THUNews.txt'))
Code example #17
File: python-multiprocessing.py Project: Akanoa/PRI
class MyOVBox(OVBox):
	def __init__(self):
		OVBox.__init__(self)
		self.p = None
		self.q = None
	
	def f(self, queue):
		while True:
			queue.put('hello')
			time.sleep(1)
	
	def initialize(self):
		print "process initialize!"
		self.q = Queue()
		self.p = Process(target=self.f, args=(self.q,))
		self.p.start()
	
	def process(self):
		for i in range(self.q.qsize()):
			print self.q.get()
	
	def uninitialize(self):
		print "process uninitialize!"
		self.p.terminate()
		self.p.join()				
Code example #18
File: pronew.py Project: naiaden/VacCor
    def run(self):
        program_start = time.time()

        wQ = Queue()
        wQ.cancel_join_thread()
        wP = []

        for i in range(tp.threads):
            p = Process(target=tp.process_file_write, args=(i, wQ,))
            wP.append(p)
            p.start()

        # 10000 files enter here
        sys.stdout.write(Fore.GREEN + "Reading from stdin...")
        input_files = sys.stdin.readlines()
        for input_file in input_files:
            wQ.put(input_file.rstrip())
        print Fore.GREEN + "\rDone reading from stdin. I found %d files." % (wQ.qsize())

        for _ in wP:
            wQ.put(None)

        for p in wP:
            p.join()

        program_stop = time.time()
        print Fore.GREEN + "Processed %d files in %f seconds (%fs avg)" % (len(input_files), program_stop-program_start, (program_stop-program_start)/len(input_files))
Code example #19
File: db_load.py Project: mobone/screener
def load_prices_to_db():
    date='2999-10-10'
    try:
        result = cur.execute('select Date from price_data order by date desc limit 1')
        date = result.fetchone()[0]
    except:
        print 'Database query failed'
    now = time.strftime("%Y-%m-%d")
    
    if date>now or True:
        print 'Updating Prices'
        try:
            result = cur.execute('DELETE from price_data')
            con.commit()
        except Exception as e:
            print e
        result = cur.execute('SELECT ticker from screens group by ticker')
        tickers = result.fetchall()
        tick_count = 0
        ticker_queue = Queue()
        for ticker in tickers:
            ticker_queue.put(str(ticker[0]))
        for i in range(cpu_count()*2):
            p = Process(target = get_prices, args = (ticker_queue,))
            p.start()
        while ticker_queue.qsize()>0:
            sleep(1)
Code example #20
File: httpload.py Project: 386/httploadtest
def TestMultiUserProcess(filenames,threadnums,T):

    allnums=sum(threadnums)
    eachnums=allnums/Global.CPUs
    actLists=[]
    for i in range(len(threadnums),1,-1):
        threadnums[i-1]=sum(threadnums[0:i])
    for i in range(0,len(filenames)):
        actList=[]
        for line in open(filenames[i]):
            words=line.split("||")
            temp=[]
            for w in words:
                temp.append(w)
            actList.append(temp)
        actLists.append(actList)
    del Global.result[:]
    Q=Queue(Global.CPUs)
    lock = multiprocessing.Lock()
    for i in range(0,Global.CPUs):
        p = multiprocessing.Process(target = MultiUserTest, args = (Q,lock,eachnums,threadnums,actLists,T,i,))
        p.start()
    st=time.time()
    while True:
        time.sleep(1)
        if Q.qsize()==Global.CPUs:
            break
    for i in range(0,Global.CPUs):
        Global.result.extend(Q.get(i))
Code example #21
File: proxy.py Project: eastonqiu/proxy
    def ValidateProxies(self, proxyList):
            
        maxProc = 50
        
        tests = ["http://www.baidu.com"]
    
        result = Queue()
       
        start = time.clock()
        
        for i in proxyList:
            p = Process(target=self.CheckProxy, args=(i, tests, result))
            p.start()  
            
            if len(multiprocessing.active_children()) > maxProc:
                #print('active_children: ', multiprocessing.active_children())
                p.join()
            
        while len(multiprocessing.active_children()) > 0:
            time.sleep(3)
        end = time.clock()
        #print("total time for validation:", end - start, "s")
        
        self.pool = []
        
        for i in range(result.qsize()):
            a = result.get()
            self.pool += [Proxy(a[0], a[1])]

        
        print("{0} validated".format(len(self.pool)))
Code example #22
File: test_cluster.py Project: sebasmagri/django-q
def test_recycle(r):
    # set up the Sentinel
    list_key = 'test_recycle_test:q'
    async('django_q.tests.tasks.multiply', 2, 2, list_key=list_key, redis=r)
    async('django_q.tests.tasks.multiply', 2, 2, list_key=list_key, redis=r)
    async('django_q.tests.tasks.multiply', 2, 2, list_key=list_key, redis=r)
    start_event = Event()
    stop_event = Event()
    # override settings
    Conf.RECYCLE = 2
    Conf.WORKERS = 1
    # set a timer to stop the Sentinel
    threading.Timer(3, stop_event.set).start()
    s = Sentinel(stop_event, start_event, list_key=list_key)
    assert start_event.is_set()
    assert s.status() == Conf.STOPPED
    assert s.reincarnations == 1
    async('django_q.tests.tasks.multiply', 2, 2, list_key=list_key, redis=r)
    async('django_q.tests.tasks.multiply', 2, 2, list_key=list_key, redis=r)
    task_queue = Queue()
    result_queue = Queue()
    # push two tasks
    pusher(task_queue, stop_event, list_key=list_key, r=r)
    pusher(task_queue, stop_event, list_key=list_key, r=r)
    # worker should exit on recycle
    worker(task_queue, result_queue, Value('f', -1))
    # check if the work has been done
    assert result_queue.qsize() == 2
    # save_limit test
    Conf.SAVE_LIMIT = 1
    result_queue.put('STOP')
    # run monitor
    monitor(result_queue)
    assert Success.objects.count() == Conf.SAVE_LIMIT
    r.delete(list_key)
Code example #23
def crunch(file_name, ext_type, handler, pool_size=4, queue_size=40,
           limit=None):

    print 'Crunching file: %s, limit: %s' % (file_name, limit)

    q = JoinableQueue(queue_size)
    q_feats = Queue()

    pool = Pool(pool_size, wrap_handler(handler), ((q, q_feats),))

    with file_reader(file_name) as reader:
        idx = 0
        for entry in reader:

            if (entry.pathname.find(ext_type) != -1):
                text = [b for b in entry.get_blocks()]
                key = entry.pathname.split('/')[-1].split('.')[0]

                q.put((key, text), True)
                idx += 1

                print 'Processing:', entry.pathname, idx

                if limit and idx >= limit:
                    print 'Reached the limit'
                    break

        q.close()
        q.join()
        pool.close()

    result = []
    for i in range(q_feats.qsize()):
        result.append(q_feats.get())
    return result
Code example #24
File: Quu.py Project: vergiliu/ps
class Quu(): #Singleton
    def __init__(self):
        self.queue = Queue()
        logger.debug('new queue')

    def addFolders(self, aLeftFolder, aRightFolder):
        logger.debug('adding new item in the quu')
        myComparator = FolderComparator(aLeftFolder, aRightFolder)
        myComparator.setSyncType("keepboth")
        self.queue.put(myComparator)

    def getQuu(self):
        return self.queue

    def getNext(self):
        """
        @return FolderComparator the folder
        """
        try:
            return self.queue.get_nowait()
        except BaseException:
            return None

    def getSize(self):
        return self.queue.qsize()
Code example #25
File: test_scheduler.py Project: nickpolet/django-q
def test_scheduler(r):
    list_key = 'scheduler_test:q'
    r.delete(list_key)
    schedule = create_schedule('math.copysign',
                               1, -1,
                               hook='django_q.tests.tasks.result',
                               schedule_type=Schedule.HOURLY,
                               repeats=1)
    assert schedule.last_run() is None
    # run scheduler
    scheduler(list_key=list_key)
    # set up the workflow
    task_queue = Queue()
    stop_event = Event()
    stop_event.set()
    # push it
    pusher(task_queue, stop_event, list_key=list_key, r=r)
    assert task_queue.qsize() == 1
    assert r.llen(list_key) == 0
    task_queue.put('STOP')
    # let a worker handle them
    result_queue = Queue()
    worker(task_queue, result_queue, Value('b', -1))
    assert result_queue.qsize() == 1
    result_queue.put('STOP')
    # store the results
    monitor(result_queue)
    assert result_queue.qsize() == 0
    schedule = Schedule.objects.get(pk=schedule.pk)
    assert schedule.repeats == 0
    assert schedule.last_run() is not None
    assert schedule.success() is True
    task = fetch(schedule.task)
    assert task is not None
    assert task.success is True
    assert task.result < 0
    for t in Schedule.TYPE:
        schedule = create_schedule('django_q.tests.tasks.word_multiply',
                                   2,
                                   word='django',
                                   schedule_type=t[0],
                                   repeats=1,
                                   hook='django_q.tests.tasks.result'
                                   )
        assert schedule is not None
        assert schedule.last_run() is None
    scheduler()
Code example #26
File: ng.py Project: ArturFis/grab
def start_spider(spider_cls):
    try:
        result_queue = Queue()
        network_response_queue = Queue()
        shutdown_event = Event()
        generator_done_event = Event()
        taskq = QueueBackend('ng')

        #from grab.spider.base import logger_verbose
        #logger_verbose.setLevel(logging.DEBUG)

        kwargs = {
            'taskq': taskq,
            'result_queue': result_queue,
            'network_response_queue': network_response_queue,
            'shutdown_event': shutdown_event,
            'generator_done_event': generator_done_event,
            'ng': True,
        }

        # Generator: OK
        generator_waiting_shutdown_event = Event()
        bot = spider_cls(waiting_shutdown_event=generator_waiting_shutdown_event, **kwargs)
        generator = Process(target=bot.run_generator)
        generator.start()

        # Downloader: OK
        downloader_waiting_shutdown_event = Event()
        bot = spider_cls(waiting_shutdown_event=downloader_waiting_shutdown_event,
                         **kwargs)
        downloader = Process(target=bot.run)
        downloader.start()

        # Parser: OK
        events = []
        for x in xrange(2):
            parser_waiting_shutdown_event = Event()
            events.append(parser_waiting_shutdown_event)
            bot = spider_cls(waiting_shutdown_event=parser_waiting_shutdown_event,
                             **kwargs)
            parser = Process(target=bot.run_parser)
            parser.start()

        while True:
            time.sleep(2)
            print('task size', taskq.size())
            print('response size', network_response_queue.qsize())
            if (downloader_waiting_shutdown_event.is_set() and
                all(x.is_set() for x in events)):
                shutdown_event.set()
                break

        time.sleep(1)

        print('done')
    finally:
        for child in active_children():
            logging.debug('Killing child process (pid=%d)' % child.pid)
            child.terminate()
Code example #27
class BroadFirstGenerator(Process):
    def __init__(self, config_path):
        Process.__init__(self)
        self.config = BroadFirstConfig(config_path)
        self.data = Queue()
        self.x = self.config.x
        self.y = self.config.y
        self.stop_produce = False
        self.stop_consume = False
        self.total = self.config.getTotalTileNum()
        self.count = 0
        
    def run(self):
        init = True
        while True and not (self.stop_produce):
            dl = self.data.qsize()
            if dl <= self.config.MAX_QUEUE/2 and not self.stop_produce:
                for _ in range(self.config.MAX_QUEUE/4):
                    if not self.getNextXY():
                        self.stop_produce = True
                        break
                    else:
                        self.data.put((self.x, self.y, self.config.min_z))
            if init:
                init = False
                num_consumers = multiprocessing.cpu_count() * 2
#                 num_consumers = 2
                consumers = [BroadFirstDownloader(self.data, self.config) for _ in range(num_consumers)]
                for consumer in consumers:
                    consumer.start()
            if self.data.empty():
                break
            time.sleep(60)
                        
    def getNextXY(self):
#         logger = multiprocessing.get_logger()
        if self.config.debug:
            if self.count >= self.config.debugTryTimes:
                self.x = None
                self.y = None
                return False
        self.count = self.count + 1
        if self.count % 1 == 0 and self.x != None and self.y != None:
#             process = self.config.getProcess(self.x, self.y, self.data.qsize())
#             print '%s: Proceed:%f/100' % (time.ctime(), process)
#             logger.debug('%s: Proceed:%f/100' % (time.ctime(), process))
            self.config.updateState(self.x, self.y)
        if self.y < self.config.max_y:
            if self.x < self.config.max_x:
                self.x = self.x + 1
            else:
                self.x = self.min_x
                self.y = self.y + 1
            return True
        else:
            self.x = None
            self.y = None
            return False
Code example #28
File: spider_ng.py Project: 31H0B1eV/grab
def start_spider(spider_cls):
    try:
        result_queue = Queue()
        response_queue = Queue()
        shutdown_event = Event()
        generator_done_event = Event()
        taskq = Queue()

        args = (taskq, result_queue, response_queue, shutdown_event, generator_done_event)
        kwargs = dict(verbose_logging=True)

        generator_wating_shutdown_event = Event()
        generator_cls = build_generator_spider(spider_cls)
        generator = create_process(generator_cls, generator_wating_shutdown_event,
                                   *args, **kwargs)
        generator.start()

        downloader_wating_shutdown_event = Event()
        downloader = create_process(BaseDownloaderSpider, downloader_wating_shutdown_event,
                                    *args, **kwargs)
        downloader.start()

        parser_wating_shutdown_event = Event()
        parser_cls = build_parser_spider(spider_cls)
        parser = create_process(parser_cls, parser_wating_shutdown_event,
                                *args, **kwargs)
        parser.start()

        while True:
            time.sleep(2)
            print 'task size', taskq.qsize()
            print 'response size', response_queue.qsize()
            if (downloader_wating_shutdown_event.is_set() and
                parser_wating_shutdown_event.is_set()):
                shutdown_event.set()
                break

        time.sleep(1)

        print 'done'
    finally:
        for child in active_children():
            logging.debug('Killing child process (pid=%d)' % child.pid)
            child.terminate()
Code example #29
File: vcf_to_matrix.py Project: TGenNorth/NASP
def parse_input_files(input_files, num_threads, genomes, min_coverage, min_proportion):
    """
    Use a pool of worker threads to, in parallel, read in the input files.
    Populate the genome collection with the read-in data.
    This is the "poison pill" thread management algorithm, where threads are
    each given a "you can stop now" task once the actual queue of tasks is
    complete.
    """
    # Lines below marked "Single-thread" can be uncommented and replace
    # the lines marked "Multi-thread" to get single-thread behavior.
    from multiprocessing import Process, Queue  # Multi-thread
    # from queue import Queue  # Single-thread
    from time import sleep

    input_q = Queue()
    output_q = Queue()
    for input_file in input_files:
        input_q.put(input_file)
    # If the number of jobs is already smaller than the thread pool...
    if num_threads > input_q.qsize():
        num_threads = input_q.qsize()
    sleep(1)
    thread_list = []
    for current_thread in range(num_threads):
        input_q.put(None)
        current_thread = Process(target=manage_input_thread,
                                 args=[genomes.reference(), min_coverage, min_proportion, input_q,
                                       output_q])  # Multi-thread
        current_thread.start()  # Multi-thread
        #manage_input_thread( genomes.reference(), min_coverage, min_proportion, input_q, output_q )  # Single-thread
        thread_list.append(current_thread)  # Multi-thread
    sleep(1)
    while num_threads > 0:
        new_genome = output_q.get()
        if new_genome is None:
            num_threads -= 1
        elif isinstance(new_genome, str):
            # Reading this file in failed.  We only know the filename.
            genomes.add_failed_genome(new_genome)
        else:
            genomes.add_genome(new_genome)
    sleep(1)
    for current_thread in thread_list:  # Multi-thread
        current_thread.join()  # Multi-thread
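The docstring above describes the "poison pill" shutdown protocol in words; below is a minimal, self-contained sketch of the same idea, stripped of the VCF-specific work (the doubling task and the names worker/task_q/result_q are illustrative only):

from multiprocessing import Process, Queue

def worker(task_q, result_q):
    while True:
        task = task_q.get()
        if task is None:            # the poison pill: this worker may stop now
            result_q.put(None)      # tell the parent this worker is done
            break
        result_q.put(task * 2)      # stand-in for the real per-task work

if __name__ == '__main__':
    task_q, result_q = Queue(), Queue()
    for t in range(5):
        task_q.put(t)
    n_workers = 2
    for _ in range(n_workers):
        task_q.put(None)            # one pill per worker
    workers = [Process(target=worker, args=(task_q, result_q)) for _ in range(n_workers)]
    for w in workers:
        w.start()
    results, done = [], 0
    while done < n_workers:         # mirrors the "while num_threads > 0" loop above
        item = result_q.get()
        if item is None:
            done += 1
        else:
            results.append(item)
    for w in workers:
        w.join()
    print(sorted(results))          # [0, 2, 4, 6, 8]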
Code example #30
File: brokest.py Project: chiwhalee/brokest
def server_discover(servers=None, exclude_patterns=None, querry_timeout=5, qsize=8, info=0): 
    """
        issue: todo: using multiprocessing instead of threading may be faster;
            qsize should be tuned to a better value
    """
    
    #if servers is None: 
    if servers is None: 
        servers= []
    if servers== 'all': 
        servers= (
                ['node%d'%i for i in range(1, 100)]  #8cpu, normal, mem48, mem96
            +   ['localhost', 'qtg7501']
                )
    
    print('run server_discover ... ') 
    
    res = []
    msg = {'header': 'querry', 'what': 'is_exists'}

    if 0:  # serial  
        for s in servers: 
            rep=send_msg(msg, server=s, querry_timeout=querry_timeout, info=info)
            if rep == 'Y' : 
                res.append(s)
    else: 
        #qsize = 8   #this should not be too large, or else errors may occur
        from Queue import Queue 
        qq = Queue(qsize)
        def func(s): 
            try: 
                rep=send_msg(msg, server=s, querry_timeout=querry_timeout, info=info)
            except Exception as err: 
                rep = str(err)
                #rep = 'error'
            qq.put((s, rep))
        n = len(servers)/qsize  + 1 
        for i in range(n) : 
            tt = []
            for s in servers[i*qsize: i*qsize + qsize]: 
                t = threading.Thread(target=func, args=(s, ))
                t.start()
                tt.append(t)
                
            for t in tt: 
                t.join()
            temp = [qq.get() for a in range(qq.qsize())]
            #print_vars(vars(),  ['i, temp'])
            res.extend(temp)
        res = [r[0] for r in res if r[1] == 'Y' ]
    if exclude_patterns is not None: 
        for e in exclude_patterns: 
            res= [r for r in res if e not in r]
            
    print('servers found are:\n\t %s'%(res, )) 
    return res 
Code example #31
class worker(api):
    def __init__(self, workdir, n_threads=0):
        self.n_threads = cpu_count() if n_threads == 0 else n_threads
        self.process_list = []
        self.workdir = workdir

        self.work_queue = Queue()
        self.update_queues = []
        self.testcase_report = Queue()

        self.coverage = dict()
        self.testcases = []
        self.executed_testcases = Value('i', 0)

        self.mutator = mutator()

        if not os.path.exists(workdir):
            os.makedirs(workdir)
        os.chdir(workdir)

    def run(self, n_testcases=0):
        #cleanup workdir
        for fname in glob.glob("*sancov") + glob.glob("t-*"):
            os.remove(fname)

        self.work_queue.put(self.mutator.initial_testcase())
        self.executor = executor(self.cmd, ".")

        #start worker threads
        self.update_queues = []
        for i in xrange(self.n_threads):
            self.update_queues.append(Queue())
            d = Process(target=self.worker, args=(i, ))
            d.daemon = True
            d.start()
            self.process_list += [d]

        self.client(n_testcases)

    def apply_update(self, update):
        if len(update["testcase_update"]) > 0:
            for t in update["testcase_update"]:
                pid = t["parent_id"]
                self.testcases += [t]
                if t["id"] > 0 and t["id"] < len(self.testcases):
                    self.testcases[pid]["childs"] += [t["id"]]

            self.mutator.random_merge_cache = {}

        if "coverage_update" in update:
            for k in update["coverage_update"].keys():
                if k not in self.coverage:
                    self.coverage[k] = set()
                self.coverage[k].update(set(update["coverage_update"][k]))

        self.crash = update["crash"]

    #execute testcases and process results
    def worker(self, worker_id):
        print "worker started"
        self.worker_id = worker_id
        while True:
            for mutated in self.get_testcases():
                self.execute_testcase(mutated)

    def execute_testcase(self, testcase):
        self.executed_testcases.value += 1
        try:
            data = self.mutator.mutate_seed(testcase, self.seed_data)
            stderr, crash, coverage = self.executor.call(data, self.ext)
            if len(coverage) > 0:
                self.process_result(testcase, stderr, crash, coverage, data)
        except:
            import traceback
            traceback.print_exc()
            pass

    #detect new edges/crashes and appends to report queues
    def process_result(self, testcase, stderr, crash, coverage, binary):
        testcase["coverage"] = (coverage)
        testcase["bin"] = b64encode(binary)

        #detect new crash
        if crash and crash not in self.crash:
            print "New crash %s" % crash
            testcase["crash"] = crash
            testcase["stderr"] = stderr
            self.testcase_report.put(testcase)
            return

        #found new blocks, requeue to remove unused mutations
        if "minimize" not in testcase:
            new_blocks = self.compute_new_blocks(coverage)

            if new_blocks > 0:
                #remove unused mutations
                testcase["new_blocks"] = new_blocks
                minimize = {}
                minimize["i"] = 0
                minimize["reference"] = deepcopy(testcase)
                testcase["minimize"] = minimize
                self.work_queue.put(testcase)

        #remove unused mutations
        else:
            i = testcase["minimize"]["i"]
            reference = testcase["minimize"]["reference"]

            #test if testcase covered equal or more blocks
            ge = False
            if not crash:
                new_blocks = self.compute_new_blocks(coverage)

                testcase["new_blocks"] = new_blocks
                if new_blocks >= reference["new_blocks"]:
                    ge = True

            #done
            if i >= len(testcase["mutations"]) - 1:
                if not ge:
                    testcase = reference

                if "minimize" in testcase: del testcase["minimize"]

                if "random-merge" not in testcase["description"] and len(
                        testcase["mutations"]) > 0:
                    state = testcase["mutations"][-1]
                    testcase["description"] = (
                        "offset=%d: " % state["offset"]) + state["description"]

                print "Minimized testcase: New blocks: %d Parent: %d Description: %s " % (
                    testcase["new_blocks"], testcase["id"],
                    testcase["description"]),
                print "Mutations: %d" % len(testcase["mutations"]),
                print "Report Queue: %d" % self.testcase_report.qsize()
                new_blocks = self.compute_new_blocks(coverage)
                self.testcase_report.put(testcase)
                return

            #mutation was unused
            if ge:
                #print "%d unused" % i
                del testcase["minimize"]["reference"]
                testcase["minimize"]["reference"] = deepcopy(testcase)
                testcase["minimize"]["i"] = i

            else:
                #print "%d used" % i
                testcase["minimize"]["i"] = i + 1

            testcase["mutations"] = reference["mutations"][:i] + reference[
                "mutations"][i + 1:]

            del testcase["coverage"]
            self.work_queue.put(testcase)

    #genetic methods
    def get_testcases(self):
        while True:
            #apply updates
            try:
                update = self.update_queues[self.worker_id].get(False)
                self.apply_update(update)
            except:
                pass

            #initial and deterministic testcases
            try:
                while True:
                    testcase = self.work_queue.get(False)
                    yield testcase
            except:
                pass

            #choose
            if len(self.testcases) == 0: continue
            s = reduce(lambda x, y: x + y["new_blocks"], self.testcases, 0)

            r = randrange(s)
            for t in sorted(self.testcases,
                            key=lambda t: t["new_blocks"],
                            reverse=True):
                r -= t["new_blocks"]
                if r < 0:
                    break
            tid = self.testcases.index(t)

            merged = self.mutator.random_merge(self.testcases, tid)
            if merged: yield merged

            #get
            testcase = self.testcases[tid]

            #mutate
            if tid == 0:
                mutated = self.mutator.random_mutation(testcase, maximum=8)
            else:
                mutated = self.mutator.random_mutation(testcase, maximum=8)
            mutated["parent_id"] = tid
            yield mutated

    def stop(self):
        try:
            self.executor.watchDog.exit()
        except:
            pass
        for p in self.process_list:
            print "killing", p.pid
            os.kill(p.pid, 9)
        self.process_list = []
        self.executed_testcases.value = 0

    def crash_fuzz(self, files):
        self.addrs = []
        self.crashes = []

        for fname in files:
            with open(fname, "r") as f:
                testcase = json.loads(f.read())
                testcase["description"] = ""

            stderr, crash, coverage = self.callback(self, testcase)
            self.crash_fuzz_process_crash(stderr, testcase)

        #fuzz crash
        while True:
            testcase = choice(self.crashes)
            mutated = deepcopy(testcase)
            mutated["mutators"]["data"] = self.mutator.get_random_mutations(
                testcase["mutators"]["data"],
                maximum=1)  #, mutations=[3]) ##, start=711-16, stop=711+16)
            mutated["mutators"]["data"] = self.mutator.get_random_mutations(
                testcase["mutators"]["data"],
                maximum=1,
                mutations=[3],
                start=0,
                stop=0)
            stderr, crash, coverage = self.callback(self, mutated)
            self.crash_fuzz_process_crash(stderr, mutated)

    def crash_fuzz_process_crash(self, stderr, testcase):
        for line in stderr.split("\n"):
            if "ERROR: AddressSanitizer:" in line:
                try:
                    addr = re.findall("on [a-z ]*address 0x[0-9a-f]*", line)[0]
                    addr = re.findall("0x[0-9a-f]*", addr)[0]
                    crash = re.findall("pc 0x[0-9a-f]*", line)[0]
                    addr = "%s-%s" % (crash, addr)
                    if addr not in self.addrs:
                        if "READ" in stderr:
                            cause = "READ"
                        elif "WRITE" in stderr:
                            cause = "WRITE"
                        else:
                            cause = "OTHER"

                        stderr, crash, coverage = self.callback(
                            self,
                            testcase,
                            dumpfile="crashFuzz-%s-%s.bin" % (addr, cause))
                        save_data("crashFuzz-%s-%s.stderr" % (addr, cause),
                                  stderr)
                        print cause, addr, testcase["description"]
                        self.addrs += [addr]
                        self.crashes += [testcase]
                except:
                    #print stderr
                    import traceback
                    traceback.print_exc()
            i += 1
Code example #32
File: multistreamcache.py Project: NullspaceSF/AAS
class MultistreamCache():
    '''
    Input sample cache that employs a set of worker threads which collect new input samples from files to insert into the cache.
    Can produce sample batches by randomly selecting items from cache.
    Ensures at least a certain number of items are refreshed after each new batch is generated.
    '''
    def __init__(
        self,
        worker_method,
        worker_options,  # dict of worker settings; its min_replacement_rate entry means: replace at least this many entries on each cache update (can be fractional)
        alpha_smoother=0.99
    ):  # the higher the more temporally smoothed is the average_replacement_rate. Not very important

        self.num_workers = worker_options["num_workers"]
        self.worker_method = worker_method
        self.worker_options = worker_options
        self.cache_size = worker_options["cache_size"]
        self.min_replacement_rate = worker_options["min_replacement_rate"]
        self.alpha_smoother = alpha_smoother

        # Internal Data Structures
        self.communication_queue = Queue(maxsize=150)  #TODO  hardcoded for now
        self.worker_handles = []
        self.cache = [None] * self.cache_size
        self.idx_next_item_to_be_updated = 0
        self.average_replacement_rate = self.min_replacement_rate
        self.exit_flag = Event()
        self.exit_flag.clear()
        self.counter_cache_items_updated = 0

        # call seed if this is used from different threads / processes
        seed()

    def start_workers(self):
        for k in range(self.num_workers):
            p = Process(target=self.worker_method,
                        args=(self.communication_queue, self.exit_flag,
                              self.worker_options))
            #print(str(self.worker_options))
            print('in start_workers: ', str(k))
            p.start()
            self.worker_handles.append(p)

        # Fill cache
        print('----- Filling cache (Size: {}) -------'.format(self.cache_size))
        for k in range(self.cache_size):
            try:
                data = self.communication_queue.get(timeout=10)
                self.update_next_cache_item(data)
            except Empty as error:
                print('Timeout: {}'.format(str(error)))
                print('qsize: ' + str(self.communication_queue.qsize()))
                # print(str(self.cache[self.idx_next_item_to_be_updated - 1]))

        print('----- Cache Filled -------')

        # We reset the update counter when starting the workers
        self.counter_cache_items_updated = 0

    def stop_workers(self):
        # We just kill them assuming there is nothing to be shut down properly.
        # This is somewhat brutal but simplifies things a lot and is enough for now
        self.exit_flag.set()
        for worker in self.worker_handles:
            worker.join(timeout=5)
            worker.terminate()  # try harder to kill it off if necessary

    def update_next_cache_item(self, data):
        self.cache[self.idx_next_item_to_be_updated] = data
        self.idx_next_item_to_be_updated = (self.idx_next_item_to_be_updated +
                                            1) % self.cache_size
        self.counter_cache_items_updated += 1

    def update_cache_from_queue(self):

        # Implements a minimum update rate in terms of an average
        # number of items that have to be replaced in a call to this
        # function. If the average is not achieved, this functions
        # blocks until the required number of items are replaced in the
        # cache.

        num_replacements_current = 0
        average_replacement_rate_prev = self.average_replacement_rate

        while True:
            average_replacement_rate_updated = (
                1 - self.alpha_smoother
            ) * num_replacements_current + self.alpha_smoother * average_replacement_rate_prev

            if (average_replacement_rate_updated >= self.min_replacement_rate):
                break
            if num_replacements_current == self.cache_size:  # entire cache replaced? Your IO is super fast!
                break

            #print('Loading new item into cache from data list starting with ' + self.worker_options["file_list"][0][0].path)
            self.update_next_cache_item(self.communication_queue.get())
            num_replacements_current += 1
            print('num_replacements_current: ' + str(num_replacements_current))
        # Final update of self.num_replacements_smoothed
        self.average_replacement_rate = average_replacement_rate_updated
        print('ave_replace_rate: ' + str(self.average_replacement_rate))

    def get_cache_item(self, idx):
        return self.cache[idx]

    def set_cache_item(self, idx, item):
        self.cache[idx] = item
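For context, the class above never defines the worker itself: worker_method is expected to be a callable taking (communication_queue, exit_flag, worker_options), as the Process(...) call in start_workers shows. An illustrative worker under that assumption; the payload and the sample_len option are made up for this sketch and are not part of the original project:

import random
import time

def example_worker(communication_queue, exit_flag, worker_options):
    # Keep producing input samples until the cache signals shutdown.
    while not exit_flag.is_set():
        sample = [random.random() for _ in range(worker_options.get("sample_len", 4))]
        communication_queue.put(sample)
        time.sleep(0.01)   # stand-in for the real file I/O latency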
Code example #33
    # nproc is the number of parallel processes to initiate (each process will
    # take jobs from the inputq until it is empty)
    processes = [
        Process(target=remote_command, args=(inputq, outputq))
        for i in range(nproc)
    ]
    for p in processes:
        p.start()
    for p in processes:
        #sys.stderr.write( str(p.pid) + "\n" )
        p.join()

    #print ("outputq.size:", outputq.qsize())
    #print ("inputq.size:", inputq.qsize())
    while outputq.qsize() > 0:
        machine, data_area, output = outputq.get()
        #print (machine, data_area)
        #print ("output", output)
        if machine not in diskreport.keys():
            diskreport[machine] = dict()
        if data_area not in diskreport[machine].keys():
            diskreport[machine][data_area] = dict()
        if disk_query == "du":
            #diskreport[machine][data_area]["du"] = (output.split("\n"))
            diskreport[machine][data_area]["du"] = []
            for directory in output.split(b"\n")[0:-1]:
                du_output = directory.split()
                du_output[0] = int(du_output[0])
                diskreport[machine][data_area]["du"].append(du_output)
        elif disk_query == "df":
print(q.full())  # 还只放2条消息,没有满False

q.put("消息3")

print(q.full()) #True

# 因为消息列队已满下面的try都会抛出异常,
# 第一个try会等待2秒后再抛出异常,第二个Try会立刻抛出异常
# put函数有一个参数block默认为True,如果已经没有空间可写入,
# 此时程序将被阻塞(停在写入状态),直到从消息列队腾出空间为止,
# 如果设置了timeout,则会等待timeout秒,若还没空间,则抛出"Queue.Full"异常;
# 因此使用try函数,消息满了,超时2秒,然后执行下面的except语句
try:
    q.put("消息4",True,2)
except:
    print("消息列队已满,现有消息数量:%s" % q.qsize())

try:
# If block is False and the queue has no free space, "Queue.Full" is raised
# immediately.
# Queue.put_nowait(item) is equivalent to Queue.put(item, False):
# it raises right away.
    q.put_nowait("message 4")
except:
    print("The queue is full, current number of messages: %s" % q.qsize())



# Recommended approach: check whether the queue is full before writing
if not q.full():
    q.put_nowait("message 4")
コード例 #35
0
    except Exception as e:
        print 'Could not start server.'
        printv(e,2)
        for thr in threadPool:
            thr.terminate()
        sys.exit(0)
            
    while True:  #Master loop
        try:
            conn, addr = s.accept()
            printv('[Master] New connection from'+str(addr),3)
            if q.empty() and len(threadPool)>minThreadNum:
                if threadStatus[-1]==False:
                    threadPool[-1].terminate()
                    threadPool.pop()
                    threadStatus.pop()
                    printv('Killed thread. New thread count : '+str(len(threadPool)),2)
            if q.qsize()>2 and len(threadPool)<maxThreadNum:
                threadStatus.append(False)
                p=Process(target=serverProcess, args=(q,len(threadPool),threadStatus,))
                threadPool.append(p)
                p.start()
                printv('Started thread. New thread count : '+str(len(threadPool)),2)
            q.put(forking_dumps([conn,addr]))
            conn.close()
        except KeyboardInterrupt:
            printv('Server stopped from keyboard',1)
            sys.exit(0)
        except socket.timeout:
            pass
コード例 #36
0
def startTicker(q):

    logging.info(
        'starting threaded websocket connection, loading initial state')

    # Initialise
    global access_token_A
    access_token_A = ReadAccessTokenFile(access_token_A_file)
    #global access_token_B
    #access_token_B=ReadAccessTokenFile(access_token_B_file)
    global kws
    kws = KiteTicker(api_key_A, access_token_A)

    #setting queue for DB update
    qDB = Queue()

    #put df in queue
    #jobReconnect()
    global df
    #if oracle_db.dialect.has_table(oracle_db, 's3stockmaster'):
    #    df=jobImportDB()
    #    logging.info('putting db imported dataframe in multiprocessing queue on RAM')
    #    q.put(df)
    #else:
    #    pass
    #    print('does not exist')

    qDB.put(1)

    kws.on_ticks = on_ticks
    kws.on_connect = on_connect
    kws.on_close = on_close
    kws.connect(threaded=True)

    flagMargin = False
    flagStartOver = False

    counterStockTick = 0

    df['profit'] = 0
    df['rank'] = 0
    df['rank2'] = 0
    df['loser'] = 0
    df['skip'] = 0
    df['buy'] = 0
    df['sell'] = 0
    df['high'] = 0
    df['low'] = 0
    #t = timer()

    global df_original
    df_original = df

    while True:

        #index and make changes in a copy of df called df_calc
        df_calc = df
        df_calc = df_calc.set_index('instrument_token')

        #traverse dataframe
        for index, row in df_calc.iterrows():

            #set base buy and sell quantity
            if df_calc.loc[index, 'action'] == 'BaseSet':
                pass

#identify the stock to be traded
            elif df_calc.loc[index, 'action'] == 'StockSelect':

                if df_calc.loc[index,
                               'rank'] == 1 and df_calc.loc[index, 'v'] >= 2:
                    df_calc.at[index, 'qty'] = math.floor(
                        344 / df_calc.loc[index, 'ltp'] *
                        df_calc.loc[index, 'x'])
                    df_calc.at[index, 'action'] = 'Start'
                elif df_calc.loc[index,
                                 'rank'] == 1 and df_calc.loc[index, 'v'] < 2:
                    flagStartOver = True

#place order on zerodha for the selected trade
            elif df_calc.loc[index, 'action'] == 'Start':

                if skip_kite == False:
                    if direction_switch == True:
                        df_calc.at[index, 'id_a_sell'] = placeMarketOrder(
                            api_key_A, access_token_A, 'SELL',
                            df_calc.loc[index, 'zid'], df_calc.loc[index,
                                                                   'qty'])
                        df_calc.at[index,
                                   'sl'] = 1.003 * df_calc.loc[index, 'ltp']
                    else:
                        df_calc.at[index, 'id_a_buy'] = placeMarketOrder(
                            api_key_A, access_token_A, 'BUY',
                            df_calc.loc[index, 'zid'], df_calc.loc[index,
                                                                   'qty'])
                        df_calc.at[index,
                                   'sl'] = 0.997 * df_calc.loc[index, 'ltp']

                    df_calc.at[index, 'action'] = 'VerifyStart'
                df_calc.at[index, 'pExec'] = df_calc.loc[index, 'ltp']

#verify if the initial zerodha order is completed
            elif df_calc.loc[index, 'action'] == 'VerifyStart':

                if skip_kite == False:
                    if direction_switch == True:
                        status = checkExecutionStatus(
                            api_key_A, access_token_A,
                            df_calc.loc[index, 'id_a_sell'])
                        if status == "COMPLETE":
                            df_calc.at[index, 'id_a_buy'] = placeLimitOrder(
                                api_key_A, access_token_A, 'BUY',
                                df_calc.loc[index, 'zid'], df_calc.loc[index,
                                                                       'qty'],
                                round(0.997 * df_calc.loc[index, 'ltp'], 1))
                            df_calc.at[index, 'action'] = 'RunTrade'
                        elif status == "REJECTED":
                            df_calc.at[index, 'action'] = 'FatalError'
                    else:
                        status = checkExecutionStatus(
                            api_key_A, access_token_A, df_calc.loc[index,
                                                                   'id_a_buy'])
                        if status == "COMPLETE":
                            df_calc.at[index, 'id_a_sell'] = placeLimitOrder(
                                api_key_A, access_token_A, 'SELL',
                                df_calc.loc[index, 'zid'], df_calc.loc[index,
                                                                       'qty'],
                                round(1.003 * df_calc.loc[index, 'ltp'], 1))
                            df_calc.at[index, 'action'] = 'RunTrade'
                        elif status == "REJECTED":
                            df_calc.at[index, 'action'] = 'FatalError'
                else:
                    df_calc.at[index, 'action'] = 'RunTrade'

            elif df_calc.loc[index, 'action'] == 'ExitManual':
                pass

#verify if the closing zerodha order is completed
            elif df_calc.loc[index, 'action'] == 'RunTrade':

                if skip_kite == False:

                    slflag = 0

                    if direction_switch == True:
                        if df_calc.loc[index, 'ltp'] > df_calc.loc[index,
                                                                   'sl']:
                            #df_calc.at[index,'id_a_buy']=placeMarketOrder(api_key_A,access_token_A,'BUY',df_calc.loc[index,'zid'],df_calc.loc[index,'qty'])
                            #df_calc.at[index,'action']='TradeComplete'
                            df_calc.at[index, 'action'] = 'ExitManual'
                            slflag = 1
                    else:
                        if df_calc.loc[index, 'ltp'] < df_calc.loc[index,
                                                                   'sl']:
                            #df_calc.at[index,'id_a_sell']=placeMarketOrder(api_key_A,access_token_A,'SELL',df_calc.loc[index,'zid'],df_calc.loc[index,'qty'])
                            #df_calc.at[index,'action']='TradeComplete'
                            df_calc.at[index, 'action'] = 'ExitManual'
                            slflag = 1

                    if slflag == 0:

                        if direction_switch == True:
                            status = checkExecutionStatus(
                                api_key_A, access_token_A,
                                df_calc.loc[index, 'id_a_buy'])
                        else:
                            status = checkExecutionStatus(
                                api_key_A, access_token_A,
                                df_calc.loc[index, 'id_a_sell'])
                        if status == "COMPLETE":
                            df_calc.at[index, 'action'] = 'TradeComplete'
                        elif status == "REJECTED":
                            df_calc.at[index, 'action'] = 'FatalError'

                else:
                    df_calc.at[index, 'action'] = 'TradeComplete'


#trade completed on zerodha, start over
            elif df_calc.loc[index, 'action'] == 'TradeComplete':
                df_calc.at[index,
                           'skip'] = 0  # set to 1 instead to prevent repeating this stock
                flagStartOver = True

            elif df_calc.loc[index, 'action'] == 'FatalError':
                df_calc.at[index, 'skip'] = 1
                #df_calc.at[index,'action']='StockSelect'

            else:
                if df_calc.loc[index, 'ltp'] > 0.1:
                    df_calc.at[index, 'pBase'] = df_calc.loc[index, 'ltp']
                    df_calc.at[index, 'action'] = 'BaseSet'
                    counterStockTick += 1
                else:
                    if counterStockTick > 100:
                        df_calc.at[index, 'action'] = 'BaseSet'
                    else:
                        df_calc.at[index, 'action'] = 'Waiting'

        #if all action status is the same -- steps for global changes
        if df_calc.action.nunique() == 1:
            if 'StockSelect' in df_calc.values:

                #filter out stocks
                df_calc = df_calc[df_calc.skip == 0]
                df_calc = df_calc[df_calc.buy > 10]
                df_calc = df_calc[df_calc.sell > 10]
                df_calc = df_calc[df_calc.high > 10]
                df_calc = df_calc[df_calc.low > 10]
                df_calc = df_calc[df_calc.ltp < 1000]
                df_calc = df_calc[df_calc.x > 1]

                #calculate volatility and rank
                if direction_switch == True:
                    df_calc['v'] = df_calc['sell'] / df_calc['buy']
                    df_calc['v2'] = (df_calc['ltp'] - df_calc['low']) / (
                        df_calc['high'] - df_calc['low'])
                else:
                    df_calc['v'] = df_calc['buy'] / df_calc['sell']
                    df_calc['v2'] = (df_calc['high'] - df_calc['ltp']) / (
                        df_calc['high'] - df_calc['low'])

                df_calc = df_calc[df_calc.v2 > 0.5]

                df_calc['rank'] = df_calc['v'].rank(ascending=False)
                df_calc['rank2'] = df_calc['v2'].rank(ascending=False)

            elif 'BaseSet' in df_calc.values:
                #set values to prevent error
                df_calc.loc[df_calc['sell'] == 0, 'sell'] += 1
                df_calc.loc[df_calc['buy'] == 0, 'buy'] += 1

                #t = timer()
                if flagMargin == False:
                    if 12.5 in df_calc.values:
                        pass
                    else:
                        getMarginX(df_calc)
                        df_original = df_original.set_index('instrument_token')
                        df_original = df_calc
                        df_original = df_original.reset_index()
                        flagMargin = True
                df_calc['action'] = 'StockSelect'

        df_calc = df_calc.reset_index()

        df = df_calc

        if flagStartOver == True:
            counterStockTick = 0
            df = df_original
            flagStartOver = False

        os.system("cls")
        #pd.set_option('display.max_rows', len(df_calc))
        if direction_switch == True:
            print('Direction: Sell -> Buy')
        else:
            print('Direction: Buy -> Sell')
        print(f'Stock count: {df.shape[0]}')
        print(df.nsmallest(5, 'rank'))

        if qDB.qsize() == 1:
            qDB.get()
            #pDB = Process(target=jobUpdateDB, args=(df_calc,qDB,1))
            #pDB.start()
        #else:
        #logging.info('dataframe updated on RAM only, skipping DB write')

        #q.put(df_calc)

        time.sleep(.2)
コード例 #37
0
        q.put_nowait(obj)  raises an exception if the queue is full
        q.get_nowait()  raises an exception if the queue is empty

        q.full()  whether the queue is full
        q.empty()  whether the queue is empty
        q.qsize()  number of items currently in the queue
"""
from multiprocessing import Queue

# create the queue object
q = Queue(3)  # a queue that holds at most 3 messages

print(q.full())  # False - is the queue full?
print(q.empty())  # True - is the queue empty?

print(q.qsize())  # 0

q.put("李小花")
q.put("王二狗")
q.put("刘三胖")

print(q.full())  # True
print(q.qsize())  # 3

# q.put("史狗剩")  # 阻塞式的, 阻塞程序的执行, 直到将"史狗剩"放到队列中为止
# q.put("刘铁蛋", timeout=3)  # 阻塞式的, 阻塞时间为timeout, 如果超出阻塞时间, 抛出异常raise Full, 打断程序的执行
# q.put("赵铁柱", block=False)  # 如果设置block为False, 非阻塞式, 代表如果队列已满, 直接抛出异常

# q.put_nowait("隔壁老王")  # raise Full
print("---------------")
コード例 #38
0
ファイル: main.py プロジェクト: ContentsViewer/STEM
def main(args):
    os.chdir(os.path.dirname(os.path.abspath(__file__)))

    config = importlib.import_module(args.config).Config

    app_timer = Stopwatch()
    app_timer.start()

    # --- Setup Serial ---
    print('Serial Start up...')
    sensor_output = Queue()
    serial_process = Process(target=sensor.reader,
                             daemon=True,
                             args=(config.port, config.baudrate,
                                   config.sampling_rate,
                                   config.sensor_data_labels, sensor_output))
    serial_process.start()

    if sensor_output.get() is sensor.FAIL:
        raise RuntimeError('Serial Process Fail.')

    # --- Setup GUI ---
    print('GUI is launching...')
    pygame.init()
    GUI.init()
    screen = pygame.display.set_mode((720, 483))
    pygame.display.set_caption("STEM")
    pygame.display.set_icon(
        GUI.make_text('|†|',
                      font=pygame.font.Font(
                          "fonts/Ubuntu Mono derivative Powerline Bold.ttf",
                          64),
                      color=GUI.color.black))
    # End Setup GUI ---

    # --- Setup Model ---
    print('Model Setup...')
    base_model = importlib.import_module(config.model_path).Model()
    inputs = tf.keras.Input(shape=(
        config.model_sensor_data_inputs,
        len(config.sensor_feature_labels),
    ),
                            batch_size=None)
    x = base_model(inputs)
    outputs = Duplicate()(x)
    model = Model(inputs, outputs)
    model.compile(loss=facenet.triplet_loss(), optimizer='adam')

    base_model.summary()
    model.summary()
    if os.path.exists(config.checkpoint):
        print('last checkpoint found. Loading the weights...')
        model.load_weights(config.checkpoint)

    # model.save_weights(args.checkpoint)

    # End Setup Model ---

    is_running = True
    profile = {
        'keyboard_input': 0,
        'sensor_read': 0,
        'gui_update': 0,
        'screen_update': 0
    }
    input_queue = deque(maxlen=config.model_sensor_data_inputs)

    # 0 -> 'label A', 1 -> 'label B', ...
    id2label = [None for _ in range(len(config.possible_states))]
    label2id = {}

    precedents_dict = [
        deque(maxlen=config.precedents_maxlen)
        for _ in range(len(config.possible_states) + 1)
    ]

    estimator = facenet.Estimator(
        model=model,
        precedents_dict=precedents_dict,
    )

    trainor = facenet.Trainor(precedents_dict=precedents_dict)

    prev_saving_precedent_time = app_timer.elapsed
    fixed_loop = FixedLoop(1 / config.frame_rate)
    fixed_loop.reset()
    fixed_loop.sync()
    estimated = None
    while is_running:
        screen.fill(GUI.color.screen_backgorund)
        submitted_estimator = False
        submitted_trainor = False
        supervised_state_label = None
        app_timer.lap()

        # --- Keyboard Input ---
        pressed_keys = pygame.key.get_pressed()
        for label, settings in config.possible_states.items():
            if pressed_keys[settings['key']]:
                supervised_state_label = label

        for event in pygame.event.get():
            if event.type == QUIT:
                is_running = False
            if event.type == KEYDOWN:
                if event.key == K_ESCAPE:
                    is_running = False

        profile['keyboard_input'] = app_timer.lap()
        # End Keyboard Input ---

        # --- Control Module Behaviors ---
        # print(sensor.read_latest_data(ser))
        # sensor_data = sensor.read_latest_data(ser, config.sensor_data_labels)
        # sensor_data = dotdict(
        #     {'timestamp': time.time(), 'pulse_width': 100, 'flow_amount': 200})

        received_size = sensor_output.qsize()
        for _ in range(received_size):
            sensor_data = sensor_output.get()
            if sensor_data is sensor.DROP:
                input_queue.clear()
            elif sensor_data is sensor.FAIL:
                raise RuntimeError('Serial Process Fail.')
            else:
                features = [
                    sensor_data[label]
                    for label in config.sensor_feature_labels
                ]
                input_queue.append(features)

        profile['sensor_read'] = app_timer.lap()

        if len(input_queue) >= input_queue.maxlen:
            input_list = np.array([input_queue])
            if not estimator.is_running:
                submitted_estimator = True
                estimator.run(inputs=input_list,
                              supervised_state_label=supervised_state_label)

        count = 0
        for state_id in range(len(precedents_dict) - 1):
            count += len(precedents_dict[state_id])
        if count > len(config.possible_states):
            precedents_dict[-1].clear()

        if not estimator.results.empty():
            estimated = estimator.results.get()

            if (estimated.supervised_state_label is not None) and (
                    estimated.estimated_state < len(config.possible_states)):
                if id2label[estimated.estimated_state] is None:
                    id2label[
                        estimated.
                        estimated_state] = estimated.supervised_state_label
                    label2id[
                        estimated.
                        supervised_state_label] = estimated.estimated_state

                if label2id.get(estimated.supervised_state_label) is None:
                    aligned_id = 0
                    for i, label in enumerate(id2label):
                        if label is None:
                            aligned_id = i
                            break

                    id2label[aligned_id] = estimated.supervised_state_label
                    label2id[estimated.supervised_state_label] = aligned_id

                estimated.supervised_state = label2id[
                    estimated.supervised_state_label]
                # print(estimated.supervised_state)
            if (
                    estimated.supervised_state_label is not None
            ) or app_timer.elapsed > prev_saving_precedent_time + config.precedent_interval:
                if not trainor.is_running:
                    submitted_trainor = True
                    trainor.run(model=model, anchor=estimated)

                major_state = facenet.get_major_state(estimated)

                precedents_dict[major_state].append(estimated)
                prev_saving_precedent_time = app_timer.elapsed

        if not trainor.results.empty():
            pass

        # print(len(input_queue))
        profile['submodule_control'] = app_timer.lap()
        # End Control Module Behaviors ---

        # --- Update GUI Elements ---
        if supervised_state_label is None:
            screen.blit(GUI.make_text('Self Learning...', GUI.font.large),
                        (400, 0))
        else:
            screen.blit(
                GUI.make_text(
                    'Supervised... {0}'.format(supervised_state_label),
                    GUI.font.large), (400, 0))

        screen.blit(GUI.make_text('State: ', GUI.font.large), (0, 0))
        if estimated is not None and estimated.estimated_state < len(
                config.possible_states):
            current_state = facenet.get_major_state(estimated)

            screen.blit(
                GUI.make_text(
                    '{0} {1}'.format(
                        current_state, '?' if id2label[current_state] is None
                        else id2label[current_state]), GUI.font.large),
                (80, 0))
        else:
            screen.blit(GUI.make_text('?', GUI.font.large), (80, 0))

        GUI.begin_multilines((400, 30))
        GUI.draw_multiline_text(screen, "Sensor:")
        if sensor_data is sensor.DROP:
            GUI.draw_multiline_text(screen, '  DROP!')
        else:
            GUI.draw_multiline_text(
                screen, "\n".join([
                    "  {0}: {1}".format(label, sensor_data[label])
                    for label in config.sensor_data_labels
                ]))

        GUI.draw_multiline_text(screen, ("App:\n"
                                         "  Time            : {0:.3f}\n"
                                         "  input_queue.size: {1}\n").format(
                                             app_timer.elapsed,
                                             len(input_queue),
                                         ))
        GUI.draw_multiline_text(screen, ("  precedents.size :" + (", ".join(
            ["{0}".format(len(precedents))
             for precedents in precedents_dict]))))
        GUI.draw_multiline_text(screen, "Estimator:")
        GUI.draw_multiline_text(
            screen,
            '  o',
            color=GUI.color.green if submitted_estimator else GUI.color.red)
        GUI.draw_multiline_text(screen, "Trainor:")
        GUI.draw_multiline_text(
            screen,
            '  o',
            color=GUI.color.green if submitted_trainor else GUI.color.red)
        GUI.draw_multiline_text(
            screen, ("Profile:\n"
                     "  Keyboard Input   : {1:.4f}\n"
                     "  Sensor Read      : {2:.4f}\n"
                     "  Submodule Control: {3:.4f}\n"
                     "  GUI Update       : {4:.4f}\n"
                     "  Screen Update    : {5:.4f}\n").format(
                         estimator.is_running, profile['keyboard_input'],
                         profile['sensor_read'], profile['submodule_control'],
                         profile['gui_update'], profile['screen_update']))
        if fixed_loop.last_delay_time >= 0:
            screen.blit(
                GUI.make_text('Frame: Sync ({0:.3f})'.format(
                    fixed_loop.last_delay_time),
                              color=GUI.color.green), (0, 463))
        else:
            screen.blit(
                GUI.make_text('Frame: Busy ({0:.3f})'.format(
                    fixed_loop.last_delay_time),
                              color=GUI.color.red), (0, 463))

        if sensor_data is sensor.DROP:
            screen.blit(GUI.make_text('Sensor: Busy', color=GUI.color.red),
                        (240, 463))
        else:
            screen.blit(GUI.make_text('Sensor: Sync', color=GUI.color.green),
                        (240, 463))

        pygame.draw.rect(screen, (0x11, 0x11, 0x11),
                         pygame.Rect(10, 30, 380, 380))
        meta, plots = facenet.make_visualized_graph_plots(
            precedents_dict, estimated)
        if meta is not None:
            scale = meta.max - meta.min
            a = 190 / scale.max()
            root = np.array([10 + 190, 30 + 190])
            for plot in plots:
                position = root + plot.position * a
                position = position.astype(np.int64)

                if plot.supervised_state is not None:
                    pygame.draw.circle(screen,
                                       config.id2color[plot.supervised_state],
                                       position, 6)

                if plot.estimated_state is not None:
                    pygame.draw.circle(screen,
                                       config.id2color[plot.estimated_state],
                                       position, 4)

        profile['gui_update'] = app_timer.lap()
        # End Update GUI Elements ---

        pygame.display.update()
        profile['screen_update'] = app_timer.lap()
        if not fixed_loop.sync():
            fixed_loop.reset()

    pygame.quit()
    exit()
コード例 #39
0
def run(cam_source, yolo_engine, tf_weight_pickle, dnnweaver2_weight_pickle, in_videofile, out_videofile):

    # Synchronous queues
    frame_q = Queue(maxsize=1)
    bbox_q = Queue(maxsize=1)
    kill_q = Queue(maxsize=1)
    key_q = Queue(maxsize=1)
    num_processes = 2
    done_q = Queue(maxsize=num_processes) 

    # Multiprocessing locks
    frame_l = Lock()
    bbox_l = Lock()
    key_l = Lock()

    # Drone management process
    if cam_source == "drone": 
        droneProcess = Process(target=drone_control, args=(frame_q, frame_l, bbox_q, bbox_l, key_q, key_l, kill_q, done_q, ))
        droneProcess.start()
    elif cam_source == "webcam": 
        webcamProcess = Process(target=webcam_control, args=(frame_q, frame_l, bbox_q, bbox_l, kill_q, done_q, )) 
        webcamProcess.start()
    elif cam_source == "videofile":
        videofileProcess = Process(target=videofile_control, args=(frame_q, frame_l, bbox_q, bbox_l, kill_q, done_q, in_videofile, out_videofile, )) 
        videofileProcess.start()

    # Object detection process using YOLO algorithm
    if yolo_engine == "tf-cpu":
        proc = "cpu"
    elif yolo_engine == "tf-gpu":
        proc = "gpu"
    elif yolo_engine == "dnnweaver2":
        proc = "gpu"
    detectionProcess = Process(target=detection, args=(yolo_engine, tf_weight_pickle, dnnweaver2_weight_pickle, frame_q, frame_l, bbox_q, bbox_l, kill_q, done_q, proc, ))
#    detectionProcess = Process(target=detection, args=(yolo_engine, tf_weight_pickle, dnnweaver2_weight_pickle, frame_q, frame_l, bbox_q, bbox_l, kill_q, done_q, proc, True, ))
    detectionProcess.start()

    # Keyboard input handler
    inkey = GetKey()
    thread_print ("Keyboard Input Handler Starts")
    while True: 
        try:
            key = inkey()
            key = ord(key)
            with key_l:
                if key_q.empty():
                    key_q.put(key)
            if key == 101: # key = 'e' 
                break
        except KeyboardInterrupt:
            break
    thread_print ("Keyboard Input Handler Ends")

    # Notifying all processes/threads to die
    kill_q.put(True)
    print ("Sent KILL Signal")

    # Wait for the processes to end
    while done_q.qsize() != num_processes:
        sleep(0.5)

    # Flush all entries in the queues
    drain_queue([frame_q, bbox_q, kill_q, key_q])
    if cam_source == "drone":
        droneProcess.join()
    elif cam_source == "webcam":
        webcamProcess.join()
    detectionProcess.join()
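The shutdown in run() above follows a simple handshake: one kill flag goes onto kill_q, every worker notices it without consuming it, reports into done_q, and the parent waits until done_q holds one entry per process. Below is a minimal sketch of just that handshake, with a dummy worker in place of the drone/detection code; it uses Queue.qsize() as the original does, so it assumes a platform where qsize is implemented.

# Minimal sketch of the kill_q / done_q shutdown handshake used above.
from multiprocessing import Process, Queue
from time import sleep

def dummy_worker(kill_q, done_q):
    while kill_q.empty():   # keep working until the kill flag appears
        sleep(0.1)          # stand-in for real work
    done_q.put(True)        # report that this worker has finished

if __name__ == '__main__':
    num_processes = 2
    kill_q = Queue(maxsize=1)
    done_q = Queue(maxsize=num_processes)
    procs = [Process(target=dummy_worker, args=(kill_q, done_q))
             for _ in range(num_processes)]
    for p in procs:
        p.start()
    sleep(0.5)
    kill_q.put(True)                        # broadcast the kill signal
    while done_q.qsize() != num_processes:  # wait for every worker to check in
        sleep(0.5)
    for p in procs:
        p.join()
    print("all workers stopped")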
コード例 #40
0
    if args.username and args.password:
        es = Elasticsearch(url.netloc,
                           request_timeout=5,
                           timeout=args.timeout,
                           http_auth=(args.username, args.password))
        if url.scheme == 'https':
            es = Elasticsearch(url.netloc,
                               use_ssl=True,
                               verify_certs=False,
                               request_timeout=5,
                               timeout=args.timeout,
                               http_auth=(args.username, args.password))
    else:
        es = Elasticsearch(url.netloc, request_timeout=5, timeout=args.timeout)
        if url.scheme == 'https':
            es = Elasticsearch(url.netloc,
                               use_ssl=True,
                               verify_certs=False,
                               request_timeout=5,
                               timeout=args.timeout)
    outq = Queue(maxsize=50000)
    alldone = Event()
    dumpproc = Process(target=dump, args=(es, outq, alldone))
    dumpproc.daemon = True
    dumpproc.start()
    while not alldone.is_set() or outq.qsize() > 0:
        try:
            print json.dumps(outq.get(block=False))
        except:
            time.sleep(0.1)
コード例 #41
0
ファイル: extract_features.py プロジェクト: w5688414/AMNN
# vgg16_image_name_to_features
evaluator = Evaluator(
    model,
    data_path,
    image_path,
    image_name_to_features_filename=object_image_features_filename)
image_names = evaluator.full_data['image_names'].tolist()
tweet_list = evaluator.full_data['tweets'].tolist()
tweet_dict = {}
for image_arg, image_name in tqdm(enumerate(image_names)):
    tweet = str(tweet_list[image_arg])
    tweet_dict[image_name] = tweet

if __name__ == "__main__":
    ts = time()
    seconds = 0
    queue = Queue()
    for img_file in image_names:
        queue.put(img_file)

    print(queue.qsize())
    procs = [
        Process(target=run_main, args=[i, queue]) for i in range(num_processes)
    ]
    for p in procs:
        p.start()
    for p in procs:
        p.join()

    print('Took {}s'.format(time() - ts))
コード例 #42
0
class WorkerProcess(object):
    def __init__(self, idnum, topic, collname, in_counter_value,
                 out_counter_value, drop_counter_value, queue_maxsize,
                 mongodb_host, mongodb_port, mongodb_name, nodename_prefix):
        self.name = "WorkerProcess-%4d-%s" % (idnum, topic)
        self.id = idnum
        self.topic = topic
        self.collname = collname
        self.queue = Queue(queue_maxsize)
        self.out_counter = Counter(out_counter_value)
        self.in_counter = Counter(in_counter_value)
        self.drop_counter = Counter(drop_counter_value)
        self.worker_out_counter = Counter()
        self.worker_in_counter = Counter()
        self.worker_drop_counter = Counter()
        self.mongodb_host = mongodb_host
        self.mongodb_port = mongodb_port
        self.mongodb_name = mongodb_name
        self.nodename_prefix = nodename_prefix
        self.quit = Value('i', 0)

        self.process = Process(name=self.name, target=self.run)
        self.process.start()

    def init(self):
        global use_setproctitle
        if use_setproctitle:
            setproctitle("mongodb_log %s" % self.topic)

        self.mongoconn = Connection(self.mongodb_host, self.mongodb_port)
        self.mongodb = self.mongoconn[self.mongodb_name]
        self.mongodb.set_profiling_level(SLOW_ONLY)

        self.collection = self.mongodb[self.collname]
        self.collection.count()

        self.queue.cancel_join_thread()

        rospy.init_node(WORKER_NODE_NAME %
                        (self.nodename_prefix, self.id, self.collname),
                        anonymous=False)

        self.subscriber = None
        while not self.subscriber:
            try:
                msg_class, real_topic, msg_eval = rostopic.get_topic_class(
                    self.topic, blocking=True)
                self.subscriber = rospy.Subscriber(real_topic, msg_class,
                                                   self.enqueue, self.topic)
            except rostopic.ROSTopicIOException:
                print("FAILED to subscribe, will keep trying %s" % self.name)
                time.sleep(randint(1, 10))
            except rospy.ROSInitException:
                print("FAILED to initialize, will keep trying %s" % self.name)
                time.sleep(randint(1, 10))
                self.subscriber = None

    def run(self):
        self.init()

        print("ACTIVE: %s" % self.name)

        # run the thread
        self.dequeue()

        # free connection
        # self.mongoconn.end_request()

    def is_quit(self):
        return self.quit.value == 1

    def shutdown(self):
        if not self.is_quit():
            #print("SHUTDOWN %s qsize %d" % (self.name, self.queue.qsize()))
            self.quit.value = 1
            self.queue.put("shutdown")
            while not self.queue.empty():
                sleep(0.1)
        #print("JOIN %s qsize %d" % (self.name, self.queue.qsize()))
        self.process.join()
        self.process.terminate()

    def sanitize_value(self, v):
        if isinstance(v, rospy.Message):
            return self.message_to_dict(v)
        elif isinstance(v, Time):
            t = datetime.fromtimestamp(v.secs)
            return t + timedelta(microseconds=v.nsecs / 1000.)
        elif isinstance(v, Duration):
            return v.secs + v.nsecs / 1000000000.
        elif isinstance(v, list):
            return [self.sanitize_value(t) for t in v]
        else:
            return v

    def message_to_dict(self, val):
        d = {}
        for f in val.__slots__:
            d[f] = self.sanitize_value(getattr(val, f))
        return d

    def qsize(self):
        return self.queue.qsize()

    def enqueue(self, data, topic, current_time=None):
        if not self.is_quit():
            if self.queue.full():
                try:
                    self.queue.get_nowait()
                    self.drop_counter.increment()
                    self.worker_drop_counter.increment()
                except Empty:
                    pass
            self.queue.put((topic, data, current_time or datetime.now()))
            self.in_counter.increment()
            self.worker_in_counter.increment()

    def dequeue(self):
        while not self.is_quit():
            t = None
            try:
                t = self.queue.get(True)
            except IOError:
                # Anticipate Ctrl-C
                #print("Quit W1: %s" % self.name)
                self.quit.value = 1
                break
            if isinstance(t, tuple):
                self.out_counter.increment()
                self.worker_out_counter.increment()
                topic = t[0]
                msg = t[1]
                ctime = t[2]

                if isinstance(msg, rospy.Message):
                    doc = self.message_to_dict(msg)
                    doc["__recorded"] = ctime or datetime.now()
                    doc["__topic"] = topic
                    try:
                        #print(self.sep + threading.current_thread().getName() + "@" + topic+": ")
                        #pprint.pprint(doc)
                        self.collection.insert(doc)
                    except InvalidDocument, e:
                        print("InvalidDocument " + current_process().name +
                              "@" + topic + ": \n")
                        print e
                    except InvalidStringData, e:
                        print("InvalidStringData " + current_process().name +
                              "@" + topic + ": \n")
                        print e

            else:
コード例 #43
0
from multiprocessing import Queue
from queue import Full

# create the queue
q = Queue(3)

q.put(1)
print(q.full())
q.put(2)
q.put(3)
print(q.full())

# put with a 3-second timeout: the queue is already full, so after about
# 3 seconds this raises queue.Full, which is caught below
try:
    q.put(4, True, 3)
except Full:
    print('put timed out: the queue is still full')

print(q.get())
print('%d messages left in the queue' % q.qsize())
print(q.empty())
q.close()
コード例 #44
0
ファイル: reactor.py プロジェクト: JK19/PythonGA
class Reactor(object):
    """
    Reactor and container of genetic algorithm
    """
    def __init__(self, size, chromolen, alphabet, fitnessfunc, pmut, sortfunc=None, engine="process"):
        """
        Params:
            size (int): Number of individuals in the reactor population
            chromolen (int): Number of components in an individual (chromosome)
            alphabet (string[]): List with the string representation of each possible gene
            fitnessfunc (func): Function receiving a chromosome and returning an int
            pmut (float): Probability of mutating an individual in each population
            sortfunc (func): Overrides the default sorting function based on fitness
            engine (string): Type of parallelism used ("process" or "thread")
        """
        self.size = size
        self.chromolen = chromolen
        self.alphabet = alphabet
        self.fitness = fitnessfunc
        self.pmut = pmut
        if sortfunc is None:
            self.sortfunc = self.fitness
        else:
            self.sortfunc = sortfunc
        self.engine = engine
        self.cores = 1
        self.running_cores = []
        self.result_queue = Queue()
        self.population = randpopulation(self.size, self.chromolen, self.alphabet)

    def addCores(self, cores):
        if self.cores + cores >= 1:
            self.cores += cores
        return self

    def run(self, steps, ongeneration=None):
        #TODO: add a sync=True parameter to run a task in the local thread or all tasks in parallel

        # create cores
        if self.engine.lower() == "process":
            from multiprocessing import Process as Task
        elif self.engine.lower() == "thread":
            from threading import Thread as Task
        else:
            raise ValueError("Unknown engine {}".format(self.engine))

        for i in range(self.cores):
            # tasks.append(Process(target=self._loop, args=(i, steps, ongeneration, results)))
            self.running_cores.append(Task(target=self._loop, args=(i, steps, ongeneration, self.result_queue)))

        # launch
        for task in self.running_cores:
            task.start()

        # create local process
        # tasks.append(Process(target=self._loop, args=(0, steps, ongeneration, results)))
        # self._loop(0, steps, ongeneration, results)

        # join launched processes
        # for task in tasks:
        #     task.join()

        # return [results.get() for _ in range(results.qsize())]
        return self

    def get_results(self):
        for task in range(len(self.running_cores)):
            self.running_cores.pop().join()
        return [self.result_queue.get() for _ in range(self.result_queue.qsize())]

    def _loop(self, core, steps, ongeneration=None, queue=None):

        pop = self.population[:]  # local copy
        top = None

        for step in range(steps):

            pop, best = cycle(
                population=pop,
                alphabet=self.alphabet,
                pmut=self.pmut,
                sortfunc=self.sortfunc
            )

            if ongeneration is not None:
                ongeneration(step, core, best)

            if top is None:
                top = best

            if self.fitness(best) > self.fitness(top):
                top = best

        if queue is not None:
            queue.put(top)
            return
        else:
            return top
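Based only on the constructor signature and docstring above, a hypothetical use of the Reactor class could look like the sketch below; the target string, alphabet and fitness function are invented for illustration, and randpopulation/cycle are assumed to be provided by the same module.

# Hypothetical usage sketch for the Reactor class above (illustrative only).
target = "HELLO"

def match_count(chromosome):
    # fitness: number of positions that already match the target
    return sum(1 for a, b in zip(chromosome, target) if a == b)

if __name__ == '__main__':
    reactor = Reactor(size=50,
                      chromolen=len(target),
                      alphabet=list("ABCDEFGHIJKLMNOPQRSTUVWXYZ"),
                      fitnessfunc=match_count,
                      pmut=0.1,
                      engine="process")
    reactor.addCores(1).run(steps=100)      # two workers in total
    for best in reactor.get_results():
        print(best, match_count(best))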
コード例 #45
0
                        print(accession, datetime.now(), q)
                    if len(variant_coordinaten) != 0:
                        # Build the graph and attach the nodes and edges
                        process_these_peptides = get_the_peptide_to_be_parsed(
                            zwischensumme_list, variants_on_peptidelevel,
                            aa_seq, SQL_protein_ID, SQL_organism_ID,
                            variant_coordinaten, variant_char, variants,
                            accession)

                        i = 0
                        while i < len(process_these_peptides):
                            worker_queue.put(process_these_peptides[i])
                            i = i + 1

                        print(accession, datetime.now(), q, "len var",
                              len(variants), worker_queue.qsize(),
                              "are in the queue")

                        #process_these_peptides = process_these_peptides[19:23]
            protein_acc = line.rstrip('\n')
            protein_seq = ""
            q = q + 1
            if q % 2 == 0:
                conn.commit()

        else:
            protein_seq += line.rstrip('\n')

procs = []

# last protein
コード例 #46
0
def reade(q: Queue):
    for i in range(q.qsize()):
        print(f'read {q.get()}')
        time.sleep(1)
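Note that qsize() is only an approximate count for a multiprocessing queue (and raises NotImplementedError on macOS), so a reader that stops on a sentinel value, sketched below as an alternative, is usually more robust than looping over qsize().

# Sentinel-based alternative to the reader above: the producer puts None
# when it is finished, and the reader stops on that value instead of
# trusting qsize().
import time
from multiprocessing import Queue

def read_until_sentinel(q: Queue):
    while True:
        item = q.get()        # blocks until something is available
        if item is None:      # sentinel written by the producer when done
            break
        print(f'read {item}')
        time.sleep(1)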
コード例 #47
0
        threads_count = 1 + (new_pages_count - done) / 2

        if threads_count > max_threads_count:
            threads_count = max_threads_count

        workers = [
            Process(target=collect_childs, args=(queue, results_queue))
            for i in xrange(threads_count)
        ]
        for w in workers:
            w.daemon = True
        [w.start() for w in workers]

        for w in workers:
            w.join(timeout=0.1)
            for _ in xrange(results_queue.qsize() - done):
                bar.next()
                done += 1

    recursion += 1
    if recursion > max_recursion:
        break

    while not results_queue.empty():
        childs += results_queue.get()

bar.finish()

with open("all_tested_links", 'a+') as f:
    for page in CACHE:
        f.write(page + "\n")
コード例 #48
0
queue.put([4, 5])
queue.put({'a': 1})

# put() cannot add multiple items in a single call
# queue.put(1, 'a', (1, 2), [4, 5], {'a': 1})

# Once the queue is full, another put() will wait
# queue.put(2323)
# put_nowait() does not wait and raises an error instead
# queue.put_nowait(2323)

# Check whether the queue is full
print('full?', queue.full())

# Number of messages currently in the queue
print('count:', queue.qsize())

# Fetch messages from the queue
value = queue.get()
value = queue.get()
print(value)

# Once the queue has been drained, further get() calls will wait
value = queue.get()
value = queue.get()
value = queue.get()

# Running another get() here would wait; it only returns after a new put
# get_nowait() does not wait and raises immediately; not recommended
# value = queue.get()
# value = queue.get_nowait()
コード例 #49
0
    subs.setDaemon(True)

    pubs = FacePublisher("tcp://127.0.0.1:812354", face_msg_queue)
    pubs.setDaemon(True)

    yololock = Lock()
    facelock = Lock()
    face_proc = []
    for i in range(procnum):
        face_proc.append(
            FaceRecognition(load_mode_finish_q, yolo_msg_queue, yololock,
                            face_msg_queue, facelock))
    for i in range(procnum):
        face_proc[i].start()

    while load_mode_finish_q.qsize() < procnum:
        #print(load_mode_finish_q.get())
        time.sleep(1)

    pubs.start()
    subs.start()

    for i in range(len(face_proc)):
        load_mode_finish_q.get()

    config = edict()
    config.mode = 16
    config.data_dir = 1e-4
    config.classifier = 'KNN'
    config.use_split_dataset = 0
    config.test_data_dir = ''
コード例 #50
0
from multiprocessing import Queue,Process,Pool
from multiprocessing import Manager
import time
import os
import random
'''
Queue.qsize(): return the number of messages currently in the queue;
Queue.empty(): return True if the queue is empty, otherwise False;
Queue.full(): return True if the queue is full, otherwise False;
Queue.get([block[, timeout]]): fetch one message from the queue and remove it;
    block defaults to True;
    1) With the default block=True and no timeout (in seconds), the call blocks
    (stuck reading) while the queue is empty, until a message can be read;
    if a timeout is given, it waits up to timeout seconds and then raises
    "Queue.Empty" if nothing was read;
    2) With block=False, "Queue.Empty" is raised immediately if the queue
    is empty;
Queue.get_nowait(): equivalent to Queue.get(False);
Queue.put(item, [block[, timeout]]): write the message item into the queue;
    block defaults to True;
    1) With the default block=True and no timeout (in seconds), the call blocks
    (stuck writing) while the queue has no free space, until room is freed;
    if a timeout is given, it waits up to timeout seconds and then raises
    "Queue.Full" if there is still no space;
    2) With block=False, "Queue.Full" is raised immediately if the queue has
    no free space;
Queue.put_nowait(item): equivalent to Queue.put(item, False);
'''

# For communication between Process instances, a Queue() is all that is needed
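As the notes above say, a Queue is enough for communication between Process objects. The short producer/consumer sketch below is not from the original file; it reuses the imports at the top of this snippet and stops the reader with a None sentinel.

def write_task(q):
    for i in range(5):
        msg = 'message %d' % i
        q.put(msg)                     # blocks if the queue is full
        print('put %s' % msg)
        time.sleep(random.random())

def read_task(q):
    while True:
        msg = q.get()                  # blocks until a message is available
        if msg is None:                # sentinel: the producer is finished
            break
        print('got %s' % msg)

if __name__ == '__main__':
    q = Queue(3)
    writer = Process(target=write_task, args=(q,))
    reader = Process(target=read_task, args=(q,))
    writer.start()
    reader.start()
    writer.join()
    q.put(None)                        # tell the reader to stop
    reader.join()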
コード例 #51
0
ファイル: nano_cet.py プロジェクト: JulianMuenzberg/pynta
class NanoCET(BaseExperiment):
    """ Experiment class for performing a nanoCET measurement."""
    BACKGROUND_NO_CORRECTION = 0  # No background correction
    BACKGROUND_SINGLE_SNAP = 1

    def __init__(self, filename=None):
        super().__init__()  # Initialize base class
        self.logger = get_logger(name=__name__)

        self.worker_saver_queue = Queue()

        self.load_configuration(filename)

        self.dropped_frames = 0
        self.keep_acquiring = True
        self.acquiring = False  # Status of the acquisition
        self.camera = None  # This will hold the model for the camera
        self.current_height = None
        self.current_width = None
        self.max_width = None
        self.max_height = None
        self.background = None
        self.temp_image = None  # Temporary image, used to quickly have access to 'some' data and display it to the user
        self.movie_buffer = None  # Holds few frames of the movie in order to be able to do some analysis, save later, etc.
        self.last_index = 0  # Last index used for storing to the movie buffer
        self.stream_saving_running = False
        self.async_threads = []  # List holding all the threads spawn
        self.stream_saving_process = None
        self.do_background_correction = False
        self.background_method = self.BACKGROUND_SINGLE_SNAP
        self._stop_event = Event()
        self.waterfall_index = 0

        self.locations_queue = Queue()
        # self.connect(self.print_me, 'free_run')

    def initialize_camera(self):
        """ Initializes the camera to be used to acquire data. The information on the camera should be provided in the
        configuration file and loaded with :meth:`~self.load_configuration`. It will load the camera assuming
        it is located in pynta/model/cameras/[model].

        .. todo:: Define how to load models from outside of pynta. E.g. from a user-specified folder.
        """
        try:
            self.logger.info('Importing camera model {}'.format(
                self.config['camera']['model']))
            self.logger.debug('pynta.model.cameras.' +
                              self.config['camera']['model'])

            camera_model_to_import = 'pynta.model.cameras.' + self.config[
                'camera']['model']
            cam_module = importlib.import_module(camera_model_to_import)
        except ModuleNotFoundError:
            self.logger.error(
                'The model {} for the camera was not found'.format(
                    self.config['camera']['model']))
            raise
        except:
            self.logger.exception('Unhandled exception')
            raise

        cam_init_arguments = self.config['camera']['init']

        if 'extra_args' in self.config['camera']:
            self.logger.info('Initializing camera with extra arguments')
            self.logger.debug('cam_module.camera({}, {})'.format(
                cam_init_arguments, self.config['camera']['extra_args']))
            self.camera = cam_module.camera(
                cam_init_arguments, *self.config['camera']['extra_args'])
        else:
            self.logger.info('Initializing camera without extra arguments')
            self.logger.debug(
                'cam_module.camera({})'.format(cam_init_arguments))
            self.camera = cam_module.camera(cam_init_arguments)
            self.current_width, self.current_height = self.camera.getSize()
            self.logger.info('Camera sensor ROI: {}px X {}px'.format(
                self.current_width, self.current_height))
            self.max_width = self.camera.GetCCDWidth()
            self.max_height = self.camera.GetCCDHeight()
            self.logger.info('Camera sensor size: {}px X {}px'.format(
                self.max_width, self.max_height))

        self.camera.initializeCamera()

    @check_camera
    @check_not_acquiring
    def snap_background(self):
        """ Snaps an image that will be stored as background.
        """
        self.logger.info('Acquiring background image')
        self.camera.configure(self.config['camera'])
        self.camera.setAcquisitionMode(self.camera.MODE_SINGLE_SHOT)
        self.camera.triggerCamera()
        self.background = self.camera.readCamera()[-1]
        self.logger.debug('Got an image of {} pixels'.format(
            self.background.shape))

    @check_camera
    @check_not_acquiring
    def set_roi(self, x, y, width, height):
        """ Sets the region of interest of the camera, provided that the camera supports cropping. All the technicalities
        should be addressed on the camera model, not in this method.

        :param int x: horizontal position for the start of the cropping
        :param int y: vertical position for the start of the cropping
        :param int width: width in pixels for cropping
        :param int height: height in pixels for the cropping
        :raises ValueError: if either dimension of the cropping goes out of the camera total amount of pixels
        :returns: The final cropping dimensions, it may be that the camera limits the user desires
        """
        X = [x, x + width - 1]
        Y = [y, y + height - 1]
        self.logger.debug('Setting new camera ROI to x={},y={}'.format(X, Y))
        Nx, Ny = self.camera.setROI(X, Y)
        self.current_width, self.current_height = self.camera.getSize()
        self.logger.debug('New camera width: {}px, height: {}px'.format(
            self.current_width, self.current_height))
        self.temp_image = np.zeros((Nx, Ny))

    @check_camera
    @check_not_acquiring
    def clear_roi(self):
        """ Clears the region of interest and returns to the full frame of the camera.
        """
        self.logger.info('Clearing ROI settings')
        self.camera.setROI([1, self.max_width], [1, self.max_height])

    @check_camera
    @check_not_acquiring
    @make_async_thread
    def snap(self):
        """ Snap a single frame. It is not an asynchronous method. To make it async, it should be placed within
        a different thread.
        """
        self.logger.info('Snapping a picture')
        self.camera.configure(self.config['camera'])
        self.camera.setAcquisitionMode(self.camera.MODE_SINGLE_SHOT)
        self.camera.triggerCamera()
        self.check_background()
        data = self.camera.readCamera()[-1]
        self.queue.put({'topic': 'snap', 'data': data})
        self.temp_image = data[-1]
        self.logger.debug('Got an image of {} pixels'.format(
            self.temp_image.shape))

    @make_async_thread
    @check_not_acquiring
    @check_camera
    def start_free_run(self):
        """ Starts continuous acquisition from the camera, but it is not being saved. This method is the workhorse
        of the program. While this method runs in its thread, it will broadcast the images to be consumed by other
        methods. In this way it is possible to continuously save to hard drive, track particles, etc.
        """

        self.logger.info('Starting a free run acquisition')
        first = True
        self.keep_acquiring = True  # Change this attribute to stop the acquisition
        self.camera.configure(self.config['camera'])

        while self.keep_acquiring:
            if first:
                self.logger.debug('First frame of a free_run')
                self.camera.setAcquisitionMode(self.camera.MODE_CONTINUOUS)
                self.camera.triggerCamera()  # Triggers the camera only once
                first = False

            data = self.camera.readCamera()
            self.logger.debug('Got {} new frames'.format(len(data)))
            for img in data:
                if self.do_background_correction and self.background_method == self.BACKGROUND_SINGLE_SNAP:
                    img -= self.background

                # This will broadcast the data just acquired with the current timestamp
                # The timestamp is very unreliable, especially if the camera has a frame grabber.
                self.queue.put({
                    'topic': 'free_run',
                    'data': [time.time(), img]
                })
            if self._stop_event.is_set():
                break
            self.temp_image = data[-1]

        self.camera.stopAcq()

    def stop_free_run(self):
        self._stop_event.set()

    def save_image(self):
        """ Saves the last acquired image. The file to which it is going to be saved is defined in the config.
        """
        if self.temp_image is not None:
            self.logger.info('Saving last acquired image')
            # Data will be appended to existing file
            file_name = self.config['saving']['filename_photo'] + '.hdf5'
            file_dir = self.config['saving']['directory']
            if not os.path.exists(file_dir):
                os.makedirs(file_dir)
                self.logger.debug('Created directory {}'.format(file_dir))

            with h5py.File(os.path.join(file_dir, file_name), "a") as f:
                now = str(datetime.now())
                g = f.create_group(now)
                g.create_dataset('image', data=self.temp_image)
                g.create_dataset('metadata', data=json.dumps(self.config))
                f.flush()
            self.logger.debug('Saved image to {}'.format(
                os.path.join(file_dir, file_name)))
        else:
            self.logger.warning(
                'Tried to save an image, but no image was acquired yet.')

    def add_to_stream_queue(self, data):
        """ This method is a buffer between the publisher and the ``save_stream`` method. The idea is that in order
        to be quick (saving to disk may be slow), whatever the publisher sends will be added to a Queue. Another
        process will read from the queue and save it to disk on a separate process.
        """

        img = data[1]
        self.worker_saver_queue.put(img)

    def save_stream(self):
        """ Saves the queue to a file continuously. This is an async function, that can be triggered before starting
        the stream. It relies on the multiprocess library. It uses a queue in order to get the data to be saved.
        In normal operation, it should be used together with ``add_to_stream_queue``.
        """
        if self.save_stream_running:
            self.logger.warning('Tried to start a new instance of save stream')
            raise StreamSavingRunning(
                'You tried to start a new process for stream saving')

        self.logger.info('Starting to save the stream')
        file_name = self.config['saving']['filename_video'] + '.hdf5'
        file_dir = self.config['saving']['directory']
        if not os.path.exists(file_dir):
            os.makedirs(file_dir)
            self.logger.debug('Created directory {}'.format(file_dir))
        file_path = os.path.join(file_dir, file_name)
        self.stream_saving_process = Process(target=worker_saver,
                                             args=(file_path,
                                                   json.dumps(self.config),
                                                   self.worker_saver_queue))
        self.stream_saving_process.start()
        self.logger.debug('Started the stream saving process')

    def stop_save_stream(self):
        """ Stops saving the stream.
        """
        self.logger.info('Stopping the saving stream process')
        if not self.save_stream_running:
            self.logger.warning(
                'The saving stream is not running. Nothing will be done.')
            return
        self.worker_saver_queue.put('Exit')

    @property
    def save_stream_running(self):
        if self.stream_saving_process is not None:
            return self.stream_saving_process.is_alive()
        return False

    def empty_queue(self):
        """ Empties the queue where the data from the movie is being stored.
        """
        self.logger.info('Clearing the saver queue')
        while not self.worker_saver_queue.empty(
        ) or self.worker_saver_queue.qsize() > 0:
            self.worker_saver_queue.get()
        self.logger.debug('Queue cleared')

    def calculate_waterfall(self, image):
        """ A waterfall is the product of summing together all the vertical values of an image and displaying them
        as lines on a 2D image. It is how spectrometers normally work. A waterfall can be produced either by binning the
        image in the vertical direction directly at the camera, or by doing it in software.
        The first has the advantage of speeding up the readout process. The latter has the advantage of working with any
        camera.
        This method will work either with 1D arrays or with 2D arrays and will generate a stack of lines.
        """

        if self.waterfall_index == self.config['waterfall'][
                'length_waterfall']:
            self.waterfall_data = np.zeros(
                (self.config['waterfall']['length_waterfall'],
                 self.camera.width))
            self.waterfall_index = 0

        center_pixel = int(self.camera.height /
                           2)  # Calculates the center of the image
        vbinhalf = int(self.config['waterfall']['vertical_bin'])
        if vbinhalf >= self.current_height / 2 - 1:
            wf = np.array([np.sum(image, 1)])
        else:
            wf = np.array([
                np.sum(
                    image[:, center_pixel - vbinhalf:center_pixel + vbinhalf],
                    1)
            ])
        self.waterfall_data[self.waterfall_index, :] = wf
        self.waterfall_index += 1
        self.queue.put({'topic': 'waterfall_data', 'data': wf})

    def check_background(self):
        """ Checks whether the background is set.
        """

        if self.do_background_correction:
            self.logger.info('Setting up the background correction')
            if self.background_method == self.BACKGROUND_SINGLE_SNAP:
                self.logger.debug('Background single snap')
                # Compare against a tuple: a tuple never equals a list, so the original
                # list comparison would always disable the background correction
                if self.background is None or self.background.shape != (
                        self.current_width, self.current_height):
                    self.logger.warning(
                        'Background not set. Defaulting to no background...')
                    self.background = None
                    self.do_background_correction = False

    def calculate_positions_image(self, image):
        """ Calculates the positions of the particles on an image. It used the trackpy package, which may not be
        installed by default.
        """
        locations = np.random.rand(10, 2)  # placeholder random positions; rand takes separate dimension arguments
        # if trackpy:
        #     locations = tp.locate(image, self.config['trackpy']['size'])
        #     self.queue.put({'topic': 'trackpy', 'data': locations})
        # else:
        #     self.logger.warn('Trackpy is not detected and is the only tracking algorithm available')
        #     raise TrackpyNotInstalled('Trackpy is not installed on this computer')

    def put_position_to_queue(self, locations):
        """ Accumulates the locations into a Queue in order to use them in a generator.
        """
        self.locations_queue.put(locations)

    def link_trajecktories(self, locations):
        print(locations)

    def __exit__(self, *args):
        super(NanoCET, self).__exit__(*args)
        if not self.worker_saver_queue.empty():
            self.logger.info('Emptying the saver queue')
            self.logger.debug('There are {} elements in the queue'.format(
                self.worker_saver_queue.qsize()))
            while not self.worker_saver_queue.empty():
                self.worker_saver_queue.get()
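The saving workflow above (``save_stream``, ``stop_save_stream``, ``empty_queue`` and the draining in ``__exit__``) follows one pattern: frames are pushed onto a ``multiprocessing.Queue``, a separate process drains it, and a sentinel value (here the string ``'Exit'``) tells the worker to stop. The standalone sketch below reproduces only that pattern; ``frame_consumer`` and the fake frames are illustrative stand-ins, not pynta's actual ``worker_saver``.

from multiprocessing import Process, Queue

import numpy as np


def frame_consumer(queue):
    """Drain frames from the queue until the 'Exit' sentinel arrives."""
    saved = 0
    while True:
        item = queue.get()                      # blocks until something is available
        if isinstance(item, str) and item == 'Exit':
            break                               # sentinel received, stop consuming
        saved += 1                              # a real worker would write `item` to disk here
    print('Consumer stored {} frames'.format(saved))


if __name__ == '__main__':
    frame_queue = Queue()
    saver = Process(target=frame_consumer, args=(frame_queue,))
    saver.start()

    for _ in range(10):                         # producer side: push fake frames
        frame_queue.put(np.zeros((4, 4)))
    frame_queue.put('Exit')                     # the equivalent of stop_save_stream()

    saver.join()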
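``calculate_waterfall`` reduces each frame to one line by summing a band of pixels around the centre and stacking the lines. The numpy-only sketch below shows just that reduction step; the frame shape, band half-width and waterfall length are arbitrary example values, not values taken from the project.

import numpy as np

frame_width, frame_height = 64, 48      # example sensor dimensions (assumed)
length_waterfall = 100                  # number of lines kept in the waterfall
vbinhalf = 5                            # half-width of the band that gets summed

waterfall = np.zeros((length_waterfall, frame_width))
center = frame_height // 2

for index in range(length_waterfall):
    image = np.random.rand(frame_width, frame_height)    # stand-in for a camera frame
    # Same reduction as in calculate_waterfall:
    # sum the band image[:, center - vbinhalf:center + vbinhalf] along axis 1
    line = np.sum(image[:, center - vbinhalf:center + vbinhalf], axis=1)
    waterfall[index, :] = line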
コード例 #52
0
ファイル: safaribooks.py プロジェクト: nzcode/safaribooks
class SafariBooks:

    HEADERS = {
        "accept":
        "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
        "accept-encoding":
        "gzip, deflate, br",
        "accept-language":
        "it-IT,it;q=0.9,en-US;q=0.8,en;q=0.7",
        "cache-control":
        "no-cache",
        "cookie":
        "",
        "pragma":
        "no-cache",
        "referer":
        "https://www.safaribooksonline.com/home/",
        "upgrade-insecure-requests":
        "1",
        "user-agent":
        "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) "
        "Chrome/62.0.3202.94 Safari/537.36"
    }

    BASE_URL = "https://www.safaribooksonline.com"
    LOGIN_URL = BASE_URL + "/accounts/login/"
    API_TEMPLATE = BASE_URL + "/api/v1/book/{0}/"

    BASE_01_HTML = "<!DOCTYPE html>\n" \
                   "<html lang=\"en\" xml:lang=\"en\" xmlns=\"http://www.w3.org/1999/xhtml\"" \
                   " xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\"" \
                   " xsi:schemaLocation=\"http://www.w3.org/2002/06/xhtml2/" \
                   " http://www.w3.org/MarkUp/SCHEMA/xhtml2.xsd\"" \
                   " xmlns:epub=\"http://www.idpf.org/2007/ops\">\n" \
                   "<head>\n" \
                   "{0}\n" \
                   "<style type=\"text/css\">" \
                   "body{{background-color:#fbfbfb!important;margin:1em;}}" \
                   "#sbo-rt-content *{{text-indent:0pt!important;}}#sbo-rt-content .bq{{margin-right:1em!important;}}"

    KINDLE_HTML = "#sbo-rt-content *{{word-wrap:break-word!important;" \
                  "word-break:break-word!important;}}#sbo-rt-content table,#sbo-rt-content pre" \
                  "{{overflow-x:unset!important;overflow:unset!important;" \
                  "overflow-y:unset!important;white-space:pre-wrap!important;}}"

    BASE_02_HTML = "</style>" \
                   "</head>\n" \
                   "<body>{1}</body>\n</html>"

    CONTAINER_XML = "<?xml version=\"1.0\"?>" \
                    "<container version=\"1.0\" xmlns=\"urn:oasis:names:tc:opendocument:xmlns:container\">" \
                    "<rootfiles>" \
                    "<rootfile full-path=\"OEBPS/content.opf\" media-type=\"application/oebps-package+xml\" />" \
                    "</rootfiles>" \
                    "</container>"

    # Format: ID, Title, Authors, Description, Subjects, Publisher, Rights, Date, CoverId, MANIFEST, SPINE, CoverUrl
    CONTENT_OPF = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n" \
                  "<package xmlns=\"http://www.idpf.org/2007/opf\" unique-identifier=\"bookid\" version=\"2.0\" >\n" \
                  "<metadata xmlns:dc=\"http://purl.org/dc/elements/1.1/\" " \
                  " xmlns:opf=\"http://www.idpf.org/2007/opf\">\n"\
                  "<dc:title>{1}</dc:title>\n" \
                  "{2}\n" \
                  "<dc:description>{3}</dc:description>\n" \
                  "{4}" \
                  "<dc:publisher>{5}</dc:publisher>\n" \
                  "<dc:rights>{6}</dc:rights>\n" \
                  "<dc:language>en-US</dc:language>\n" \
                  "<dc:date>{7}</dc:date>\n" \
                  "<dc:identifier id=\"bookid\">{0}</dc:identifier>\n" \
                  "<meta name=\"cover\" content=\"{8}\"/>\n" \
                  "</metadata>\n" \
                  "<manifest>\n" \
                  "<item id=\"ncx\" href=\"toc.ncx\" media-type=\"application/x-dtbncx+xml\" />\n" \
                  "{9}\n" \
                  "</manifest>\n" \
                  "<spine toc=\"ncx\">\n{10}</spine>\n" \
                  "<guide><reference href=\"{11}\" title=\"Cover\" type=\"cover\" /></guide>\n" \
                  "</package>"

    # Format: ID, Depth, Title, Author, NAVMAP
    TOC_NCX = "<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"no\" ?>" \
              "<!DOCTYPE ncx PUBLIC \"-//NISO//DTD ncx 2005-1//EN\"" \
              " \"http://www.daisy.org/z3986/2005/ncx-2005-1.dtd\">" \
              "<ncx xmlns=\"http://www.daisy.org/z3986/2005/ncx/\" version=\"2005-1\">" \
              "<head>" \
              "<meta content=\"ID:ISBN:{0}\" name=\"dtb:uid\"/>" \
              "<meta content=\"{1}\" name=\"dtb:depth\"/>" \
              "<meta content=\"0\" name=\"dtb:totalPageCount\"/>" \
              "<meta content=\"0\" name=\"dtb:maxPageNumber\"/>" \
              "</head>" \
              "<docTitle><text>{2}</text></docTitle>" \
              "<docAuthor><text>{3}</text></docAuthor>" \
              "<navMap>{4}</navMap>" \
              "</ncx>"

    def __init__(self, args):
        self.args = args
        self.display = Display("info_%s.log" % escape(args.bookid))
        self.display.intro()

        self.cookies = {}

        if not args.cred:
            if not os.path.isfile(COOKIES_FILE):
                self.display.exit(
                    "Login: unable to find cookies file.\n"
                    "    Please use the --cred option to perform the login.")

            self.cookies = json.load(open(COOKIES_FILE))

        else:
            self.display.info("Logging into Safari Books Online...",
                              state=True)
            self.do_login(
                *[c.replace("'", "").replace('"', "") for c in args.cred])
            if not args.no_cookies:
                json.dump(self.cookies, open(COOKIES_FILE, "w"))

        self.book_id = args.bookid
        self.api_url = self.API_TEMPLATE.format(self.book_id)

        self.display.info("Retrieving book info...")
        self.book_info = self.get_book_info()
        self.display.book_info(self.book_info)

        self.display.info("Retrieving book chapters...")
        self.book_chapters = self.get_book_chapters()

        self.chapters_queue = self.book_chapters[:]

        if len(self.book_chapters) > sys.getrecursionlimit():
            sys.setrecursionlimit(len(self.book_chapters))

        self.book_title = self.book_info["title"]
        self.base_url = self.book_info["web_url"]

        self.clean_book_title = "".join(
            self.escape_dirname(
                self.book_title).split(",")[:2]) + " ({0})".format(
                    self.escape_dirname(", ".join(
                        a["name"] for a in self.book_info["authors"][:2]),
                                        clean_space=True))

        books_dir = os.path.join(PATH, "Books")
        if not os.path.isdir(books_dir):
            os.mkdir(books_dir)

        self.BOOK_PATH = os.path.join(books_dir, self.clean_book_title)
        self.css_path = ""
        self.images_path = ""
        self.create_dirs()
        self.display.info("Output directory:\n    %s" % self.BOOK_PATH)

        self.chapter_title = ""
        self.filename = ""
        self.css = []
        self.images = []

        self.display.info("Downloading book contents... (%s chapters)" %
                          len(self.book_chapters),
                          state=True)
        self.BASE_HTML = self.BASE_01_HTML + (
            self.KINDLE_HTML if not args.no_kindle else "") + self.BASE_02_HTML

        self.cover = False
        self.get()
        if not self.cover:
            self.cover = self.get_default_cover()
            cover_html = self.parse_html(
                html.fromstring(
                    "<div id=\"sbo-rt-content\"><img src=\"Images/{0}\"></div>"
                    .format(self.cover)), True)

            self.book_chapters = [{
                "filename": "default_cover.xhtml",
                "title": "Cover"
            }] + self.book_chapters

            self.filename = self.book_chapters[0]["filename"]
            self.save_page_html(cover_html)

        self.css_done_queue = Queue(
            0) if "win" not in sys.platform else WinQueue()
        self.display.info("Downloading book CSSs... (%s files)" %
                          len(self.css),
                          state=True)
        self.collect_css()
        self.images_done_queue = Queue(
            0) if "win" not in sys.platform else WinQueue()
        self.display.info("Downloading book images... (%s files)" %
                          len(self.images),
                          state=True)
        self.collect_images()

        self.display.info("Creating EPUB file...", state=True)
        self.create_epub()

        if not args.no_cookies:
            json.dump(self.cookies, open(COOKIES_FILE, "w"))

        self.display.done(os.path.join(self.BOOK_PATH, self.book_id + ".epub"))
        self.display.unregister()

        if not self.display.in_error and not args.log:
            os.remove(self.display.log_file)

        sys.exit(0)

    def return_cookies(self):
        return " ".join(
            ["{0}={1};".format(k, v) for k, v in self.cookies.items()])

    def return_headers(self, url):
        if "safaribooksonline" in urlsplit(url).netloc:
            self.HEADERS["cookie"] = self.return_cookies()

        else:
            self.HEADERS["cookie"] = ""

        return self.HEADERS

    def update_cookies(self, jar):
        for cookie in jar:
            self.cookies.update({cookie.name: cookie.value})

    def requests_provider(self,
                          url,
                          post=False,
                          data=None,
                          update_cookies=True,
                          **kwargs):
        try:
            response = getattr(requests, "post" if post else "get")(
                url, headers=self.return_headers(url), data=data, **kwargs)

            self.display.last_request = (url, data, kwargs,
                                         response.status_code, "\n".join([
                                             "\t{}: {}".format(*h)
                                             for h in response.headers.items()
                                         ]), response.text)

        except (requests.ConnectionError, requests.ConnectTimeout,
                requests.RequestException) as request_exception:
            self.display.error(str(request_exception))
            return 0

        if update_cookies:
            self.update_cookies(response.cookies)

        return response

    def do_login(self, email, password):
        response = self.requests_provider(self.BASE_URL)
        if response == 0:
            self.display.exit(
                "Login: unable to reach Safari Books Online. Try again...")

        csrf = []
        try:
            csrf = html.fromstring(response.text).xpath(
                "//input[@name='csrfmiddlewaretoken'][@value]")

        except (html.etree.ParseError,
                html.etree.ParserError) as parsing_error:
            self.display.error(parsing_error)
            self.display.exit(
                "Login: error trying to parse the home of Safari Books Online."
            )

        if not len(csrf):
            self.display.exit("Login: no CSRF Token found in the page."
                              " Unable to continue the login."
                              " Try again...")

        csrf = csrf[0].attrib["value"]
        response = self.requests_provider(
            self.LOGIN_URL,
            post=True,
            data=(("csrfmiddlewaretoken", ""), ("csrfmiddlewaretoken", csrf),
                  ("email", email), ("password1", password),
                  ("is_login_form", "true"), ("leaveblank", ""), ("dontchange",
                                                                  "http://")),
            allow_redirects=False)

        if response == 0:
            self.display.exit(
                "Login: unable to perform auth to Safari Books Online.\n    Try again..."
            )

        if response.status_code != 302:
            try:
                error_page = html.fromstring(response.text)
                errors_message = error_page.xpath(
                    "//ul[@class='errorlist']//li/text()")
                recaptcha = error_page.xpath("//div[@class='g-recaptcha']")
                messages = (["    `%s`" % error for error in errors_message
                            if "password" in error or "email" in error] if len(errors_message) else []) +\
                           (["    `ReCaptcha required (wait or do logout from the website).`"] if len(recaptcha) else[])
                self.display.exit(
                    "Login: unable to perform auth login to Safari Books Online.\n"
                    + self.display.SH_YELLOW + "[*]" +
                    self.display.SH_DEFAULT + " Details:\n"
                    "%s" % "\n".join(messages if len(messages) else
                                     ["    Unexpected error!"]))
            except (html.etree.ParseError,
                    html.etree.ParserError) as parsing_error:
                self.display.error(parsing_error)
                self.display.exit(
                    "Login: your login went wrong and it encountered in an error"
                    " trying to parse the login details of Safari Books Online. Try again..."
                )

    def get_book_info(self):
        response = self.requests_provider(self.api_url)
        if response == 0:
            self.display.exit("API: unable to retrieve book info.")

        response = response.json()
        if not isinstance(response, dict) or len(response.keys()) == 1:
            self.display.exit(self.display.api_error(response))

        if "last_chapter_read" in response:
            del response["last_chapter_read"]

        return response

    def get_book_chapters(self, page=1):
        response = self.requests_provider(
            urljoin(self.api_url, "chapter/?page=%s" % page))
        if response == 0:
            self.display.exit("API: unable to retrieve book chapters.")

        response = response.json()

        if not isinstance(response, dict) or len(response.keys()) == 1:
            self.display.exit(self.display.api_error(response))

        if "results" not in response or not len(response["results"]):
            self.display.exit("API: unable to retrieve book chapters.")

        if response["count"] > sys.getrecursionlimit():
            sys.setrecursionlimit(response["count"])

        result = []
        result.extend([
            c for c in response["results"]
            if "cover" in c["filename"] or "cover" in c["title"]
        ])
        for c in result:
            del response["results"][response["results"].index(c)]

        result += response["results"]
        return result + (self.get_book_chapters(page +
                                                1) if response["next"] else [])

    def get_default_cover(self):
        response = self.requests_provider(self.book_info["cover"],
                                          update_cookies=False,
                                          stream=True)
        if response == 0:
            self.display.error("Error trying to retrieve the cover: %s" %
                               self.book_info["cover"])
            return False

        file_ext = response.headers["Content-Type"].split("/")[-1]
        with open(os.path.join(self.images_path, "default_cover." + file_ext),
                  'wb') as i:
            for chunk in response.iter_content(1024):
                i.write(chunk)

        return "default_cover." + file_ext

    def get_html(self, url):
        response = self.requests_provider(url)
        if response == 0:
            self.display.exit(
                "Crawler: error trying to retrieve this page: %s (%s)\n    From: %s"
                % (self.filename, self.chapter_title, url))

        root = None
        try:
            root = html.fromstring(response.text, base_url=self.BASE_URL)

        except (html.etree.ParseError,
                html.etree.ParserError) as parsing_error:
            self.display.error(parsing_error)
            self.display.exit(
                "Crawler: error trying to parse this page: %s (%s)\n    From: %s"
                % (self.filename, self.chapter_title, url))

        return root

    @staticmethod
    def url_is_absolute(url):
        return bool(urlparse(url).netloc)

    def link_replace(self, link):
        if link:
            if not self.url_is_absolute(link):
                if "cover" in link or "images" in link or "graphics" in link or \
                        link[-3:] in ["jpg", "peg", "png", "gif"]:
                    link = urljoin(self.base_url, link)
                    if link not in self.images:
                        self.images.append(link)
                        self.display.log("Crawler: found a new image at %s" %
                                         link)

                    image = link.split("/")[-1]
                    return "Images/" + image

                return link.replace(".html", ".xhtml")

            else:
                if self.book_id in link:
                    return self.link_replace(link.split(self.book_id)[-1])

        return link

    @staticmethod
    def get_cover(html_root):
        images = html_root.xpath(
            "//img[contains(@id, 'cover') or contains(@class, 'cover') or"
            "contains(@name, 'cover') or contains(@src, 'cover')]")
        if len(images):
            return images[0]

        divs = html_root.xpath(
            "//div[contains(@id, 'cover') or contains(@class, 'cover') or"
            "contains(@name, 'cover') or contains(@src, 'cover')]//img")
        if len(divs):
            return divs[0]

        a = html_root.xpath(
            "//a[contains(@id, 'cover') or contains(@class, 'cover') or"
            "contains(@name, 'cover') or contains(@src, 'cover')]//img")
        if len(a):
            return a[0]

        return None

    def parse_html(self, root, first_page=False):
        if random() > 0.5:
            if len(root.xpath("//div[@class='controls']/a/text()")):
                self.display.exit(self.display.api_error(" "))

        book_content = root.xpath("//div[@id='sbo-rt-content']")
        if not len(book_content):
            self.display.exit(
                "Parser: book content's corrupted or not present: %s (%s)" %
                (self.filename, self.chapter_title))

        page_css = ""
        stylesheet_links = root.xpath("//link[@rel='stylesheet']")
        if len(stylesheet_links):
            stylesheet_count = 0
            for s in stylesheet_links:
                css_url = urljoin("https:", s.attrib["href"]) if s.attrib["href"][:2] == "//" \
                    else urljoin(self.base_url, s.attrib["href"])

                if css_url not in self.css:
                    self.css.append(css_url)
                    self.display.log("Crawler: found a new CSS at %s" %
                                     css_url)

                page_css += "<link href=\"Styles/Style{0:0>2}.css\" " \
                            "rel=\"stylesheet\" type=\"text/css\" />\n".format(stylesheet_count)
                stylesheet_count += 1

        stylesheets = root.xpath("//style")
        if len(stylesheets):
            for css in stylesheets:
                if "data-template" in css.attrib and len(
                        css.attrib["data-template"]):
                    css.text = css.attrib["data-template"]
                    del css.attrib["data-template"]

                try:
                    page_css += html.tostring(
                        css, method="xml", encoding='unicode') + "\n"

                except (html.etree.ParseError,
                        html.etree.ParserError) as parsing_error:
                    self.display.error(parsing_error)
                    self.display.exit(
                        "Parser: error trying to parse one CSS found in this page: %s (%s)"
                        % (self.filename, self.chapter_title))

        # TODO: add all not covered tag for `link_replace` function
        svg_image_tags = root.xpath("//image")
        if len(svg_image_tags):
            for img in svg_image_tags:
                image_attr_href = [x for x in img.attrib.keys() if "href" in x]
                if len(image_attr_href):
                    svg_url = img.attrib.get(image_attr_href[0])
                    svg_root = img.getparent().getparent()
                    new_img = svg_root.makeelement("img")
                    new_img.attrib.update({"src": svg_url})
                    svg_root.remove(img.getparent())
                    svg_root.append(new_img)

        book_content = book_content[0]
        book_content.rewrite_links(self.link_replace)

        xhtml = None
        try:
            if first_page:
                is_cover = self.get_cover(book_content)
                if is_cover is not None:
                    page_css = "<style>" \
                               "body{display:table;position:absolute;margin:0!important;height:100%;width:100%;}" \
                               "#Cover{display:table-cell;vertical-align:middle;text-align:center;}" \
                               "img{height:90vh;margin-left:auto;margin-right:auto;}" \
                               "</style>"
                    cover_html = html.fromstring("<div id=\"Cover\"></div>")
                    cover_div = cover_html.xpath("//div")[0]
                    cover_img = cover_div.makeelement("img")
                    cover_img.attrib.update({"src": is_cover.attrib["src"]})
                    cover_div.append(cover_img)
                    book_content = cover_html

                    self.cover = is_cover.attrib["src"]

            xhtml = html.tostring(book_content,
                                  method="xml",
                                  encoding='unicode')

        except (html.etree.ParseError,
                html.etree.ParserError) as parsing_error:
            self.display.error(parsing_error)
            self.display.exit(
                "Parser: error trying to parse HTML of this page: %s (%s)" %
                (self.filename, self.chapter_title))

        return page_css, xhtml

    @staticmethod
    def escape_dirname(dirname, clean_space=False):
        if ":" in dirname:
            if dirname.index(":") > 15:
                dirname = dirname.split(":")[0]

            elif "win" in sys.platform:
                dirname = dirname.replace(":", ",")

        for ch in [
                '~', '#', '%', '&', '*', '{', '}', '\\', '<', '>', '?', '/',
                '`', '\'', '"', '|', '+'
        ]:
            if ch in dirname:
                dirname = dirname.replace(ch, "_")

        return dirname if not clean_space else dirname.replace(" ", "")

    def create_dirs(self):
        if os.path.isdir(self.BOOK_PATH):
            self.display.log("Book directory already exists: %s" %
                             self.BOOK_PATH)

        else:
            os.makedirs(self.BOOK_PATH)

        oebps = os.path.join(self.BOOK_PATH, "OEBPS")
        if not os.path.isdir(oebps):
            self.display.book_ad_info = True
            os.makedirs(oebps)

        self.css_path = os.path.join(oebps, "Styles")
        if os.path.isdir(self.css_path):
            self.display.log("CSSs directory already exists: %s" %
                             self.css_path)

        else:
            os.makedirs(self.css_path)
            self.display.css_ad_info.value = 1

        self.images_path = os.path.join(oebps, "Images")
        if os.path.isdir(self.images_path):
            self.display.log("Images directory already exists: %s" %
                             self.images_path)

        else:
            os.makedirs(self.images_path)
            self.display.images_ad_info.value = 1

    def save_page_html(self, contents):
        self.filename = self.filename.replace(".html", ".xhtml")
        open(os.path.join(self.BOOK_PATH, "OEBPS", self.filename), "wb")\
            .write(self.BASE_HTML.format(contents[0], contents[1]).encode("utf-8", 'xmlcharrefreplace'))
        self.display.log("Created: %s" % self.filename)

    def get(self):
        len_books = len(self.book_chapters)

        for _ in range(len_books):
            if not len(self.chapters_queue):
                return

            first_page = len_books == len(self.chapters_queue)

            next_chapter = self.chapters_queue.pop(0)
            self.chapter_title = next_chapter["title"]
            self.filename = next_chapter["filename"]

            if os.path.isfile(
                    os.path.join(self.BOOK_PATH, "OEBPS",
                                 self.filename.replace(".html", ".xhtml"))):
                if not self.display.book_ad_info and \
                        next_chapter not in self.book_chapters[:self.book_chapters.index(next_chapter)]:
                    self.display.info(
                        "File `%s` already exists.\n"
                        "    If you want to download again all the book%s,\n"
                        "    please delete the `<BOOK NAME>/OEBPS/*.xhtml` files and restart the program."
                        %
                        (self.filename.replace(".html", ".xhtml"),
                         " (especially because you selected the `--no-kindle` option)"
                         if self.args.no_kindle else ""))
                    self.display.book_ad_info = 2

            else:
                self.save_page_html(
                    self.parse_html(self.get_html(next_chapter["web_url"]),
                                    first_page))

            self.display.state(len_books, len_books - len(self.chapters_queue))

    def _thread_download_css(self, url):
        css_file = os.path.join(self.css_path,
                                "Style{0:0>2}.css".format(self.css.index(url)))
        if os.path.isfile(css_file):
            if not self.display.css_ad_info.value and \
                    url not in self.css[:self.css.index(url)]:
                self.display.info(
                    "File `%s` already exists.\n"
                    "    If you want to download again all the CSSs,\n"
                    "    please delete the `<BOOK NAME>/OEBPS/*.xhtml` and `<BOOK NAME>/OEBPS/Styles/*`"
                    " files and restart the program." % css_file)
                self.display.css_ad_info.value = 1

        else:
            response = self.requests_provider(url, update_cookies=False)
            if response == 0:
                self.display.error(
                    "Error trying to retrieve this CSS: %s\n    From: %s" %
                    (css_file, url))

            else:
                # Only write the file when the request actually succeeded
                with open(css_file, 'wb') as s:
                    for chunk in response.iter_content(1024):
                        s.write(chunk)

        self.css_done_queue.put(1)
        self.display.state(len(self.css), self.css_done_queue.qsize())

    def _thread_download_images(self, url):
        image_name = url.split("/")[-1]
        image_path = os.path.join(self.images_path, image_name)
        if os.path.isfile(image_path):
            if not self.display.images_ad_info.value and \
                    url not in self.images[:self.images.index(url)]:
                self.display.info(
                    "File `%s` already exists.\n"
                    "    If you want to download again all the images,\n"
                    "    please delete the `<BOOK NAME>/OEBPS/*.xhtml` and `<BOOK NAME>/OEBPS/Images/*`"
                    " files and restart the program." % image_name)
                self.display.images_ad_info.value = 1

        else:
            response = self.requests_provider(urljoin(self.BASE_URL, url),
                                              update_cookies=False,
                                              stream=True)
            if response == 0:
                self.display.error(
                    "Error trying to retrieve this image: %s\n    From: %s" %
                    (image_name, url))

            else:
                # Only write the file when the request actually succeeded
                with open(image_path, 'wb') as img:
                    for chunk in response.iter_content(1024):
                        img.write(chunk)

        self.images_done_queue.put(1)
        self.display.state(len(self.images), self.images_done_queue.qsize())

    def _start_multiprocessing(self, operation, full_queue):
        if len(full_queue) > 5:
            for i in range(0, len(full_queue), 5):
                self._start_multiprocessing(operation, full_queue[i:i + 5])

        else:
            process_queue = [
                Process(target=operation, args=(arg, )) for arg in full_queue
            ]
            for proc in process_queue:
                proc.start()

            for proc in process_queue:
                proc.join()

    def collect_css(self):
        self.display.state_status.value = -1

        if "win" in sys.platform:
            # TODO
            for css_url in self.css:
                self._thread_download_css(css_url)

        else:
            self._start_multiprocessing(self._thread_download_css, self.css)

    def collect_images(self):
        if self.display.book_ad_info == 2:
            self.display.info(
                "Some of the book contents were already downloaded.\n"
                "    If you want to be sure that all the images will be downloaded,\n"
                "    please delete the `<BOOK NAME>/OEBPS/*.xhtml` files and restart the program."
            )

        self.display.state_status.value = -1

        if "win" in sys.platform:
            # TODO
            for image_url in self.images:
                self._thread_download_images(image_url)

        else:
            self._start_multiprocessing(self._thread_download_images,
                                        self.images)

    def create_content_opf(self):
        self.css = next(os.walk(self.css_path))[2]
        self.images = next(os.walk(self.images_path))[2]

        manifest = []
        spine = []
        for c in self.book_chapters:
            c["filename"] = c["filename"].replace(".html", ".xhtml")
            item_id = escape("".join(c["filename"].split(".")[:-1]))
            manifest.append(
                "<item id=\"{0}\" href=\"{1}\" media-type=\"application/xhtml+xml\" />"
                .format(item_id, c["filename"]))
            spine.append("<itemref idref=\"{0}\"/>".format(item_id))

        for i in set(self.images):
            dot_split = i.split(".")
            head = "img_" + escape("".join(dot_split[:-1]))
            extension = dot_split[-1]
            manifest.append(
                "<item id=\"{0}\" href=\"Images/{1}\" media-type=\"image/{2}\" />"
                .format(head, i, "jpeg" if "jp" in extension else extension))

        for i in range(len(self.css)):
            manifest.append(
                "<item id=\"style_{0:0>2}\" href=\"Styles/Style{0:0>2}.css\" "
                "media-type=\"text/css\" />".format(i))

        authors = "\n".join(
            "<dc:creator opf:file-as=\"{0}\" opf:role=\"aut\">{0}</dc:creator>"
            .format(escape(aut["name"])) for aut in self.book_info["authors"])

        subjects = "\n".join(
            "<dc:subject>{0}</dc:subject>".format(escape(sub["name"]))
            for sub in self.book_info["subjects"])

        return self.CONTENT_OPF.format(
            (self.book_info["isbn"]
             if self.book_info["isbn"] else self.book_id),
            escape(self.book_title), authors,
            escape(self.book_info["description"]), subjects, ", ".join(
                escape(pub["name"]) for pub in self.book_info["publishers"]),
            escape(self.book_info["rights"]), self.book_info["issued"],
            self.cover, "\n".join(manifest), "\n".join(spine),
            self.book_chapters[0]["filename"].replace(".html", ".xhtml"))

    @staticmethod
    def parse_toc(l, c=0, mx=0):
        r = ""
        for cc in l:
            c += 1
            if int(cc["depth"]) > mx:
                mx = int(cc["depth"])

            r += "<navPoint id=\"{0}\" playOrder=\"{1}\">" \
                 "<navLabel><text>{2}</text></navLabel>" \
                 "<content src=\"{3}\"/>".format(
                    cc["fragment"] if len(cc["fragment"]) else cc["id"], c,
                    escape(cc["label"]), cc["href"].replace(".html", ".xhtml").split("/")[-1]
                 )

            if cc["children"]:
                sr, c, mx = SafariBooks.parse_toc(cc["children"], c, mx)
                r += sr

            r += "</navPoint>\n"

        return r, c, mx

    def create_toc(self):
        response = self.requests_provider(urljoin(self.api_url, "toc/"))
        if response == 0:
            self.display.exit(
                "API: unable to retrieve book chapters. "
                "Don't delete any files, just run again this program"
                " in order to complete the `.epub` creation!")

        response = response.json()

        if not isinstance(response, list) and len(response.keys()) == 1:
            self.display.exit(
                self.display.api_error(response) +
                " Don't delete any files, just run again this program"
                " in order to complete the `.epub` creation!")

        navmap, _, max_depth = self.parse_toc(response)
        return self.TOC_NCX.format(
            (self.book_info["isbn"] if self.book_info["isbn"] else
             self.book_id), max_depth, self.book_title,
            ", ".join(aut["name"]
                      for aut in self.book_info["authors"]), navmap)

    def create_epub(self):
        open(os.path.join(self.BOOK_PATH, "mimetype"),
             "w").write("application/epub+zip")
        meta_info = os.path.join(self.BOOK_PATH, "META-INF")
        if os.path.isdir(meta_info):
            self.display.log("META-INF directory already exists: %s" %
                             meta_info)

        else:
            os.makedirs(meta_info)

        open(os.path.join(meta_info, "container.xml"), "wb").write(
            self.CONTAINER_XML.encode("utf-8", "xmlcharrefreplace"))
        open(os.path.join(self.BOOK_PATH, "OEBPS", "content.opf"),
             "wb").write(self.create_content_opf().encode(
                 "utf-8", "xmlcharrefreplace"))
        open(os.path.join(self.BOOK_PATH, "OEBPS", "toc.ncx"),
             "wb").write(self.create_toc().encode("utf-8",
                                                  "xmlcharrefreplace"))

        zip_file = os.path.join(PATH, "Books", self.book_id)
        if os.path.isfile(zip_file + ".zip"):
            os.remove(zip_file + ".zip")

        shutil.make_archive(zip_file, 'zip', self.BOOK_PATH)
        os.rename(zip_file + ".zip",
                  os.path.join(self.BOOK_PATH, self.book_id) + ".epub")
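``_start_multiprocessing`` above runs the CSS and image downloads in batches of five processes, and each worker reports completion through ``css_done_queue`` / ``images_done_queue``. The sketch below is a self-contained illustration of that batching-plus-counter pattern with a dummy job; the batch size, job list and function names are assumptions, not the project's actual downloader.

from multiprocessing import Process, Queue


def download_job(url, done_queue):
    """Stand-in for _thread_download_css / _thread_download_images."""
    # ... fetch `url` and write it to disk here ...
    done_queue.put(1)                         # signal completion, like css_done_queue.put(1)


def run_in_batches(job, items, done_queue, batch_size=5):
    """Run `job` over `items`, at most `batch_size` processes at a time."""
    for start in range(0, len(items), batch_size):
        batch = [Process(target=job, args=(item, done_queue))
                 for item in items[start:start + batch_size]]
        for proc in batch:
            proc.start()
        for proc in batch:
            proc.join()                       # wait for the whole batch before starting the next one


if __name__ == '__main__':
    done = Queue()
    urls = ['style{:02d}.css'.format(i) for i in range(12)]
    run_in_batches(download_job, urls, done)
    finished = sum(done.get() for _ in range(len(urls)))   # one 1 per completed job
    print('{} of {} jobs finished'.format(finished, len(urls)))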
コード例 #53
0
ファイル: com.py プロジェクト: ISIR-MAP/multicom
class HDevice:
    """ Function to read/write to device """
    def __init__(self, proto):
        super(HDevice, self).__init__()
        self.fifoin = Queue()
        self.fifoout = Queue()
        self.proto = proto
        if proto == "ftdi":
            self.processdev = _DeviceProcess(self.fifoin, self.fifoout)
        else:
            import re
            globals()["re"] = re
            self.processdev = _DeviceProcessserial(self.fifoin, self.fifoout, self.proto)
    def launch(self):
        """ Launch the process for device communication """
        self.processdev.start()
        pid = self.processdev.pid
        p = psutil.Process(self.processdev.pid)
        p.nice(psutil.HIGH_PRIORITY_CLASS)
        print(str(pid) + "est le pid")
    def get(self):
        """ get byte from device """
        return self.fifoin.get()
    def quit(self):
        """ quit device process """
        self.processdev.terminate()
        self.processdev.join()
    def extract(self, size):
        """ Extract 'size' bytes from 'fifo' and return a bytearray """
        rec = bytearray([0]*size)
        for i in range(0, size):
            rec[i] = int.from_bytes(self.get(), 'big')
        return rec
    def readarray(self, size):
        """ read a bytearray from device """
        return bytearray(self.extract(size))
    def readascii(self):
        """ read data in ascii from serial port """
        data = self.get()
        return data.decode('ascii','backslashreplace')
    def readsep(self, sep, size):
        """ read data using Regexp """
        data = self.readascii()
        regexp = ""
        for i in range(0,size):
            regexp = regexp + r"([0-9]+(?:\.[0-9]+)?)(?:" + sep + ")"
        #regexp = r"([0-9]+(?:\.[0-9]+)?)(?:\|)([0-9]+(?:\.[0-9]+)?)"
        retour = re.findall(regexp, data)
        try:
            return retour[0]
        except Exception:
            return (0,0,0,0)
    def incommingsize(self):
        """get the incomming buffer size"""
        return self.fifoin.qsize()
    def writeint(self, tosend):
        """write data to haptic device"""
        bufenvoi = bytearray(4)
        bufenvoi[0] = int(tosend) & int('0b00111111', 2)
        bufenvoi[1] = ((int(tosend) >> 6) & int('0b00111111', 2)) | int('0b01000000', 2)
        bufenvoi[2] = ((int(tosend) >> 12) & int('0b00111111', 2)) | int('0b10000000', 2)
        bufenvoi[3] = int('0b11000000', 2)
        self.fifoout.put(bufenvoi)
    def write(self, tosend):
        """ convert and send data to haptic device"""
        forcenow = max(min(tosend, 130), -130)
        forcenowint = 32767*(1+forcenow/130)
        self.writeint(forcenowint)
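``writeint`` splits an integer into three 6-bit groups and tags each byte with a 2-bit marker (``00``, ``01``, ``10``), closing the frame with ``0b11000000``. The sketch below reproduces that packing and adds a matching decoder so the round trip can be verified; the decoder is inferred from the packing code for illustration and is not part of the original project.

def pack_frame(value):
    """Pack an 18-bit integer into the 4-byte frame used by HDevice.writeint."""
    value = int(value)
    frame = bytearray(4)
    frame[0] = value & 0b00111111                          # bits 0-5,   tag 00
    frame[1] = ((value >> 6) & 0b00111111) | 0b01000000    # bits 6-11,  tag 01
    frame[2] = ((value >> 12) & 0b00111111) | 0b10000000   # bits 12-17, tag 10
    frame[3] = 0b11000000                                  # end-of-frame marker, tag 11
    return frame


def unpack_frame(frame):
    """Inverse of pack_frame: strip the 2-bit tags and reassemble the value."""
    return ((frame[0] & 0b00111111)
            | ((frame[1] & 0b00111111) << 6)
            | ((frame[2] & 0b00111111) << 12))


if __name__ == '__main__':
    original = 32767                 # mid-scale value, i.e. what HDevice.write(0) would send
    assert unpack_frame(pack_frame(original)) == original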
コード例 #54
0
ファイル: queue1.py プロジェクト: LzWaiting/03.PythonProcess
from multiprocessing import Queue
from time import sleep

# create the queue
q = Queue(3)

q.put(1)
sleep(0.5)
print(q.empty())
q.put(2)
sleep(0.5)
print(q.qsize())
q.put(3)
sleep(0.5)
print(q.full())
# q.put(4,False,3)
print(q.get())
q.close()
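The commented-out line ``q.put(4, False, 3)`` above hints at what happens when a bounded queue is full: with ``block=False`` the ``put`` raises ``queue.Full`` immediately (the timeout argument is ignored), while a blocking ``put`` with a timeout raises ``queue.Full`` only after the timeout expires. A small sketch of both behaviours:

import queue
from multiprocessing import Queue

q = Queue(3)                 # bounded queue, capacity 3
for item in (1, 2, 3):
    q.put(item)

try:
    q.put(4, False)          # non-blocking: raises queue.Full right away
except queue.Full:
    print('non-blocking put failed immediately')

try:
    q.put(4, True, 1)        # blocking with a timeout: raises queue.Full after ~1 s
except queue.Full:
    print('blocking put timed out')

while not q.empty():         # drain the queue before closing it
    print(q.get())
q.close()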
コード例 #55
0
class OpenQuoteContext:
    def __init__(self, host="127.0.0.1", sync_port=11111, async_port=11111):
        """
        create a context to establish a network connection
        :param host: the address of the network connection
        :param sync_port: network connection port for synchronous communication
        :param async_port: network connection port for asynchronous communication, receiving data pushed by the client
        """
        self.__host = host
        self.__sync_port = sync_port
        self.__async_port = async_port

        self._req_queue = Queue()
        self._handlers_ctx = HandlerContext()

        self._async_ctx = _AsyncNetworkManager(self.__host, self.__async_port,
                                               self._handlers_ctx)
        self._proc_run = False
        self._sync_net_ctx = _SyncNetworkQueryCtx(self.__host,
                                                  self.__sync_port,
                                                  long_conn=True)
        self._net_proc = Thread(target=_net_proc,
                                args=(
                                    self._async_ctx,
                                    self._req_queue,
                                ))

    def __del__(self):
        if self._proc_run:
            self._proc_run = False
            self._stop_net_proc()
            self._net_proc.join(timeout=5)

    def set_handler(self, handler):
        return self._handlers_ctx.set_handler(handler)

    def start(self):
        """
        start the receiving thread and asynchronously receive the data pushed by the client
        """
        self._net_proc = Thread(target=_net_proc,
                                args=(
                                    self._async_ctx,
                                    self._req_queue,
                                ))
        self._net_proc.start()
        self._proc_run = True

    def stop(self):
        """
        stop the receiving thread so that data pushed by the client is no longer received
        """
        if self._proc_run:
            self._stop_net_proc()
            self._net_proc.join(timeout=5)
            self._proc_run = False
        self._net_proc = Thread(target=_net_proc,
                                args=(
                                    self._async_ctx,
                                    self._req_queue,
                                ))

    def _send_sync_req(self, req_str):
        """
        send a synchronous request
        """
        ret, msg, content = self._sync_net_ctx.network_query(req_str)
        if ret != RET_OK:
            return RET_ERROR, msg, None
        return RET_OK, msg, content

    def _send_async_req(self, req_str):
        """
        send an asynchronous request
        """
        if self._req_queue.full() is False:
            try:
                self._req_queue.put((True, req_str), timeout=1)
                return RET_OK, ''
            except Exception as e:
                _ = e
                err = sys.exc_info()[1]
                error_str = ERROR_STR_PREFIX + str(err)
                return RET_ERROR, error_str
        else:
            error_str = ERROR_STR_PREFIX + "Request queue is full. The size: %s" % self._req_queue.qsize(
            )
            return RET_ERROR, error_str

    def _get_sync_query_processor(self, pack_func, unpack_func):
        """
        build a synchronous query processor from the pack/unpack functions
        :param pack_func: function used to pack the request
        :param unpack_func: function used to unpack the response
        :return: sync_query_processor
        """
        send_req = self._send_sync_req

        def sync_query_processor(**kargs):
            ret_code, msg, req_str = pack_func(**kargs)
            if ret_code == RET_ERROR:
                return ret_code, msg, None

            ret_code, msg, rsp_str = send_req(req_str)

            if ret_code == RET_ERROR:
                return ret_code, msg, None

            ret_code, msg, content = unpack_func(rsp_str)
            if ret_code == RET_ERROR:
                return ret_code, msg, None
            return RET_OK, msg, content

        return sync_query_processor

    def _stop_net_proc(self):
        """
        send the stop signal to the network thread
        :return: (ret_code, error_str)
        """
        if self._req_queue.full() is False:
            try:
                self._req_queue.put((False, None), timeout=1)
                return RET_OK, ''
            except Exception as e:
                _ = e
                err = sys.exc_info()[1]
                error_str = ERROR_STR_PREFIX + str(err)
                return RET_ERROR, error_str
        else:
            error_str = ERROR_STR_PREFIX + "Cannot send stop request. queue is full. The size: %s" \
                                           % self._req_queue.qsize()
            return RET_ERROR, error_str

    def get_trading_days(self, market, start_date=None, end_date=None):

        if market is None or isinstance(market, str) is False:
            error_str = ERROR_STR_PREFIX + "the type of market param is wrong"
            return RET_ERROR, error_str

        if start_date is not None and isinstance(start_date, str) is False:
            error_str = ERROR_STR_PREFIX + "the type of start_date param is wrong"
            return RET_ERROR, error_str

        if end_date is not None and isinstance(end_date, str) is False:
            error_str = ERROR_STR_PREFIX + "the type of end_date param is wrong"
            return RET_ERROR, error_str

        query_processor = self._get_sync_query_processor(
            TradeDayQuery.pack_req, TradeDayQuery.unpack_rsp)

        # the keys of kargs should be corresponding to the actual function arguments
        kargs = {
            'market': market,
            'start_date': start_date,
            "end_date": end_date
        }
        ret_code, msg, trade_day_list = query_processor(**kargs)

        if ret_code != RET_OK:
            return RET_ERROR, msg

        return RET_OK, trade_day_list

    def get_stock_basicinfo(self, market, stock_type='STOCK'):
        param_table = {'market': market, 'stock_type': stock_type}
        for x in param_table:
            param = param_table[x]
            if param is None or isinstance(param, str) is False:
                error_str = ERROR_STR_PREFIX + "the type of %s param is wrong" % x
                return RET_ERROR, error_str

        query_processor = self._get_sync_query_processor(
            StockBasicInfoQuery.pack_req, StockBasicInfoQuery.unpack_rsp)
        kargs = {"market": market, 'stock_type': stock_type}

        ret_code, msg, basic_info_list = query_processor(**kargs)
        if ret_code == RET_ERROR:
            return ret_code, msg

        col_list = ['code', 'name', 'lot_size', 'stock_type']

        basic_info_table = pd.DataFrame(basic_info_list, columns=col_list)

        return RET_OK, basic_info_table

    def get_history_kline(self,
                          code,
                          start=None,
                          end=None,
                          ktype='K_DAY',
                          autype='qfq'):

        if start is not None and isinstance(start, str) is False:
            error_str = ERROR_STR_PREFIX + "the type of start param is wrong"
            return RET_ERROR, error_str

        if end is not None and isinstance(end, str) is False:
            error_str = ERROR_STR_PREFIX + "the type of end param is wrong"
            return RET_ERROR, error_str

        param_table = {'code': code, 'ktype': ktype, 'autype': autype}
        for x in param_table:
            param = param_table[x]
            if param is None or isinstance(param, str) is False:
                error_str = ERROR_STR_PREFIX + "the type of %s param is wrong" % x
                return RET_ERROR, error_str

        query_processor = self._get_sync_query_processor(
            HistoryKlineQuery.pack_req, HistoryKlineQuery.unpack_rsp)
        kargs = {
            "stock_str": code,
            "start_date": start,
            "end_date": end,
            "ktype": ktype,
            "autype": autype
        }

        ret_code, msg, kline_list = query_processor(**kargs)
        if ret_code == RET_ERROR:
            return ret_code, msg

        col_list = [
            'code', 'time_key', 'open', 'close', 'high', 'low', 'volume',
            'turnover'
        ]
        kline_frame_table = pd.DataFrame(kline_list, columns=col_list)

        return RET_OK, kline_frame_table

    def get_autype_list(self, code_list):

        if code_list is None or isinstance(code_list, list) is False:
            error_str = ERROR_STR_PREFIX + "the type of code_list param is wrong"
            return RET_ERROR, error_str

        for code in code_list:
            if code is None or isinstance(code, str) is False:
                error_str = ERROR_STR_PREFIX + "the type of param in code_list is wrong"
                return RET_ERROR, error_str

        query_processor = self._get_sync_query_processor(
            ExrightQuery.pack_req, ExrightQuery.unpack_rsp)
        kargs = {"stock_list": code_list}
        ret_code, msg, exr_record = query_processor(**kargs)
        if ret_code == RET_ERROR:
            return ret_code, msg

        col_list = [
            'code', 'ex_div_date', 'split_ratio', 'per_cash_div',
            'per_share_div_ratio', 'per_share_trans_ratio', 'allotment_ratio',
            'allotment_price', 'stk_spo_ratio', 'stk_spo_price',
            'forward_adj_factorA', 'forward_adj_factorB',
            'backward_adj_factorA', 'backward_adj_factorB'
        ]

        exr_frame_table = pd.DataFrame(exr_record, columns=col_list)

        return RET_OK, exr_frame_table

    def get_market_snapshot(self, code_list):
        if code_list is None or isinstance(code_list, list) is False:
            error_str = ERROR_STR_PREFIX + "the type of code_list param is wrong"
            return RET_ERROR, error_str

        for code in code_list:
            if code is None or isinstance(code, str) is False:
                error_str = ERROR_STR_PREFIX + "the type of param in code_list is wrong"
                return RET_ERROR, error_str

        query_processor = self._get_sync_query_processor(
            MarketSnapshotQuery.pack_req, MarketSnapshotQuery.unpack_rsp)
        kargs = {"stock_list": code_list}

        ret_code, msg, snapshot_list = query_processor(**kargs)
        if ret_code == RET_ERROR:
            return ret_code, msg

        col_list = [
            'code', 'data_date', 'data_time', 'last_price', 'open_price',
            'high_price', 'low_price', 'prev_close_price', 'volume',
            'turnover', 'turnover_rate', 'suspension', 'listing_date'
        ]

        snapshot_frame_table = pd.DataFrame(snapshot_list, columns=col_list)

        return RET_OK, snapshot_frame_table

    def subscribe(self, stock_code, data_type, push=False):
        """
        subscribe to a type of data for a stock
        :param stock_code: string stock code. For instance, "HK.00700", "US.AAPL"
        :param data_type: string data type. For instance, "K_1M", "K_MON"
        :param push: push option
        :return: (ret_code, ret_data). ret_code: RET_OK or RET_ERROR.
        """
        param_table = {'stock_code': stock_code, 'data_type': data_type}
        for x in param_table:
            param = param_table[x]
            if param is None or isinstance(param, str) is False:
                error_str = ERROR_STR_PREFIX + "the type of %s param is wrong" % x
                return RET_ERROR, error_str

        query_processor = self._get_sync_query_processor(
            SubscriptionQuery.pack_subscribe_req,
            SubscriptionQuery.unpack_subscribe_rsp)

        # the keys of kargs should be corresponding to the actual function arguments
        kargs = {'stock_str': stock_code, 'data_type': data_type}
        ret_code, msg, _ = query_processor(**kargs)

        if ret_code != RET_OK:
            return RET_ERROR, msg

        if push:
            ret_code, msg, push_req_str = SubscriptionQuery.pack_push_req(
                stock_code, data_type)

            if ret_code != RET_OK:
                return RET_ERROR, msg

            ret_code, msg = self._send_async_req(push_req_str)
            if ret_code != RET_OK:
                return RET_ERROR, msg

        return RET_OK, None

    def unsubscribe(self, stock_code, data_type):
        """
        unsubscribe from a type of data for a stock
        :param stock_code: string stock code. For instance, "HK.00700", "US.AAPL"
        :param data_type: string data type. For instance, "K_1M", "K_MON"
        :return: (ret_code, ret_data). ret_code: RET_OK or RET_ERROR.
        """

        param_table = {'stock_code': stock_code, 'data_type': data_type}
        for x in param_table:
            param = param_table[x]
            if param is None or isinstance(param, str) is False:
                error_str = ERROR_STR_PREFIX + "the type of %s param is wrong" % x
                return RET_ERROR, error_str

        query_processor = self._get_sync_query_processor(
            SubscriptionQuery.pack_unsubscribe_req,
            SubscriptionQuery.unpack_unsubscribe_rsp)
        # the keys of kargs should be corresponding to the actual function arguments
        kargs = {'stock_str': stock_code, 'data_type': data_type}

        ret_code, msg, _ = query_processor(**kargs)

        if ret_code != RET_OK:
            return RET_ERROR, msg

        return RET_OK, None

    def query_subscription(self):
        """
        get the current subscription table
        :return:
        """
        query_processor = self._get_sync_query_processor(
            SubscriptionQuery.pack_subscription_query_req,
            SubscriptionQuery.unpack_subscription_query_rsp)

        ret_code, msg, subscription_table = query_processor()
        if ret_code == RET_ERROR:
            return ret_code, msg

        return RET_OK, subscription_table

    def get_stock_quote(self, code_list):
        """
        :param code_list: list of stock codes, e.g. ["HK.00700"]
        :return: (ret_code, DataFrame of quote data)

        Usage:

        After subscribing to the "QUOTE" data type for the given stock codes,
        invoke get_stock_quote to obtain the data.
        """
        if code_list is None or isinstance(code_list, list) is False:
            error_str = ERROR_STR_PREFIX + "the type of code_list param is wrong"
            return RET_ERROR, error_str

        for code in code_list:
            if code is None or isinstance(code, str) is False:
                error_str = ERROR_STR_PREFIX + "the type of param in code_list is wrong"
                return RET_ERROR, error_str

        query_processor = self._get_sync_query_processor(
            StockQuoteQuery.pack_req,
            StockQuoteQuery.unpack_rsp,
        )
        kargs = {"stock_list": code_list}

        ret_code, msg, quote_list = query_processor(**kargs)
        if ret_code == RET_ERROR:
            return ret_code, msg

        col_list = [
            'code', 'data_date', 'data_time', 'last_price', 'open_price',
            'high_price', 'low_price', 'prev_close_price', 'volume',
            'turnover', 'turnover_rate', 'amplitude', 'suspension',
            'listing_date'
        ]

        quote_frame_table = pd.DataFrame(quote_list, columns=col_list)

        return RET_OK, quote_frame_table

    def get_rt_ticker(self, code, num=500):
        """
        get transaction information
        :param code: stock code
        :param num: number of ticker records to request (default 500)
        :return: (ret_ok, ticker_frame_table)
        """

        if code is None or isinstance(code, str) is False:
            error_str = ERROR_STR_PREFIX + "the type of code param is wrong"
            return RET_ERROR, error_str

        if num is None or isinstance(num, int) is False:
            error_str = ERROR_STR_PREFIX + "the type of num param is wrong"
            return RET_ERROR, error_str

        query_processor = self._get_sync_query_processor(
            TickerQuery.pack_req,
            TickerQuery.unpack_rsp,
        )
        kargs = {"stock_str": code, "num": num}
        ret_code, msg, ticker_list = query_processor(**kargs)
        if ret_code == RET_ERROR:
            return ret_code, msg

        col_list = [
            'stock_code', 'time', 'price', 'volume', 'turnover',
            "ticker_direction", 'sequence'
        ]
        ticker_frame_table = pd.DataFrame(ticker_list, columns=col_list)

        return RET_OK, ticker_frame_table

    def get_cur_kline(self, code, num, ktype='K_DAY', autype='qfq'):
        """
        get current kline
        :param code: stock code
        :param num:
        :param ktype: the type of kline
        :param autype:
        :return:
        """
        param_table = {'code': code, 'ktype': ktype}
        for x in param_table:
            param = param_table[x]
            if param is None or isinstance(param, str) is False:
                error_str = ERROR_STR_PREFIX + "the type of %s param is wrong" % x
                return RET_ERROR, error_str

        if num is None or isinstance(num, int) is False:
            error_str = ERROR_STR_PREFIX + "the type of num param is wrong"
            return RET_ERROR, error_str

        if autype is not None and isinstance(autype, str) is False:
            error_str = ERROR_STR_PREFIX + "the type of autype param is wrong"
            return RET_ERROR, error_str

        query_processor = self._get_sync_query_processor(
            CurKlineQuery.pack_req,
            CurKlineQuery.unpack_rsp,
        )

        kargs = {
            "stock_str": code,
            "num": num,
            "ktype": ktype,
            "autype": autype
        }
        ret_code, msg, kline_list = query_processor(**kargs)
        if ret_code == RET_ERROR:
            return ret_code, msg

        col_list = [
            'code', 'time_key', 'open', 'close', 'high', 'low', 'volume',
            'turnover'
        ]
        kline_frame_table = pd.DataFrame(kline_list, columns=col_list)

        return RET_OK, kline_frame_table

    def get_order_book(self, code):
        """
        Get the current order book (bid/ask queue) for the given stock code.
        :param code: stock code
        :return: (RET_OK, order book data) on success, (RET_ERROR, error message) otherwise
        """
        if code is None or isinstance(code, str) is False:
            error_str = ERROR_STR_PREFIX + "the type of code param is wrong"
            return RET_ERROR, error_str

        query_processor = self._get_sync_query_processor(
            OrderBookQuery.pack_req,
            OrderBookQuery.unpack_rsp,
        )

        kargs = {"stock_str": code}
        ret_code, msg, orderbook = query_processor(**kargs)
        if ret_code == RET_ERROR:
            return ret_code, msg

        return RET_OK, orderbook
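A minimal usage sketch for the query methods above, assuming quote_ctx is an already-connected instance of this quote context class, that RET_OK is importable from the same package, and that a matching "QUOTE" subscription has been made beforehand; the stock code is only an illustrative placeholder.

ret, table = quote_ctx.query_subscription()
if ret == RET_OK:
    print(table)                      # current subscription table

ret, quotes = quote_ctx.get_stock_quote(['HK.00700'])   # placeholder stock code
if ret == RET_OK:
    print(quotes[['code', 'last_price', 'volume']])

ret, klines = quote_ctx.get_cur_kline('HK.00700', num=100, ktype='K_DAY', autype='qfq')
if ret == RET_OK:
    print(klines.head())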
Code Example #56
0
File: publisher.py Project: JulianMuenzberg/pynta
class Publisher:
    """ Publisher class in which the queue for publishing messages is defined and also a separated process is started.
    It is important to have a new process, since the serialization/deserialization of messages from the QUEUE may be
    a bottleneck for performance.
    """
    def __init__(self, port=5555):
        self.logger = get_logger(name=__name__)
        self._port = port
        self._queue = Queue(
        )  # The publisher will grab and broadcast the messages from this queue
        self._event = Event()  # This event is used to stop the process
        self._process = Process(target=publisher,
                                args=[self._queue, self._event, self._port])
        self.logger.info('Initialized publisher on port {}'.format(port))

    def start(self):
        """ Start a new process that will be responsible for broadcasting the messages.
        """
        self._event.clear()
        self._process.start()
        sleep(1)  # This forces the start to block until the publisher is ready

    def stop(self):
        self._event.set()
        self.empty_queue()

    def empty_queue(self):
        """ If the publisher stops before broadcasting all the messages, the Queue may still be using some memory.
        This method simply gets all the remaining elements in order to free that memory, which can be useful for
        garbage collection or for better control of the downstream program.
        """
        self.logger.info('Emptying the queue of the publisher')
        self.logger.debug('Queue length: {}'.format(self._queue.qsize()))
        # Drain any leftover items before closing, as described in the docstring
        while not self._queue.empty():
            self._queue.get()
        self._queue.close()

    def publish(self, topic, data):
        """ Adapts the data to make it faster to broadcast

        :param str topic: Topic in which to publish the data
        :param data: Data to be published
        :return: None
        """
        self.logger.debug('Adding data of type {} to topic {}'.format(
            type(data), topic))
        try:
            self._queue.put({'topic': topic, 'data': data})
        except AssertionError:
            # This means the queue has been closed already
            pass

    @property
    def port(self):
        return self._port

    @port.setter
    def port(self, new_port):
        if new_port != self._port:
            self._port = new_port
            self.logger.warning(
                'Changing the port requires restarting the publisher process')
            self.logger.debug(
                'Setting the new publisher port to {}'.format(new_port))
            self.stop()
            self._process.join()
            self._process = Process(
                target=publisher, args=[self._queue, self._event, self._port])
            self.start()
        else:
            self.logger.warning(
                'New port {} is the same as the old port'.format(new_port))

    def join(self, timeout=0):
        if self._process.is_alive():
            self.logger.debug('Waiting for Publisher process to finish')
            self._process.join(timeout)
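A minimal usage sketch for the Publisher above, assuming the publisher() process target and get_logger() referenced in __init__ are importable from the same module; the topic name and payload are placeholders.

if __name__ == '__main__':
    pub = Publisher(port=5556)
    pub.start()
    for i in range(10):
        # any picklable payload works, since it travels through a multiprocessing Queue
        pub.publish('frames', {'index': i})
    pub.stop()
    pub.join(timeout=5)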
Code Example #57
0
def main(args):
    number = ''
    _img_count = 0
    dsf = os.path.dirname(os.path.realpath(__file__))
    js_path = os.path.join(dsf, args.config_file)
    json_file = open(js_path).read()
    js = json.loads(json_file)
    config = js[str(args.config)]
    x1, x2, y1, y2 = config[
        'region']  # (125, 950, 100, 620)#src='http:///mjpg/video.mjpg',
    cap = video_capture(config['videosource'], config['width'],
                        config['height'])
    if cap is None:
        return
    server = config['server']
    port = config['port']
    qo = Queue()
    qi = Queue()
    p = Process(target=ocr, args=(qo, qi))
    p.start()
    atexit.register(ocr_kill, p)
    md = motiondetect.MotionDetect()
    while True:
        try:
            ret, image = cap.read()
            if image is None:
                cap.release()
                cap = video_capture(config['videosource'], config['width'],
                                    config['height'])
                time.sleep(2)
                continue
            img = image[y1:y2, x1:x2]
            img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            cv2.rectangle(image, (0, 0), (image.shape[1], 50), (255, 255, 255),
                          cv2.FILLED)
            cv2.rectangle(image, (x1, y1), (x2, y2), (0, 0, 255), 2)
            font = cv2.FONT_HERSHEY_SIMPLEX
            evc_l, delta, count = md.evc_detect(img.copy())
            cv2.putText(image,
                        str(evc_l) + ' ' + str(delta) + ' ' + str(count),
                        (620, 200), font, 1, (255, 100, 0), 2, cv2.LINE_AA)
            if evc_l >= 3.0 and _img_count < 10:
                _img_count += 1
                cv2.rectangle(image, (x1, y1), (x2, y2), (0, 255, 0), 2)
                qi.put(img)
            if qo.qsize() > 0:
                ocr_data = qo.get()
                _img_count -= 1
                if ocr_data[0]:
                    number = ocr_data[2] + '&' + ocr_data[1].strftime(
                        '%d.%m.%Y %H:%M:%S')
                    cv2.rectangle(image, (0, 0), (image.shape[1], 50),
                                  (0, 255, 0), 2)
                    t = Thread(target=tcp_client,
                               args=(server, port, str(args.config) + '&' +
                                     number + '&' + ocr_data[3] + '&'))
                    t.start()
                else:
                    cv2.rectangle(image, (0, 0), (image.shape[1], 50),
                                  (0, 0, 255), 2)
            cv2.putText(image, number, (10, 35), font, 1, (255, 100, 0), 2,
                        cv2.LINE_AA)
            # #image[y1:y2, x1:x2] = imgc
            cv2.imshow('Video', image)
            if cv2.waitKey(1) == 27:
                cap.release()
                cv2.destroyAllWindows()
                exit(0)
            if not p.is_alive():
                p = Process(target=ocr, args=(qo, qi))
                p.start()
        except:
            logging.exception('')
            cap.release()
            cv2.destroyAllWindows()
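The ocr() process target is not shown in this listing; the following is a hedged sketch of the request/response queue pattern the main loop relies on, with the recognition step replaced by a stub.

from datetime import datetime
from multiprocessing import Process, Queue


def fake_recognize(frame):
    # Stand-in for the real OCR call; always reports that no plate was found.
    return ''


def ocr_worker(qo, qi):
    while True:
        frame = qi.get()                       # blocks until the main loop sends a cropped image
        text = fake_recognize(frame)
        # Same 4-tuple shape the main loop unpacks: (found, timestamp, text, extra)
        qo.put((bool(text), datetime.now(), text, ''))


if __name__ == '__main__':
    qo, qi = Queue(), Queue()
    p = Process(target=ocr_worker, args=(qo, qi), daemon=True)
    p.start()
    qi.put(b'fake-frame-bytes')                # the real loop sends a grayscale crop
    print(qo.get())                            # -> (False, <timestamp>, '', '')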
Code Example #58
0
class KittiLoader(object):

    # return:
    # tag (N)
    # label (N) (N')
    # rgb (N, H, W, C)
    # raw_lidar (N) (N', 4)
    # vox_feature
    # vox_number
    # vox_coordinate

    def __init__(self,
                 object_dir='.',
                 queue_size=20,
                 require_shuffle=False,
                 is_testset=True,
                 batch_size=1,
                 use_multi_process_num=0,
                 split_file='',
                 multi_gpu_sum=1,
                 aug=False):
        assert (use_multi_process_num >= 0)
        self.object_dir = object_dir
        self.is_testset = is_testset
        self.use_multi_process_num = use_multi_process_num if not self.is_testset else 1
        self.require_shuffle = require_shuffle if not self.is_testset else False
        self.batch_size = batch_size
        self.split_file = split_file
        self.multi_gpu_sum = multi_gpu_sum
        self.aug = aug

        if self.split_file != '':
            # use split file
            _tag = []
            self.f_rgb, self.f_lidar, self.f_label = [], [], []
            for line in open(self.split_file, 'r').readlines():
                line = line[:-1]  # remove '\n'
                _tag.append(line)
                self.f_rgb.append(
                    os.path.join(self.object_dir, 'image_2', line + '.png'))
                self.f_lidar.append(
                    os.path.join(self.object_dir, 'velodyne', line + '.bin'))
                self.f_label.append(
                    os.path.join(self.object_dir, 'label_2', line + '.txt'))
        else:
            self.f_rgb = glob.glob(
                os.path.join(self.object_dir, 'image_2', '*.png'))
            self.f_rgb.sort()
            self.f_lidar = glob.glob(
                os.path.join(self.object_dir, 'velodyne', '*.bin'))
            self.f_lidar.sort()
            self.f_label = glob.glob(
                os.path.join(self.object_dir, 'label_2', '*.txt'))
            self.f_label.sort()

        self.data_tag = [
            name.split('/')[-1].split('.')[-2] for name in self.f_rgb
        ]
        assert (len(self.data_tag) == len(self.f_rgb) == len(self.f_lidar))
        self.dataset_size = len(self.f_rgb)
        self.already_extract_data = 0
        self.cur_frame_info = ''

        print("Dataset total length: {}".format(self.dataset_size))
        if self.require_shuffle:
            self.shuffle_dataset()

        self.queue_size = queue_size
        self.require_shuffle = require_shuffle
        # must use the queue provided by the multiprocessing module (only this can be shared)
        self.dataset_queue = Queue()

        self.load_index = 0
        if self.use_multi_process_num == 0:
            self.loader_worker = [
                threading.Thread(target=self.loader_worker_main,
                                 args=(self.batch_size, ))
            ]
        else:
            self.loader_worker = [
                Process(target=self.loader_worker_main,
                        args=(self.batch_size, ))
                for i in range(self.use_multi_process_num)
            ]
        self.work_exit = Value('i', 0)
        [i.start() for i in self.loader_worker]

        # This operation is not thread-safe
        self.rgb_shape = (cfg.IMAGE_HEIGHT, cfg.IMAGE_WIDTH, 3)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.work_exit.value = True

    def __len__(self):
        return self.dataset_size

    def fill_queue(self, batch_size=0):
        load_index = self.load_index
        self.load_index += batch_size
        if self.load_index >= self.dataset_size:
            if not self.is_testset:  # test set just end
                if self.require_shuffle:
                    self.shuffle_dataset()
                load_index = 0
                self.load_index = load_index + batch_size
            else:
                self.work_exit.value = True

        labels, tag, voxel, rgb, raw_lidar = [], [], [], [], []
        for _ in range(batch_size):
            try:
                if self.aug:
                    ret = aug_data(self.data_tag[load_index], self.object_dir)
                    tag.append(ret[0])
                    rgb.append(ret[1])
                    raw_lidar.append(ret[2])
                    voxel.append(ret[3])
                    labels.append(ret[4])
                else:
                    rgb.append(
                        cv2.resize(cv2.imread(self.f_rgb[load_index]),
                                   (cfg.IMAGE_WIDTH, cfg.IMAGE_HEIGHT)))
                    raw_lidar.append(
                        np.fromfile(self.f_lidar[load_index],
                                    dtype=np.float32).reshape((-1, 4)))
                    if not self.is_testset:
                        labels.append([
                            line for line in open(self.f_label[load_index],
                                                  'r').readlines()
                        ])
                    else:
                        labels.append([''])
                    tag.append(self.data_tag[load_index])

                    ######################################################
                    # TODO:
                    # we need to change the preprocessing of point cloud for point fusion
                    voxel.append(process_pointcloud(raw_lidar[-1]))

                load_index += 1
            except:
                if not self.is_testset:  # test set just end
                    self.load_index = 0
                    if self.require_shuffle:
                        self.shuffle_dataset()
                else:
                    self.work_exit.value = True

        # only for voxel -> [gpu, k_single_batch, ...]
        vox_feature, vox_number, vox_coordinate = [], [], []
        single_batch_size = int(self.batch_size / self.multi_gpu_sum)
        for idx in range(self.multi_gpu_sum):
            _, per_vox_feature, per_vox_number, per_vox_coordinate = build_input(
                voxel[idx * single_batch_size:(idx + 1) * single_batch_size])
            vox_feature.append(per_vox_feature)
            vox_number.append(per_vox_number)
            vox_coordinate.append(per_vox_coordinate)

        self.dataset_queue.put_nowait(
            (labels, (vox_feature, vox_number, vox_coordinate), rgb, raw_lidar,
             tag))

    def load(self):
        try:
            if self.is_testset and self.already_extract_data >= self.dataset_size:
                return None

            buff = self.dataset_queue.get()
            label = buff[0]
            vox_feature = buff[1][0]
            vox_number = buff[1][1]
            vox_coordinate = buff[1][2]
            rgb = buff[2]
            raw_lidar = buff[3]
            tag = buff[4]
            self.cur_frame_info = buff[4]

            self.already_extract_data += self.batch_size

            ret = (np.array(tag), np.array(label), np.array(vox_feature),
                   np.array(vox_number), np.array(vox_coordinate),
                   np.array(rgb), np.array(raw_lidar))
        except:
            print("Dataset empty!")
            ret = None
        return ret

    def load_specified(self, index=0):
        rgb = cv2.resize(cv2.imread(self.f_rgb[index]),
                         (cfg.IMAGE_WIDTH, cfg.IMAGE_HEIGHT))
        raw_lidar = np.fromfile(self.f_lidar[index], dtype=np.float32).reshape(
            (-1, 4))
        labels = [line for line in open(self.f_label[index], 'r').readlines()]
        tag = self.data_tag[index]

        if self.is_testset:
            ret = (
                np.array([tag]),
                np.array([rgb]),
                np.array([raw_lidar]),
            )
        else:
            ret = (
                np.array([tag]),
                np.array([labels]),
                np.array([rgb]),
                np.array([raw_lidar]),
            )
        return ret

    def loader_worker_main(self, batch_size):
        if self.require_shuffle:
            self.shuffle_dataset()
        while not self.work_exit.value:
            if self.dataset_queue.qsize() >= self.queue_size // 2:
                time.sleep(1)
            else:
                # since we use multiprocessing, 1 is ok
                self.fill_queue(batch_size)

    def get_shape(self):
        return self.rgb_shape

    def shuffle_dataset(self):
        # to prevent diff loader load same data
        index = shuffle([i for i in range(len(self.f_label))],
                        random_state=random.randint(
                            0, self.use_multi_process_num**5))
        self.f_label = [self.f_label[i] for i in index]
        self.f_rgb = [self.f_rgb[i] for i in index]
        self.f_lidar = [self.f_lidar[i] for i in index]
        self.data_tag = [self.data_tag[i] for i in index]

    def get_frame_info(self):
        return self.cur_frame_info
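A distilled sketch of the prefetch pattern in loader_worker_main above: worker processes keep a shared queue roughly half full and back off when enough batches are buffered. The batch producer here is a placeholder for the image/lidar/label loading done by the real class.

import time
from multiprocessing import Process, Queue, Value

QUEUE_SIZE = 20


def make_batch():
    # Placeholder batch producer; the real loader reads images, point clouds and labels here.
    return [0] * 4


def prefetch_worker(q, work_exit):
    while not work_exit.value:
        # qsize() mirrors the loader above (note: not implemented on macOS)
        if q.qsize() >= QUEUE_SIZE // 2:
            time.sleep(1)              # enough data buffered; let consumers catch up
        else:
            q.put(make_batch())        # otherwise produce one more batch


if __name__ == '__main__':
    q = Queue()
    work_exit = Value('i', 0)
    workers = [Process(target=prefetch_worker, args=(q, work_exit)) for _ in range(2)]
    [w.start() for w in workers]
    print(q.get())                     # consumer side, mirrors KittiLoader.load()
    work_exit.value = 1
    [w.join() for w in workers]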
Code Example #59
0
class DemoGui(QtWidgets.QWidget):
    def __init__(self):
        super().__init__()

        self._layout = QtWidgets.QVBoxLayout()
        self._number1 = QtWidgets.QLineEdit('5')
        self._number1.setToolTip('Number of Spheres per job')
        self._number2 = QtWidgets.QLineEdit('4')
        self._number2.setToolTip('Number of jobs to generate')
        self._opacity = QtWidgets.QSlider()
        self._opacity.setToolTip(
            'Opacity of Spheres. Evaluated just before render, after polydata has returned from'
            ' the worker. Renderer gets pretty bogged down with opacity and 1000+ spheres.'
        )
        self._opacity.setMinimum(0)
        self._opacity.setMaximum(100)
        self._opacity.setValue(100)
        self._send_button = QtWidgets.QPushButton('send')
        self._stop_button = QtWidgets.QPushButton('stop worker')
        self._clear_button = QtWidgets.QPushButton('clear outputs')

        self._textbox = QtWidgets.QTextEdit()
        self._vtk_plot = VTKPlot()

        self._inputs_queue = Queue()
        self._outputs_queue = Queue()

        self._worker_process1 = Process(target=worker,
                                        args=(self._inputs_queue,
                                              self._outputs_queue, 1))
        self._worker_process2 = Process(target=worker,
                                        args=(self._inputs_queue,
                                              self._outputs_queue, 2))
        self._worker_process3 = Process(target=worker,
                                        args=(self._inputs_queue,
                                              self._outputs_queue, 3))
        self._worker_process4 = Process(target=worker,
                                        args=(self._inputs_queue,
                                              self._outputs_queue, 4))
        self._worker_process1.start()
        self._worker_process2.start()
        self._worker_process3.start()
        self._worker_process4.start()

        self._timer = QtCore.QTimer()
        self._timer.setInterval(100)
        self._timer.timeout.connect(self.display_results)
        self._timer.start()

        self._send_button.clicked.connect(self.send_numbers)
        self._stop_button.clicked.connect(self.stop)
        self._clear_button.clicked.connect(self.clear)

        self._layout.addWidget(self._number1)
        self._layout.addWidget(self._number2)
        self._layout.addWidget(self._opacity)
        self._layout.addWidget(self._send_button)
        self._layout.addWidget(self._stop_button)
        self._layout.addWidget(self._clear_button)
        self._layout.addWidget(self._textbox)
        self._layout.addWidget(self._vtk_plot)
        self.setLayout(self._layout)

        self._time_dict = {}

    def closeEvent(self, event):
        self.stop()

    def clear(self, *args):
        self._textbox.clear()
        self._vtk_plot.ren.Clear()
        for actor in self._vtk_plot.ren.GetActors():
            self._vtk_plot.ren.RemoveActor(actor)

        self._vtk_plot.ren.ResetCamera()
        self._vtk_plot.ren.Render()
        self._vtk_plot.renwin.Render()

    def stop(self, *args):
        print('sending stop')
        for _ in range(4):
            self._inputs_queue.put('STOP')

    def send_numbers(self, *args):
        print('sending numbers')

        try:
            num1 = float(self._number1.text())
            num2 = float(self._number2.text())
        except Exception as e:
            import traceback
            traceback.print_exc()
            return

        meta = {'t1': time()}
        for _ in range(int(num2)):
            self._inputs_queue.put((num1, num2, meta))

    def display_results(self):
        if not self._outputs_queue.empty():
            # Possibly unnecessary, but blocking the timer avoids having to reason about what happens
            # if it fires again while this method is still in progress.
            self._timer.blockSignals(True)
            t0 = time()
            n = self._outputs_queue.qsize()
            # This is a compromise: draining the queue until it is completely empty could lock up the GUI,
            #  because the workers may keep adding results while this loop is running.
            # With no loop at all, items would only be read once per timer tick.
            # If the worker put each sphere in the queue individually, each sphere's appearance would be
            #  animated, which is much slower because Render() is slow; however, the workers currently append
            #  all spheres of a job into a single AppendPolyData.
            for _ in range(n):
                # print('Displaying results')
                r_message = self._outputs_queue.get()
                result, meta = r_message
                t1 = meta['t1']
                t_work = meta['t_work']
                t3 = time()
                dt = t3 - t1
                t_overhead = dt - t_work
                proc_id = meta['proc_id']
                # self._textbox.append(str(result))
                self._textbox.append(
                    f'Took {dt:1.6f} s total. {t_work:1.6f} working. {t_overhead:1.6f} overhead. Proc: {proc_id}'
                )
                self.plot_shape(result, proc_id)
            t1 = time()
            self._timer.blockSignals(False)
            print(f'Output Processing Time: {t1-t0:1.3f}s')
            self._vtk_plot.ren.ResetCamera()
            self._vtk_plot.ren.Render()
            self._vtk_plot.renwin.Render()

    def plot_shape(self, shape_data, proc_id):

        t0 = time()
        reader = vtk.vtkPolyDataReader()
        reader.ReadFromInputStringOn()
        reader.SetInputString(shape_data)
        reader.Update()
        shape_pd = reader.GetOutput()

        mapper = vtk.vtkPolyDataMapper()
        mapper.SetInputData(shape_pd)
        actor = vtk.vtkActor()
        actor.SetMapper(mapper)
        color = {
            1: (1, 0, 0),
            2: (0, 1, 0),
            3: (0, 0, 1),
            4: (0, 1, 1)
        }[proc_id]
        actor.GetProperty().SetDiffuseColor(color)
        actor.GetProperty().SetOpacity(self._opacity.value() / 100)

        self._vtk_plot.ren.AddActor(actor)

        t1 = time()
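A distilled sketch of the bounded-drain idea in display_results above: each tick handles at most the number of items that were already queued when the tick started, so workers that keep producing cannot starve the GUI loop.

import time
from multiprocessing import Queue
from queue import Empty


def drain_once(q, handle):
    n = q.qsize()                  # snapshot; anything added later waits for the next tick
    for _ in range(n):
        try:
            handle(q.get_nowait())
        except Empty:              # the feeder thread may not have flushed everything yet
            break


if __name__ == '__main__':
    q = Queue()
    for i in range(3):
        q.put(i)
    time.sleep(0.2)                # give the queue's feeder thread time to flush
    drain_once(q, print)           # typically prints 0, 1 and 2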
Code Example #60
0
def run(args):
    start = datetime.now()
    #print args
    #==#
    verbose = args.verbose
    force = args.force
    #==#
    inputFile = args.input
    breakPointsFile = args.breakpoints
    outputFile = args.output
    numberOfBands = int(args.bands)
    #==#
    rows = int(args.rows)
    numberOfThreads = int(args.threads)
    #==#
    print "=================================="
    print "#==#"
    print "CyberGIS Script / cybergis-script-ittc-stetch.py"
    print "Apply stetch to raster image(s)."
    print "#==#"
    #==#
    if numberOfBands != 1 and numberOfBands != 3:
        print "The number of bands specified is not expected."
        return 0
    if rows <= 0:
        print "You to process at least 1 row at a time."
        return 0
    if numberOfThreads <= 0:
        print "You need at least 1 thread."
        return 0
    if not os.path.exists(inputFile):
        print "Input file does not exist."
        return 0
    if not os.path.exists(breakPointsFile):
        print "Breakpoints file does not exist."
        return 0
    if force == 0 and os.path.exists(outputFile):
        print "Output file already exists."
        return 0
    #==#
    #Load input image file and breakpoints file
    inputDataset = gdal.Open(inputFile, GA_ReadOnly)
    lookUpTables = LookUpTables(breakPointsFile, numberOfBands)
    if inputDataset is None:
        print "Error opening input image file."
        return 0
    if not lookUpTables.isValid():
        print "Error building lookup tables.  Are you sure the breakpoints file matches the right number of bands specified?"
        return 0
    #==#
    #Create output file
    outputFormat = "HFA"
    w = inputDataset.RasterXSize
    h = inputDataset.RasterYSize
    if force > 0 and os.path.isfile(outputFile):
        if verbose > 0:
            print "Deleting existing output file at ", outputFile
        os.remove(outputFile)
    outputDataset = initDataset(outputFile, outputFormat, w, h, numberOfBands)
    outputDataset.SetGeoTransform(list(inputDataset.GetGeoTransform()))
    outputDataset.SetProjection(inputDataset.GetProjection())
    #==#
    #Core Process
    if numberOfThreads == 1:
        for b in range(numberOfBands):
            print "Stretching Band " + str(b + 1)
            lut = numpy.array(lookUpTables.tables[b].table)
            if verbose > 0:
                print "Lookup Table (lut): "
                print lut
            inBand = inputDataset.GetRasterBand(b + 1)
            outBand = outputDataset.GetRasterBand(b + 1)

            r = rows

            if verbose > 0:
                print "Processing", r, "rows at a time."
                print "Processing", int(inBand.YSize / r), "batches of rows."

            for y in range(int(inBand.YSize / r)):
                outBand.WriteArray(
                    lut[inBand.ReadAsArray(0, y * r, inBand.XSize, r,
                                           inBand.XSize, r)], 0, y * r)

            if verbose > 0:
                print "Processing", (inBand.YSize % r), "residual rows."

            y0 = int(inBand.YSize / r) * r  # first row not covered by the full batches above
            for y in range(inBand.YSize % r):
                outBand.WriteArray(
                    lut[inBand.ReadAsArray(0, y0 + y, inBand.XSize, 1,
                                           inBand.XSize, 1)], 0, y0 + y)

    elif numberOfThreads > 1:
        print "not fully implemented yet"
        global exitFlag
        global queueLock
        global writeLock
        global workQueue
        global tasks
        #==#
        exitFlag = 0
        queueLock = Lock()
        writeLock = Lock()
        workQueue = Queue(0)
        tasks = Tasks()
        #==#
        r = rows  # number of rows per full batch
        for b in range(numberOfBands):
            print "Adding tasks for band " + str(b + 1)
            lut = numpy.array(lookUpTables.tables[b].table)
            inBand = inputDataset.GetRasterBand(b + 1)
            outBand = outputDataset.GetRasterBand(b + 1)
            y0 = int(inBand.YSize / r)
            for y in range(int(inBand.YSize / r)):
                task = b + 1, inBand, outBand, lut, y0, y, r, 1
                tasks.add(task)
            for y in range(inBand.YSize % r):
                task = b + 1, inBand, outBand, lut, y0, y, r, 2
                tasks.add(task)

            print "tasks in main queue: " + str(len(tasks.tasks))
            #Add tasks to queue
            queueLock.acquire()
            for taskID in range(len(tasks.tasks)):
                workQueue.put(taskID)
            queueLock.release()

            processes = initProcesses(numberOfThreads)

            print "Queue is full with " + str(workQueue.qsize()) + " tasks."
            print "Rendering threads will now execute."
            while not workQueue.empty():
                pass

            exitFlag = 1  # tells threads it's time to quit

            for process in processes:
                process.join()
    #==#
    #Post Process
    inputDataset = None
    outputDataset = None
    print datetime.now() - start
    print "=================================="
    return 0